repo
string
commit
string
message
string
diff
string
mitechie/pyvim
b61997dc60f15f0aa9b6244264f4ef85aae28b69
mousehide doesn't work for out for me
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index 430b63b..b759a4e 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,8 +1,11 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =6 +let g:netrw_dirhist_cnt =9 let g:netrw_dirhist_1='/home/rharding/src/wadllib/datetime-924240/src/wadllib/tests/data' let g:netrw_dirhist_2='/home/rharding/configs/pyvim' let g:netrw_dirhist_3='/home/rharding/src/shownotes/shownotes' let g:netrw_dirhist_4='/home/rharding/launchpad/lp-sourcedeps/eggs/lazr.lifecycle-1.1-py2.6.egg/lazr/lifecycle' let g:netrw_dirhist_5='/home/rharding/launchpad/lp-branches/email_notice_959482' let g:netrw_dirhist_6='/home/rharding/launchpad/lp-branches/email_notice_959482/lib' +let g:netrw_dirhist_7='/home/rharding/.pip' +let g:netrw_dirhist_8='/home/rharding/src/wadllib/wadllib' +let g:netrw_dirhist_9='/home/rharding/src/docs/minime' diff --git a/.vimrc b/.vimrc index 4f862fb..1500107 100644 --- a/.vimrc +++ b/.vimrc @@ -1,619 +1,619 @@ " http://github.com/mitechie/pyvim " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,V - load .vimrc " ,VV - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,q - reformat text paragraph " ,s - toggle spellcheck " ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " ,t - collapse/fold html tag " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the 
highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ " bootstrap the pathogen part of the config right away filetype off call pathogen#runtime_append_all_bundles() call pathogen#helptags() " Highlight end of line whitespace. 
" match WhitespaceEOL /\s\+$/ au InsertEnter * match WhitespaceEOL /\s\+$/ au InsertLeave * match WhitespaceEOL /\s\+$/ " make sure our whitespace matching is setup before we do colorscheme tricks autocmd ColorScheme * highlight WhitespaceEOL ctermbg=red guibg=red " now proceed as usual syntax on " syntax highlighing filetype on " try to detect filetypes filetype plugin indent on " enable loading indent file for filetype " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font " set guifont=Envy\ Code\ R\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= - set mousehide + set nomousehide " colorscheme lucius " colorscheme twilight " colorscheme aldmeris " colorscheme solarized colorscheme void " To set the toolbars off (icons on top of the screen) set guioptions-=T " Try to keep backups across sessions set undodir=~/.vim/backups set undofile else set background=dark " adapt colors for dark background set t_Co=256 colorscheme lucius colorscheme twilight endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=78 " Try this out to see how textwidth helps set ch=1 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set 
ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... set statusline=%F%m%r%h%w\ [FORMAT=%{&ff}]\ [TYPE=%Y]\ [ASCII=\%03.3b]\ [HEX=\%02.2B]\ [POS=%04l,%04v][%p%%]\ [LEN=%L] hi StatusLine guifg=#fcf4ba guibg=#333333 hi StatusLineNC guifg=#808080 guibg=#333333 " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " auto save when focus is lost au FocusLost * :wa " run the current file with F5 map <F5> <Esc>:w<CR>:!%:p<CR> " ================================================== " Config Specific Settings " ================================================== " If we're running in vimdiff then tweak out settings a bit if &diff set nospell endif " ================================================== " Basic Maps " ================================================== " " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " Map ;; to swap out the file with the previous one nnoremap ;; <c-^> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,VV brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>V :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>VV :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR>:cw<CR> " quick 
insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " setup a custom dict for spelling " zg = add word to dict " zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> nmap <leader>l :lopen<CR> nmap <leader>ll :lclose<CR> nmap <leader>ln :lN<CR> nmap <leader>lp :lN<CR> " for when we forget to use sudo to open/edit a file cmap w!! w !sudo tee % >/dev/null nnoremap <leader>q gqap " Scroll the viewport 3 lines vs just 1 line at a time nnoremap <C-e> 3<C-e> nnoremap <C-y> 3<C-y> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. 
set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching set smartcase " if searching and search contains upper case, make case sensitive search nmap <silent> <C-N> :silent noh<CR> " Search for potentially strange non-ascii characters map <leader>u :match Error /[\x7f-\xff]/<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t set completeopt+=menuone,longest " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on " autocmd BufEnter * lcd %:p:h " Trying out this trick to get cwd tricks cnoremap %% <C-R>=expand('%:h').'/'<cr> map <leader>e :edit %% map <leader>v :view %% " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufReadPost quickfix map <buffer> <silent> <CR> :.cc <CR> :ccl au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" " au BufRead *.py compiler nose " au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m au BufRead *.py set tags=tags-py;/ " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au FileType javascript set errorformat=%f:\ line\ %l\\,\ col\ %c\\,\ %m au FileType javascript set makeprg=jshint\ % au FileType javascript set tags=tags-js;/ autocmd BufRead,BufNewFile *.json set filetype=json command Js silent %!jp command Jc silent %!jcompress autocmd FileType json Js " ================================================== " CSS " ================================================== " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " enable html tag folding with ,f nnoremap <leader>f Vatzf " ================================================== " GoLang " ================================================== " Highlight word and 'K' to get GoDoc output for word. 
au BufRead,BufNewFile *.go set filetype=go " ,m will run gomake au BufRead *.go set makeprg=gomake " ,M will run gofmt on the code to lint it autocmd FileType go map <buffer> <leader>M :Fmt<CR>:cw<CR> " ================================================== " Git Tricks " ================================================== " Show the diff in the preview window of the commit during git commit autocmd FileType gitcommit DiffGitCached | wincmd p " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " mako.vim " http://www.vim.org/scripts/script.php?script_id=2663 " syntax support for mako code " ================================================== " Plugins " ================================================== " CtrlP " https://github.com/kien/ctrlp.vim let g:ctrlp_working_path_mode = 0 let g:ctrlp_custom_ignore = { \ 'dir': '\.git$\|\.hg$\|\.svn\|\.bzr$\|develop-eggs$\|site-packages', \ 'file': '\.pyc$|\.exe$\|\.so$\|\.dll$\|\.swp$', \ 'link': 'download-cache|eggs|yui', \ } map <leader>gt :CtrlP templates/<cr> map <leader>gj :CtrlP static/js/<cr> " Fugative " https://github.com/tpope/vim-fugitive " " Commands: " Gedit " Gsplit " Gvsplit " GStatus " Gblame " Gmove " Gremove " Ggrep " Gwrite " Gbrowse " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>a :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 autocmd FileType python map <buffer> <leader>M :call Pep8()<CR>:cw<CR> " python folding jpythonfold.vim 
" http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp let g:SuperTabDefaultCompletionType = "context" let g:SuperTabContextDefaultCompletionType = "<c-n>" let g:SuperTabLongestHighlight = 1 let g:SuperTabMidWordCompletion = 1 " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " bundle/snipmate/after/plugin/snipmate ino <silent> <leader>, <c-r>=TriggerSnippet()<cr> snor <silent> <leader>, <esc>i<right><c-r>=TriggerSnippet()<cr> ino <silent> <leader>\< <c-r>=BackwardsSnippet()<cr> snor <silent> <leader>\< <esc>i<right><c-r>=BackwardsSnippet()<cr> ino <silent> <leader>n <c-r>=ShowAvailableSnips()<cr> " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pyflakes " http://www.vim.org/scripts/script.php?script_id=3161 " default config for underlines of syntax errors in gvim let g:pyflakes_use_quickfix = 0 " tslime " https://github.com/evhan/tslime.vim.git " let g:tmux_sessionname = "default" let g:tmux_windowname = 1 let g:tmux_panenumber = 0 nmap <leader>mt :call Send_to_Tmux("make test"."\n")<CR> nmap <leader>lt :call Send_to_Tmux("./bin/test -x -cvvt \"test_".expand("%:t:r")."\"\n")<CR> nmap <leader>rst :call Send_to_Tmux("rst2html.py ".expand("%")." 
> /tmp/".expand("%:t:r").".html\n")<CR> " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist -l " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 " Tagbar " https://github.com/majutsushi/tagbar/ " Show ctags info in the sidebar nmap <silent> <leader>L :TagbarToggle<CR> " function! CustomCodeAssistInsertMode() " call RopeCodeAssistInsertMode() " if pumvisible() " return "\<C-L>\<Down>" " else " return '' " endif " endfunction " " function! TabWrapperComplete() " let cursyn = synID(line('.'), col('.') - 1, 1) " if pumvisible() " return "\<C-Y>" " endif " if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 " return "\<Tab>" " else " return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" " endif " endfunction " " inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " vim-makegreen && vim-nosecompiler " unit testing python code in during editing " I use files in the same dir test_xxxx.* " if we're already on the test_xxx.py file, just rerun current test file " function MakeArgs() " if empty(matchstr(expand('%'), 'test_')) " " if no test_ on the filename, then add it to run tests " let make_args = 'test_%' " else " let make_args = '%' " endif " " :call MakeGreen(make_args) " endfunction " " autocmd FileType python map <buffer> <leader>t :call MakeArgs()<CR> " " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! 
PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen
mitechie/pyvim
f8b923564254a8a5cd02329e97246dbfcfebcf55
Add some tslime fun and Go programming tricks
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index a02c227..430b63b 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,4 +1,8 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =2 +let g:netrw_dirhist_cnt =6 let g:netrw_dirhist_1='/home/rharding/src/wadllib/datetime-924240/src/wadllib/tests/data' let g:netrw_dirhist_2='/home/rharding/configs/pyvim' +let g:netrw_dirhist_3='/home/rharding/src/shownotes/shownotes' +let g:netrw_dirhist_4='/home/rharding/launchpad/lp-sourcedeps/eggs/lazr.lifecycle-1.1-py2.6.egg/lazr/lifecycle' +let g:netrw_dirhist_5='/home/rharding/launchpad/lp-branches/email_notice_959482' +let g:netrw_dirhist_6='/home/rharding/launchpad/lp-branches/email_notice_959482/lib' diff --git a/.vim/colors/lucius.vim b/.vim/colors/lucius.vim index c6a8fb0..9fcea60 100644 --- a/.vim/colors/lucius.vim +++ b/.vim/colors/lucius.vim @@ -1,266 +1,352 @@ " Lucius vim color file " Maintainer: Jonathan Filip <[email protected]> -" Version: 7.0.0 +" Version: 7.1.0 hi clear if exists("syntax_on") syntax reset endif let colors_name="lucius" " Summary: " Color scheme with dark and light versions (GUI and 256 color terminal). " " Description: " This color scheme was originally created by combining my favorite parts of " the following color schemes: " " * oceandeep (vimscript #368) " * peaksea (vimscript #760) " * wombat (vimscript #1778) " * moria (vimscript #1464) " * zenburn (vimscript #415) " " Version 7 has unified the 256 color terminal and GUI versions (the GUI " version only uses colors available on the 256 color terminal). The overall " colors were also toned down a little bit (light version is now a light gray " instead of white and the dark version is slightly lighter) to make it easier " on the eyes. " " Version 6+ has been revamped a bit from the original color scheme. If you " prefer the old style, or the 'blue' version, use the 5Final release. Version " 6+ only has a light and dark version. 
The new version tries to unify some of " the colors and also adds more contrast between text and interface. " " The color scheme is dark, by default. You can change this by setting the -" g:lucius_style variable to "light" or "dark". Once the color scheme is -" loaded, you can use the commands "LuciusLight" or "LuciusDark" to change -" schemes quickly. +" g:lucius_style variable to "light", "dark", or "dark_dim". Once the color +" scheme is loaded, you can use the commands "LuciusLight", "LuciusDark", or +" "LuciusDarkDim" to change schemes quickly. " " Screenshots of version 7: " " * Dark: http://i.imgur.com/ktdFm.png +" * DarkDim: http://i.imgur.com/0bOCv.png " * Light: http://i.imgur.com/ndd9A.png " " Screenshots of version 6: " " * Dark: http://i.imgur.com/IzYcB.png " * Light: http://i.imgur.com/kfJcm.png " " Screenshots of the version 5Final: " " * Dark: http://i.imgur.com/z0bDr.png " * Light: http://i.imgur.com/BXDiv.png " * Blue: http://i.imgur.com/Ea1Gq.png " " colorsupport.vim (vimscript #2682) is used to help with mapping the GUI " settings to the 256 terminal colors. " " This color scheme also has custom colors defined for the following plugins: " " * vimwiki (vimscript #2226) " * tagbar (vimscript #3465) " " Installation: " Copy the file to your vim colors directory and then do :colorscheme lucius. 
set background=dark if exists("g:lucius_style") if g:lucius_style == "light" set background=light endif else let g:lucius_style = "dark" endif " set colorcolumn=21,37,53,68,86,100 if g:lucius_style == "dark" hi Normal guifg=#d7d7d7 guibg=#303030 ctermfg=188 ctermbg=236 gui=none cterm=none hi Comment guifg=#808080 guibg=NONE ctermfg=244 ctermbg=NONE gui=none cterm=none hi Constant guifg=#d7d7af guibg=NONE ctermfg=187 ctermbg=NONE gui=none cterm=none hi BConstant guifg=#d7d7af guibg=NONE ctermfg=187 ctermbg=NONE gui=bold cterm=bold hi Identifier guifg=#afd787 guibg=NONE ctermfg=150 ctermbg=NONE gui=none cterm=none hi BIdentifier guifg=#afd787 guibg=NONE ctermfg=150 ctermbg=NONE gui=bold cterm=bold hi Statement guifg=#87d7ff guibg=NONE ctermfg=117 ctermbg=NONE gui=none cterm=none hi BStatement guifg=#87d7ff guibg=NONE ctermfg=117 ctermbg=NONE gui=bold cterm=bold hi PreProc guifg=#87d7af guibg=NONE ctermfg=115 ctermbg=NONE gui=none cterm=none hi BPreProc guifg=#87d7af guibg=NONE ctermfg=115 ctermbg=NONE gui=bold cterm=bold hi Type guifg=#87d7d7 guibg=NONE ctermfg=116 ctermbg=NONE gui=none cterm=none hi BType guifg=#87d7d7 guibg=NONE ctermfg=116 ctermbg=NONE gui=bold cterm=bold hi Special guifg=#d7afd7 guibg=NONE ctermfg=182 ctermbg=NONE gui=none cterm=none hi BSpecial guifg=#d7afd7 guibg=NONE ctermfg=182 ctermbg=NONE gui=bold cterm=bold " ## Text Markup ## hi Underlined guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=underline cterm=underline hi Error guifg=#ff8787 guibg=#870000 ctermfg=210 ctermbg=88 gui=none cterm=none hi Todo guifg=#d7d75f guibg=#5f5f00 ctermfg=185 ctermbg=58 gui=none cterm=none hi MatchParen guifg=bg guibg=#afd75f ctermfg=bg ctermbg=149 gui=none cterm=bold hi NonText guifg=#5f5f87 guibg=NONE ctermfg=60 ctermbg=NONE gui=none cterm=none hi SpecialKey guifg=#5f875f guibg=NONE ctermfg=65 ctermbg=NONE gui=none cterm=none hi Title guifg=#5fafd7 guibg=NONE ctermfg=74 ctermbg=NONE gui=bold cterm=bold " ## Text Selection ## hi Cursor guifg=bg guibg=#87afd7 
ctermfg=bg ctermbg=110 gui=none cterm=none hi CursorIM guifg=bg guibg=#87afd7 ctermfg=bg ctermbg=110 gui=none cterm=none hi CursorColumn guifg=NONE guibg=#444444 ctermfg=NONE ctermbg=238 gui=none cterm=none hi CursorLine guifg=NONE guibg=#444444 ctermfg=NONE ctermbg=238 gui=none cterm=none hi Visual guifg=NONE guibg=#005f87 ctermfg=NONE ctermbg=24 gui=none cterm=none hi VisualNOS guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=underline cterm=underline hi IncSearch guifg=bg guibg=#57d7d7 ctermfg=bg ctermbg=80 gui=none cterm=none hi Search guifg=bg guibg=#d78700 ctermfg=bg ctermbg=172 gui=none cterm=none " == UI == hi Pmenu guifg=bg guibg=#b2b2b2 ctermfg=bg ctermbg=249 gui=none cterm=none hi PmenuSel guifg=fg guibg=#005f87 ctermfg=fg ctermbg=24 gui=none cterm=none - hi PMenuSbar guifg=#b2b2b2 guibg=#d0d0d0 ctermfg=249 ctermbg=252 gui=none cterm=none - hi PMenuThumb guifg=fg guibg=#808080 ctermfg=fg ctermbg=244 gui=none cterm=none + hi PmenuSbar guifg=#b2b2b2 guibg=#d0d0d0 ctermfg=249 ctermbg=252 gui=none cterm=none + hi PmenuThumb guifg=fg guibg=#808080 ctermfg=fg ctermbg=244 gui=none cterm=none hi StatusLine guifg=bg guibg=#b2b2b2 ctermfg=bg ctermbg=249 gui=bold cterm=bold hi StatusLineNC guifg=#444444 guibg=#b2b2b2 ctermfg=238 ctermbg=249 gui=none cterm=none hi TabLine guifg=bg guibg=#b2b2b2 ctermfg=bg ctermbg=249 gui=none cterm=none hi TabLineFill guifg=#444444 guibg=#b2b2b2 ctermfg=238 ctermbg=249 gui=none cterm=none hi TabLineSel guifg=fg guibg=#005f87 ctermfg=fg ctermbg=24 gui=bold cterm=bold hi VertSplit guifg=#626262 guibg=#b2b2b2 ctermfg=241 ctermbg=249 gui=none cterm=none hi Folded guifg=#bcbcbc guibg=#4e4e4e ctermfg=250 ctermbg=239 gui=bold cterm=none hi FoldColumn guifg=#bcbcbc guibg=#4e4e4e ctermfg=250 ctermbg=239 gui=bold cterm=none " ## Spelling ## - hi SpellBad guisp=#d70000 ctermfg=bg ctermbg=160 gui=undercurl cterm=undercurl - hi SpellCap guisp=#00afd7 ctermfg=bg ctermbg=38 gui=undercurl cterm=undercurl - hi SpellRare guisp=#5faf00 ctermfg=bg 
ctermbg=70 gui=undercurl cterm=undercurl - hi SpellLocal guisp=#d7af00 ctermfg=bg ctermbg=178 gui=undercurl cterm=undercurl + hi SpellBad guisp=#d70000 ctermfg=160 ctermbg=NONE gui=undercurl cterm=underline + hi SpellCap guisp=#00afd7 ctermfg=38 ctermbg=NONE gui=undercurl cterm=underline + hi SpellRare guisp=#5faf00 ctermfg=70 ctermbg=NONE gui=undercurl cterm=underline + hi SpellLocal guisp=#d7af00 ctermfg=178 ctermbg=NONE gui=undercurl cterm=underline " ## Diff ## hi DiffAdd guifg=fg guibg=#5f875f ctermfg=fg ctermbg=65 gui=none cterm=none hi DiffChange guifg=fg guibg=#87875f ctermfg=fg ctermbg=101 gui=none cterm=none hi DiffDelete guifg=fg guibg=#875f5f ctermfg=fg ctermbg=95 gui=none cterm=none hi DiffText guifg=#ffff87 guibg=#87875f ctermfg=228 ctermbg=101 gui=none cterm=none " ## Misc ## hi Directory guifg=#afd7af guibg=NONE ctermfg=151 ctermbg=NONE gui=none cterm=none hi ErrorMsg guifg=#ff5f5f guibg=NONE ctermfg=203 ctermbg=NONE gui=none cterm=none hi SignColumn guifg=#b2b2b2 guibg=#4e4e4e ctermfg=249 ctermbg=239 gui=none cterm=none hi LineNr guifg=#626262 guibg=#444444 ctermfg=241 ctermbg=238 gui=none cterm=none hi MoreMsg guifg=#5fd7d7 guibg=NONE ctermfg=80 ctermbg=NONE gui=none cterm=none hi ModeMsg guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=none cterm=none hi Question guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=none cterm=none hi WarningMsg guifg=#d7875f guibg=NONE ctermfg=173 ctermbg=NONE gui=none cterm=none hi WildMenu guifg=fg guibg=#005f87 ctermfg=fg ctermbg=24 gui=none cterm=none hi ColorColumn guifg=NONE guibg=#87875f ctermfg=NONE ctermbg=101 gui=none cterm=none hi Ignore guifg=bg ctermfg=bg +elseif g:lucius_style == "dark_dim" + + + hi Normal guifg=#bcbcbc guibg=#303030 ctermfg=250 ctermbg=236 gui=none cterm=none + + hi Comment guifg=#6c6c6c guibg=NONE ctermfg=242 ctermbg=NONE gui=none cterm=none + + hi Constant guifg=#afaf87 guibg=NONE ctermfg=144 ctermbg=NONE gui=none cterm=none + hi BConstant guifg=#afaf87 guibg=NONE ctermfg=144 
ctermbg=NONE gui=bold cterm=bold + + hi Identifier guifg=#87af5f guibg=NONE ctermfg=107 ctermbg=NONE gui=none cterm=none + hi BIdentifier guifg=#87af5f guibg=NONE ctermfg=107 ctermbg=NONE gui=bold cterm=bold + + hi Statement guifg=#57afd7 guibg=NONE ctermfg=74 ctermbg=NONE gui=none cterm=none + hi BStatement guifg=#57afd7 guibg=NONE ctermfg=74 ctermbg=NONE gui=bold cterm=bold + + hi PreProc guifg=#5faf87 guibg=NONE ctermfg=72 ctermbg=NONE gui=none cterm=none + hi BPreProc guifg=#5faf87 guibg=NONE ctermfg=72 ctermbg=NONE gui=bold cterm=bold + + hi Type guifg=#5fafaf guibg=NONE ctermfg=73 ctermbg=NONE gui=none cterm=none + hi BType guifg=#5fafaf guibg=NONE ctermfg=73 ctermbg=NONE gui=bold cterm=bold + + hi Special guifg=#af87af guibg=NONE ctermfg=139 ctermbg=NONE gui=none cterm=none + hi BSpecial guifg=#af87af guibg=NONE ctermfg=139 ctermbg=NONE gui=bold cterm=bold + + " ## Text Markup ## + hi Underlined guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=underline cterm=underline + hi Error guifg=#d75f5f guibg=#870000 ctermfg=167 ctermbg=88 gui=none cterm=none + hi Todo guifg=#afaf00 guibg=#5f5f00 ctermfg=142 ctermbg=58 gui=none cterm=none + hi MatchParen guifg=bg guibg=#87af5f ctermfg=bg ctermbg=107 gui=none cterm=bold + hi NonText guifg=#5f5f87 guibg=NONE ctermfg=60 ctermbg=NONE gui=none cterm=none + hi SpecialKey guifg=#5f875f guibg=NONE ctermfg=65 ctermbg=NONE gui=none cterm=none + hi Title guifg=#00afd7 guibg=NONE ctermfg=38 ctermbg=NONE gui=bold cterm=bold + + " ## Text Selection ## + hi Cursor guifg=bg guibg=#5f87af ctermfg=bg ctermbg=67 gui=none cterm=none + hi CursorIM guifg=bg guibg=#5f87af ctermfg=bg ctermbg=67 gui=none cterm=none + hi CursorColumn guifg=NONE guibg=#444444 ctermfg=NONE ctermbg=238 gui=none cterm=none + hi CursorLine guifg=NONE guibg=#444444 ctermfg=NONE ctermbg=238 gui=none cterm=none + hi Visual guifg=NONE guibg=#005f87 ctermfg=NONE ctermbg=24 gui=none cterm=none + hi VisualNOS guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=underline 
cterm=underline + hi IncSearch guifg=bg guibg=#00afaf ctermfg=bg ctermbg=37 gui=none cterm=none + hi Search guifg=bg guibg=#d78700 ctermfg=bg ctermbg=172 gui=none cterm=none + + " == UI == + hi Pmenu guifg=bg guibg=#8a8a8a ctermfg=bg ctermbg=245 gui=none cterm=none + hi PmenuSel guifg=fg guibg=#005f87 ctermfg=fg ctermbg=24 gui=none cterm=none + hi PmenuSbar guifg=#8a8a8a guibg=#bcbcbc ctermfg=245 ctermbg=250 gui=none cterm=none + hi PmenuThumb guifg=fg guibg=#585858 ctermfg=fg ctermbg=240 gui=none cterm=none + hi StatusLine guifg=bg guibg=#8a8a8a ctermfg=bg ctermbg=245 gui=bold cterm=bold + hi StatusLineNC guifg=#444444 guibg=#8a8a8a ctermfg=238 ctermbg=245 gui=none cterm=none + hi TabLine guifg=bg guibg=#8a8a8a ctermfg=bg ctermbg=245 gui=none cterm=none + hi TabLineFill guifg=#444444 guibg=#8a8a8a ctermfg=238 ctermbg=245 gui=none cterm=none + hi TabLineSel guifg=fg guibg=#005f87 ctermfg=fg ctermbg=24 gui=bold cterm=bold + hi VertSplit guifg=#626262 guibg=#8a8a8a ctermfg=241 ctermbg=245 gui=none cterm=none + hi Folded guifg=#a8a8a8 guibg=#4e4e4e ctermfg=248 ctermbg=239 gui=bold cterm=none + hi FoldColumn guifg=#a8a8a8 guibg=#4e4e4e ctermfg=248 ctermbg=239 gui=bold cterm=none + + " ## Spelling ## + hi SpellBad guisp=#d70000 ctermfg=160 ctermbg=NONE gui=undercurl cterm=underline + hi SpellCap guisp=#00afd7 ctermfg=38 ctermbg=NONE gui=undercurl cterm=underline + hi SpellRare guisp=#5faf00 ctermfg=70 ctermbg=NONE gui=undercurl cterm=underline + hi SpellLocal guisp=#d7af00 ctermfg=178 ctermbg=NONE gui=undercurl cterm=underline + + " ## Diff ## + hi DiffAdd guifg=fg guibg=#5f875f ctermfg=fg ctermbg=65 gui=none cterm=none + hi DiffChange guifg=fg guibg=#87875f ctermfg=fg ctermbg=101 gui=none cterm=none + hi DiffDelete guifg=fg guibg=#875f5f ctermfg=fg ctermbg=95 gui=none cterm=none + hi DiffText guifg=#d7d75f guibg=#87875f ctermfg=185 ctermbg=101 gui=none cterm=none + + " ## Misc ## + hi Directory guifg=#87af87 guibg=NONE ctermfg=108 ctermbg=NONE gui=none cterm=none + hi 
ErrorMsg guifg=#d75f5f guibg=NONE ctermfg=167 ctermbg=NONE gui=none cterm=none + hi SignColumn guifg=#8a8a8a guibg=#4e4e4e ctermfg=245 ctermbg=239 gui=none cterm=none + hi LineNr guifg=#626262 guibg=#444444 ctermfg=241 ctermbg=238 gui=none cterm=none + hi MoreMsg guifg=#00afaf guibg=NONE ctermfg=37 ctermbg=NONE gui=none cterm=none + hi ModeMsg guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=none cterm=none + hi Question guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=none cterm=none + hi WarningMsg guifg=#af875f guibg=NONE ctermfg=173 ctermbg=NONE gui=none cterm=none + hi WildMenu guifg=fg guibg=#005f87 ctermfg=fg ctermbg=24 gui=none cterm=none + hi ColorColumn guifg=NONE guibg=#87875f ctermfg=NONE ctermbg=101 gui=none cterm=none + hi Ignore guifg=bg ctermfg=bg + + elseif g:lucius_style == "light" hi Normal guifg=#3a3a3a guibg=#eeeeee ctermfg=237 ctermbg=255 gui=none cterm=none hi Comment guifg=#808080 guibg=NONE ctermfg=244 ctermbg=NONE gui=none cterm=none hi Constant guifg=#af5f00 guibg=NONE ctermfg=130 ctermbg=NONE gui=none cterm=none hi BConstant guifg=#af5f00 guibg=NONE ctermfg=130 ctermbg=NONE gui=bold cterm=bold hi Identifier guifg=#008700 guibg=NONE ctermfg=28 ctermbg=NONE gui=none cterm=none hi BIdentifier guifg=#008700 guibg=NONE ctermfg=28 ctermbg=NONE gui=bold cterm=bold hi Statement guifg=#005faf guibg=NONE ctermfg=25 ctermbg=NONE gui=none cterm=none hi BStatement guifg=#005faf guibg=NONE ctermfg=25 ctermbg=NONE gui=bold cterm=bold hi PreProc guifg=#008787 guibg=NONE ctermfg=30 ctermbg=NONE gui=none cterm=none hi BPreProc guifg=#008787 guibg=NONE ctermfg=30 ctermbg=NONE gui=bold cterm=bold hi Type guifg=#005f87 guibg=NONE ctermfg=24 ctermbg=NONE gui=none cterm=none hi BType guifg=#005f87 guibg=NONE ctermfg=24 ctermbg=NONE gui=bold cterm=bold hi Special guifg=#870087 guibg=NONE ctermfg=90 ctermbg=NONE gui=none cterm=none hi BSpecial guifg=#870087 guibg=NONE ctermfg=90 ctermbg=NONE gui=bold cterm=bold " ## Text Markup ## hi Underlined guifg=fg guibg=NONE 
ctermfg=fg ctermbg=NONE gui=underline cterm=underline hi Error guifg=#af0000 guibg=#ffafaf ctermfg=124 ctermbg=217 gui=none cterm=none hi Todo guifg=#875f00 guibg=#ffff87 ctermfg=94 ctermbg=228 gui=none cterm=none hi MatchParen guifg=NONE guibg=#5fd7d7 ctermfg=NONE ctermbg=80 gui=none cterm=none hi NonText guifg=#afafd7 guibg=NONE ctermfg=146 ctermbg=NONE gui=none cterm=none hi SpecialKey guifg=#afd7af guibg=NONE ctermfg=151 ctermbg=NONE gui=none cterm=none hi Title guifg=#005faf guibg=NONE ctermfg=25 ctermbg=NONE gui=bold cterm=bold " ## Text Selection ## hi Cursor guifg=bg guibg=#5f87af ctermfg=bg ctermbg=67 gui=none cterm=none hi CursorIM guifg=bg guibg=#5f87af ctermfg=bg ctermbg=67 gui=none cterm=none hi CursorColumn guifg=NONE guibg=#dadada ctermfg=NONE ctermbg=253 gui=none cterm=none hi CursorLine guifg=NONE guibg=#dadada ctermfg=NONE ctermbg=253 gui=none cterm=none hi Visual guifg=NONE guibg=#afd7ff ctermfg=NONE ctermbg=153 gui=none cterm=none hi VisualNOS guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=underline cterm=underline hi IncSearch guifg=fg guibg=#57d7d7 ctermfg=fg ctermbg=80 gui=none cterm=none hi Search guifg=fg guibg=#ffaf00 ctermfg=fg ctermbg=214 gui=none cterm=none " ## UI ## hi Pmenu guifg=bg guibg=#808080 ctermfg=bg ctermbg=244 gui=none cterm=none hi PmenuSel guifg=fg guibg=#afd7ff ctermfg=fg ctermbg=153 gui=none cterm=none - hi PMenuSbar guifg=#808080 guibg=#444444 ctermfg=244 ctermbg=238 gui=none cterm=none - hi PMenuThumb guifg=fg guibg=#9e9e9e ctermfg=fg ctermbg=247 gui=none cterm=none + hi PmenuSbar guifg=#808080 guibg=#444444 ctermfg=244 ctermbg=238 gui=none cterm=none + hi PmenuThumb guifg=fg guibg=#9e9e9e ctermfg=fg ctermbg=247 gui=none cterm=none hi StatusLine guifg=bg guibg=#808080 ctermfg=bg ctermbg=244 gui=bold cterm=bold hi StatusLineNC guifg=#e4e4e4 guibg=#808080 ctermfg=254 ctermbg=244 gui=none cterm=none hi TabLine guifg=bg guibg=#808080 ctermfg=bg ctermbg=244 gui=none cterm=none hi TabLineFill guifg=#b2b2b2 guibg=#808080 
ctermfg=249 ctermbg=244 gui=none cterm=none hi TabLineSel guifg=fg guibg=#afd7ff ctermfg=fg ctermbg=153 gui=none cterm=none hi VertSplit guifg=#e4e4e4 guibg=#808080 ctermfg=254 ctermbg=244 gui=none cterm=none hi Folded guifg=#626262 guibg=#bcbcbc ctermfg=241 ctermbg=250 gui=bold cterm=none hi FoldColumn guifg=#626262 guibg=#bcbcbc ctermfg=241 ctermbg=250 gui=bold cterm=none " ## Spelling ## - hi SpellBad guisp=#d70000 ctermfg=fg ctermbg=160 gui=undercurl cterm=undercurl - hi SpellCap guisp=#00afd7 ctermfg=fg ctermbg=38 gui=undercurl cterm=undercurl - hi SpellRare guisp=#5faf00 ctermfg=fg ctermbg=70 gui=undercurl cterm=undercurl - hi SpellLocal guisp=#d7af00 ctermfg=fg ctermbg=178 gui=undercurl cterm=undercurl + hi SpellBad guisp=#d70000 ctermfg=160 ctermbg=NONE gui=undercurl cterm=underline + hi SpellCap guisp=#00afd7 ctermfg=38 ctermbg=NONE gui=undercurl cterm=underline + hi SpellRare guisp=#5faf00 ctermfg=70 ctermbg=NONE gui=undercurl cterm=underline + hi SpellLocal guisp=#d7af00 ctermfg=178 ctermbg=NONE gui=undercurl cterm=underline " ## Diff ## hi DiffAdd guifg=fg guibg=#afd7af ctermfg=fg ctermbg=151 gui=none cterm=none hi DiffChange guifg=fg guibg=#d7d7af ctermfg=fg ctermbg=187 gui=none cterm=none hi DiffDelete guifg=fg guibg=#d7afaf ctermfg=fg ctermbg=181 gui=none cterm=none hi DiffText guifg=#d75f00 guibg=#d7d7af ctermfg=166 ctermbg=187 gui=bold cterm=bold " ## Misc ## hi Directory guifg=#00875f guibg=NONE ctermfg=29 ctermbg=NONE gui=none cterm=none hi ErrorMsg guifg=#af0000 guibg=NONE ctermfg=124 ctermbg=NONE gui=none cterm=none hi SignColumn guifg=#626262 guibg=#d0d0d0 ctermfg=241 ctermbg=252 gui=none cterm=none hi LineNr guifg=#9e9e9e guibg=#dadada ctermfg=247 ctermbg=253 gui=none cterm=none hi MoreMsg guifg=#005fd7 guibg=NONE ctermfg=26 ctermbg=NONE gui=none cterm=none hi ModeMsg guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=none cterm=none hi Question guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=none cterm=none hi WarningMsg guifg=#af5700 
guibg=NONE ctermfg=130 ctermbg=NONE gui=none cterm=none hi WildMenu guifg=fg guibg=#afd7ff ctermfg=fg ctermbg=153 gui=none cterm=none hi ColorColumn guifg=NONE guibg=#d7d7af ctermfg=NONE ctermbg=187 gui=none cterm=none hi Ignore guifg=bg ctermfg=bg endif " ## Vimwiki Colors ## hi link VimwikiHeader1 BIdentifier hi link VimwikiHeader2 BPreProc hi link VimwikiHeader3 BStatement hi link VimwikiHeader4 BSpecial hi link VimwikiHeader5 BConstant hi link VimwikiHeader6 BType " ## Tagbar Colors ## hi link TagbarAccessPublic Constant hi link TagbarAccessProtected Type hi link TagbarAccessPrivate PreProc " ## Commands ## command! LuciusLight let g:lucius_style = "light" | colorscheme lucius command! LuciusDark let g:lucius_style = "dark" | colorscheme lucius +command! LuciusDarkDim let g:lucius_style = "dark_dim" | colorscheme lucius diff --git a/.vim/colors/void.vim b/.vim/colors/void.vim index f2a8ebc..06da9a0 100644 --- a/.vim/colors/void.vim +++ b/.vim/colors/void.vim @@ -1,107 +1,109 @@ " Vim color file " Maintainer: Andrew Lyon <[email protected]> -" Last Change: $Date: 2010-11-20 12:27:30PST $ -" Version: 1.1 +" Last Change: 2012-03-21 06:01:00 PST +" Version: 2.1 " Note that this color scheme is loosely based off of desert.vim (Hans Fugal " <[email protected]>) mixed with some of slate.vim (Ralph Amissah " <[email protected]>) but with much of my own modification. 
set background=dark if version > 580 " no guarantees for version 5.8 and below, but this makes it stop " complaining hi clear if exists("syntax_on") syntax reset endif endif let g:colors_name="void" -hi Normal guifg=#f5f5f5 guibg=grey15 +hi Normal guifg=#e0e0e0 guibg=grey15 " highlight groups hi Cursor guibg=khaki guifg=slategrey "hi CursorIM "hi Directory "hi DiffAdd "hi DiffChange "hi DiffDelete "hi DiffText "hi ErrorMsg hi VertSplit guibg=black guifg=black gui=none hi Folded guibg=grey30 guifg=gold hi FoldColumn guibg=grey30 guifg=tan hi IncSearch guifg=slategrey guibg=khaki "hi LineNr hi ModeMsg guifg=goldenrod hi MoreMsg guifg=SeaGreen hi NonText guifg=LightBlue guibg=grey30 hi Question guifg=springgreen hi Search guibg=peru guifg=wheat hi SpecialKey guifg=yellowgreen hi StatusLine guibg=black guifg=#cccccc gui=none hi StatusLineNC guibg=black guifg=grey40 gui=none hi Title guifg=indianred hi Visual gui=none guifg=khaki guibg=olivedrab "hi VisualNOS hi WarningMsg guifg=salmon "hi WildMenu "hi Menu "hi Scrollbar "hi Tooltip " syntax highlighting groups -hi Comment guifg=grey50 -hi Constant guifg=#e09085 -hi Identifier guifg=#f5f5f5 -hi Statement guifg=darkkhaki " #bbccee +hi Comment guifg=grey50 ctermfg=darkcyan +hi Constant guifg=#e09085 ctermfg=brown +hi Identifier guifg=#d0d0b0 +hi Statement guifg=#ccaa88 gui=bold cterm=bold term=bold +"hi Statement guifg=darkkhaki hi PreProc guifg=#c8e0b0 -hi Type guifg=darkkhaki +hi Type guifg=#99cccc term=NONE cterm=NONE gui=NONE hi Special guifg=#bbccee cterm=bold term=bold hi Operator guifg=navajowhite cterm=NONE -hi Function guifg=#c8e0b0 cterm=NONE "hi Underlined hi Ignore guifg=grey40 "hi Error hi Todo guifg=orangered guibg=yellow2 hi Todo guifg=orange guibg=gray40 +" Fuf/menu stuff +hi Pmenu guifg=#aadddd guibg=#333333 +hi PmenuSel guifg=#ddeeee guibg=#335533 + " color terminal definitions hi SpecialKey ctermfg=darkgreen hi NonText guibg=grey15 cterm=bold ctermfg=darkblue hi Directory ctermfg=brown guifg=#ddbb66 hi 
ErrorMsg cterm=bold ctermfg=7 ctermbg=1 hi IncSearch cterm=NONE ctermfg=yellow ctermbg=green hi Search cterm=NONE ctermfg=grey ctermbg=blue hi MoreMsg ctermfg=darkgreen hi ModeMsg cterm=NONE ctermfg=brown hi LineNr guifg=grey50 ctermfg=3 hi Question ctermfg=green hi StatusLine cterm=bold,reverse hi StatusLineNC cterm=reverse hi VertSplit cterm=reverse hi Title ctermfg=5 hi Visual cterm=reverse hi VisualNOS cterm=bold,underline hi WarningMsg ctermfg=1 hi WildMenu ctermfg=0 ctermbg=3 hi Folded ctermfg=darkgrey ctermbg=NONE hi FoldColumn ctermfg=darkgrey ctermbg=NONE hi DiffAdd ctermbg=4 hi DiffChange ctermbg=5 hi DiffDelete cterm=bold ctermfg=4 ctermbg=6 hi DiffText cterm=bold ctermbg=1 -hi Comment ctermfg=darkcyan -hi Constant ctermfg=brown hi Special ctermfg=5 hi Identifier ctermfg=6 hi Statement ctermfg=3 hi PreProc ctermfg=5 hi Type ctermfg=2 hi Underlined cterm=underline ctermfg=5 hi Ignore cterm=bold ctermfg=7 hi Ignore ctermfg=darkgrey hi Error cterm=bold ctermfg=7 ctermbg=1 diff --git a/.vim/dict.add b/.vim/dict.add index 2b1c1f3..ec50283 100644 --- a/.vim/dict.add +++ b/.vim/dict.add @@ -1,27 +1,28 @@ sqlalchemy SQLAlchemy sessionmaker Metadata metadata metadata config hostname app Ubuntu blog unicode plugin plugins ajax json hotalert hotalerts HotAlert SqlAlchemy admin popup username Codemash added actions buildout +$/! 
diff --git a/.vim/dict.add.spl b/.vim/dict.add.spl index 9e2947f..e9c4d46 100644 Binary files a/.vim/dict.add.spl and b/.vim/dict.add.spl differ diff --git a/.vimrc b/.vimrc index 1eb86ac..4f862fb 100644 --- a/.vimrc +++ b/.vimrc @@ -1,624 +1,647 @@ " http://github.com/mitechie/pyvim " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist -" ,v - load .vimrc -" ,V - reload .vimrc +" ,V - load .vimrc +" ,VV - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,q - reformat text paragraph " ,s - toggle spellcheck " ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " ,t - collapse/fold html tag " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. 
- move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ " bootstrap the pathogen part of the config right away filetype off call pathogen#runtime_append_all_bundles() call pathogen#helptags() " Highlight end of line whitespace. " match WhitespaceEOL /\s\+$/ au InsertEnter * match WhitespaceEOL /\s\+$/ au InsertLeave * match WhitespaceEOL /\s\+$/ " make sure our whitespace matching is setup before we do colorscheme tricks autocmd ColorScheme * highlight WhitespaceEOL ctermbg=red guibg=red " now proceed as usual syntax on " syntax highlighing filetype on " try to detect filetypes filetype plugin indent on " enable loading indent file for filetype " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font " set guifont=Envy\ Code\ R\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide - colorscheme lucius - colorscheme twilight - colorscheme aldmeris - colorscheme solarized + " colorscheme lucius + " colorscheme twilight + " colorscheme aldmeris + " colorscheme solarized + colorscheme void " To set the toolbars off (icons on top of the screen) set guioptions-=T " Try to keep backups across sessions set undodir=~/.vim/backups set undofile else set background=dark " adapt colors for dark background set t_Co=256 colorscheme lucius colorscheme twilight endif " ================================================== " Basic Settings " ================================================== 
let mapleader="," " change the leader to be a comma vs slash set textwidth=78 " Try this out to see how textwidth helps set ch=1 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... 
-set statusline=%{fugitive#statusline()}%F%m%r%h%w\ [FORMAT=%{&ff}]\ [TYPE=%Y]\ [ASCII=\%03.3b]\ [HEX=\%02.2B]\ [POS=%04l,%04v][%p%%]\ [LEN=%L] +set statusline=%F%m%r%h%w\ [FORMAT=%{&ff}]\ [TYPE=%Y]\ [ASCII=\%03.3b]\ [HEX=\%02.2B]\ [POS=%04l,%04v][%p%%]\ [LEN=%L] hi StatusLine guifg=#fcf4ba guibg=#333333 hi StatusLineNC guifg=#808080 guibg=#333333 " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " auto save when focus is lost au FocusLost * :wa +" run the current file with F5 +map <F5> <Esc>:w<CR>:!%:p<CR> + " ================================================== " Config Specific Settings " ================================================== " If we're running in vimdiff then tweak out settings a bit if &diff set nospell endif " ================================================== " Basic Maps " ================================================== " " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " Map ;; to swap out the file with the previous one nnoremap ;; <c-^> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,VV brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>V :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>VV :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR>:cw<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut 
for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " setup a custom dict for spelling " zg = add word to dict " zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> nmap <leader>l :lopen<CR> nmap <leader>ll :lclose<CR> nmap <leader>ln :lN<CR> nmap <leader>lp :lN<CR> " for when we forget to use sudo to open/edit a file cmap w!! w !sudo tee % >/dev/null nnoremap <leader>q gqap " Scroll the viewport 3 lines vs just 1 line at a time nnoremap <C-e> 3<C-e> nnoremap <C-y> 3<C-y> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. 
set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching set smartcase " if searching and search contains upper case, make case sensitive search nmap <silent> <C-N> :silent noh<CR> " Search for potentially strange non-ascii characters map <leader>u :match Error /[\x7f-\xff]/<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t set completeopt+=menuone,longest " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on " autocmd BufEnter * lcd %:p:h " Trying out this trick to get cwd tricks cnoremap %% <C-R>=expand('%:h').'/'<cr> map <leader>e :edit %% map <leader>v :view %% " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufReadPost quickfix map <buffer> <silent> <CR> :.cc <CR> :ccl au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" " au BufRead *.py compiler nose " au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m au BufRead *.py set tags=tags-py;/ " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au FileType javascript set errorformat=%f:\ line\ %l\\,\ col\ %c\\,\ %m au FileType javascript set makeprg=jshint\ % -au FileType javascript set textwidth=78 au FileType javascript set tags=tags-js;/ autocmd BufRead,BufNewFile *.json set filetype=json command Js silent %!jp command Jc silent %!jcompress autocmd FileType json Js " ================================================== " CSS " ================================================== " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " enable html tag folding with ,f nnoremap <leader>f Vatzf +" ================================================== +" GoLang +" ================================================== +" Highlight word and 'K' to get GoDoc output for word. 
+au BufRead,BufNewFile *.go set filetype=go +" ,m will run gomake +au BufRead *.go set makeprg=gomake +" ,M will run gofmt on the code to lint it +autocmd FileType go map <buffer> <leader>M :Fmt<CR>:cw<CR> " ================================================== " Git Tricks " ================================================== " Show the diff in the preview window of the commit during git commit autocmd FileType gitcommit DiffGitCached | wincmd p " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " mako.vim " http://www.vim.org/scripts/script.php?script_id=2663 " syntax support for mako code " ================================================== " Plugins " ================================================== " CtrlP " https://github.com/kien/ctrlp.vim let g:ctrlp_working_path_mode = 0 let g:ctrlp_custom_ignore = { \ 'dir': '\.git$\|\.hg$\|\.svn\|\.bzr$\|develop-eggs$\|site-packages', \ 'file': '\.pyc$|\.exe$\|\.so$\|\.dll$\|\.swp$', \ 'link': 'download-cache|eggs|yui', \ } map <leader>gt :CtrlP templates/<cr> map <leader>gj :CtrlP static/js/<cr> " Fugative " https://github.com/tpope/vim-fugitive " " Commands: " Gedit " Gsplit " Gvsplit " GStatus " Gblame " Gmove " Gremove " Ggrep " Gwrite " Gbrowse " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>a :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 autocmd FileType python map <buffer> <leader>M :call Pep8()<CR>:cw<CR> " python folding 
jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp let g:SuperTabDefaultCompletionType = "context" let g:SuperTabContextDefaultCompletionType = "<c-n>" let g:SuperTabLongestHighlight = 1 let g:SuperTabMidWordCompletion = 1 " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " bundle/snipmate/after/plugin/snipmate ino <silent> <leader>, <c-r>=TriggerSnippet()<cr> snor <silent> <leader>, <esc>i<right><c-r>=TriggerSnippet()<cr> ino <silent> <leader>\< <c-r>=BackwardsSnippet()<cr> snor <silent> <leader>\< <esc>i<right><c-r>=BackwardsSnippet()<cr> ino <silent> <leader>n <c-r>=ShowAvailableSnips()<cr> " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pyflakes " http://www.vim.org/scripts/script.php?script_id=3161 " default config for underlines of syntax errors in gvim let g:pyflakes_use_quickfix = 0 +" tslime +" https://github.com/evhan/tslime.vim.git +" let g:tmux_sessionname = "default" +let g:tmux_windowname = 1 +let g:tmux_panenumber = 0 +nmap <leader>mt :call Send_to_Tmux("make test"."\n")<CR> +nmap <leader>lt :call Send_to_Tmux("./bin/test -x -cvvt \"test_".expand("%:t:r")."\"\n")<CR> +nmap <leader>rst :call Send_to_Tmux("rst2html.py ".expand("%")." 
> /tmp/".expand("%:t:r").".html\n")<CR> + + " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist -l " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 " Tagbar " https://github.com/majutsushi/tagbar/ " Show ctags info in the sidebar nmap <silent> <leader>L :TagbarToggle<CR> " function! CustomCodeAssistInsertMode() " call RopeCodeAssistInsertMode() " if pumvisible() " return "\<C-L>\<Down>" " else " return '' " endif " endfunction " " function! TabWrapperComplete() " let cursyn = synID(line('.'), col('.') - 1, 1) " if pumvisible() " return "\<C-Y>" " endif " if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 " return "\<Tab>" " else " return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" " endif " endfunction " " inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " vim-makegreen && vim-nosecompiler " unit testing python code in during editing " I use files in the same dir test_xxxx.* " if we're already on the test_xxx.py file, just rerun current test file " function MakeArgs() " if empty(matchstr(expand('%'), 'test_')) " " if no test_ on the filename, then add it to run tests " let make_args = 'test_%' " else " let make_args = '%' " endif " " :call MakeGreen(make_args) " endfunction " " autocmd FileType python map <buffer> <leader>t :call MakeArgs()<CR> + " " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! 
PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>) " javascript folding function! JavaScriptFold() setl foldmethod=syntax setl foldlevelstart=1 syn region foldBraces start=/{/ end=/}/ transparent fold keepend extend function! FoldText() return substitute(getline(v:foldstart), '{.*', '{...}', '') endfunction setl foldtext=FoldText() endfunction " Clean all end of line extra whitespace with ,S " Credit: voyeg3r https://github.com/mitechie/pyvim/issues/#issue/1 " deletes excess space but maintains the list of jumps unchanged " for more details see: h keepjumps fun! CleanExtraSpaces() let save_cursor = getpos(".") let old_query = getreg('/') :%s/\s\+$//e call setpos('.', save_cursor) call setreg('/', old_query) endfun map <silent><leader>S <esc>:keepjumps call CleanExtraSpaces()<cr> diff --git a/bundle_list b/bundle_list index 44f3128..0334e91 100644 --- a/bundle_list +++ b/bundle_list @@ -1,29 +1,32 @@ http://github.com/sukima/xmledit.git http://github.com/vim-scripts/mako.vim.git http://github.com/vim-scripts/LustyJuggler.git http://github.com/tomtom/tcomment_vim.git http://github.com/vim-scripts/pep8--Driessen.git http://github.com/vim-scripts/jpythonfold.vim.git http://github.com/vim-scripts/pydoc.vim.git http://github.com/tsaleh/vim-supertab.git http://github.com/msanders/snipmate.vim.git http://github.com/vim-scripts/surround.vim.git http://github.com/vim-scripts/Gist.vim.git http://github.com/scrooloose/nerdtree.git #http://github.com/vim-scripts/pylint.vim.git http://github.com/vim-scripts/nginx.vim.git #git://github.com/mitechie/pyflakes-pathogen.git #http://github.com/kevinw/pyflakes-vim.git https://github.com/nvie/vim-flake8.git http://github.com/tomtom/tlib_vim.git 
https://github.com/ap/vim-css-color.git https://github.com/Bogdanp/quicksilver.vim.git git://github.com/majutsushi/tagbar https://github.com/bolasblack/csslint.vim.git https://github.com/pangloss/vim-javascript.git https://github.com/yui/vim-yui3.git https://github.com/Lokaltog/vim-powerline http://github.com/depuracao/vim-darkdevel.git https://github.com/vim-scripts/jshint.vim.git https://github.com/kien/ctrlp.vim https://github.com/tpope/vim-fugitive +https://github.com/evhan/tslime.vim.git +https://github.com/kana/vim-smartinput.git +https://github.com/mitechie/govim.git
mitechie/pyvim
d44073b4df9c4096bf8a3251cc69d5052ba9aafd
Add fugative, try out solorized theme
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index ba18018..a02c227 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,3 +1,4 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =1 +let g:netrw_dirhist_cnt =2 let g:netrw_dirhist_1='/home/rharding/src/wadllib/datetime-924240/src/wadllib/tests/data' +let g:netrw_dirhist_2='/home/rharding/configs/pyvim'
mitechie/pyvim
e65f6d288edada8332e391f34a4b6c97d92fc751
Add in fugative, trying out solarized theme, wheee
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index dabbdfd..ba18018 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,10 +1,3 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =8 -let g:netrw_dirhist_1='/home/rharding/src/bookie/bookie/lib/python2.7/site-packages/PasteDeploy-1.5.0-py2.7.egg-info' -let g:netrw_dirhist_2='/home/rharding/src/convoy/convoy' -let g:netrw_dirhist_3='/home/rharding/launchpad/lp-branches/watch_jsbuild/lib/lp/app/javascript/anim' -let g:netrw_dirhist_4='/home/rharding/src/docs/minime' -let g:netrw_dirhist_5='/home/rharding/src/bookie/bookie/bookie/static/js/bookie' -let g:netrw_dirhist_6='/home/rharding/configs/pyvim/.vim/bundle/jshint.vim/plugin' -let g:netrw_dirhist_7='/home/rharding/src/bookie/bookie/jsdoc' -let g:netrw_dirhist_8='/etc/apt/sources.list.d' +let g:netrw_dirhist_cnt =1 +let g:netrw_dirhist_1='/home/rharding/src/wadllib/datetime-924240/src/wadllib/tests/data' diff --git a/.vimrc b/.vimrc index 0ec7afd..1eb86ac 100644 --- a/.vimrc +++ b/.vimrc @@ -1,605 +1,624 @@ " http://github.com/mitechie/pyvim " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,q - reformat text paragraph " ,s - toggle spellcheck " ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " ,t - collapse/fold html tag " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - 
Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ " bootstrap the pathogen part of the config right away filetype off call pathogen#runtime_append_all_bundles() call pathogen#helptags() " Highlight end of line whitespace. 
" match WhitespaceEOL /\s\+$/ au InsertEnter * match WhitespaceEOL /\s\+$/ au InsertLeave * match WhitespaceEOL /\s\+$/ " make sure our whitespace matching is setup before we do colorscheme tricks autocmd ColorScheme * highlight WhitespaceEOL ctermbg=red guibg=red " now proceed as usual syntax on " syntax highlighing filetype on " try to detect filetypes filetype plugin indent on " enable loading indent file for filetype " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font " set guifont=Envy\ Code\ R\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme lucius colorscheme twilight colorscheme aldmeris + colorscheme solarized " To set the toolbars off (icons on top of the screen) set guioptions-=T " Try to keep backups across sessions set undodir=~/.vim/backups set undofile else set background=dark " adapt colors for dark background set t_Co=256 colorscheme lucius colorscheme twilight endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=78 " Try this out to see how textwidth helps set ch=1 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " 
last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... -set statusline=%F%m%r%h%w\ [FORMAT=%{&ff}]\ [TYPE=%Y]\ [ASCII=\%03.3b]\ [HEX=\%02.2B]\ [POS=%04l,%04v][%p%%]\ [LEN=%L] +set statusline=%{fugitive#statusline()}%F%m%r%h%w\ [FORMAT=%{&ff}]\ [TYPE=%Y]\ [ASCII=\%03.3b]\ [HEX=\%02.2B]\ [POS=%04l,%04v][%p%%]\ [LEN=%L] hi StatusLine guifg=#fcf4ba guibg=#333333 hi StatusLineNC guifg=#808080 guibg=#333333 " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " auto save when focus is lost au FocusLost * :wa " ================================================== " Config Specific Settings " ================================================== " If we're running in vimdiff then tweak out settings a bit if &diff set nospell endif " ================================================== " Basic Maps " ================================================== " " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> +" Map ;; to swap out the file with the previous one +nnoremap ;; <c-^> + " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,VV brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>V :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>VV :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map 
<silent> <leader>m :make<CR>:cw<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " setup a custom dict for spelling " zg = add word to dict " zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> nmap <leader>l :lopen<CR> nmap <leader>ll :lclose<CR> nmap <leader>ln :lN<CR> nmap <leader>lp :lN<CR> " for when we forget to use sudo to open/edit a file cmap w!! w !sudo tee % >/dev/null nnoremap <leader>q gqap " Scroll the viewport 3 lines vs just 1 line at a time nnoremap <C-e> 3<C-e> nnoremap <C-y> 3<C-y> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. 
set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching set smartcase " if searching and search contains upper case, make case sensitive search nmap <silent> <C-N> :silent noh<CR> " Search for potentially strange non-ascii characters map <leader>u :match Error /[\x7f-\xff]/<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t set completeopt+=menuone,longest " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on " autocmd BufEnter * lcd %:p:h " Trying out this trick to get cwd tricks cnoremap %% <C-R>=expand('%:h').'/'<cr> map <leader>e :edit %% map <leader>v :view %% " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufReadPost quickfix map <buffer> <silent> <CR> :.cc <CR> :ccl au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" " au BufRead *.py compiler nose " au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m au BufRead *.py set tags=tags-py;/ " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au FileType javascript set errorformat=%f:\ line\ %l\\,\ col\ %c\\,\ %m au FileType javascript set makeprg=jshint\ % au FileType javascript set textwidth=78 au FileType javascript set tags=tags-js;/ autocmd BufRead,BufNewFile *.json set filetype=json command Js silent %!jp command Jc silent %!jcompress autocmd FileType json Js " ================================================== " CSS " ================================================== " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " enable html tag folding with ,f nnoremap <leader>f Vatzf " ================================================== " Git Tricks " ================================================== " Show the diff in the preview window of the commit during git commit autocmd FileType gitcommit DiffGitCached | wincmd p " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " mako.vim " 
http://www.vim.org/scripts/script.php?script_id=2663 " syntax support for mako code " ================================================== " Plugins " ================================================== " CtrlP " https://github.com/kien/ctrlp.vim let g:ctrlp_working_path_mode = 0 let g:ctrlp_custom_ignore = { \ 'dir': '\.git$\|\.hg$\|\.svn\|\.bzr$\|develop-eggs$\|site-packages', \ 'file': '\.pyc$|\.exe$\|\.so$\|\.dll$\|\.swp$', \ 'link': 'download-cache|eggs|yui', \ } map <leader>gt :CtrlP templates/<cr> map <leader>gj :CtrlP static/js/<cr> +" Fugative +" https://github.com/tpope/vim-fugitive +" +" Commands: +" Gedit +" Gsplit +" Gvsplit +" GStatus +" Gblame +" Gmove +" Gremove +" Ggrep +" Gwrite +" Gbrowse + " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>a :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 autocmd FileType python map <buffer> <leader>M :call Pep8()<CR>:cw<CR> " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp let g:SuperTabDefaultCompletionType = "context" let g:SuperTabContextDefaultCompletionType = "<c-n>" let g:SuperTabLongestHighlight = 1 let g:SuperTabMidWordCompletion = 1 " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " 
bundle/snipmate/after/plugin/snipmate ino <silent> <leader>, <c-r>=TriggerSnippet()<cr> snor <silent> <leader>, <esc>i<right><c-r>=TriggerSnippet()<cr> ino <silent> <leader>\< <c-r>=BackwardsSnippet()<cr> snor <silent> <leader>\< <esc>i<right><c-r>=BackwardsSnippet()<cr> ino <silent> <leader>n <c-r>=ShowAvailableSnips()<cr> " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pyflakes " http://www.vim.org/scripts/script.php?script_id=3161 " default config for underlines of syntax errors in gvim let g:pyflakes_use_quickfix = 0 " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist -l " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 " Tagbar " https://github.com/majutsushi/tagbar/ " Show ctags info in the sidebar nmap <silent> <leader>L :TagbarToggle<CR> " function! CustomCodeAssistInsertMode() " call RopeCodeAssistInsertMode() " if pumvisible() " return "\<C-L>\<Down>" " else " return '' " endif " endfunction " " function! 
TabWrapperComplete() " let cursyn = synID(line('.'), col('.') - 1, 1) " if pumvisible() " return "\<C-Y>" " endif " if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 " return "\<Tab>" " else " return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" " endif " endfunction " " inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " vim-makegreen && vim-nosecompiler " unit testing python code in during editing " I use files in the same dir test_xxxx.* " if we're already on the test_xxx.py file, just rerun current test file " function MakeArgs() " if empty(matchstr(expand('%'), 'test_')) " " if no test_ on the filename, then add it to run tests " let make_args = 'test_%' " else " let make_args = '%' " endif " " :call MakeGreen(make_args) " endfunction " " autocmd FileType python map <buffer> <leader>t :call MakeArgs()<CR> " " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>) " javascript folding function! JavaScriptFold() setl foldmethod=syntax setl foldlevelstart=1 syn region foldBraces start=/{/ end=/}/ transparent fold keepend extend function! FoldText() return substitute(getline(v:foldstart), '{.*', '{...}', '') endfunction setl foldtext=FoldText() endfunction " Clean all end of line extra whitespace with ,S " Credit: voyeg3r https://github.com/mitechie/pyvim/issues/#issue/1 " deletes excess space but maintains the list of jumps unchanged " for more details see: h keepjumps fun! 
CleanExtraSpaces() let save_cursor = getpos(".") let old_query = getreg('/') :%s/\s\+$//e call setpos('.', save_cursor) call setreg('/', old_query) endfun map <silent><leader>S <esc>:keepjumps call CleanExtraSpaces()<cr> diff --git a/bundle_list b/bundle_list index c12c725..44f3128 100644 --- a/bundle_list +++ b/bundle_list @@ -1,28 +1,29 @@ http://github.com/sukima/xmledit.git http://github.com/vim-scripts/mako.vim.git http://github.com/vim-scripts/LustyJuggler.git http://github.com/tomtom/tcomment_vim.git http://github.com/vim-scripts/pep8--Driessen.git http://github.com/vim-scripts/jpythonfold.vim.git http://github.com/vim-scripts/pydoc.vim.git http://github.com/tsaleh/vim-supertab.git http://github.com/msanders/snipmate.vim.git http://github.com/vim-scripts/surround.vim.git http://github.com/vim-scripts/Gist.vim.git http://github.com/scrooloose/nerdtree.git #http://github.com/vim-scripts/pylint.vim.git http://github.com/vim-scripts/nginx.vim.git #git://github.com/mitechie/pyflakes-pathogen.git #http://github.com/kevinw/pyflakes-vim.git https://github.com/nvie/vim-flake8.git http://github.com/tomtom/tlib_vim.git https://github.com/ap/vim-css-color.git https://github.com/Bogdanp/quicksilver.vim.git git://github.com/majutsushi/tagbar https://github.com/bolasblack/csslint.vim.git https://github.com/pangloss/vim-javascript.git https://github.com/yui/vim-yui3.git https://github.com/Lokaltog/vim-powerline http://github.com/depuracao/vim-darkdevel.git https://github.com/vim-scripts/jshint.vim.git https://github.com/kien/ctrlp.vim +https://github.com/tpope/vim-fugitive
mitechie/pyvim
0ed90067c67096093aeaad1a04244edc429336fd
Try out the ctrlp plugin
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index e991524..dabbdfd 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,5 +1,10 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =3 +let g:netrw_dirhist_cnt =8 let g:netrw_dirhist_1='/home/rharding/src/bookie/bookie/lib/python2.7/site-packages/PasteDeploy-1.5.0-py2.7.egg-info' let g:netrw_dirhist_2='/home/rharding/src/convoy/convoy' let g:netrw_dirhist_3='/home/rharding/launchpad/lp-branches/watch_jsbuild/lib/lp/app/javascript/anim' +let g:netrw_dirhist_4='/home/rharding/src/docs/minime' +let g:netrw_dirhist_5='/home/rharding/src/bookie/bookie/bookie/static/js/bookie' +let g:netrw_dirhist_6='/home/rharding/configs/pyvim/.vim/bundle/jshint.vim/plugin' +let g:netrw_dirhist_7='/home/rharding/src/bookie/bookie/jsdoc' +let g:netrw_dirhist_8='/etc/apt/sources.list.d' diff --git a/.vimrc b/.vimrc index df36e9d..0ec7afd 100644 --- a/.vimrc +++ b/.vimrc @@ -1,630 +1,605 @@ " http://github.com/mitechie/pyvim " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,q - reformat text paragraph " ,s - toggle spellcheck " ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " ,t - collapse/fold html tag " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out 
the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ " bootstrap the pathogen part of the config right away filetype off call pathogen#runtime_append_all_bundles() call pathogen#helptags() " Highlight end of line whitespace. 
" match WhitespaceEOL /\s\+$/ au InsertEnter * match WhitespaceEOL /\s\+$/ au InsertLeave * match WhitespaceEOL /\s\+$/ " make sure our whitespace matching is setup before we do colorscheme tricks autocmd ColorScheme * highlight WhitespaceEOL ctermbg=red guibg=red " now proceed as usual syntax on " syntax highlighing filetype on " try to detect filetypes filetype plugin indent on " enable loading indent file for filetype " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font " set guifont=Envy\ Code\ R\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme lucius colorscheme twilight colorscheme aldmeris " To set the toolbars off (icons on top of the screen) set guioptions-=T " Try to keep backups across sessions set undodir=~/.vim/backups set undofile else set background=dark " adapt colors for dark background set t_Co=256 colorscheme lucius colorscheme twilight endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=78 " Try this out to see how textwidth helps set ch=1 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document 
sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... set statusline=%F%m%r%h%w\ [FORMAT=%{&ff}]\ [TYPE=%Y]\ [ASCII=\%03.3b]\ [HEX=\%02.2B]\ [POS=%04l,%04v][%p%%]\ [LEN=%L] hi StatusLine guifg=#fcf4ba guibg=#333333 hi StatusLineNC guifg=#808080 guibg=#333333 " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " auto save when focus is lost au FocusLost * :wa " ================================================== " Config Specific Settings " ================================================== " If we're running in vimdiff then tweak out settings a bit if &diff set nospell endif " ================================================== " Basic Maps " ================================================== " " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> -" ,v brings up my .vimrc +" ,VV brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) -map <leader>v :sp ~/.vimrc<CR><C-W>_ -map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> +map <leader>V :sp ~/.vimrc<CR><C-W>_ +map <silent> <leader>VV :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR>:cw<CR> " quick insertion of a newline nmap <CR> 
o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " setup a custom dict for spelling " zg = add word to dict " zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> nmap <leader>l :lopen<CR> nmap <leader>ll :lclose<CR> nmap <leader>ln :lN<CR> nmap <leader>lp :lN<CR> " for when we forget to use sudo to open/edit a file cmap w!! w !sudo tee % >/dev/null nnoremap <leader>q gqap " Scroll the viewport 3 lines vs just 1 line at a time nnoremap <C-e> 3<C-e> nnoremap <C-y> 3<C-y> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h -" Hints for other movements -" <c-w><c-r> rotate window to next spot -" <c-w><c-x> swap window with current one - " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> -" mapping to make movements operate on 1 screen line in wrap mode -" function! ScreenMovement(movement) -" if &wrap -" return "g" . 
a:movement -" else -" return a:movement -" endif -" endfunction -" onoremap <silent> <expr> j ScreenMovement("j") -" onoremap <silent> <expr> k ScreenMovement("k") -" onoremap <silent> <expr> 0 ScreenMovement("0") -" onoremap <silent> <expr> ^ ScreenMovement("^") -" onoremap <silent> <expr> $ ScreenMovement("$") -" nnoremap <silent> <expr> j ScreenMovement("j") -" nnoremap <silent> <expr> k ScreenMovement("k") -" nnoremap <silent> <expr> 0 ScreenMovement("0") -" nnoremap <silent> <expr> ^ ScreenMovement("^") -" nnoremap <silent> <expr> $ ScreenMovement("$") -" - " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching set smartcase " if searching and search contains upper case, make case sensitive search nmap <silent> <C-N> :silent noh<CR> " Search for potentially strange non-ascii characters map <leader>u :match Error /[\x7f-\xff]/<CR> -" Clean all end of line extra whitespace with ,S -" Credit: voyeg3r https://github.com/mitechie/pyvim/issues/#issue/1 -" deletes excess space but maintains the list of jumps unchanged -" for more details see: h keepjumps -fun! CleanExtraSpaces() - let save_cursor = getpos(".") - let old_query = getreg('/') - :%s/\s\+$//e - call setpos('.', save_cursor) - call setreg('/', old_query) -endfun -map <silent><leader>S <esc>:keepjumps call CleanExtraSpaces()<cr> - " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t set completeopt+=menuone,longest -let g:SuperTabDefaultCompletionType = "context" -let g:SuperTabContextDefaultCompletionType = "<c-n>" -let g:SuperTabLongestHighlight = 1 -let g:SuperTabMidWordCompletion = 1 - - " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on -autocmd BufEnter * lcd %:p:h +" autocmd BufEnter * lcd %:p:h +" Trying out this trick to get cwd tricks +cnoremap %% <C-R>=expand('%:h').'/'<cr> +map <leader>e :edit %% +map <leader>v :view %% " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufReadPost quickfix map <buffer> <silent> <CR> :.cc <CR> :ccl au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" " au BufRead *.py compiler nose " au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m au BufRead *.py set tags=tags-py;/ " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen - -au BufRead *.js set textwidth=78 -au BufRead *.js set tags=tags-js;/ -au BufRead *.js set makeprg=/usr/bin/jslint\ --maxlen=78\ --goodparts\ --nomen\ --indent=4\ % -au BufRead *.js set errorformat=%-P%f, - \%-G/*jslint\ %.%#*/, - \%*[\ ]%n\ %l\\,%c:\ %m, - \%-G\ \ \ \ %.%#, - \%-GNo\ errors\ found., - \%-Q +au FileType javascript set errorformat=%f:\ line\ %l\\,\ col\ %c\\,\ %m +au FileType javascript set makeprg=jshint\ % +au FileType javascript set textwidth=78 +au FileType javascript set tags=tags-js;/ autocmd BufRead,BufNewFile *.json set filetype=json command Js silent %!jp command Jc silent %!jcompress autocmd FileType json Js " ================================================== " CSS " ================================================== " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " enable html tag folding with ,f nnoremap <leader>f Vatzf " ================================================== " Git Tricks " ================================================== " Show the diff in the preview window of the commit during git commit autocmd FileType gitcommit DiffGitCached | wincmd p " 
================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " mako.vim " http://www.vim.org/scripts/script.php?script_id=2663 " syntax support for mako code " ================================================== " Plugins " ================================================== +" CtrlP +" https://github.com/kien/ctrlp.vim +let g:ctrlp_working_path_mode = 0 +let g:ctrlp_custom_ignore = { + \ 'dir': '\.git$\|\.hg$\|\.svn\|\.bzr$\|develop-eggs$\|site-packages', + \ 'file': '\.pyc$|\.exe$\|\.so$\|\.dll$\|\.swp$', + \ 'link': 'download-cache|eggs|yui', + \ } + +map <leader>gt :CtrlP templates/<cr> +map <leader>gj :CtrlP static/js/<cr> + " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>a :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 autocmd FileType python map <buffer> <leader>M :call Pep8()<CR>:cw<CR> " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp +let g:SuperTabDefaultCompletionType = "context" +let g:SuperTabContextDefaultCompletionType = "<c-n>" +let g:SuperTabLongestHighlight = 1 +let g:SuperTabMidWordCompletion = 1 " 
SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " bundle/snipmate/after/plugin/snipmate ino <silent> <leader>, <c-r>=TriggerSnippet()<cr> snor <silent> <leader>, <esc>i<right><c-r>=TriggerSnippet()<cr> ino <silent> <leader>\< <c-r>=BackwardsSnippet()<cr> snor <silent> <leader>\< <esc>i<right><c-r>=BackwardsSnippet()<cr> ino <silent> <leader>n <c-r>=ShowAvailableSnips()<cr> " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pyflakes " http://www.vim.org/scripts/script.php?script_id=3161 " default config for underlines of syntax errors in gvim -" let g:pyflakes_use_quickfix = 0 - -" Syntastic -let g:syntastic_python_checker = 'pyflakes' +let g:pyflakes_use_quickfix = 0 " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist -l " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 - -" TwitVim -" http://vim.sourceforge.net/scripts/script.php?script_id=2204 -" Twitter/Identica client for vim -" F7/F8 for loading identica/twitter -"source ~/.vim/twitvim.vim - " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 " Tagbar " https://github.com/majutsushi/tagbar/ " Show ctags info in the sidebar -nmap <silent> <leader>l :TagbarToggle<CR> +nmap <silent> <leader>L :TagbarToggle<CR> " function! CustomCodeAssistInsertMode() " call RopeCodeAssistInsertMode() " if pumvisible() " return "\<C-L>\<Down>" " else " return '' " endif " endfunction " " function! 
TabWrapperComplete() " let cursyn = synID(line('.'), col('.') - 1, 1) " if pumvisible() " return "\<C-Y>" " endif " if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 " return "\<Tab>" " else " return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" " endif " endfunction " " inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " vim-makegreen && vim-nosecompiler " unit testing python code in during editing " I use files in the same dir test_xxxx.* " if we're already on the test_xxx.py file, just rerun current test file " function MakeArgs() " if empty(matchstr(expand('%'), 'test_')) " " if no test_ on the filename, then add it to run tests " let make_args = 'test_%' " else " let make_args = '%' " endif " " :call MakeGreen(make_args) " endfunction " " autocmd FileType python map <buffer> <leader>t :call MakeArgs()<CR> " " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>) " javascript folding function! JavaScriptFold() setl foldmethod=syntax setl foldlevelstart=1 syn region foldBraces start=/{/ end=/}/ transparent fold keepend extend function! FoldText() return substitute(getline(v:foldstart), '{.*', '{...}', '') endfunction setl foldtext=FoldText() endfunction + +" Clean all end of line extra whitespace with ,S +" Credit: voyeg3r https://github.com/mitechie/pyvim/issues/#issue/1 +" deletes excess space but maintains the list of jumps unchanged +" for more details see: h keepjumps +fun! 
CleanExtraSpaces() + let save_cursor = getpos(".") + let old_query = getreg('/') + :%s/\s\+$//e + call setpos('.', save_cursor) + call setreg('/', old_query) +endfun +map <silent><leader>S <esc>:keepjumps call CleanExtraSpaces()<cr> + diff --git a/bundle_list b/bundle_list index 4efe9af..c12c725 100644 --- a/bundle_list +++ b/bundle_list @@ -1,26 +1,28 @@ http://github.com/sukima/xmledit.git http://github.com/vim-scripts/mako.vim.git http://github.com/vim-scripts/LustyJuggler.git http://github.com/tomtom/tcomment_vim.git http://github.com/vim-scripts/pep8--Driessen.git http://github.com/vim-scripts/jpythonfold.vim.git http://github.com/vim-scripts/pydoc.vim.git http://github.com/tsaleh/vim-supertab.git http://github.com/msanders/snipmate.vim.git http://github.com/vim-scripts/surround.vim.git http://github.com/vim-scripts/Gist.vim.git http://github.com/scrooloose/nerdtree.git #http://github.com/vim-scripts/pylint.vim.git http://github.com/vim-scripts/nginx.vim.git #git://github.com/mitechie/pyflakes-pathogen.git #http://github.com/kevinw/pyflakes-vim.git https://github.com/nvie/vim-flake8.git http://github.com/tomtom/tlib_vim.git https://github.com/ap/vim-css-color.git https://github.com/Bogdanp/quicksilver.vim.git git://github.com/majutsushi/tagbar https://github.com/bolasblack/csslint.vim.git https://github.com/pangloss/vim-javascript.git https://github.com/yui/vim-yui3.git https://github.com/Lokaltog/vim-powerline http://github.com/depuracao/vim-darkdevel.git +https://github.com/vim-scripts/jshint.vim.git +https://github.com/kien/ctrlp.vim
mitechie/pyvim
269554ef7d68ffff44f17ff4cb8e290d4bf13586
Add moin syntax
diff --git a/.vim/after/syntax/moin.vim b/.vim/after/syntax/moin.vim new file mode 100644 index 0000000..36df06d --- /dev/null +++ b/.vim/after/syntax/moin.vim @@ -0,0 +1,143 @@ +" Vim syntax file +" Language: MoinMoin +" Maintainer: Michael Lamertz <[email protected]> +" Contributors: David O'Callaghan <[email protected]> +" Tony Garland <[email protected]> +" Ingo Karkat <[email protected]> +" Last Change: 27-Jan-2008 +" 27-Jan-2008 Incorporated syntax changes of MoinMoin 1.6; the +" syntax version is configurable via g:moin_version (globally) or +" b:moin_version (for the current buffer only). +" Small BFs: 'elsif', 'did_acedb_...'. +" 22-Aug-2007 Added moinEmbedded highlighting. +" 08-May-2007 Added contains=@NoSpell for URLs, Email, pragmas and +" (configurable via g:moin_spell_wikiwords) WikiWords. + +" Bugs: Parsing of mixed bold-italic not yet implemented +" Tables not yet implemented + +if version < 600 + syntax clear +elseif exists("b:current_syntax") + finish +endif + +function! 
s:GetMoinVersion() + if exists('b:moin_version') + return b:moin_version + elseif exists('g:moin_version') + return g:moin_version + else + return 999 + endif +endfunction + +" headings +syn match moinHeader /^\(=\{1,5}\).*\1$/ + +" inline markup +syn match moinItalic /\('\{2}\)[^']\+\1/ +syn match moinBold /\('\{3}\)[^']\+\1/ +syn match moinBoldItalic /\('\{5}\)[^']\+\1/ +syn match moinUnderline /\(_\{2}\).\{-}\1/ +syn match moinSubscript /\(,\{2}\).\{-}\1/ +syn match moinSuperscript /\(\^\).\{-}\1/ +syn match moinTypewriter /\(`\).\{-}\1/ +if s:GetMoinVersion() < 160 + syn match moinMacro /\[\{2}.\{-}\]\{2}/ +else + syn match moinMacro /<\{2}.\{-}>\{2}/ +endif + +" Codeblocks +syn region moinPreformatted start=/{{{/ end=/}}}/ + +" Links +if exists('g:moin_spell_wikiwords') + syn match moinWikiWord /\(\w\+:\)\?\u[a-z0-9]\+\u[a-z0-9]\+\(\u[a-z0-9]\+\)*/ + syn match moinSubLink /\(\w\+\|\.\.\)\?\// nextgroup=moinWikiWord +else + syn match moinWikiWord /\(\w\+:\)\?\u[a-z0-9]\+\u[a-z0-9]\+\(\u[a-z0-9]\+\)*/ contains=@NoSpell + syn match moinSubLink /\(\w\+\|\.\.\)\?\// nextgroup=moinWikiWord contains=@NoSpell +endif +syn match moinNormalURL /\w\+:\/\/\S\+/ contains=@NoSpell +syn match moinEmail /\S\+@\S\+/ contains=@NoSpell +if s:GetMoinVersion() < 160 + syn match moinBracketLink /\[[^[\]]\+\]/ contains=@NoSpell + syn match moinEmbedded /attachment:\S\+/ contains=@NoSpell + syn match moinEmbedded /inline:\S\+/ contains=@NoSpell +else + syn match moinBracketLink /\[\{2}.\{-}\]\{2}/ contains=@NoSpell + syn match moinEmbedded /{\{2}[^{].\{-}}\{2}/ contains=@NoSpell +endif + + +" lists +syn match moinBulletList /^\(\s\+\)\zs\*\ze\s/ +syn match moinNumberedList /^\(\s\+\)\zs1\.\ze\s/ +syn match moinAlphalist /^\(\s\+\)\zsa\.\ze\s/ +syn match moinRomanlist /^\(\s\+\)\zsi\.\ze\s/ +syn match moinBigromanlist /^\(\s\+\)\zsI\.\ze\s/ +syn match moinDescriptionlist /^\(\s\+\)\zs.\{-}::\ze\s/ + +" rules +syn match moinRule /^-\{4,}/ + +" comments/pragmas +syn match moinComment 
/^##.*$/ +syn match moinPragma /^#\w\+.*$/ contains=@NoSpell + +" variables +syn match moinVariable /@\w\+@/ + + +" Define the default highlighting. +" For version 5.7 and earlier: only when not done already +" For version 5.8 and later: only when an item doesn't have highlighting yet +if version >= 508 || !exists("did_moin_syn_inits") + if version < 508 + let did_moin_syn_inits = 1 + command -nargs=+ HiLink hi link <args> + else + command -nargs=+ HiLink hi def link <args> + endif + + HiLink moinHeader Function + + HiLink moinItalic Identifier + HiLink moinBold Identifier + HiLink moinBoldItalic Identifier + HiLink moinUnderline Identifier + HiLink moinSubscript Identifier + HiLink moinSuperscript Identifier + HiLink moinTypewriter Identifier + HiLink moinMacro Define + HiLink moinPragma Define + HiLink moinEmbedded Include + + HiLink moinPreformatted String + + HiLink moinWikiWord Statement + HiLink moinBracketLink Statement + HiLink moinNormalURL Statement + HiLink moinSubLink Statement + HiLink moinInterLink Statement + HiLink moinEmail Statement + + HiLink moinBulletList Type + HiLink moinNumberedList Type + HiLink moinAlphalist Type + HiLink moinRomanlist Type + HiLink moinBigromanlist Type + HiLink moinDescriptionlist Type + + HiLink moinRule Special + + HiLink moinComment Comment + + HiLink moinVariable Macro + + delcommand HiLink +endif + +let b:current_syntax = "moin" diff --git a/.vimrc b/.vimrc index fcd7a8e..df36e9d 100644 --- a/.vimrc +++ b/.vimrc @@ -1,631 +1,630 @@ " http://github.com/mitechie/pyvim " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - 
alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,q - reformat text paragraph " ,s - toggle spellcheck " ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " ,t - collapse/fold html tag " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ " bootstrap the pathogen part of the config right away filetype off call pathogen#runtime_append_all_bundles() call pathogen#helptags() " Highlight end of line whitespace. 
" match WhitespaceEOL /\s\+$/ au InsertEnter * match WhitespaceEOL /\s\+$/ au InsertLeave * match WhitespaceEOL /\s\+$/ " make sure our whitespace matching is setup before we do colorscheme tricks autocmd ColorScheme * highlight WhitespaceEOL ctermbg=red guibg=red " now proceed as usual syntax on " syntax highlighing filetype on " try to detect filetypes filetype plugin indent on " enable loading indent file for filetype " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font " set guifont=Envy\ Code\ R\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme lucius colorscheme twilight colorscheme aldmeris " To set the toolbars off (icons on top of the screen) set guioptions-=T " Try to keep backups across sessions set undodir=~/.vim/backups set undofile else set background=dark " adapt colors for dark background set t_Co=256 - colorscheme xorium colorscheme lucius colorscheme twilight endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=78 " Try this out to see how textwidth helps set ch=1 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last 
lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... set statusline=%F%m%r%h%w\ [FORMAT=%{&ff}]\ [TYPE=%Y]\ [ASCII=\%03.3b]\ [HEX=\%02.2B]\ [POS=%04l,%04v][%p%%]\ [LEN=%L] hi StatusLine guifg=#fcf4ba guibg=#333333 hi StatusLineNC guifg=#808080 guibg=#333333 " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " auto save when focus is lost au FocusLost * :wa " ================================================== " Config Specific Settings " ================================================== " If we're running in vimdiff then tweak out settings a bit if &diff set nospell endif " ================================================== " Basic Maps " ================================================== " " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR>:cw<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap 
<leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " setup a custom dict for spelling " zg = add word to dict " zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> nmap <leader>l :lopen<CR> nmap <leader>ll :lclose<CR> nmap <leader>ln :lN<CR> nmap <leader>lp :lN<CR> " for when we forget to use sudo to open/edit a file cmap w!! w !sudo tee % >/dev/null nnoremap <leader>q gqap " Scroll the viewport 3 lines vs just 1 line at a time nnoremap <C-e> 3<C-e> nnoremap <C-y> 3<C-y> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " Hints for other movements " <c-w><c-r> rotate window to next spot " <c-w><c-x> swap window with current one " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode " function! ScreenMovement(movement) " if &wrap " return "g" . 
a:movement " else " return a:movement " endif " endfunction " onoremap <silent> <expr> j ScreenMovement("j") " onoremap <silent> <expr> k ScreenMovement("k") " onoremap <silent> <expr> 0 ScreenMovement("0") " onoremap <silent> <expr> ^ ScreenMovement("^") " onoremap <silent> <expr> $ ScreenMovement("$") " nnoremap <silent> <expr> j ScreenMovement("j") " nnoremap <silent> <expr> k ScreenMovement("k") " nnoremap <silent> <expr> 0 ScreenMovement("0") " nnoremap <silent> <expr> ^ ScreenMovement("^") " nnoremap <silent> <expr> $ ScreenMovement("$") " " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching set smartcase " if searching and search contains upper case, make case sensitive search nmap <silent> <C-N> :silent noh<CR> " Search for potentially strange non-ascii characters map <leader>u :match Error /[\x7f-\xff]/<CR> " Clean all end of line extra whitespace with ,S " Credit: voyeg3r https://github.com/mitechie/pyvim/issues/#issue/1 " deletes excess space but maintains the list of jumps unchanged " for more details see: h keepjumps fun! CleanExtraSpaces() let save_cursor = getpos(".") let old_query = getreg('/') :%s/\s\+$//e call setpos('.', save_cursor) call setreg('/', old_query) endfun map <silent><leader>S <esc>:keepjumps call CleanExtraSpaces()<cr> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t set completeopt+=menuone,longest let g:SuperTabDefaultCompletionType = "context" let g:SuperTabContextDefaultCompletionType = "<c-n>" let g:SuperTabLongestHighlight = 1 let g:SuperTabMidWordCompletion = 1 " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufReadPost quickfix map <buffer> <silent> <CR> :.cc <CR> :ccl au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" " au BufRead *.py compiler nose " au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m au BufRead *.py set tags=tags-py;/ " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set textwidth=78 au BufRead *.js set tags=tags-js;/ au BufRead *.js set makeprg=/usr/bin/jslint\ --maxlen=78\ --goodparts\ --nomen\ --indent=4\ % au BufRead *.js set errorformat=%-P%f, \%-G/*jslint\ %.%#*/, \%*[\ ]%n\ %l\\,%c:\ %m, \%-G\ \ \ \ %.%#, \%-GNo\ errors\ found., \%-Q autocmd BufRead,BufNewFile *.json set filetype=json command Js silent %!jp command Jc silent %!jcompress autocmd FileType json Js " ================================================== " CSS " ================================================== " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " enable html tag folding with ,f nnoremap <leader>f Vatzf " ================================================== " Git Tricks " ================================================== " Show the diff in the preview window of the commit during git commit autocmd FileType gitcommit DiffGitCached | wincmd p " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' 
" press '>' twice it will complete and cursor in the middle " mako.vim " http://www.vim.org/scripts/script.php?script_id=2663 " syntax support for mako code " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>a :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 autocmd FileType python map <buffer> <leader>M :call Pep8()<CR>:cw<CR> " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " bundle/snipmate/after/plugin/snipmate ino <silent> <leader>, <c-r>=TriggerSnippet()<cr> snor <silent> <leader>, <esc>i<right><c-r>=TriggerSnippet()<cr> ino <silent> <leader>\< <c-r>=BackwardsSnippet()<cr> snor <silent> <leader>\< <esc>i<right><c-r>=BackwardsSnippet()<cr> ino <silent> <leader>n <c-r>=ShowAvailableSnips()<cr> " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pyflakes " http://www.vim.org/scripts/script.php?script_id=3161 " default config for underlines of syntax errors in gvim " let g:pyflakes_use_quickfix = 0 " Syntastic let g:syntastic_python_checker = 
'pyflakes' " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist -l " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter "source ~/.vim/twitvim.vim " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 " Tagbar " https://github.com/majutsushi/tagbar/ " Show ctags info in the sidebar nmap <silent> <leader>l :TagbarToggle<CR> " function! CustomCodeAssistInsertMode() " call RopeCodeAssistInsertMode() " if pumvisible() " return "\<C-L>\<Down>" " else " return '' " endif " endfunction " " function! TabWrapperComplete() " let cursyn = synID(line('.'), col('.') - 1, 1) " if pumvisible() " return "\<C-Y>" " endif " if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 " return "\<Tab>" " else " return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" " endif " endfunction " " inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " vim-makegreen && vim-nosecompiler " unit testing python code in during editing " I use files in the same dir test_xxxx.* " if we're already on the test_xxx.py file, just rerun current test file " function MakeArgs() " if empty(matchstr(expand('%'), 'test_')) " " if no test_ on the filename, then add it to run tests " let make_args = 'test_%' " else " let make_args = '%' " endif " " :call MakeGreen(make_args) " endfunction " " autocmd FileType python map <buffer> <leader>t :call MakeArgs()<CR> " " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep 
within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>) " javascript folding function! JavaScriptFold() setl foldmethod=syntax setl foldlevelstart=1 syn region foldBraces start=/{/ end=/}/ transparent fold keepend extend function! FoldText() return substitute(getline(v:foldstart), '{.*', '{...}', '') endfunction setl foldtext=FoldText() endfunction
mitechie/pyvim
f2c0b8bec824d69a00b6e0b97fa4654395ce6ba3
Bunch of small tweaks and updates
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index f49d62a..e991524 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,7 +1,5 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =5 -let g:netrw_dirhist_1='/home/rharding/src/node' -let g:netrw_dirhist_2='/home/rharding/.offlineimap' -let g:netrw_dirhist_3='/home/rharding/src/docs/networks' -let g:netrw_dirhist_4='/home/rharding/launchpad/lp-branches/devel/lib/lp/app/templates' -let g:netrw_dirhist_5='/home/rharding/src/python-oops-tools' +let g:netrw_dirhist_cnt =3 +let g:netrw_dirhist_1='/home/rharding/src/bookie/bookie/lib/python2.7/site-packages/PasteDeploy-1.5.0-py2.7.egg-info' +let g:netrw_dirhist_2='/home/rharding/src/convoy/convoy' +let g:netrw_dirhist_3='/home/rharding/launchpad/lp-branches/watch_jsbuild/lib/lp/app/javascript/anim' diff --git a/.vim/colors/darkdevel.vim b/.vim/colors/darkdevel.vim deleted file mode 100644 index 02a506f..0000000 --- a/.vim/colors/darkdevel.vim +++ /dev/null @@ -1,217 +0,0 @@ -" Vim color scheme -" -" Name: darkdevel.vim -" Maintainer: Hallison Batista <[email protected]> -" Last Change: 26 Fev 2009 -" License: Public Domain -" Version: 1.0.0 - -highlight clear - -if exists("syntax_on") - syntax reset -endif - -let g:colors_name = "darkdevel" - -" General settings -" ================ -set background=dark -set cursorline -"set cursorcolumn - -" Cursor style -" ============ - highlight Cursor ctermfg=NONE guifg=#000000 ctermbg=NONE guibg=#FFFFFF - "CursorIM - highlight CursorColumn ctermfg=NONE guifg=NONE ctermbg=DarkGray guibg=#0F0F0F - highlight CursorLine ctermfg=NONE guifg=NONE ctermbg=DarkGray guibg=#0F0F0F - -" Directory style -" =============== - "highlight Directory ctermbg=NONE guifg=NONE ctermbg=NONE guibg=NONE cterm=bold gui=underline - -" Diff text style -" =============== - highlight DiffAdd ctermfg=DarkGreen guifg=#32BE32 ctermbg=NONE guibg=NONE - "highlight DiffChange - highlight DiffDelete ctermfg=DarkRed guifg=#BE3232 ctermbg=NONE guibg=NONE - 
"highlight DiffText - -" Text and message style -" ====================== - "highlight ErrorMsg - "highlight VertSplit - "highlight Folded - "highlight FoldColumn - "highlight SignColumn - "highlight IncSearch - highlight LineNr ctermfg=DarkGray guifg=#777777 ctermbg=DarkGray guibg=#0F0F0F - "highlight MatchParen - "highlight ModeMsg - "highlight MoreMsg - highlight NonText ctermfg=Gray guifg=#777777 ctermbg=NONE guibg=#111111 - highlight Normal ctermfg=Gray guifg=#CCCCCC ctermbg=NONE guibg=#111111 - -" Popup menu style -" ================ - highlight Pmenu ctermfg=Gray guifg=#FFFFFF ctermbg=DarkGray guibg=#0F0F0F - highlight PmenuSel ctermfg=White guifg=#0F0F0F ctermbg=Gray guibg=#FFFFFF - highlight PmenuSbar ctermfg=NONE guifg=NONE ctermbg=DarkGray guibg=#777777 - highlight PmenuThumb ctermfg=Gray guifg=#CCCCCC ctermbg=NONE guibg=NONE - - "highlight Question - "highlight Search - "highlight SpecialKey - -" Spelling style -" ============== - "highlight SpellBad - "highlight SpellCap - "highlight SpellLocal - "highlight SpellRare - -" Status style -" ============ - highlight StatusLine ctermfg=DarkGray guifg=#0F0F0F ctermbg=Gray guibg=#777777 cterm=bold gui=bold - highlight StatusLineNC ctermfg=DarkGray guifg=#777777 ctermbg=DarkGray guibg=#0F0F0F cterm=bold gui=bold - - "highlight TabLine - "highlight TabLineFill - "highlight TabLineSel - - highlight Title ctermfg=Gray guifg=#0F0F0F - highlight Visual ctermfg=NONE guifg=NONE ctermbg=DarkBlue guibg=#505064 - highlight VisualNOS ctermfg=NONE guifg=NONE ctermbg=DarkGreen guibg=#506450 - highlight WarningMsg ctermfg=White guifg=#FFFFFF ctermbg=DarkRed guibg=#964B4B - highlight WildMenu ctermfg=NONE guifg=#777777 ctermbg=DarkGray guibg=#0F0F0F - -" Win32 specific style -" -------------------- - "highlight Menu - "highlight Scrollbar - "highlight Tooltip - -" Syntax style -" ============ - -" Style for constants -" ------------------- - highlight Constant ctermfg=DarkRed guifg=#6496C8 - highlight String 
ctermfg=DarkGreen guifg=#64C896 - highlight Character ctermfg=DarkBlue guifg=#6496C8 - highlight Number ctermfg=DarkGreen guifg=#64C896 - highlight Boolean ctermfg=DarkBlue guifg=#6496C8 - highlight Float ctermfg=DarkGreen guifg=#64C896 - - highlight Comment ctermfg=DarkGray guifg=#646464 ctermbg=NONE guibg=NONE - -" Style for identifier and variable names -" ---------------------------------------- - highlight Identifier ctermfg=DarkCyan guifg=#6496C8 gui=NONE - highlight Function ctermfg=Yellow guifg=#FFC864 gui=NONE - -" Style for statements -" --------------------- - "Statement any statement - highlight Statement ctermfg=Brown guifg=#C89664 gui=NONE - highlight link Conditional Statement - highlight link Repeat Statement - highlight link Label Statement - highlight Operator ctermfg=Green guifg=#64C864 - highlight Keyword ctermfg=DarkRed guifg=#C86432 - highlight link Exception Statement - -" Style for generic preprocessor -" ------------------------------ - highlight PreProc ctermfg=Gray guifg=#DCDCDC ctermbg=NONE guibg=NONE - highlight Include ctermfg=DarkRed guifg=#C86432 ctermbg=NONE guibg=NONE - highlight link Define Include - highlight link Macro Include - highlight link PreCondit Include - -" Style for types and objects -" --------------------------- - highlight Type ctermfg=DarkRed guifg=#DC4B32 gui=NONE - highlight link StorageClass Type - highlight link Structure Type - highlight link Typedef Type - - - highlight Error guifg=#000000 guibg=#FF0000 - highlight Search guibg=#FFFF00 - -" Style for special symbols -" ------------------------- - "highlight Special - "highlight SpecialChar - "highlight Tag - "highlight Delimiter - "highlight SpecialComment - "highlight Debug - -" Style for text format -" --------------------- - "highlight Underlined - "highlight Ignore - "highlight Error - "highlight Todo - -" Style for Shell Syntax -" ---------------------- - "highlight shTest - "highlight shCase - "highlight shCaseExSingleQuote - "highlight shCaseEsac - 
"highlight shDo - "highlight shExpr - "highlight shFor - "highlight shIf - "highlight shSubSh - "highlight shComma - "highlight shDerefVarArray - "highlight shDblBrace - "highlight shSpecialVar - "highlight shDblParen - "highlight shCurlyIn - "highlight bashSpecialVariables - "highlight bashStatement - "highlight bashAdminStatement - "highlight kshSpecialVariables - "highlight kshStatement - "highlight shSetIdentifier - "highlight shFunctionTwo - "highlight shFunctionStart - "highlight shFunctionOne - "highlight shDerefPattern - "highlight shDerefEscape - "highlight shDerefPPSleft - "highlight shDerefPPSright - "highlight shCaseEsacSync - "highlight shDoSync - "highlight shForSync - "highlight shIfSync - "highlight shUntilSync - "highlight shWhileSync - -" Style for Ruby Syntax -" --------------------- - highlight rubyBlockParameter guifg=#FFFFFF - highlight rubyClass guifg=#FFFFFF - highlight rubyConstant guifg=#DA4939 - highlight rubyInstanceVariable guifg=#D0D0FF - highlight rubyInterpolation guifg=#519F50 - highlight rubyLocalVariableOrMethod guifg=#D0D0FF - highlight rubyPredefinedConstant guifg=#DA4939 - highlight rubyPseudoVariable guifg=#FFC66D - highlight rubyStringDelimiter guifg=#A5C261 - -" Style for XML and (X)HTML Syntax - highlight xmlTag guifg=#E8BF6A - highlight xmlTagName guifg=#E8BF6A - highlight xmlEndTag guifg=#E8BF6A - - highlight link htmlTag xmlTag - highlight link htmlTagName xmlTagName - highlight link htmlEndTag xmlEndTag - diff --git a/.vim/colors/lucius.vim b/.vim/colors/lucius.vim index 7ca7c05..c6a8fb0 100644 --- a/.vim/colors/lucius.vim +++ b/.vim/colors/lucius.vim @@ -1,259 +1,266 @@ " Lucius vim color file " Maintainer: Jonathan Filip <[email protected]> -" Version: 6.1.0 +" Version: 7.0.0 hi clear if exists("syntax_on") syntax reset endif let colors_name="lucius" " Summary: " Color scheme with dark and light versions (GUI and 256 color terminal). 
- +" " Description: " This color scheme was originally created by combining my favorite parts of " the following color schemes: -" +" " * oceandeep (vimscript #368) " * peaksea (vimscript #760) " * wombat (vimscript #1778) " * moria (vimscript #1464) " * zenburn (vimscript #415) -" +" +" Version 7 has unified the 256 color terminal and GUI versions (the GUI +" version only uses colors available on the 256 color terminal). The overall +" colors were also toned down a little bit (light version is now a light gray +" instead of white and the dark version is slightly lighter) to make it easier +" on the eyes. +" " Version 6+ has been revamped a bit from the original color scheme. If you " prefer the old style, or the 'blue' version, use the 5Final release. Version " 6+ only has a light and dark version. The new version tries to unify some of " the colors and also adds more contrast between text and interface. -" +" " The color scheme is dark, by default. You can change this by setting the " g:lucius_style variable to "light" or "dark". Once the color scheme is " loaded, you can use the commands "LuciusLight" or "LuciusDark" to change " schemes quickly. -" -" Screenshots of the new version (6+): -" +" +" Screenshots of version 7: +" +" * Dark: http://i.imgur.com/ktdFm.png +" * Light: http://i.imgur.com/ndd9A.png +" +" Screenshots of version 6: +" " * Dark: http://i.imgur.com/IzYcB.png " * Light: http://i.imgur.com/kfJcm.png -" -" Screenshots of the old versions (5Final): -" +" +" Screenshots of the version 5Final: +" " * Dark: http://i.imgur.com/z0bDr.png " * Light: http://i.imgur.com/BXDiv.png -" * Blue: http://i.imgur.com/Ea1Gq.png -" +" * Blue: http://i.imgur.com/Ea1Gq.png +" " colorsupport.vim (vimscript #2682) is used to help with mapping the GUI " settings to the 256 terminal colors. 
-" +" " This color scheme also has custom colors defined for the following plugins: " " * vimwiki (vimscript #2226) " * tagbar (vimscript #3465) " " Installation: " Copy the file to your vim colors directory and then do :colorscheme lucius. set background=dark if exists("g:lucius_style") if g:lucius_style == "light" set background=light endif else - let g:lucius_style="dark" + let g:lucius_style = "dark" endif " set colorcolumn=21,37,53,68,86,100 -if g:lucius_style == "dark" || g:lucius_style == "blue" - +if g:lucius_style == "dark" - hi Normal guifg=#e0e0e0 guibg=#202020 ctermfg=253 ctermbg=234 gui=none cterm=none - if g:lucius_style == "blue" - hi Normal guibg=#002b36 - endif + hi Normal guifg=#d7d7d7 guibg=#303030 ctermfg=188 ctermbg=236 gui=none cterm=none - hi Comment guifg=#707070 guibg=NONE ctermfg=240 ctermbg=NONE gui=none cterm=none + hi Comment guifg=#808080 guibg=NONE ctermfg=244 ctermbg=NONE gui=none cterm=none - hi Constant guifg=#e0e090 guibg=NONE ctermfg=187 ctermbg=NONE gui=none cterm=none - hi BConstant guifg=#e0e090 guibg=NONE ctermfg=187 ctermbg=NONE gui=bold cterm=bold + hi Constant guifg=#d7d7af guibg=NONE ctermfg=187 ctermbg=NONE gui=none cterm=none + hi BConstant guifg=#d7d7af guibg=NONE ctermfg=187 ctermbg=NONE gui=bold cterm=bold - hi Identifier guifg=#c0e0a0 guibg=NONE ctermfg=150 ctermbg=NONE gui=none cterm=none - hi BIdentifier guifg=#c0e0a0 guibg=NONE ctermfg=150 ctermbg=NONE gui=bold cterm=bold + hi Identifier guifg=#afd787 guibg=NONE ctermfg=150 ctermbg=NONE gui=none cterm=none + hi BIdentifier guifg=#afd787 guibg=NONE ctermfg=150 ctermbg=NONE gui=bold cterm=bold - hi Statement guifg=#80d0f0 guibg=NONE ctermfg=74 ctermbg=NONE gui=none cterm=none - hi BStatement guifg=#80d0f0 guibg=NONE ctermfg=74 ctermbg=NONE gui=bold cterm=bold + hi Statement guifg=#87d7ff guibg=NONE ctermfg=117 ctermbg=NONE gui=none cterm=none + hi BStatement guifg=#87d7ff guibg=NONE ctermfg=117 ctermbg=NONE gui=bold cterm=bold - hi PreProc guifg=#a0e0d0 guibg=NONE 
ctermfg=115 ctermbg=NONE gui=none cterm=none - hi BPreProc guifg=#a0e0d0 guibg=NONE ctermfg=115 ctermbg=NONE gui=bold cterm=bold + hi PreProc guifg=#87d7af guibg=NONE ctermfg=115 ctermbg=NONE gui=none cterm=none + hi BPreProc guifg=#87d7af guibg=NONE ctermfg=115 ctermbg=NONE gui=bold cterm=bold - hi Type guifg=#a0d0e0 guibg=NONE ctermfg=116 ctermbg=NONE gui=none cterm=none - hi BType guifg=#a0d0e0 guibg=NONE ctermfg=116 ctermbg=NONE gui=bold cterm=bold + hi Type guifg=#87d7d7 guibg=NONE ctermfg=116 ctermbg=NONE gui=none cterm=none + hi BType guifg=#87d7d7 guibg=NONE ctermfg=116 ctermbg=NONE gui=bold cterm=bold - hi Special guifg=#c0a0d0 guibg=NONE ctermfg=182 ctermbg=NONE gui=none cterm=none - hi BSpecial guifg=#c0a0d0 guibg=NONE ctermfg=182 ctermbg=NONE gui=bold cterm=bold + hi Special guifg=#d7afd7 guibg=NONE ctermfg=182 ctermbg=NONE gui=none cterm=none + hi BSpecial guifg=#d7afd7 guibg=NONE ctermfg=182 ctermbg=NONE gui=bold cterm=bold - " == Text Markup == + " ## Text Markup ## hi Underlined guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=underline cterm=underline - hi Error guifg=#e07070 guibg=#503030 ctermfg=167 ctermbg=236 gui=none cterm=none - hi Todo guifg=#e0e090 guibg=#505000 ctermfg=186 ctermbg=NONE gui=none cterm=none - hi MatchParen guifg=bg guibg=#c0e070 ctermfg=bg ctermbg=192 gui=none cterm=bold - hi NonText guifg=#405060 guibg=NONE ctermfg=24 ctermbg=NONE gui=none cterm=none - hi SpecialKey guifg=#406050 guibg=NONE ctermfg=23 ctermbg=NONE gui=none cterm=none - hi Title guifg=#50b0d0 guibg=NONE ctermfg=74 ctermbg=NONE gui=bold cterm=bold - - " == Text Selection == - hi Cursor guifg=bg guibg=fg ctermfg=bg ctermbg=fg gui=none cterm=none - hi CursorIM guifg=bg guibg=fg ctermfg=bg ctermbg=fg gui=none cterm=none - hi CursorColumn guifg=NONE guibg=#484848 ctermfg=NONE ctermbg=237 gui=none cterm=none - hi CursorLine guifg=NONE guibg=#484848 ctermfg=NONE ctermbg=237 gui=none cterm=none - hi Visual guifg=NONE guibg=#205070 ctermfg=NONE ctermbg=24 gui=none 
cterm=none + hi Error guifg=#ff8787 guibg=#870000 ctermfg=210 ctermbg=88 gui=none cterm=none + hi Todo guifg=#d7d75f guibg=#5f5f00 ctermfg=185 ctermbg=58 gui=none cterm=none + hi MatchParen guifg=bg guibg=#afd75f ctermfg=bg ctermbg=149 gui=none cterm=bold + hi NonText guifg=#5f5f87 guibg=NONE ctermfg=60 ctermbg=NONE gui=none cterm=none + hi SpecialKey guifg=#5f875f guibg=NONE ctermfg=65 ctermbg=NONE gui=none cterm=none + hi Title guifg=#5fafd7 guibg=NONE ctermfg=74 ctermbg=NONE gui=bold cterm=bold + + " ## Text Selection ## + hi Cursor guifg=bg guibg=#87afd7 ctermfg=bg ctermbg=110 gui=none cterm=none + hi CursorIM guifg=bg guibg=#87afd7 ctermfg=bg ctermbg=110 gui=none cterm=none + hi CursorColumn guifg=NONE guibg=#444444 ctermfg=NONE ctermbg=238 gui=none cterm=none + hi CursorLine guifg=NONE guibg=#444444 ctermfg=NONE ctermbg=238 gui=none cterm=none + hi Visual guifg=NONE guibg=#005f87 ctermfg=NONE ctermbg=24 gui=none cterm=none hi VisualNOS guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=underline cterm=underline - hi IncSearch guifg=bg guibg=#50d0d0 ctermfg=bg ctermbg=116 gui=none cterm=none - hi Search guifg=bg guibg=#e0a020 ctermfg=bg ctermbg=214 gui=none cterm=none + hi IncSearch guifg=bg guibg=#57d7d7 ctermfg=bg ctermbg=80 gui=none cterm=none + hi Search guifg=bg guibg=#d78700 ctermfg=bg ctermbg=172 gui=none cterm=none " == UI == - hi Pmenu guifg=#000000 guibg=#b0b0b0 ctermfg=bg ctermbg=252 gui=none cterm=none - hi PmenuSel guifg=#e0e0e0 guibg=#205070 ctermfg=fg ctermbg=24 gui=none cterm=none - hi PMenuSbar guifg=bg guibg=#b0b0b0 ctermfg=bg ctermbg=254 gui=none cterm=none - hi PMenuThumb guifg=NONE guibg=#808080 ctermfg=fg ctermbg=244 gui=none cterm=none - hi StatusLine guifg=bg guibg=#b0b0b0 ctermfg=bg ctermbg=252 gui=bold cterm=bold - hi StatusLineNC guifg=#404040 guibg=#b0b0b0 ctermfg=240 ctermbg=252 gui=none cterm=none - hi TabLine guifg=bg guibg=#b0b0b0 ctermfg=bg ctermbg=252 gui=none cterm=none - hi TabLineFill guifg=#404040 guibg=#b0b0b0 ctermfg=240 
ctermbg=252 gui=none cterm=none - hi TabLineSel guifg=#e0e0e0 guibg=#205070 ctermfg=fg ctermbg=24 gui=bold cterm=bold - hi VertSplit guifg=#606060 guibg=#b0b0b0 ctermfg=245 ctermbg=252 gui=none cterm=none - hi Folded guifg=bg guibg=#808080 ctermfg=bg ctermbg=246 gui=none cterm=none - hi FoldColumn guifg=bg guibg=#808080 ctermfg=bg ctermbg=246 gui=none cterm=none - - " == Spelling =="{{{ - hi SpellBad guisp=#ee0000 ctermfg=fg ctermbg=160 gui=undercurl cterm=undercurl - hi SpellCap guisp=#eeee00 ctermfg=bg ctermbg=226 gui=undercurl cterm=undercurl - hi SpellRare guisp=#ffa500 ctermfg=bg ctermbg=214 gui=undercurl cterm=undercurl - hi SpellLocal guisp=#ffa500 ctermfg=bg ctermbg=214 gui=undercurl cterm=undercurl"}}} - - " == Diff == - hi DiffAdd guifg=fg guibg=#405040 ctermfg=fg ctermbg=22 gui=none cterm=none - hi DiffChange guifg=fg guibg=#605040 ctermfg=fg ctermbg=58 gui=none cterm=none - hi DiffDelete guifg=fg guibg=#504040 ctermfg=fg ctermbg=52 gui=none cterm=none - hi DiffText guifg=#e0b050 guibg=#605040 ctermfg=220 ctermbg=58 gui=bold cterm=bold - - " == Misc == - hi Directory guifg=#b0d0a0 guibg=NONE ctermfg=151 ctermbg=NONE gui=none cterm=none - hi ErrorMsg guifg=#ee0000 guibg=NONE ctermfg=196 ctermbg=NONE gui=none cterm=none - hi SignColumn guifg=#a0b0b0 guibg=#282828 ctermfg=145 ctermbg=233 gui=none cterm=none - hi LineNr guifg=bg guibg=#808080 ctermfg=bg ctermbg=246 gui=none cterm=none - hi MoreMsg guifg=#60c0d0 guibg=NONE ctermfg=117 ctermbg=NONE gui=none cterm=none + hi Pmenu guifg=bg guibg=#b2b2b2 ctermfg=bg ctermbg=249 gui=none cterm=none + hi PmenuSel guifg=fg guibg=#005f87 ctermfg=fg ctermbg=24 gui=none cterm=none + hi PMenuSbar guifg=#b2b2b2 guibg=#d0d0d0 ctermfg=249 ctermbg=252 gui=none cterm=none + hi PMenuThumb guifg=fg guibg=#808080 ctermfg=fg ctermbg=244 gui=none cterm=none + hi StatusLine guifg=bg guibg=#b2b2b2 ctermfg=bg ctermbg=249 gui=bold cterm=bold + hi StatusLineNC guifg=#444444 guibg=#b2b2b2 ctermfg=238 ctermbg=249 gui=none cterm=none + hi 
TabLine guifg=bg guibg=#b2b2b2 ctermfg=bg ctermbg=249 gui=none cterm=none + hi TabLineFill guifg=#444444 guibg=#b2b2b2 ctermfg=238 ctermbg=249 gui=none cterm=none + hi TabLineSel guifg=fg guibg=#005f87 ctermfg=fg ctermbg=24 gui=bold cterm=bold + hi VertSplit guifg=#626262 guibg=#b2b2b2 ctermfg=241 ctermbg=249 gui=none cterm=none + hi Folded guifg=#bcbcbc guibg=#4e4e4e ctermfg=250 ctermbg=239 gui=bold cterm=none + hi FoldColumn guifg=#bcbcbc guibg=#4e4e4e ctermfg=250 ctermbg=239 gui=bold cterm=none + + " ## Spelling ## + hi SpellBad guisp=#d70000 ctermfg=bg ctermbg=160 gui=undercurl cterm=undercurl + hi SpellCap guisp=#00afd7 ctermfg=bg ctermbg=38 gui=undercurl cterm=undercurl + hi SpellRare guisp=#5faf00 ctermfg=bg ctermbg=70 gui=undercurl cterm=undercurl + hi SpellLocal guisp=#d7af00 ctermfg=bg ctermbg=178 gui=undercurl cterm=undercurl + + " ## Diff ## + hi DiffAdd guifg=fg guibg=#5f875f ctermfg=fg ctermbg=65 gui=none cterm=none + hi DiffChange guifg=fg guibg=#87875f ctermfg=fg ctermbg=101 gui=none cterm=none + hi DiffDelete guifg=fg guibg=#875f5f ctermfg=fg ctermbg=95 gui=none cterm=none + hi DiffText guifg=#ffff87 guibg=#87875f ctermfg=228 ctermbg=101 gui=none cterm=none + + " ## Misc ## + hi Directory guifg=#afd7af guibg=NONE ctermfg=151 ctermbg=NONE gui=none cterm=none + hi ErrorMsg guifg=#ff5f5f guibg=NONE ctermfg=203 ctermbg=NONE gui=none cterm=none + hi SignColumn guifg=#b2b2b2 guibg=#4e4e4e ctermfg=249 ctermbg=239 gui=none cterm=none + hi LineNr guifg=#626262 guibg=#444444 ctermfg=241 ctermbg=238 gui=none cterm=none + hi MoreMsg guifg=#5fd7d7 guibg=NONE ctermfg=80 ctermbg=NONE gui=none cterm=none hi ModeMsg guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=none cterm=none hi Question guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=none cterm=none - hi WarningMsg guifg=#e07060 guibg=NONE ctermfg=173 ctermbg=NONE gui=none cterm=none - hi WildMenu guifg=NONE guibg=#205070 ctermfg=NONE ctermbg=24 gui=none cterm=none - hi ColorColumn guifg=NONE guibg=#484038 
ctermfg=NONE ctermbg=101 gui=none cterm=none + hi WarningMsg guifg=#d7875f guibg=NONE ctermfg=173 ctermbg=NONE gui=none cterm=none + hi WildMenu guifg=fg guibg=#005f87 ctermfg=fg ctermbg=24 gui=none cterm=none + hi ColorColumn guifg=NONE guibg=#87875f ctermfg=NONE ctermbg=101 gui=none cterm=none hi Ignore guifg=bg ctermfg=bg elseif g:lucius_style == "light" - hi Normal guifg=#000000 guibg=#ffffff ctermfg=16 ctermbg=231 gui=none cterm=none + hi Normal guifg=#3a3a3a guibg=#eeeeee ctermfg=237 ctermbg=255 gui=none cterm=none - hi Comment guifg=#909090 guibg=NONE ctermfg=246 ctermbg=NONE gui=none cterm=none + hi Comment guifg=#808080 guibg=NONE ctermfg=244 ctermbg=NONE gui=none cterm=none - hi Constant guifg=#a05000 guibg=NONE ctermfg=130 ctermbg=NONE gui=none cterm=none - hi BConstant guifg=#a05000 guibg=NONE ctermfg=130 ctermbg=NONE gui=bold cterm=bold + hi Constant guifg=#af5f00 guibg=NONE ctermfg=130 ctermbg=NONE gui=none cterm=none + hi BConstant guifg=#af5f00 guibg=NONE ctermfg=130 ctermbg=NONE gui=bold cterm=bold - hi Identifier guifg=#008000 guibg=NONE ctermfg=22 ctermbg=NONE gui=none cterm=none - hi BIdentifier guifg=#008000 guibg=NONE ctermfg=22 ctermbg=NONE gui=bold cterm=bold + hi Identifier guifg=#008700 guibg=NONE ctermfg=28 ctermbg=NONE gui=none cterm=none + hi BIdentifier guifg=#008700 guibg=NONE ctermfg=28 ctermbg=NONE gui=bold cterm=bold - hi Statement guifg=#0040c0 guibg=NONE ctermfg=19 ctermbg=NONE gui=none cterm=none - hi BStatement guifg=#0040c0 guibg=NONE ctermfg=19 ctermbg=NONE gui=bold cterm=bold + hi Statement guifg=#005faf guibg=NONE ctermfg=25 ctermbg=NONE gui=none cterm=none + hi BStatement guifg=#005faf guibg=NONE ctermfg=25 ctermbg=NONE gui=bold cterm=bold - hi PreProc guifg=#009080 guibg=NONE ctermfg=30 ctermbg=NONE gui=none cterm=none - hi BPreProc guifg=#009080 guibg=NONE ctermfg=30 ctermbg=NONE gui=bold cterm=bold + hi PreProc guifg=#008787 guibg=NONE ctermfg=30 ctermbg=NONE gui=none cterm=none + hi BPreProc guifg=#008787 guibg=NONE 
ctermfg=30 ctermbg=NONE gui=bold cterm=bold - hi Type guifg=#0070a0 guibg=NONE ctermfg=25 ctermbg=NONE gui=none cterm=none - hi BType guifg=#0070a0 guibg=NONE ctermfg=25 ctermbg=NONE gui=bold cterm=bold + hi Type guifg=#005f87 guibg=NONE ctermfg=24 ctermbg=NONE gui=none cterm=none + hi BType guifg=#005f87 guibg=NONE ctermfg=24 ctermbg=NONE gui=bold cterm=bold - hi Special guifg=#800080 guibg=NONE ctermfg=5 ctermbg=NONE gui=none cterm=none - hi BSpecial guifg=#800080 guibg=NONE ctermfg=5 ctermbg=NONE gui=bold cterm=bold + hi Special guifg=#870087 guibg=NONE ctermfg=90 ctermbg=NONE gui=none cterm=none + hi BSpecial guifg=#870087 guibg=NONE ctermfg=90 ctermbg=NONE gui=bold cterm=bold - " == Text Markup == + " ## Text Markup ## hi Underlined guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=underline cterm=underline - hi Error guifg=#c02620 guibg=#f0c6c0 ctermfg=1 ctermbg=181 gui=none cterm=none - hi Todo guifg=#504000 guibg=#f6f080 ctermfg=58 ctermbg=228 gui=none cterm=none - hi MatchParen guifg=NONE guibg=#40d0d0 ctermfg=NONE ctermbg=80 gui=none cterm=none - hi NonText guifg=#b0c0d0 guibg=NONE ctermfg=146 ctermbg=NONE gui=none cterm=none - hi SpecialKey guifg=#b0d0c0 guibg=NONE ctermfg=151 ctermbg=NONE gui=none cterm=none - hi Title guifg=#0060a0 guibg=NONE ctermfg=26 ctermbg=NONE gui=bold cterm=bold - - " == Text Selection == - hi Cursor guifg=bg guibg=#505050 ctermfg=bg ctermbg=239 gui=none cterm=none - hi CursorIM guifg=bg guibg=#505050 ctermfg=bg ctermbg=239 gui=none cterm=none - hi CursorColumn guifg=NONE guibg=#e8e8e8 ctermfg=NONE ctermbg=254 gui=none cterm=none - hi CursorLine guifg=NONE guibg=#e8e8e8 ctermfg=NONE ctermbg=254 gui=none cterm=none - hi Visual guifg=NONE guibg=#b0d0f0 ctermfg=NONE ctermbg=153 gui=none cterm=none + hi Error guifg=#af0000 guibg=#ffafaf ctermfg=124 ctermbg=217 gui=none cterm=none + hi Todo guifg=#875f00 guibg=#ffff87 ctermfg=94 ctermbg=228 gui=none cterm=none + hi MatchParen guifg=NONE guibg=#5fd7d7 ctermfg=NONE ctermbg=80 gui=none 
cterm=none + hi NonText guifg=#afafd7 guibg=NONE ctermfg=146 ctermbg=NONE gui=none cterm=none + hi SpecialKey guifg=#afd7af guibg=NONE ctermfg=151 ctermbg=NONE gui=none cterm=none + hi Title guifg=#005faf guibg=NONE ctermfg=25 ctermbg=NONE gui=bold cterm=bold + + " ## Text Selection ## + hi Cursor guifg=bg guibg=#5f87af ctermfg=bg ctermbg=67 gui=none cterm=none + hi CursorIM guifg=bg guibg=#5f87af ctermfg=bg ctermbg=67 gui=none cterm=none + hi CursorColumn guifg=NONE guibg=#dadada ctermfg=NONE ctermbg=253 gui=none cterm=none + hi CursorLine guifg=NONE guibg=#dadada ctermfg=NONE ctermbg=253 gui=none cterm=none + hi Visual guifg=NONE guibg=#afd7ff ctermfg=NONE ctermbg=153 gui=none cterm=none hi VisualNOS guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=underline cterm=underline - hi IncSearch guifg=#000000 guibg=#90d0d0 ctermfg=fg ctermbg=116 gui=none cterm=none - hi Search guifg=#000000 guibg=#f0b060 ctermfg=fg ctermbg=215 gui=none cterm=none - - " == UI == - hi Pmenu guifg=bg guibg=#505050 ctermfg=231 ctermbg=239 gui=none cterm=none - hi PmenuSel guifg=#000000 guibg=#c0e0ff ctermfg=16 ctermbg=153 gui=none cterm=none - hi PMenuSbar guifg=bg guibg=#404040 ctermfg=231 ctermbg=238 gui=none cterm=none - hi PMenuThumb guifg=#000000 guibg=#a0a0a0 ctermfg=16 ctermbg=247 gui=none cterm=none - hi StatusLine guifg=bg guibg=#505050 ctermfg=231 ctermbg=239 gui=bold cterm=bold - hi StatusLineNC guifg=#e0e0e0 guibg=#505050 ctermfg=254 ctermbg=239 gui=none cterm=none - hi TabLine guifg=bg guibg=#505050 ctermfg=231 ctermbg=239 gui=none cterm=none - hi TabLineFill guifg=#a0a0a0 guibg=#505050 ctermfg=247 ctermbg=239 gui=none cterm=none - hi TabLineSel guifg=#000000 guibg=#c0e0ff ctermfg=16 ctermbg=153 gui=none cterm=none - hi VertSplit guifg=#868686 guibg=#505050 ctermfg=102 ctermbg=239 gui=none cterm=none - hi Folded guifg=bg guibg=#a0a0a0 ctermfg=231 ctermbg=247 gui=none cterm=none - hi FoldColumn guifg=bg guibg=#a0a0a0 ctermfg=231 ctermbg=247 gui=none cterm=none - - " == Spelling 
== - hi SpellBad guisp=#ee0000 ctermbg=210 gui=undercurl cterm=undercurl - hi SpellCap guisp=#eeee00 ctermbg=227 gui=undercurl cterm=undercurl - hi SpellRare guisp=#ffa500 ctermbg=221 gui=undercurl cterm=undercurl - hi SpellLocal guisp=#ffa500 ctermbg=221 gui=undercurl cterm=undercurl - - " == Diff == - hi DiffAdd guifg=fg guibg=#d0e0d0 ctermfg=fg ctermbg=151 gui=none cterm=none - hi DiffChange guifg=fg guibg=#e0d6c0 ctermfg=fg ctermbg=187 gui=none cterm=none - hi DiffDelete guifg=fg guibg=#f0d0d0 ctermfg=fg ctermbg=181 gui=none cterm=none - hi DiffText guifg=#d05000 guibg=#e0d6c0 ctermfg=160 ctermbg=187 gui=bold cterm=bold - - " == Misc == - hi Directory guifg=#008000 guibg=NONE ctermfg=29 ctermbg=NONE gui=none cterm=none - hi ErrorMsg guifg=#a00000 guibg=NONE ctermfg=124 ctermbg=NONE gui=none cterm=none - hi SignColumn guifg=#708090 guibg=#f8f8f8 ctermfg=66 ctermbg=231 gui=none cterm=none - hi LineNr guifg=bg guibg=#a0a0a0 ctermfg=231 ctermbg=247 gui=none cterm=none - hi MoreMsg guifg=#2060c0 guibg=NONE ctermfg=4 ctermbg=NONE gui=none cterm=none - hi ModeMsg guifg=#000000 guibg=NONE ctermfg=16 ctermbg=NONE gui=none cterm=none - hi Question guifg=fg guibg=NONE ctermfg=NONE ctermbg=NONE gui=none cterm=none - hi WarningMsg guifg=#b03000 guibg=NONE ctermfg=9 ctermbg=NONE gui=none cterm=none - hi WildMenu guifg=#000000 guibg=#c0e0ff ctermfg=16 ctermbg=153 gui=none cterm=none - hi ColorColumn guifg=NONE guibg=#f0f0e0 ctermfg=NONE ctermbg=230 gui=none cterm=none + hi IncSearch guifg=fg guibg=#57d7d7 ctermfg=fg ctermbg=80 gui=none cterm=none + hi Search guifg=fg guibg=#ffaf00 ctermfg=fg ctermbg=214 gui=none cterm=none + + " ## UI ## + hi Pmenu guifg=bg guibg=#808080 ctermfg=bg ctermbg=244 gui=none cterm=none + hi PmenuSel guifg=fg guibg=#afd7ff ctermfg=fg ctermbg=153 gui=none cterm=none + hi PMenuSbar guifg=#808080 guibg=#444444 ctermfg=244 ctermbg=238 gui=none cterm=none + hi PMenuThumb guifg=fg guibg=#9e9e9e ctermfg=fg ctermbg=247 gui=none cterm=none + hi StatusLine 
guifg=bg guibg=#808080 ctermfg=bg ctermbg=244 gui=bold cterm=bold + hi StatusLineNC guifg=#e4e4e4 guibg=#808080 ctermfg=254 ctermbg=244 gui=none cterm=none + hi TabLine guifg=bg guibg=#808080 ctermfg=bg ctermbg=244 gui=none cterm=none + hi TabLineFill guifg=#b2b2b2 guibg=#808080 ctermfg=249 ctermbg=244 gui=none cterm=none + hi TabLineSel guifg=fg guibg=#afd7ff ctermfg=fg ctermbg=153 gui=none cterm=none + hi VertSplit guifg=#e4e4e4 guibg=#808080 ctermfg=254 ctermbg=244 gui=none cterm=none + hi Folded guifg=#626262 guibg=#bcbcbc ctermfg=241 ctermbg=250 gui=bold cterm=none + hi FoldColumn guifg=#626262 guibg=#bcbcbc ctermfg=241 ctermbg=250 gui=bold cterm=none + + " ## Spelling ## + hi SpellBad guisp=#d70000 ctermfg=fg ctermbg=160 gui=undercurl cterm=undercurl + hi SpellCap guisp=#00afd7 ctermfg=fg ctermbg=38 gui=undercurl cterm=undercurl + hi SpellRare guisp=#5faf00 ctermfg=fg ctermbg=70 gui=undercurl cterm=undercurl + hi SpellLocal guisp=#d7af00 ctermfg=fg ctermbg=178 gui=undercurl cterm=undercurl + + " ## Diff ## + hi DiffAdd guifg=fg guibg=#afd7af ctermfg=fg ctermbg=151 gui=none cterm=none + hi DiffChange guifg=fg guibg=#d7d7af ctermfg=fg ctermbg=187 gui=none cterm=none + hi DiffDelete guifg=fg guibg=#d7afaf ctermfg=fg ctermbg=181 gui=none cterm=none + hi DiffText guifg=#d75f00 guibg=#d7d7af ctermfg=166 ctermbg=187 gui=bold cterm=bold + + " ## Misc ## + hi Directory guifg=#00875f guibg=NONE ctermfg=29 ctermbg=NONE gui=none cterm=none + hi ErrorMsg guifg=#af0000 guibg=NONE ctermfg=124 ctermbg=NONE gui=none cterm=none + hi SignColumn guifg=#626262 guibg=#d0d0d0 ctermfg=241 ctermbg=252 gui=none cterm=none + hi LineNr guifg=#9e9e9e guibg=#dadada ctermfg=247 ctermbg=253 gui=none cterm=none + hi MoreMsg guifg=#005fd7 guibg=NONE ctermfg=26 ctermbg=NONE gui=none cterm=none + hi ModeMsg guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=none cterm=none + hi Question guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=none cterm=none + hi WarningMsg guifg=#af5700 guibg=NONE 
ctermfg=130 ctermbg=NONE gui=none cterm=none + hi WildMenu guifg=fg guibg=#afd7ff ctermfg=fg ctermbg=153 gui=none cterm=none + hi ColorColumn guifg=NONE guibg=#d7d7af ctermfg=NONE ctermbg=187 gui=none cterm=none hi Ignore guifg=bg ctermfg=bg endif -" == Vimwiki Colors == + +" ## Vimwiki Colors ## hi link VimwikiHeader1 BIdentifier hi link VimwikiHeader2 BPreProc hi link VimwikiHeader3 BStatement hi link VimwikiHeader4 BSpecial hi link VimwikiHeader5 BConstant hi link VimwikiHeader6 BType -" == Tagbar Colors == +" ## Tagbar Colors ## hi link TagbarAccessPublic Constant hi link TagbarAccessProtected Type hi link TagbarAccessPrivate PreProc -" == Commands == +" ## Commands ## command! LuciusLight let g:lucius_style = "light" | colorscheme lucius command! LuciusDark let g:lucius_style = "dark" | colorscheme lucius -command! LuciusBlue let g:lucius_style = "blue" | colorscheme lucius diff --git a/.vim/colors/twilight.vim b/.vim/colors/twilight.vim new file mode 100644 index 0000000..239023e --- /dev/null +++ b/.vim/colors/twilight.vim @@ -0,0 +1,511 @@ +" Twilight based on famouse Jellybeans theme +" Modified by M.L. + +" Vim color file +" +" " __ _ _ _ " +" " \ \ ___| | |_ _| |__ ___ __ _ _ __ ___ " +" " \ \/ _ \ | | | | | _ \ / _ \/ _ | _ \/ __| " +" " /\_/ / __/ | | |_| | |_| | __/ |_| | | | \__ \ " +" " \___/ \___|_|_|\__ |____/ \___|\____|_| |_|___/ " +" " \___/ " +" +" "A colorful, dark color scheme for Vim." +" +" File: jellybeans.vim +" Maintainer: NanoTech <http://nanotech.nanotechcorp.net/> +" Version: 1.5~git +" Last Change: April 11th, 2011 +" Contributors: Daniel Herbert <http://pocket-ninja.com>, +" Henry So, Jr. 
<[email protected]>, +" David Liang <bmdavll at gmail dot com> +" +" Copyright (c) 2009-2011 NanoTech +" +" Permission is hereby granted, free of charge, to any person obtaining a copy +" of this software and associated documentation files (the "Software"), to deal +" in the Software without restriction, including without limitation the rights +" to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +" copies of the Software, and to permit persons to whom the Software is +" furnished to do so, subject to the following conditions: +" +" The above copyright notice and this permission notice shall be included in +" all copies or substantial portions of the Software. +" +" THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +" IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +" FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +" AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +" LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +" OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +" THE SOFTWARE. + +set background=dark + +hi clear + +if exists("syntax_on") + syntax reset +endif + +let colors_name = "jellybeans" + +if has("gui_running") || &t_Co == 88 || &t_Co == 256 + let s:low_color = 0 +else + let s:low_color = 1 +endif + +" Color approximation functions by Henry So, Jr. and David Liang {{{ +" Added to jellybeans.vim by Daniel Herbert + +" returns an approximate grey index for the given grey level +fun! 
s:grey_number(x) + if &t_Co == 88 + if a:x < 23 + return 0 + elseif a:x < 69 + return 1 + elseif a:x < 103 + return 2 + elseif a:x < 127 + return 3 + elseif a:x < 150 + return 4 + elseif a:x < 173 + return 5 + elseif a:x < 196 + return 6 + elseif a:x < 219 + return 7 + elseif a:x < 243 + return 8 + else + return 9 + endif + else + if a:x < 14 + return 0 + else + let l:n = (a:x - 8) / 10 + let l:m = (a:x - 8) % 10 + if l:m < 5 + return l:n + else + return l:n + 1 + endif + endif + endif +endfun + +" returns the actual grey level represented by the grey index +fun! s:grey_level(n) + if &t_Co == 88 + if a:n == 0 + return 0 + elseif a:n == 1 + return 46 + elseif a:n == 2 + return 92 + elseif a:n == 3 + return 115 + elseif a:n == 4 + return 139 + elseif a:n == 5 + return 162 + elseif a:n == 6 + return 185 + elseif a:n == 7 + return 208 + elseif a:n == 8 + return 231 + else + return 255 + endif + else + if a:n == 0 + return 0 + else + return 8 + (a:n * 10) + endif + endif +endfun + +" returns the palette index for the given grey index +fun! s:grey_color(n) + if &t_Co == 88 + if a:n == 0 + return 16 + elseif a:n == 9 + return 79 + else + return 79 + a:n + endif + else + if a:n == 0 + return 16 + elseif a:n == 25 + return 231 + else + return 231 + a:n + endif + endif +endfun + +" returns an approximate color index for the given color level +fun! s:rgb_number(x) + if &t_Co == 88 + if a:x < 69 + return 0 + elseif a:x < 172 + return 1 + elseif a:x < 230 + return 2 + else + return 3 + endif + else + if a:x < 75 + return 0 + else + let l:n = (a:x - 55) / 40 + let l:m = (a:x - 55) % 40 + if l:m < 20 + return l:n + else + return l:n + 1 + endif + endif + endif +endfun + +" returns the actual color level for the given color index +fun! 
s:rgb_level(n) + if &t_Co == 88 + if a:n == 0 + return 0 + elseif a:n == 1 + return 139 + elseif a:n == 2 + return 205 + else + return 255 + endif + else + if a:n == 0 + return 0 + else + return 55 + (a:n * 40) + endif + endif +endfun + +" returns the palette index for the given R/G/B color indices +fun! s:rgb_color(x, y, z) + if &t_Co == 88 + return 16 + (a:x * 16) + (a:y * 4) + a:z + else + return 16 + (a:x * 36) + (a:y * 6) + a:z + endif +endfun + +" returns the palette index to approximate the given R/G/B color levels +fun! s:color(r, g, b) + " get the closest grey + let l:gx = s:grey_number(a:r) + let l:gy = s:grey_number(a:g) + let l:gz = s:grey_number(a:b) + + " get the closest color + let l:x = s:rgb_number(a:r) + let l:y = s:rgb_number(a:g) + let l:z = s:rgb_number(a:b) + + if l:gx == l:gy && l:gy == l:gz + " there are two possibilities + let l:dgr = s:grey_level(l:gx) - a:r + let l:dgg = s:grey_level(l:gy) - a:g + let l:dgb = s:grey_level(l:gz) - a:b + let l:dgrey = (l:dgr * l:dgr) + (l:dgg * l:dgg) + (l:dgb * l:dgb) + let l:dr = s:rgb_level(l:gx) - a:r + let l:dg = s:rgb_level(l:gy) - a:g + let l:db = s:rgb_level(l:gz) - a:b + let l:drgb = (l:dr * l:dr) + (l:dg * l:dg) + (l:db * l:db) + if l:dgrey < l:drgb + " use the grey + return s:grey_color(l:gx) + else + " use the color + return s:rgb_color(l:x, l:y, l:z) + endif + else + " only one possibility + return s:rgb_color(l:x, l:y, l:z) + endif +endfun + +" returns the palette index to approximate the 'rrggbb' hex string +fun! s:rgb(rgb) + let l:r = ("0x" . strpart(a:rgb, 0, 2)) + 0 + let l:g = ("0x" . strpart(a:rgb, 2, 2)) + 0 + let l:b = ("0x" . strpart(a:rgb, 4, 2)) + 0 + return s:color(l:r, l:g, l:b) +endfun + +" sets the highlighting for the given group +fun! s:X(group, fg, bg, attr, lcfg, lcbg) + if s:low_color + let l:fge = empty(a:lcfg) + let l:bge = empty(a:lcbg) + + if !l:fge && !l:bge + exec "hi ".a:group." ctermfg=".a:lcfg." ctermbg=".a:lcbg + elseif !l:fge && l:bge + exec "hi ".a:group." 
ctermfg=".a:lcfg." ctermbg=NONE" + elseif l:fge && !l:bge + exec "hi ".a:group." ctermfg=NONE ctermbg=".a:lcbg + endif + else + let l:fge = empty(a:fg) + let l:bge = empty(a:bg) + + if !l:fge && !l:bge + exec "hi ".a:group." guifg=#".a:fg." guibg=#".a:bg." ctermfg=".s:rgb(a:fg)." ctermbg=".s:rgb(a:bg) + elseif !l:fge && l:bge + exec "hi ".a:group." guifg=#".a:fg." guibg=NONE ctermfg=".s:rgb(a:fg)." ctermbg=NONE" + elseif l:fge && !l:bge + exec "hi ".a:group." guifg=NONE guibg=#".a:bg." ctermfg=NONE ctermbg=".s:rgb(a:bg) + endif + endif + + if a:attr == "" + exec "hi ".a:group." gui=none cterm=none" + else + let noitalic = join(filter(split(a:attr, ","), "v:val !=? 'italic'"), ",") + if empty(noitalic) + let noitalic = "none" + endif + exec "hi ".a:group." gui=".a:attr." cterm=".noitalic + endif +endfun +" }}} + +call s:X("Normal","e8e8d3","151515","","White","") +set background=dark + +if version >= 700 + call s:X("CursorLine","","1c1c1c","","","Black") + call s:X("CursorColumn","","1c1c1c","","","Black") + call s:X("MatchParen","ffffff","80a090","bold","","DarkCyan") + + call s:X("TabLine","000000","b0b8c0","italic","","Black") + call s:X("TabLineFill","9098a0","","","","Black") + call s:X("TabLineSel","000000","f0f0f0","italic,bold","Black","White") + + " Auto-completion + call s:X("Pmenu","ffffff","606060","","White","Black") + call s:X("PmenuSel","101010","eeeeee","","Black","White") +endif + +call s:X("Visual","","404040","","","Black") +call s:X("Cursor","","b0d0f0","","","") + +"call s:X("LineNr","605958","151515","none","Black","") +call s:X("LineNr","4e4e4e","262626","none","DarkGrey","") +"call s:X("LineNr","666666","454545","none","Black","") +call s:X("Comment","888888","","italic","Grey","") +call s:X("Todo","808080","","bold","White","Black") + +call s:X("StatusLine","000000","dddddd","italic","Black","White") +call s:X("StatusLineNC","ffffff","403c41","italic","White","Black") +call s:X("VertSplit","777777","403c41","italic","Black","Black") +call 
s:X("WildMenu","f0a0c0","302028","","Magenta","") + +call s:X("Folded","a0a8b0","384048","italic","Black","") +call s:X("FoldColumn","535D66","1f1f1f","","","Black") +call s:X("SignColumn","777777","333333","","","Black") +call s:X("ColorColumn","","000000","","","Black") + +call s:X("Title","70b950","","bold","Green","") + +call s:X("Constant","cf6a4c","","","Red","") +call s:X("Special","799d6a","","","Green","") +call s:X("Delimiter","668799","","","Grey","") + +call s:X("String","99ad6a","","","Green","") +call s:X("StringDelimiter","556633","","","DarkGreen","") + + +call s:X("Identifier","c6b6ee","","","LightCyan","") + +call s:X("Structure","8fbfdc","","","LightCyan","") +call s:X("Function","ffb964","","","Yellow","") +"call s:X("Function","fad07a","","","Yellow","") +"call s:X("Statement","8197bf","","","DarkBlue","") +call s:X("Statement","6577bf","","","DarkBlue","") +call s:X("PreProc","8fbfdc","","","LightBlue","") + + + +call s:X("Type","ffb964","","","Yellow","") +call s:X("NonText","606060","151515","","Black","") + +call s:X("SpecialKey","444444","1c1c1c","","Black","") + +call s:X("Search","f0a0c0","302028","underline","Magenta","") + +call s:X("Directory","dad085","","","Yellow","") +call s:X("ErrorMsg","","902020","","","DarkRed") +hi! link Error ErrorMsg +hi! link MoreMsg Special +call s:X("Question","65C254","","","Green","") + + +" Spell Checking + +call s:X("SpellBad","","902020","underline","","DarkRed") +call s:X("SpellCap","","0000df","underline","","Blue") +call s:X("SpellRare","","540063","underline","","DarkMagenta") +call s:X("SpellLocal","","2D7067","underline","","Green") + +" Diff + +hi! link diffRemoved Constant +hi! link diffAdded String + +" VimDiff + +call s:X("DiffAdd","D2EBBE","437019","","White","DarkGreen") +call s:X("DiffDelete","40000A","700009","","DarkRed","DarkRed") +call s:X("DiffChange","","2B5B77","","White","DarkBlue") +call s:X("DiffText","8fbfdc","000000","reverse","Yellow","") + +" Python +hi! 
link pythonPreCondit Import +hi! link pythonBuiltinObj BuiltinObj +hi! link pythonBuiltinFunc BuiltinFunc +hi! link pythonDecorator Decorator +hi! link pythonDottedName DottedName +hi! link pythonAssignment Assignment +hi! link pythonCalOperator CalOperator +hi! link pythonSuperclass Superclass +"hi! link pythonSuperclasses Superclass +call s:X("Import","cda869","","","Brown","") +call s:X("BuiltinFunc","dad085","","","LightMagenta","") +call s:X("Definition","f8ed97","","","LightYellow","") +call s:X("Statement","87afd7","","","DarkBlue","") +call s:X("Number","ca6f4c","","","Red","") +"call s:X("Function","ffb964","","","Yellow","") +call s:X("Operator","afd7ff","","","Cyan","") +call s:X("CalOperator","af5f00","","","Yellow","") +call s:X("Decorator","57d700","","","Grean","") +call s:X("DottedName","57d700","","","Grean","") +call s:X("Comment","5f5f5f","","","DarkGrey","") +call s:X("Assignment","ffaf5f","","","LightOrange","") +call s:X("ParamName","5f87d7","","","LightBule","") +call s:X("Entity","ffb964","","","Yellow","") +call s:X("Superclass","9b5c2e","","","LightMagenta","") +call s:X("BuiltinObj","9b859d","","","LightCyan","") + +" PHP + +hi! link phpFunctions Function +call s:X("StorageClass","c59f6f","","","Red","") +hi! link phpSuperglobal Identifier +hi! link phpQuoteSingle StringDelimiter +hi! link phpQuoteDouble StringDelimiter +hi! link phpBoolean Constant +hi! link phpNull Constant +hi! link phpArrayPair Operator + +" Ruby + +hi! link rubySharpBang Comment +call s:X("rubyClass","447799","","","DarkBlue","") +call s:X("rubyIdentifier","c6b6fe","","","Cyan","") +hi! link rubyConstant Type +hi! link rubyFunction Function + +call s:X("rubyInstanceVariable","c6b6fe","","","Cyan","") +call s:X("rubySymbol","7697d6","","","Blue","") +hi! link rubyGlobalVariable rubyInstanceVariable +hi! link rubyModule rubyClass +call s:X("rubyControl","7597c6","","","Blue","") + +hi! link rubyString String +hi! link rubyStringDelimiter StringDelimiter +hi! 
link rubyInterpolationDelimiter Identifier + +call s:X("rubyRegexpDelimiter","540063","","","Magenta","") +call s:X("rubyRegexp","dd0093","","","DarkMagenta","") +call s:X("rubyRegexpSpecial","a40073","","","Magenta","") + +call s:X("rubyPredefinedIdentifier","de5577","","","Red","") + +" JavaScript + +hi! link javaScriptValue Constant +hi! link javaScriptRegexpString rubyRegexp + +" CoffeeScript + +hi! link coffeeRegExp javaScriptRegexpString + +" Lua + +hi! link luaOperator Conditional + +" C + +hi! link cOperator Constant + +" Objective-C/Cocoa + +hi! link objcClass Type +hi! link cocoaClass objcClass +hi! link objcSubclass objcClass +hi! link objcSuperclass objcClass +hi! link objcDirective rubyClass +hi! link cocoaFunction Function +hi! link objcMethodName Identifier +hi! link objcMethodArg Normal +hi! link objcMessageName Identifier + +" Debugger.vim + +call s:X("DbgCurrent","DEEBFE","345FA8","","White","DarkBlue") +call s:X("DbgBreakPt","","4F0037","","","DarkMagenta") + +" Plugins, etc. + +hi! link TagListFileName Directory +call s:X("PreciseJumpTarget","B9ED67","405026","","White","Green") + +" Manual overrides for 256-color terminals. Dark colors auto-map badly. +if !s:low_color + hi StatusLineNC ctermbg=235 + hi Folded ctermbg=236 + hi FoldColumn ctermbg=234 + hi SignColumn ctermbg=236 + hi CursorColumn ctermbg=234 + hi CursorLine ctermbg=234 + hi SpecialKey ctermbg=234 + hi NonText ctermbg=233 + hi LineNr ctermbg=233 + hi DiffText ctermfg=81 + hi Normal ctermbg=233 + hi DbgBreakPt ctermbg=53 +endif + +" delete functions {{{ +delf s:X +delf s:rgb +delf s:color +delf s:rgb_color +delf s:rgb_level +delf s:rgb_number +delf s:grey_color +delf s:grey_level +delf s:grey_number +" }}} + diff --git a/.vim/colors/xoria256-1.6.vim b/.vim/colors/xoria256-1.6.vim new file mode 100644 index 0000000..3be5b7e --- /dev/null +++ b/.vim/colors/xoria256-1.6.vim @@ -0,0 +1,145 @@ +" Vim color file +" +" Name: xoria256.vim +" Version: 1.6 +" Maintainer: Dmitriy Y. 
Zotikov (xio) <[email protected]> +" +" Should work in recent 256 color terminals. 88-color terms like urxvt are +" NOT supported. +" +" Don't forget to install 'ncurses-term' and set TERM to xterm-256color or +" similar value. +" +" Color numbers (0-255) see: +" http://www.calmar.ws/vim/256-xterm-24bit-rgb-color-chart.html +" +" For a specific filetype highlighting rules issue :syntax list when a file of +" that type is opened. +" +" TODO: link colours instead of setting values explicitly + +" Initialization {{{ +if &t_Co != 256 && ! has("gui_running") + echoerr "Please use GUI or a 256-color terminal (which sets t_Co=256)." + finish +endif + +set background=dark + +hi clear + +if exists("syntax_on") + syntax reset +endif + +let colors_name = "xoria256" +"}}} +" Colours {{{1 +"" General {{{2 +hi Normal ctermfg=252 guifg=#d0d0d0 ctermbg=234 guibg=#1c1c1c cterm=none gui=none +hi Cursor ctermbg=214 guibg=#ffaf00 +hi CursorColumn ctermbg=238 guibg=#444444 +hi CursorLine ctermbg=237 guibg=#3a3a3a cterm=none gui=none +hi Error ctermfg=15 guifg=#ffffff ctermbg=1 guibg=#800000 +hi ErrorMsg ctermfg=15 guifg=#ffffff ctermbg=1 guibg=#800000 +hi FoldColumn ctermfg=247 guifg=#9e9e9e ctermbg=233 guibg=#121212 +hi Folded ctermfg=255 guifg=#eeeeee ctermbg=60 guibg=#5f5f87 +hi IncSearch ctermfg=0 guifg=#000000 ctermbg=223 guibg=#ffdfaf cterm=none gui=none +hi LineNr ctermfg=247 guifg=#9e9e9e ctermbg=233 guibg=#121212 +hi MatchParen ctermfg=188 guifg=#dfdfdf ctermbg=68 guibg=#5f87df cterm=bold gui=bold +" TODO +" hi MoreMsg +hi NonText ctermfg=247 guifg=#9e9e9e ctermbg=233 guibg=#121212 cterm=bold gui=bold +hi Pmenu ctermfg=0 guifg=#000000 ctermbg=250 guibg=#bcbcbc +hi PmenuSel ctermfg=255 guifg=#eeeeee ctermbg=243 guibg=#767676 +hi PmenuSbar ctermbg=252 guibg=#d0d0d0 +hi PmenuThumb ctermfg=243 guifg=#767676 +hi Search ctermfg=0 guifg=#000000 ctermbg=149 guibg=#afdf5f +hi SignColumn ctermfg=248 guifg=#a8a8a8 +hi SpecialKey ctermfg=77 guifg=#5fdf5f +hi SpellBad ctermfg=160 guifg=fg 
ctermbg=bg cterm=underline guisp=#df0000 +hi SpellCap ctermfg=189 guifg=#dfdfff ctermbg=bg guibg=bg cterm=underline gui=underline +hi SpellRare ctermfg=168 guifg=#df5f87 ctermbg=bg guibg=bg cterm=underline gui=underline +hi SpellLocal ctermfg=98 guifg=#875fdf ctermbg=bg guibg=bg cterm=underline gui=underline +hi StatusLine ctermfg=15 guifg=#ffffff ctermbg=239 guibg=#4e4e4e cterm=bold gui=bold +hi StatusLineNC ctermfg=249 guifg=#b2b2b2 ctermbg=237 guibg=#3a3a3a cterm=none gui=none +hi TabLine ctermfg=fg guifg=fg ctermbg=242 guibg=#666666 cterm=none gui=none +hi TabLineFill ctermfg=fg guifg=fg ctermbg=237 guibg=#3a3a3a cterm=none gui=none +" FIXME +hi Title ctermfg=225 guifg=#ffdfff +hi Todo ctermfg=0 guifg=#000000 ctermbg=184 guibg=#dfdf00 +hi Underlined ctermfg=39 guifg=#00afff cterm=underline gui=underline +hi VertSplit ctermfg=237 guifg=#3a3a3a ctermbg=237 guibg=#3a3a3a cterm=none gui=none +" hi VIsualNOS ctermfg=24 guifg=#005f87 ctermbg=153 guibg=#afdfff cterm=none gui=none +" hi Visual ctermfg=24 guifg=#005f87 ctermbg=153 guibg=#afdfff +hi Visual ctermfg=255 guifg=#eeeeee ctermbg=96 guibg=#875f87 +" hi Visual ctermfg=255 guifg=#eeeeee ctermbg=24 guibg=#005f87 +hi VisualNOS ctermfg=255 guifg=#eeeeee ctermbg=60 guibg=#5f5f87 +hi WildMenu ctermfg=0 guifg=#000000 ctermbg=150 guibg=#afdf87 cterm=bold gui=bold + +"" Syntax highlighting {{{2 +" hi Comment ctermfg=244 guifg=#808080 +hi Comment ctermfg=150 guifg=#afdf87 +hi Constant ctermfg=229 guifg=#ffffaf +hi Identifier ctermfg=182 guifg=#dfafdf cterm=none +hi Ignore ctermfg=238 guifg=#444444 +hi Number ctermfg=180 guifg=#dfaf87 +" hi PreProc ctermfg=150 guifg=#afdf87 +hi PreProc ctermfg=152 guifg=#afdfdf +hi Special ctermfg=174 guifg=#df8787 +hi Statement ctermfg=110 guifg=#87afdf cterm=none gui=none +hi Type ctermfg=146 guifg=#afafdf cterm=none gui=none + +"" Special {{{2 +""" .diff {{{3 +hi diffAdded ctermfg=150 guifg=#afdf87 +hi diffRemoved ctermfg=174 guifg=#df8787 +""" vimdiff {{{3 +hi diffAdd ctermfg=bg 
guifg=bg ctermbg=151 guibg=#afdfaf +"hi diffDelete ctermfg=bg guifg=bg ctermbg=186 guibg=#dfdf87 cterm=none gui=none +hi diffDelete ctermfg=bg guifg=bg ctermbg=246 guibg=#949494 cterm=none gui=none +hi diffChange ctermfg=bg guifg=bg ctermbg=181 guibg=#dfafaf +hi diffText ctermfg=bg guifg=bg ctermbg=174 guibg=#df8787 cterm=none gui=none +""" HTML {{{3 +" hi htmlTag ctermfg=146 guifg=#afafdf +" hi htmlEndTag ctermfg=146 guifg=#afafdf +hi htmlTag ctermfg=244 +hi htmlEndTag ctermfg=244 +hi htmlArg ctermfg=182 guifg=#dfafdf +hi htmlValue ctermfg=187 guifg=#dfdfaf +hi htmlTitle ctermfg=254 ctermbg=95 +" hi htmlArg ctermfg=146 +" hi htmlTagName ctermfg=146 +" hi htmlString ctermfg=187 +""" django {{{3 +hi djangoVarBlock ctermfg=180 +hi djangoTagBlock ctermfg=150 +hi djangoStatement ctermfg=146 +hi djangoFilter ctermfg=174 +""" python {{{3 +hi pythonExceptions ctermfg=174 +""" NERDTree {{{3 +hi Directory ctermfg=110 guifg=#87afdf +hi treeCWD ctermfg=180 guifg=#dfaf87 +hi treeClosable ctermfg=174 guifg=#df8787 +hi treeOpenable ctermfg=150 guifg=#afdf87 +hi treePart ctermfg=244 guifg=#808080 +hi treeDirSlash ctermfg=244 guifg=#808080 +hi treeLink ctermfg=182 guifg=#dfafdf +""" rst #{{{3 +hi link rstEmphasis Number + +""" VimDebug {{{3 +" FIXME +" you may want to set SignColumn highlight in your .vimrc +" :help sign +" :help SignColumn + +" hi currentLine term=reverse cterm=reverse gui=reverse +" hi breakPoint term=NONE cterm=NONE gui=NONE +" hi empty term=NONE cterm=NONE gui=NONE + +" sign define currentLine linehl=currentLine +" sign define breakPoint linehl=breakPoint text=>> +" sign define both linehl=currentLine text=>> +" sign define empty linehl=empty diff --git a/.vim/colors/xorium.vim b/.vim/colors/xorium.vim index 49c089b..9eee231 100644 --- a/.vim/colors/xorium.vim +++ b/.vim/colors/xorium.vim @@ -1,118 +1,118 @@ " Vim color file " " Name: xorium.vim " Version: 1.0 " Maintainer: AUAnonymous <[email protected]> " " Based off of Dmitriy Y. 
Zotikov's (xio, <[email protected]>) xoria256 " " Should work in recent 256 color terminals. 88-color terms like urxvt are " NOT supported. " " Don't forget to install 'ncurses-term' and set TERM to xterm-256color or " similar value. " " Color numbers (0-255) see: " http://www.calmar.ws/vim/256-xterm-24bit-rgb-color-chart.html " " For a specific filetype highlighting rules issue :syntax list when a file of " that type is opened. " " Initialization {{{ if &t_Co != 256 && ! has("gui_running") echomsg "" echomsg "err: please use GUI or a 256-color terminal (so that t_Co=256 could be set)" echomsg "" finish endif set background=dark hi clear if exists("syntax_on") syntax reset endif let colors_name = "xorium" " Group ctermfg guifg ctermbg guibg cterm gui hi Normal ctermfg=252 guifg=#d0d0d0 ctermbg=234 guibg=#1c1c1c cterm=none gui=none hi Cursor ctermbg=214 guibg=#ffaf00 hi CursorColumn ctermbg=236 guibg=#2c2c2c hi CursorLine ctermbg=236 guibg=#2c2c2c cterm=none gui=none hi Error ctermfg=15 guifg=#ffffff ctermbg=88 guibg=#800000 hi ErrorMsg ctermfg=15 guifg=#ffffff ctermbg=88 guibg=#800000 hi FoldColumn ctermfg=247 guifg=#9e9e9e ctermbg=233 guibg=#121212 hi Folded ctermfg=255 guifg=#eeeeee ctermbg=60 guibg=#5f5f87 hi IncSearch ctermfg=0 guifg=#000000 ctermbg=223 guibg=#ffdfaf cterm=none gui=none hi LineNr ctermfg=247 guifg=#9e9e9e ctermbg=233 guibg=#121212 hi MatchParen ctermfg=188 guifg=#dfdfdf ctermbg=68 guibg=#5f87df cterm=none gui=none " TODO " hi MoreMsg -hi NonText ctermfg=236 guifg=#2c2c2c cterm=none gui=none +hi NonText ctermfg=236 guifg=#2c2c2c ctermbg=234 cterm=none gui=none hi Pmenu ctermfg=0 guifg=#000000 ctermbg=250 guibg=#bcbcbc hi PmenuSel ctermfg=255 guifg=#eeeeee ctermbg=243 guibg=#767676 hi PmenuSbar ctermbg=252 guibg=#d0d0d0 hi PmenuThumb ctermfg=243 guifg=#767676 hi Search ctermfg=0 guifg=#000000 ctermbg=149 guibg=#afdf5f ctermbg=149 hi SignColumn ctermfg=248 guifg=#a8a8a8 hi SpecialKey ctermfg=237 guifg=#3d3d3d hi SpellBad ctermfg=160 guifg=fg 
ctermbg=bg cterm=underline guisp=#df0000 hi SpellCap ctermfg=189 guifg=#dfdfff ctermbg=bg guibg=bg cterm=underline gui=underline hi SpellRare ctermfg=168 guifg=#df5f87 ctermbg=bg guibg=bg cterm=underline gui=underline hi SpellLocal ctermfg=98 guifg=#875fdf ctermbg=bg guibg=bg cterm=underline gui=underline hi StatusLine ctermfg=15 guifg=#ffffff ctermbg=239 guibg=#4e4e4e cterm=bold gui=none hi StatusLineNC ctermfg=249 guifg=#b2b2b2 ctermbg=237 guibg=#3a3a3a cterm=none gui=none hi TabLine ctermfg=fg guifg=fg ctermbg=242 guibg=#666666 cterm=none gui=none hi TabLineFill ctermfg=fg guifg=fg ctermbg=237 guibg=#3a3a3a cterm=none gui=none " FIXME hi Title ctermfg=225 guifg=#ffdfff hi TODO ctermfg=0 guifg=#000000 ctermbg=220 guibg=#ffc400 hi Underlined ctermfg=39 guifg=#00afff cterm=underline gui=underline hi VertSplit ctermfg=237 guifg=#3a3a3a ctermbg=237 guibg=#3a3a3a cterm=none gui=none hi Visual ctermfg=234 guifg=#202020 ctermbg=220 guibg=#FFD000 hi VisualNOS ctermfg=255 guifg=#eeeeee ctermbg=60 guibg=#5f5f87 hi WildMenu ctermfg=0 guifg=#000000 ctermbg=150 guibg=#afdf87 cterm=bold gui=bold "" Syntax highlighting {{{2 hi comment ctermfg=66 guifg=#5f7682 hi constant ctermfg=111 guifg=#94bbff hi Identifier ctermfg=212 guifg=#f786ea cterm=none hi Ignore ctermfg=238 guifg=#444444 hi number ctermfg=028 guifg=#ed7715 hi preproc ctermfg=149 guifg=#a1de6f hi Special ctermfg=174 guifg=#eb8181 hi Statement ctermfg=227 guifg=#ffff70 cterm=none gui=none hi type ctermfg=104 guifg=#9b9bde cterm=none gui=none "" Special {{{2 """ .diff {{{3 hi diffAdded ctermfg=150 guifg=#afdf87 hi diffRemoved ctermfg=174 guifg=#df8787 """ vimdiff {{{3 hi diffAdd ctermfg=bg guifg=bg ctermbg=151 guibg=#afdfaf hi diffDelete ctermfg=bg guifg=bg ctermbg=246 guibg=#949494 cterm=none gui=none hi diffChange ctermfg=bg guifg=bg ctermbg=181 guibg=#dfafaf hi diffText ctermfg=bg guifg=bg ctermbg=174 guibg=#df8787 cterm=none gui=none """ HTML {{{3 hi htmlTag ctermfg=244 hi htmlEndTag ctermfg=244 hi htmlArg 
ctermfg=182 guifg=#dfafdf hi htmlValue ctermfg=187 guifg=#dfdfaf hi htmlTitle ctermfg=254 ctermbg=95 """ django {{{3 hi djangoVarBlock ctermfg=180 hi djangoTagBlock ctermfg=150 hi djangoStatement ctermfg=146 hi djangoFilter ctermfg=174 """ python {{{3 hi pythonExceptions ctermfg=174 """ NERDTree {{{3 hi Directory ctermfg=110 guifg=#87afdf hi treeCWD ctermfg=180 guifg=#dfaf87 hi treeClosable ctermfg=174 guifg=#df8787 hi treeOpenable ctermfg=150 guifg=#afdf87 hi treePart ctermfg=244 guifg=#808080 hi treeDirSlash ctermfg=244 guifg=#808080 hi treeLink ctermfg=182 guifg=#dfafdf diff --git a/.vim/dict.add b/.vim/dict.add index a0f90db..2b1c1f3 100644 --- a/.vim/dict.add +++ b/.vim/dict.add @@ -1,26 +1,27 @@ sqlalchemy SQLAlchemy sessionmaker Metadata metadata metadata config hostname app Ubuntu blog unicode plugin plugins ajax json hotalert hotalerts HotAlert SqlAlchemy admin popup username Codemash added actions +buildout diff --git a/.vimrc b/.vimrc index 3f3981f..fcd7a8e 100644 --- a/.vimrc +++ b/.vimrc @@ -1,612 +1,631 @@ " http://github.com/mitechie/pyvim " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,q - reformat text paragraph " ,s - toggle spellcheck " ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " ,t - collapse/fold html tag " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - 
Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ " bootstrap the pathogen part of the config right away filetype off call pathogen#runtime_append_all_bundles() call pathogen#helptags() " Highlight end of line whitespace. 
" match WhitespaceEOL /\s\+$/ au InsertEnter * match WhitespaceEOL /\s\+$/ au InsertLeave * match WhitespaceEOL /\s\+$/ " make sure our whitespace matching is setup before we do colorscheme tricks autocmd ColorScheme * highlight WhitespaceEOL ctermbg=red guibg=red " now proceed as usual syntax on " syntax highlighing filetype on " try to detect filetypes filetype plugin indent on " enable loading indent file for filetype " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font " set guifont=Envy\ Code\ R\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme lucius colorscheme twilight - colorscheme aldmeris - colorscheme void + colorscheme aldmeris " To set the toolbars off (icons on top of the screen) set guioptions-=T + + " Try to keep backups across sessions + set undodir=~/.vim/backups + set undofile else set background=dark " adapt colors for dark background - colorscheme void set t_Co=256 + colorscheme xorium + colorscheme lucius + colorscheme twilight + endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=78 " Try this out to see how textwidth helps set ch=1 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show 
title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... set statusline=%F%m%r%h%w\ [FORMAT=%{&ff}]\ [TYPE=%Y]\ [ASCII=\%03.3b]\ [HEX=\%02.2B]\ [POS=%04l,%04v][%p%%]\ [LEN=%L] hi StatusLine guifg=#fcf4ba guibg=#333333 hi StatusLineNC guifg=#808080 guibg=#333333 " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep +" auto save when focus is lost +au FocusLost * :wa + " ================================================== " Config Specific Settings " ================================================== " If we're running in vimdiff then tweak out settings a bit if &diff set nospell endif " ================================================== " Basic Maps " ================================================== " " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR>:cw<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for 
copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " setup a custom dict for spelling " zg = add word to dict " zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> nmap <leader>l :lopen<CR> nmap <leader>ll :lclose<CR> nmap <leader>ln :lN<CR> nmap <leader>lp :lN<CR> " for when we forget to use sudo to open/edit a file cmap w!! w !sudo tee % >/dev/null nnoremap <leader>q gqap " Scroll the viewport 3 lines vs just 1 line at a time nnoremap <C-e> 3<C-e> nnoremap <C-y> 3<C-y> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " Hints for other movements " <c-w><c-r> rotate window to next spot " <c-w><c-x> swap window with current one " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode " function! ScreenMovement(movement) " if &wrap " return "g" . 
a:movement " else " return a:movement " endif " endfunction " onoremap <silent> <expr> j ScreenMovement("j") " onoremap <silent> <expr> k ScreenMovement("k") " onoremap <silent> <expr> 0 ScreenMovement("0") " onoremap <silent> <expr> ^ ScreenMovement("^") " onoremap <silent> <expr> $ ScreenMovement("$") " nnoremap <silent> <expr> j ScreenMovement("j") " nnoremap <silent> <expr> k ScreenMovement("k") " nnoremap <silent> <expr> 0 ScreenMovement("0") " nnoremap <silent> <expr> ^ ScreenMovement("^") " nnoremap <silent> <expr> $ ScreenMovement("$") " " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching set smartcase " if searching and search contains upper case, make case sensitive search -nmap <silent> <C-N> :silent noh<CR> nmap <silent> <C-N> :silent noh<CR> " Search for potentially strange non-ascii characters map <leader>u :match Error /[\x7f-\xff]/<CR> " Clean all end of line extra whitespace with ,S " Credit: voyeg3r https://github.com/mitechie/pyvim/issues/#issue/1 " deletes excess space but maintains the list of jumps unchanged " for more details see: h keepjumps fun! CleanExtraSpaces() let save_cursor = getpos(".") let old_query = getreg('/') :%s/\s\+$//e call setpos('.', save_cursor) call setreg('/', old_query) endfun map <silent><leader>S <esc>:keepjumps call CleanExtraSpaces()<cr> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t set completeopt+=menuone,longest let g:SuperTabDefaultCompletionType = "context" let g:SuperTabContextDefaultCompletionType = "<c-n>" let g:SuperTabLongestHighlight = 1 let g:SuperTabMidWordCompletion = 1 " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufReadPost quickfix map <buffer> <silent> <CR> :.cc <CR> :ccl au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" " au BufRead *.py compiler nose " au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m au BufRead *.py set tags=tags-py;/ " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set textwidth=78 au BufRead *.js set tags=tags-js;/ au BufRead *.js set makeprg=/usr/bin/jslint\ --maxlen=78\ --goodparts\ --nomen\ --indent=4\ % au BufRead *.js set errorformat=%-P%f, \%-G/*jslint\ %.%#*/, \%*[\ ]%n\ %l\\,%c:\ %m, \%-G\ \ \ \ %.%#, \%-GNo\ errors\ found., \%-Q autocmd BufRead,BufNewFile *.json set filetype=json command Js silent %!jp command Jc silent %!jcompress autocmd FileType json Js " ================================================== " CSS " ================================================== " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " enable html tag folding with ,f nnoremap <leader>f Vatzf + +" ================================================== +" Git Tricks +" ================================================== +" Show the diff in the preview window of the commit during git commit +autocmd FileType gitcommit DiffGitCached | wincmd p + " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish 
a tag '>' " press '>' twice it will complete and cursor in the middle " mako.vim " http://www.vim.org/scripts/script.php?script_id=2663 " syntax support for mako code " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>a :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 autocmd FileType python map <buffer> <leader>M :call Pep8()<CR>:cw<CR> " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " bundle/snipmate/after/plugin/snipmate ino <silent> <leader>, <c-r>=TriggerSnippet()<cr> snor <silent> <leader>, <esc>i<right><c-r>=TriggerSnippet()<cr> ino <silent> <leader>\< <c-r>=BackwardsSnippet()<cr> snor <silent> <leader>\< <esc>i<right><c-r>=BackwardsSnippet()<cr> ino <silent> <leader>n <c-r>=ShowAvailableSnips()<cr> " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pyflakes " http://www.vim.org/scripts/script.php?script_id=3161 " default config for underlines of syntax errors in gvim -let g:pyflakes_use_quickfix = 0 +" let g:pyflakes_use_quickfix = 0 + 
+" Syntastic +let g:syntastic_python_checker = 'pyflakes' " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist -l " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 + " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter "source ~/.vim/twitvim.vim " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 " Tagbar " https://github.com/majutsushi/tagbar/ " Show ctags info in the sidebar nmap <silent> <leader>l :TagbarToggle<CR> " function! CustomCodeAssistInsertMode() " call RopeCodeAssistInsertMode() " if pumvisible() " return "\<C-L>\<Down>" " else " return '' " endif " endfunction " " function! 
TabWrapperComplete() " let cursyn = synID(line('.'), col('.') - 1, 1) " if pumvisible() " return "\<C-Y>" " endif " if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 " return "\<Tab>" " else " return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" " endif " endfunction " " inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " vim-makegreen && vim-nosecompiler " unit testing python code in during editing " I use files in the same dir test_xxxx.* " if we're already on the test_xxx.py file, just rerun current test file " function MakeArgs() " if empty(matchstr(expand('%'), 'test_')) " " if no test_ on the filename, then add it to run tests " let make_args = 'test_%' " else " let make_args = '%' " endif " " :call MakeGreen(make_args) " endfunction " " autocmd FileType python map <buffer> <leader>t :call MakeArgs()<CR> " " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>) " javascript folding function! JavaScriptFold() setl foldmethod=syntax setl foldlevelstart=1 syn region foldBraces start=/{/ end=/}/ transparent fold keepend extend function! 
FoldText() return substitute(getline(v:foldstart), '{.*', '{...}', '') endfunction setl foldtext=FoldText() endfunction diff --git a/bundle_list b/bundle_list index 4eb015d..4efe9af 100644 --- a/bundle_list +++ b/bundle_list @@ -1,24 +1,26 @@ http://github.com/sukima/xmledit.git http://github.com/vim-scripts/mako.vim.git http://github.com/vim-scripts/LustyJuggler.git http://github.com/tomtom/tcomment_vim.git http://github.com/vim-scripts/pep8--Driessen.git http://github.com/vim-scripts/jpythonfold.vim.git http://github.com/vim-scripts/pydoc.vim.git http://github.com/tsaleh/vim-supertab.git http://github.com/msanders/snipmate.vim.git http://github.com/vim-scripts/surround.vim.git http://github.com/vim-scripts/Gist.vim.git http://github.com/scrooloose/nerdtree.git #http://github.com/vim-scripts/pylint.vim.git http://github.com/vim-scripts/nginx.vim.git -git://github.com/mitechie/pyflakes-pathogen.git +#git://github.com/mitechie/pyflakes-pathogen.git #http://github.com/kevinw/pyflakes-vim.git +https://github.com/nvie/vim-flake8.git http://github.com/tomtom/tlib_vim.git https://github.com/ap/vim-css-color.git https://github.com/Bogdanp/quicksilver.vim.git git://github.com/majutsushi/tagbar https://github.com/bolasblack/csslint.vim.git https://github.com/pangloss/vim-javascript.git https://github.com/yui/vim-yui3.git https://github.com/Lokaltog/vim-powerline +http://github.com/depuracao/vim-darkdevel.git diff --git a/custom_snippets/html-custom.snippets b/custom_snippets/html-custom.snippets index 7012821..f562c95 100644 --- a/custom_snippets/html-custom.snippets +++ b/custom_snippets/html-custom.snippets @@ -1,14 +1,63 @@ snippet yuib <span id="${1:id}" class="yui-button yui-link-button"> <em class="first-child"> <a href="${2:link}" id="${3:id_link}" class="add">${4:text}</a> </em> </span> snippet wpcode [codeblock lang="${1:lang}"] ${2:code} [/codeblock] snippet a <a href="${1:link}" title="${2:title}">${3:text}</a> + +snippet lpjstest + <!DOCTYPE HTML PUBLIC 
"-//W3C//DTD HTML 4.01//EN" + "http://www.w3.org/TR/html4/strict.dtd"> + <!-- + Copyright 2012 Canonical Ltd. This software is licensed under the + GNU Affero General Public License version 3 (see the file LICENSE). + --> + + <html> + <head> + <title>Test ${LIBRARY}</title> + + <!-- YUI and test setup --> + <script type="text/javascript" + src="../../../../../../build/js/yui/yui/yui.js"> + </script> + <link rel="stylesheet" + href="../../../../../../build/js/yui/console/assets/console-core.css" /> + <link rel="stylesheet" + href="../../../../../../build/js/yui/console/assets/skins/sam/console.css" /> + <link rel="stylesheet" + href="../../../../../../build/js/yui/test/assets/skins/sam/test.css" /> + + <script type="text/javascript" + src="../../../../../../build/js/lp/app/testing/testrunner.js"></script> + + <link rel="stylesheet" href="../../../../app/javascript/testing/test.css" /> + + <!-- Dependencies --> + <!-- <script type="text/javascript" src="../../../../../../build/js/lp/..."></script> --> + + <!-- The module under test. --> + <script type="text/javascript" src="../${LIBRARY}.js"></script> + + <!-- Any css assert for this module. --> + <!-- <link rel="stylesheet" href="../assets/${LIBRARY}-core.css" /> --> + + <!-- The test suite. 
--> + <script type="text/javascript" src="test_${LIBRARY}.js"></script> + + </head> + <body class="yui3-skin-sam"> + <ul id="suites"> + <!-- <li>lp.large_indicator.test</li> --> + <li>lp.${LIBRARY}.test</li> + </ul> + </body> + </html> diff --git a/custom_snippets/javascript-custom.snippets b/custom_snippets/javascript-custom.snippets index 6dd882d..44d16d1 100644 --- a/custom_snippets/javascript-custom.snippets +++ b/custom_snippets/javascript-custom.snippets @@ -1,50 +1,108 @@ # dump out to console snippet dmp console.log(${1|somevar}); snippet deb debugger; snippet g get('${1|prop}') snippet doc /** * ${1|some_doc} * */ snippet jslint /*jslint eqeqeq: false, browser: true, debug: true, onevar: true, plusplus: false, newcap: false */ /*global $: false, window: false, self: false, escape: false, mor: false, sprintf: false */ snippet clos (function () { ${1|code} }()); snippet setup setUp: function () { }, tearDown: function () { }, snippet test test_${1|name}: function () { ${2|code} } snippet true Y.Assert.isTrue(${1|cond}, "${2|text}"); snippet false Y.Assert.isFalse(${1|cond}, "${2|text}"); snippet eq Y.Assert.areEqual(${1|cond1}, ${2|cond2}, "${3|text}"); snippet neq Y.Assert.areNotEqual(${1|cond1}, ${2|cond2}, "${3|text}"); + +snippet lpjstest + /* Copyright (c) 2012, Canonical Ltd. All rights reserved. 
*/ + + YUI.add('lp.${LIBRARY}.test', function (Y) { + + var tests = Y.namespace('lp.${LIBRARY}.test'); + tests.suite = new Y.Test.Suite('${LIBRARY} Tests'); + + tests.suite.add(new Y.Test.Case({ + name: '${LIBRARY}_tests', + + setUp: function () {}, + tearDown: function () {}, + + test_library_exists: function () { + Y.Assert.isObject(Y.lp.${LIBRARY}, + "We should be able to locate the lp.${LIBRARY} module"); + } + + })); + + }, '0.1', {'requires': ['test', 'console', 'lp.${LIBRARY}']}); + +snippet doc_attr + /** + * @attribute ${1|name} + * @default ${2|default} + * @type ${3|type} + * + */ + +snippet doc_method + /** + * ${1|description} + * + * @method ${2|name} + * @param {${3|type}} ${4|description} + * + */ + +snippet doc_class + /** + * ${1|description} + * + * @class ${2|name} + * @extends ${3|extends} + * + */ + +snippet doc_module + /** + * ${1|description} + * + * @namespace ${2|name} + * @module ${3|module} + * + */
mitechie/pyvim
e048481116330c187aa9c6bcef23a0eda4e39be2
Add the vim powerline plugin
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index a3b66d2..f49d62a 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,5 +1,7 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =3 +let g:netrw_dirhist_cnt =5 let g:netrw_dirhist_1='/home/rharding/src/node' let g:netrw_dirhist_2='/home/rharding/.offlineimap' let g:netrw_dirhist_3='/home/rharding/src/docs/networks' +let g:netrw_dirhist_4='/home/rharding/launchpad/lp-branches/devel/lib/lp/app/templates' +let g:netrw_dirhist_5='/home/rharding/src/python-oops-tools' diff --git a/bundle_list b/bundle_list index da9244a..4eb015d 100644 --- a/bundle_list +++ b/bundle_list @@ -1,23 +1,24 @@ http://github.com/sukima/xmledit.git http://github.com/vim-scripts/mako.vim.git http://github.com/vim-scripts/LustyJuggler.git http://github.com/tomtom/tcomment_vim.git http://github.com/vim-scripts/pep8--Driessen.git http://github.com/vim-scripts/jpythonfold.vim.git http://github.com/vim-scripts/pydoc.vim.git http://github.com/tsaleh/vim-supertab.git http://github.com/msanders/snipmate.vim.git http://github.com/vim-scripts/surround.vim.git http://github.com/vim-scripts/Gist.vim.git http://github.com/scrooloose/nerdtree.git #http://github.com/vim-scripts/pylint.vim.git http://github.com/vim-scripts/nginx.vim.git git://github.com/mitechie/pyflakes-pathogen.git #http://github.com/kevinw/pyflakes-vim.git http://github.com/tomtom/tlib_vim.git https://github.com/ap/vim-css-color.git https://github.com/Bogdanp/quicksilver.vim.git git://github.com/majutsushi/tagbar https://github.com/bolasblack/csslint.vim.git https://github.com/pangloss/vim-javascript.git https://github.com/yui/vim-yui3.git +https://github.com/Lokaltog/vim-powerline
mitechie/pyvim
34fd4a70dfc95399d163682af2fbca379147fb15
Add some colors we've played with lately
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index b66b45b..7771a52 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,9 +1,2 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =7 -let g:netrw_dirhist_1='/home/rharding/configs/pyvim/.vim/bundle/Vim-nosecompiler' -let g:netrw_dirhist_2='/home/rharding/configs/pyvim/.vim/bundle/vim-makegreen' -let g:netrw_dirhist_3='/home/rharding/configs/pyvim/.vim/bundle/Vim-nosecompiler' -let g:netrw_dirhist_4='/home/rharding/.config/awesome' -let g:netrw_dirhist_5='/home/rharding/configs/dotfiles/awesome/autostart' -let g:netrw_dirhist_6='/var/lib/postgres/data' -let g:netrw_dirhist_7='/home/rharding/Dropbox/docs/mug_loco' +let g:netrw_dirhist_cnt =0 diff --git a/.vim/colors/Tomorrow-Night.vim b/.vim/colors/Tomorrow-Night.vim new file mode 100644 index 0000000..d13325d --- /dev/null +++ b/.vim/colors/Tomorrow-Night.vim @@ -0,0 +1,310 @@ +" Tomorrow Night - Full Colour and 256 Colour +" http://chriskempson.com +" +" Hex colour conversion functions borrowed from the theme "Desert256"" + +let g:colors_name = "Tomorrow-Night" + +" Default GUI Colours +let s:foreground = "c5c8c6" +let s:background = "1d1f21" +let s:selection = "373b41" +let s:line = "282a2e" +let s:comment = "969896" +let s:red = "cc6666" +let s:orange = "de935f" +let s:yellow = "f0c674" +let s:green = "b5bd68" +let s:blue = "81a2be" +let s:purple = "b294bb" + +" Console 256 Colours +if !has("gui_running") + let s:background = "303030" + let s:line = "3a3a3a" + let s:selection = "585858" +end + +set background=dark +hi clear +syntax reset + +if has("gui_running") || &t_Co == 88 || &t_Co == 256 + " Returns an approximate grey index for the given grey level + fun <SID>grey_number(x) + if &t_Co == 88 + if a:x < 23 + return 0 + elseif a:x < 69 + return 1 + elseif a:x < 103 + return 2 + elseif a:x < 127 + return 3 + elseif a:x < 150 + return 4 + elseif a:x < 173 + return 5 + elseif a:x < 196 + return 6 + elseif a:x < 219 + return 7 + elseif a:x < 243 + return 8 
+ else + return 9 + endif + else + if a:x < 14 + return 0 + else + let l:n = (a:x - 8) / 10 + let l:m = (a:x - 8) % 10 + if l:m < 5 + return l:n + else + return l:n + 1 + endif + endif + endif + endfun + + " Returns the actual grey level represented by the grey index + fun <SID>grey_level(n) + if &t_Co == 88 + if a:n == 0 + return 0 + elseif a:n == 1 + return 46 + elseif a:n == 2 + return 92 + elseif a:n == 3 + return 115 + elseif a:n == 4 + return 139 + elseif a:n == 5 + return 162 + elseif a:n == 6 + return 185 + elseif a:n == 7 + return 208 + elseif a:n == 8 + return 231 + else + return 255 + endif + else + if a:n == 0 + return 0 + else + return 8 + (a:n * 10) + endif + endif + endfun + + " Returns the palette index for the given grey index + fun <SID>grey_colour(n) + if &t_Co == 88 + if a:n == 0 + return 16 + elseif a:n == 9 + return 79 + else + return 79 + a:n + endif + else + if a:n == 0 + return 16 + elseif a:n == 25 + return 231 + else + return 231 + a:n + endif + endif + endfun + + " Returns an approximate colour index for the given colour level + fun <SID>rgb_number(x) + if &t_Co == 88 + if a:x < 69 + return 0 + elseif a:x < 172 + return 1 + elseif a:x < 230 + return 2 + else + return 3 + endif + else + if a:x < 75 + return 0 + else + let l:n = (a:x - 55) / 40 + let l:m = (a:x - 55) % 40 + if l:m < 20 + return l:n + else + return l:n + 1 + endif + endif + endif + endfun + + " Returns the actual colour level for the given colour index + fun <SID>rgb_level(n) + if &t_Co == 88 + if a:n == 0 + return 0 + elseif a:n == 1 + return 139 + elseif a:n == 2 + return 205 + else + return 255 + endif + else + if a:n == 0 + return 0 + else + return 55 + (a:n * 40) + endif + endif + endfun + + " Returns the palette index for the given R/G/B colour indices + fun <SID>rgb_colour(x, y, z) + if &t_Co == 88 + return 16 + (a:x * 16) + (a:y * 4) + a:z + else + return 16 + (a:x * 36) + (a:y * 6) + a:z + endif + endfun + + " Returns the palette index to approximate the given 
R/G/B colour levels + fun <SID>colour(r, g, b) + " Get the closest grey + let l:gx = <SID>grey_number(a:r) + let l:gy = <SID>grey_number(a:g) + let l:gz = <SID>grey_number(a:b) + + " Get the closest colour + let l:x = <SID>rgb_number(a:r) + let l:y = <SID>rgb_number(a:g) + let l:z = <SID>rgb_number(a:b) + + if l:gx == l:gy && l:gy == l:gz + " There are two possibilities + let l:dgr = <SID>grey_level(l:gx) - a:r + let l:dgg = <SID>grey_level(l:gy) - a:g + let l:dgb = <SID>grey_level(l:gz) - a:b + let l:dgrey = (l:dgr * l:dgr) + (l:dgg * l:dgg) + (l:dgb * l:dgb) + let l:dr = <SID>rgb_level(l:gx) - a:r + let l:dg = <SID>rgb_level(l:gy) - a:g + let l:db = <SID>rgb_level(l:gz) - a:b + let l:drgb = (l:dr * l:dr) + (l:dg * l:dg) + (l:db * l:db) + if l:dgrey < l:drgb + " Use the grey + return <SID>grey_colour(l:gx) + else + " Use the colour + return <SID>rgb_colour(l:x, l:y, l:z) + endif + else + " Only one possibility + return <SID>rgb_colour(l:x, l:y, l:z) + endif + endfun + + " Returns the palette index to approximate the 'rrggbb' hex string + fun <SID>rgb(rgb) + let l:r = ("0x" . strpart(a:rgb, 0, 2)) + 0 + let l:g = ("0x" . strpart(a:rgb, 2, 2)) + 0 + let l:b = ("0x" . strpart(a:rgb, 4, 2)) + 0 + + return <SID>colour(l:r, l:g, l:b) + endfun + + " Sets the highlighting for the given group + fun <SID>X(group, fg, bg, attr) + if a:fg != "" + exec "hi " . a:group . " guifg=#" . a:fg . " ctermfg=" . <SID>rgb(a:fg) + endif + if a:bg != "" + exec "hi " . a:group . " guibg=#" . a:bg . " ctermbg=" . <SID>rgb(a:bg) + endif + if a:attr != "" + exec "hi " . a:group . " gui=" . a:attr . " cterm=" . 
a:attr + endif + endfun + + " Vim Highlighting + call <SID>X("Normal", s:foreground, s:background, "") + call <SID>X("LineNr", s:foreground, "", "") + call <SID>X("NonText", s:selection, "", "") + call <SID>X("SpecialKey", s:selection, "", "") + call <SID>X("Search", "", s:selection, "") + call <SID>X("StatusLine", s:foreground, s:background, "reverse") + call <SID>X("StatusLineNC", s:foreground, s:background, "reverse") + call <SID>X("Visual", "", s:selection, "") + call <SID>X("Directory", s:blue, "", "") + call <SID>X("ModeMsg", s:green, "", "") + call <SID>X("MoreMsg", s:green, "", "") + call <SID>X("Question", s:green, "", "") + call <SID>X("WarningMsg", s:red, "", "") + if version >= 700 + call <SID>X("CursorLine", "", s:line, "none") + call <SID>X("CursorColumn", "", s:line, "none") + call <SID>X("PMenu", s:foreground, s:selection, "none") + call <SID>X("PMenuSel", s:foreground, s:selection, "reverse") + end + + " Standard Highlighting + call <SID>X("Comment", s:comment, "", "") + call <SID>X("Todo", s:comment, "", "") + call <SID>X("Title", s:comment, "", "") + call <SID>X("Identifier", s:red, "", "none") + call <SID>X("Statement", s:foreground, "", "") + call <SID>X("Conditional", s:foreground, "", "") + call <SID>X("Repeat", s:foreground, "", "") + call <SID>X("Structure", s:purple, "", "") + call <SID>X("Function", s:blue, "", "") + call <SID>X("Constant", s:orange, "", "") + call <SID>X("String", s:green, "", "") + call <SID>X("Special", s:foreground, "", "") + call <SID>X("PreProc", s:purple, "", "") + call <SID>X("Operator", s:foreground, "", "none") + call <SID>X("Type", s:blue, "", "none") + call <SID>X("Define", s:purple, "", "none") + call <SID>X("Include", s:blue, "", "") + "call <SID>X("Ignore", "666666", "", "") + + " Vim Highlighting + call <SID>X("vimCommand", s:red, "", "none") + + " C Highlighting + call <SID>X("cType", s:yellow, "", "") + call <SID>X("cStorageClass", s:purple, "", "") + + " PHP Highlighting + call <SID>X("phpVarSelector", 
s:red, "", "") + call <SID>X("phpKeyword", s:purple, "", "") + + " Ruby Highlighting + call <SID>X("rubySymbol", s:green, "", "") + call <SID>X("rubyConstant", s:yellow, "", "") + call <SID>X("rubyAttribute", s:blue, "", "") + call <SID>X("rubyInclude", s:blue, "", "") + call <SID>X("rubyLocalVariableOrMethod", s:orange, "", "") + call <SID>X("rubyCurlyBlock", s:orange, "", "") + + " Delete Functions + delf <SID>X + delf <SID>rgb + delf <SID>colour + delf <SID>rgb_colour + delf <SID>rgb_level + delf <SID>rgb_number + delf <SID>grey_colour + delf <SID>grey_level + delf <SID>grey_number +endif \ No newline at end of file diff --git a/.vim/colors/aldmeris.vim b/.vim/colors/aldmeris.vim new file mode 100644 index 0000000..a4b70e7 --- /dev/null +++ b/.vim/colors/aldmeris.vim @@ -0,0 +1,362 @@ +" Vim color scheme +" Name: aldmeris.vim +" Maintainer: Vincent Velociter <[email protected]> +" Version: 1.0 +" Url: https://github.com/veloce/vim-aldmeris + +" Aldmeris is based on the gedit oblivion theme for vim. +" The original theme is part of GtkSourceView. +" Original author: Paolo Borelli <[email protected]>. +" + +" Usage {{{ +" ------------ +" NOTE FOR TERMINAL USERS: +" ----------------------- +" The colorscheme is compatible with terminals that support 256 colors. See: +" +" http://vim.wikia.com/wiki/256_colors_in_vim +" +" If your terminal uses the tango palette for its base colors (which is, +" I believe, the default in gnome-terminal), you can obtain the most accurate +" result with aldmeris by setting this option in your .vimrc: +" +" let g:aldmeris_termcolors = "tango" +" +" Below is an .Xdefaults example of that palette: +" +" ! Black +" *color0: #2E3436 +" *color8: #555753 +" ! Red +" *color1: #CC0000 +" *color9: #EF2929 +" ! Green +" *color2: #4E9A06 +" *color10: #8AE234 +" ! Yellow +" *color3: #C4A000 +" *color11: #FCE94F +" ! Blue +" *color4: #3465A4 +" *color12: #729FCF +" ! Purple +" *color5: #75507B +" *color13: #AD7FA8 +" ! 
Cyan +" *color6: #06989A +" *color14: #34E2E2 +" ! White +" *color7: #D3D7CF +" *color15: #EEEEEC +" +" TANGO PALETTE: +" ---------------------------------- +" Name Gui xterm/hex +" ----------- -------- ----------- +" butter1 #fce94f 221 #ffd75f +" butter2 #edd400 220 #ffd700 +" butter3 #c4a000 178 #d7af00 +" chameleon1 #8ae234 113 #87d75f +" chameleon2 #73d216 76 #5fd700 +" chameleon3 #4e9a06 64 #5f8700 +" orange1 #fcaf3e 215 #ffaf5f +" orange2 #f57900 208 #ff8700 +" orange3 #ce5c00 166 #d75f00 +" skyblue1 #729fcf 74 #5fafd7 +" skyblue2 #3465a4 61 #5f5faf +" skyblue3 #204a87 24 #005f87 +" plum1 #ad7fa8 139 #af87af +" plum2 #75507b 96 #875f87 +" plum3 #5c3566 239 #4e4e4e +" chocolate1 #e9b96e 179 #d7af5f +" chocolate2 #c17d11 136 #af8700 +" chocolate3 #8f5902 94 #875f00 +" scarletred1 #ef2929 196 #ff0000 +" scarletred2 #cc0000 160 #d70000 +" scarletred3 #a40000 124 #af0000 +" aluminium1 #eeeeec 231 #ffffff +" aluminium2 #d3d7cf 252 #d0d0d0 +" aluminium3 #babdb6 250 #bcbcbc +" aluminium4 #888a85 102 #878787 +" aluminium5 #555753 240 #585858 +" aluminium6 #2e3436 236 #303030 +" }}} +" Colorscheme initialization {{{ +" -------------------------- +if has("gui_running") + set background=dark +endif +hi clear +if exists("syntax_on") + syntax reset +endif + +let g:colors_name = "aldmeris" +" }}} +" Custom highlight function {{{ +if has("gui_running") + let s:hi_args = ['guibg', 'guifg', 'gui', 'guisp'] +else + let s:hi_args = ['ctermbg', 'ctermfg', 'cterm'] +endif +function! s:Hi(name, ...) + let command = 'hi ' . a:name + for i in range(0,len(a:000)-1) + let command .= ' ' . s:hi_args[i] . '=' . 
a:000[i] + endfor + exe command +endfunc +" }}} +" Default options and env settings {{{ +" -------------------------------- +if !exists("g:aldmeris_termcolors") + let g:aldmeris_termcolors = "xterm" +endif + +" List terminals that support italics (I'm sure only for xrvt) +let s:terms_italic = ["rxvt", "rxvt-unicode", "rxvt-unicode-256color"] +" }}} +" Gui & term palettes definition {{{ +" ------------------------------ +if has("gui_running") + let s:butter1 = "#fce94f" + let s:butter2 = "#edd400" + let s:chameleon1 = "#8ae234" + let s:chameleon3 = "#4e9a06" + let s:orange1 = "#fcaf3e" + let s:orange2 = "#f57900" + let s:orange3 = "#ce5c00" + let s:skyblue1 = "#729fcf" + let s:skyblue2 = "#3465a4" + let s:plum1 = "#ad7fa8" + let s:plum2 = "#75507b" + let s:scarletred1 = "#ef2929" + let s:scarletred2 = "#cc0000" + let s:aluminium1 = "#eeeeec" + let s:aluminium2 = "#d3d7cf" + let s:aluminium3 = "#babdb6" + let s:aluminium4 = "#888a85" + let s:aluminium5 = "#555753" + let s:aluminium6 = "#2e3436" + let s:black = "#000000" +elseif &t_Co == 256 && g:aldmeris_termcolors == "tango" + let s:butter1 = "11" + let s:butter2 = "220" + let s:chameleon1 = "10" + let s:chameleon3 = "2" + let s:orange1 = "215" + let s:orange2 = "208" + let s:orange3 = "166" + let s:skyblue1 = "12" + let s:skyblue2 = "4" + let s:plum1 = "13" + let s:plum2 = "5" + let s:scarletred1 = "9" + let s:scarletred2 = "1" + let s:aluminium1 = "15" + let s:aluminium2 = "7" + let s:aluminium3 = "250" + let s:aluminium4 = "102" + let s:aluminium5 = "8" + let s:aluminium6 = "0" + let s:black = "16" +elseif &t_Co == 256 + let s:butter1 = "221" + let s:butter2 = "220" + let s:chameleon1 = "113" + let s:chameleon3 = "64" + let s:orange1 = "215" + let s:orange2 = "208" + let s:orange3 = "166" + let s:skyblue1 = "74" + let s:skyblue2 = "61" + let s:plum1 = "139" + let s:plum2 = "96" + let s:scarletred1 = "196" + let s:scarletred2 = "160" + let s:aluminium1 = "231" + let s:aluminium2 = "252" + let s:aluminium3 = "250" 
+ let s:aluminium4 = "102" + let s:aluminium5 = "240" + let s:aluminium6 = "236" + let s:black = "16" +else + let s:butter1 = "LightYellow" + let s:butter2 = "Yellow" + let s:chameleon1 = "LightGreen" + let s:chameleon3 = "DarkGreen" + let s:orange1 = "LightCyan" + let s:orange2 = "Cyan" + let s:orange3 = "DarkCyan" + let s:skyblue1 = "LightBlue" + let s:skyblue2 = "Blue" + let s:plum1 = "LightMagenta" + let s:plum2 = "Magenta" + let s:scarletred1 = "LightRed" + let s:scarletred2 = "Red" + let s:aluminium1 = "White" + let s:aluminium2 = "White" + let s:aluminium3 = "LightGray" + let s:aluminium4 = "Gray" + let s:aluminium5 = "DarkGray" + let s:aluminium6 = "DarkGrey" + let s:black = "Black" +endif +" }}} +" Syntax groups colors (:help group-name) {{{ +" --------------------------------------- +call s:Hi( 'Normal', s:aluminium6, s:aluminium2 ) +call s:Hi( 'Comment', "bg", s:aluminium4, "italic" ) +call s:Hi( 'Constant', "bg", s:butter2 ) +call s:Hi( 'Boolean', "bg", s:orange3 ) +call s:Hi( 'Identifier', "bg", s:skyblue1, "NONE" ) +call s:Hi( 'Statement', "bg", s:aluminium1, "bold" ) +call s:Hi( 'PreProc', "bg", s:plum1 ) +call s:Hi( 'Type', "bg", s:chameleon1, "bold" ) +call s:Hi( 'Special', "bg", s:orange1 ) +call s:Hi( 'SpecialChar', "bg", s:orange3 ) +call s:Hi( 'Underlined', "bg", s:skyblue1, "underline" ) +call s:Hi( 'Error', s:scarletred2, s:aluminium1, "bold" ) +call s:Hi( 'Todo', s:butter1, s:aluminium4, "bold" ) + +" italic is a special case +if !has("gui_running") + if (index(s:terms_italic, &term) < 0) + hi Comment cterm=NONE + endif +endif +" }}} +" Highlight default (:help highlight-default) {{{ +" ------------------------------------------- +" ColorColumn used for the columns set with 'colorcolumn' +" Conceal placeholder characters substituted for concealed +call s:Hi( 'Cursor', s:aluminium2, s:black ) +" CursorIM like Cursor, but used when in IME mode +call s:Hi( 'CursorColumn', s:aluminium5, "NONE", "NONE" ) +call s:Hi( 'CursorLine', s:aluminium5, 
"NONE", "NONE" ) +call s:Hi( 'Directory', "bg", s:skyblue1, "NONE" ) +call s:Hi( 'DiffAdd', s:aluminium5, s:chameleon3, "bold" ) +call s:Hi( 'DiffChange', s:aluminium5, s:orange1, "bold" ) +call s:Hi( 'DiffDelete', s:aluminium5, s:scarletred2, "bold" ) +call s:Hi( 'DiffText', s:aluminium5, s:skyblue1, "bold" ) +call s:Hi( 'ErrorMsg', s:scarletred2, s:aluminium1 ) +call s:Hi( 'VertSplit', s:aluminium4, s:aluminium6, "bold" ) +call s:Hi( 'Folded', "bg", s:aluminium3, "bold,underline" ) +call s:Hi( 'FoldColumn', s:aluminium3, s:aluminium5 ) +call s:Hi( 'SignColumn', s:aluminium3, s:aluminium5 ) +" IncSearch 'incsearch' highlighting +call s:Hi( 'LineNr', s:black, s:aluminium5 ) +call s:Hi( 'MatchParen', s:plum1, s:aluminium1 ) +" ModeMsg 'showmode' message (e.g. , "-- INSERT --") +call s:Hi( 'MoreMsg', "bg", s:chameleon1, "bold" ) +call s:Hi( 'NonText', "bg", s:aluminium5 ) +call s:Hi( 'Pmenu', s:black, s:aluminium3 ) +call s:Hi( 'PmenuSel', s:aluminium5, s:aluminium1 ) +call s:Hi( 'PmenuSbar', s:aluminium5, s:aluminium5 ) +call s:Hi( 'PmenuThumb', s:aluminium4, s:aluminium4 ) +call s:Hi( 'Question', "bg", s:chameleon1, "bold" ) +call s:Hi( 'Search', s:chameleon3, s:aluminium1 ) +call s:Hi( 'SpecialKey', "bg", s:aluminium5 ) + if has("gui_running") +call s:Hi( 'SpellBad', "bg", "NONE", "undercurl", s:scarletred1 ) +call s:Hi( 'SpellCap', "bg", "NONE", "undercurl", s:skyblue1 ) +call s:Hi( 'SpellLocal', "bg", "NONE", "undercurl", s:orange1 ) +call s:Hi( 'SpellRare', "bg", "NONE", "undercurl", s:plum1 ) + else +call s:Hi( 'SpellBad', s:scarletred2, "NONE", "undercurl" ) +call s:Hi( 'SpellCap', s:skyblue2, "NONE", "undercurl" ) +call s:Hi( 'SpellLocal', s:orange2, "NONE", "undercurl" ) +call s:Hi( 'SpellRare', s:plum2, "NONE", "undercurl" ) + endif +call s:Hi( 'StatusLine', s:aluminium6, s:aluminium3, "bold,reverse" ) +call s:Hi( 'StatusLineNC', s:aluminium4, s:aluminium6, "NONE" ) +" TabLine tab pages line, not active tab page label +" TabLineFill tab pages line, where 
there are no labels +" TabLineSel tab pages line, active tab page label +call s:Hi( 'Title', "bg", s:butter1, "bold" ) +call s:Hi( 'Visual', s:aluminium4, s:aluminium1 ) +" VisualNOS Visual mode selection when vim is "Not Owning the Selection". +call s:Hi( 'WarningMsg', "bg", s:scarletred1 ) +call s:Hi( 'WildMenu', s:butter2, s:aluminium6 ) +" }}} +" gitcommit colors {{{ +" ---------------- +hi link gitEmail SpecialChar +hi link gitEmailDelimiter gitEmail +hi link gitcommitComment Comment +hi link gitcommitHeader gitcommitComment +hi link gitcommitUntracked gitcommitComment +hi link gitcommitDiscarded gitcommitComment +hi link gitcommitSelected gitcommitComment +call s:Hi( 'gitcommitDiscardedType', "NONE", s:scarletred1 ) +call s:Hi( 'gitcommitSelectedType', "NONE", s:chameleon1 ) +call s:Hi( 'gitcommitUnmergedType', "NONE", s:butter1 ) +call s:Hi( 'gitcommitUntrackedFile', "NONE", s:plum1, "bold" ) +call s:Hi( 'gitcommitDiscardedFile', "NONE", s:scarletred1, "bold" ) +call s:Hi( 'gitcommitSelectedFile', "NONE", s:chameleon1, "bold" ) +call s:Hi( 'gitcommitUnmergedFile', "NONE", s:butter1, "bold" ) +" }}} +" diff colors {{{ +hi link diffFile Special +hi link diffNewFile diffFile +call s:Hi( 'diffAdded', "NONE", s:chameleon1 ) +call s:Hi( 'diffRemoved', "NONE", s:scarletred1 ) +" }}} +" XML Colors {{{ +" ---------- +hi link xmlTag Identifier +hi link xmlEndTag xmlTag +hi link xmlTagName xmlTag +hi link xmlString String +" }}} +"HTML Colors {{{ +"----------- +hi link htmlTag xmlTag +hi link htmlEndTag xmlEndTag +hi link htmlTagName xmlTagName +hi link htmlSpecialTagName xmlTagName +hi link htmlString xmlString +hi link htmlSpecialChar PreProc +hi link Javascript Normal +" }}} +" Ruby colors {{{ +" ----------- +hi link rubyClass Keyword +hi link rubyDefine Keyword +" }}} +" Javascript colors {{{ +hi link javaScriptBraces normal +" following syntax from https://github.com/pangloss/vim-javascript +hi link javaScriptLabel Identifier +hi link javaScriptThis Identifier +" 
}}} +" PHP Colors {{{ +" ---------- +hi link phpVarSelector Identifier +hi link phpIdentifier Identifier +hi link phpParent Normal +hi link phpFunctions Function +hi link phpSpecial Special +hi link phpMemberSelector Keyword +hi link phpBoolean Boolean +hi link phpNumber Number +" }}} +" CSS Colors {{{ +" ---------- +hi link cssIdentifier Normal +hi link cssClassName Normal +hi link cssTagName Normal +hi link cssBraces Normal +hi link cssRenderProp Keyword +hi link cssBoxProp cssRenderProp +hi link cssUIProp cssRenderProp +hi link cssColorProp cssRenderProp +hi link cssFontProp cssRenderProp +hi link cssTextProp cssRenderProp +hi link cssGeneratedContentProp cssRenderProp +hi link cssValueLength Boolean +hi link cssPseudoClassId Identifier +" }}} +" vim:foldmethod=marker diff --git a/.vim/colors/diablo3.vim b/.vim/colors/diablo3.vim new file mode 100644 index 0000000..0a4806b --- /dev/null +++ b/.vim/colors/diablo3.vim @@ -0,0 +1,226 @@ +" Vim color file +" Name: diablo3 +" Maintainer: Vayn <[email protected]> +" Last Change: 2011å¹´ 05月 30日 星期一 12:52:10 CST +" Thanks To: lilydjwg, Tomas Restrepo (author of molokai.vim) +" Options: +" +" If you want to set long line warning, copy this in your vimrc: +" +" let g:diablo3_longline = 1 +" +" +" The default length of a line is 120, you can change it by putting +" the following in your vimrc: +" +" let g:diablo3_len = 79 +" +" Notice the length here is the length you want to set plus 1. +" +" + +hi clear + +set background=dark +if version > 580 + hi clear + if exists("syntax_on") + syntax reset + endif +endif +let g:colors_name="diablo3" + +" Error format when a line is longer than g:diablo3_longlen, default +" length is 120. +if exists('g:diablo3_longline') && g:diablo3_longline == 1 + if ! 
exists('g:diablo3_len') + let g:diablo3_len = 121 + end + exe 'match LongLineWarning "\%'.g:diablo3_len.'v.*"' +end + +hi Boolean guifg=#ae81ff gui=bold +hi Character guifg=#e6db74 +hi Number guifg=#ae81ff +hi String guifg=#fadc11 +hi Conditional guifg=#6d8fd9 gui=bold +hi Constant guifg=#d4ff36 gui=bold +hi Cursor guifg=#000000 guibg=#f8f8f0 +hi Debug guifg=#bca3a3 gui=bold +hi Define guifg=#66d9ef +hi Delimiter guifg=#8f8f8f +hi DiffAdd guibg=#13354a +hi DiffChange guifg=#89807d guibg=#4c4745 +hi DiffDelete guifg=#960050 guibg=#1e0010 +hi DiffText guibg=#4c4745 gui=italic,bold + +hi Directory guifg=#a6e22e gui=bold +hi Error guifg=#960050 guibg=#1e0010 +hi ErrorMsg guifg=#f92672 guibg=#232526 gui=bold +hi Exception guifg=#a6e22E gui=bold +hi Float guifg=#ae81fF +hi FoldColumn guifg=#465457 guibg=#000000 +hi Folded guifg=#465457 guibg=#000000 +hi Function guifg=#cbfe28 +hi Identifier guifg=#ff9900 +hi Ignore guifg=#808080 guibg=bg +hi IncSearch guifg=#c4be89 guibg=#000000 + +hi Keyword guifg=#f92672 gui=bold +hi Label guifg=#e6db74 gui=none +hi Macro guifg=#c4be89 gui=italic +hi SpecialKey guifg=#d3fe36 gui=italic + +hi MatchParen guifg=#000000 guibg=#fd971f gui=bold +hi ModeMsg guifg=#e6db74 +hi MoreMsg guifg=#e6db74 +hi Operator guifg=#6d8fd9 + +" complete menu +hi Pmenu guifg=#66d9ef guibg=#000000 +hi PmenuSel guibg=#808080 +hi PmenuSbar guibg=#080808 +hi PmenuThumb guifg=#66d9ef + +hi PreCondit guifg=#a6e22e gui=bold +hi PreProc guifg=#a6e22e +hi Question guifg=#66d9ef +hi Repeat guifg=#f92672 gui=bold +hi Search guifg=#fffffF guibg=#0099ff +" marks column +hi SignColumn guifg=#a6e22e guibg=#232526 +hi SpecialChar guifg=#f92672 gui=bold +hi SpecialComment guifg=#465457 gui=bold +hi Special guifg=#66d9ef guibg=bg gui=italic +hi SpecialKey guifg=#888A85 gui=italic +if has("spell") + hi SpellBad guisp=#ff0000 gui=undercurl + hi SpellCap guisp=#7070F0 gui=undercurl + hi SpellLocal guisp=#70f0f0 gui=undercurl + hi SpellRare guisp=#ffffff gui=undercurl +endif +hi 
Statement guifg=#6d8fd9 gui=bold +hi htmlStatement guifg=#99b5d9 +hi StatusLine guifg=#455354 guibg=fg +hi StatusLineNC guifg=#808080 guibg=#080808 +hi StorageClass guifg=#fd971f gui=italic +hi Structure guifg=#66d9ef +hi Tag guifg=#f92672 gui=italic +hi Title guifg=#ef5939 +hi Todo guifg=#ffffff guibg=bg gui=bold + +hi Typedef guifg=#66d9ef +hi Type guifg=#66d9ef gui=none +hi Underlined guifg=#808080 gui=underline + +hi VertSplit guifg=#808080 guibg=#080808 gui=bold +hi VisualNOS guibg=#403d3d +hi Visual guibg=#403d3d +hi WarningMsg guifg=#ffffff guibg=#333333 gui=bold +hi WildMenu guifg=#66d9ef guibg=#000000 + +hi Normal guifg=#f4f4f4 guibg=#070914 +hi Comment guifg=#666666 +hi CursorLine guibg=#293739 +hi CursorColumn guibg=#293739 +hi LineNr guifg=#bcbcbc guibg=#151825 +hi NonText guifg=#151825 guibg=#151825 + +hi LongLineWarning guifg=#960050 guibg=#1e0010 gui=underline + +" +" Support for 256-color terminal +" +if &t_Co > 255 + highlight Boolean cterm=bold ctermfg=141 + highlight Character ctermfg=185 + highlight Number ctermfg=210 + highlight String ctermfg=209 + highlight Conditional ctermfg=214 + highlight Constant cterm=bold ctermfg=191 + highlight Cursor ctermbg=231 ctermfg=16 + highlight Debug cterm=bold ctermfg=250 + highlight Define ctermfg=81 + highlight Delimiter ctermfg=245 + highlight DiffAdd ctermbg=23 + highlight DiffChange ctermbg=239 ctermfg=245 + highlight DiffDelete ctermbg=53 ctermfg=89 + highlight DiffText cterm=bold ctermbg=239 + + highlight Directory cterm=bold ctermfg=112 + highlight Error ctermbg=53 ctermfg=152 + highlight ErrorMsg cterm=bold ctermbg=235 ctermfg=197 + highlight Exception cterm=bold ctermfg=112 + highlight Float ctermfg=141 + highlight FoldColumn ctermbg=31 ctermfg=195 + highlight Folded ctermbg=31 ctermfg=195 + highlight Function ctermfg=154 + highlight Identifier ctermfg=208 + highlight Ignore ctermfg=244 + highlight IncSearch ctermbg=16 ctermfg=186 + + highlight Keyword cterm=bold ctermfg=197 + highlight Label 
cterm=none ctermfg=185 + highlight Macro ctermfg=186 + highlight SpecialKey ctermfg=191 + + highlight MatchParen cterm=bold ctermbg=208 ctermfg=16 + + highlight MoreMsg ctermfg=185 + highlight Operator cterm=bold ctermfg=33 + + " complete menu + highlight Pmenu ctermbg=16 ctermfg=81 + highlight PmenuSel ctermbg=244 + highlight PmenuSbar ctermbg=232 + highlight PmenuThumb ctermfg=81 + + highlight PreCondit cterm=bold ctermfg=112 + highlight PreProc ctermfg=112 + highlight Question ctermfg=81 + highlight Repeat cterm=bold ctermfg=33 + highlight Search ctermbg=33 ctermfg=231 + " marks column + highlight SignColumn ctermbg=235 ctermfg=112 + highlight SpecialChar cterm=bold ctermfg=197 + highlight SpecialComment cterm=bold ctermfg=240 + highlight Special ctermfg=81 + highlight SpecialKey ctermfg=245 + + if has("spell") + highlight SpellBad cterm=undercurl + highlight SpellCap cterm=undercurl + highlight SpellLocal cterm=undercurl + highlight SpellRare cterm=undercurl + endif + + highlight Statement cterm=bold ctermfg=33 + highlight htmlStatement ctermfg=67 + highlight StatusLine ctermfg=240 + highlight StatusLineNC ctermbg=232 ctermfg=244 + highlight StorageClass ctermfg=208 + highlight Structure ctermfg=81 + highlight Tag ctermfg=197 + highlight Title ctermfg=209 + highlight Todo cterm=bold ctermfg=234 + + highlight Typedef ctermfg=81 + highlight Type cterm=none ctermfg=81 + highlight Underlined cterm=underline ctermfg=244 + + highlight VertSplit cterm=bold ctermbg=232 ctermfg=244 + highlight VisualNOS ctermbg=238 + highlight Visual ctermbg=238 + highlight WarningMsg cterm=bold ctermbg=236 ctermfg=231 + highlight WildMenu ctermbg=16 ctermfg=81 + + highlight Normal ctermbg=17 ctermfg=231 + highlight Comment ctermfg=248 + highlight CursorLine cterm=none ctermbg=237 + highlight CursorColumn ctermbg=237 + highlight LineNr ctermbg=18 ctermfg=250 + highlight NonText ctermbg=18 ctermfg=235 + + highlight LongLineWarning cterm=underline ctermbg=53 ctermfg=152 +end + diff --git 
a/.vim/colors/fu.vim b/.vim/colors/fu.vim new file mode 100644 index 0000000..2a96dc4 --- /dev/null +++ b/.vim/colors/fu.vim @@ -0,0 +1,121 @@ +" Vim color file +" +" Name: fu.vim +" Version: 1.1 +" Maintainer: Aaron Mueller <[email protected]> +" Contributors: Florian Eitel <[email protected]> +" Tinou <[email protected]> +" +" This is a compositon from railscast, mustang and xoria256 with a lot of +" improvemts in the colors. Want to change toe colors to your needs? Go to +" this page to see what number is set wo what color: +" http://www.calmar.ws/vim/256-xterm-24bit-rgb-color-chart.html +" +" History: +" 2010-06-09 - Merge changes from Florian Eitel in this file. There was many +" whitespace issues and some unused highlight settings which are removed +" now. Also merged Tinous GUI version of the whole colorscheme. Thanks a +" lot dudes! +" +" 2010-06-09 - Initial setup and creation of this file. Additional colors for +" Ruby and the diff view are added. +" + +if &t_Co != 256 && ! has("gui_running") + echomsg "err: please use GUI or a 256-color terminal (so that t_Co=256 could be set)" + finish +endif + +set background=dark +hi clear + +if exists("syntax_on") + syntax reset +endif +let colors_name = "fu" + + +" General colors +hi Normal ctermfg=252 ctermbg=234 guifg=#d0d0d0 guibg=#1c1c1c +hi CursorColumn ctermbg=238 guibg=#444444 +hi Cursor ctermbg=214 guibg=#ffaf00 +hi CursorLine ctermbg=238 guibg=#444444 +hi FoldColumn ctermfg=248 ctermbg=bg guifg=#a8a8a8 guibg=#000000 +hi Folded ctermfg=255 ctermbg=60 guifg=#eeeeee guibg=#5f5f87 +hi IncSearch ctermfg=0 ctermbg=223 guifg=#000000 guibg=#ffd7af +hi NonText ctermfg=248 ctermbg=233 cterm=bold guifg=#a8a8a8 guibg=#121212 +hi Search ctermfg=0 ctermbg=149 guifg=#000000 guibg=#afd75f +hi SignColumn ctermfg=248 guifg=#a8a8a8 +hi SpecialKey ctermfg=77 guifg=#5fd75f +hi StatusLine ctermfg=232 ctermbg=255 guifg=#080808 guibg=#eeeeee +hi StatusLineNC ctermfg=237 ctermbg=253 guifg=#3a3a3a guibg=#dadada +hi TabLine 
ctermfg=253 ctermbg=237 guifg=#dadada guibg=#3a3a3a +hi TabLineFill ctermfg=0 ctermbg=0 guifg=#000000 guibg=#000000 +hi TabLineSel ctermfg=255 ctermbg=33 guifg=#eeeeee guibg=#0087ff +hi VertSplit ctermfg=237 ctermbg=237 guifg=#3a3a3a guibg=#3a3a3a +hi Visual ctermfg=24 ctermbg=153 guifg=#005f87 guibg=#afd7ff +hi VIsualNOS ctermfg=24 ctermbg=153 guifg=#005f87 guibg=#afd7ff +hi LineNr ctermfg=248 ctermbg=232 guifg=#a8a8a8 guibg=#080808 +hi ModeMsg ctermfg=220 guifg=#ffd700 + +hi ErrorMsg ctermfg=196 ctermbg=52 guifg=#ff0000 guibg=#5f0000 +hi SpellBad ctermfg=196 ctermbg=52 + +if version >= 700 + hi CursorLine ctermbg=236 guibg=#303030 + hi CursorColumn ctermbg=236 guibg=#303030 + hi MatchParen ctermfg=157 ctermbg=237 cterm=bold guifg=#afffaf guibg=#3a3a3a + hi Pmenu ctermfg=255 ctermbg=236 guifg=#eeeeee guibg=#303030 + hi PmenuSel ctermfg=0 ctermbg=74 guifg=#000000 guibg=#5fafd7 + hi PmenuSbar ctermbg=243 guibg=#767676 + hi PmenuThumb ctermbg=252 guibg=#d0d0d0 + hi WildMenu ctermfg=255 ctermbg=33 guifg=#eeeeee guibg=#0087ff +endif + +" Syntax highlighting +hi Comment ctermfg=244 guifg=#808080 + +hi Constant ctermfg=220 cterm=bold guifg=#ffd700 +hi String ctermfg=107 ctermbg=233 guifg=#87af5f guibg=#121212 +hi Character ctermfg=228 ctermbg=16 guifg=#ffff87 guibg=#000000 +hi Number ctermfg=214 guifg=#ffaf00 +hi Boolean ctermfg=148 guifg=#afd700 + +hi Identifier ctermfg=149 guifg=#afd75f +hi Function ctermfg=231 guifg=#ffffff + +hi Statement ctermfg=103 guifg=#8787af +hi Conditional ctermfg=105 guifg=#8787ff +hi Repeat ctermfg=105 guifg=#8787ff +hi Label ctermfg=105 guifg=#8787ff +hi Operator ctermfg=243 guifg=#767676 +hi Keyword ctermfg=190 guifg=#d7ff00 +hi Exception ctermfg=166 ctermbg=0 guifg=#d75f00 guibg=#000000 + +hi PreProc ctermfg=229 guifg=#ffffaf + +hi Type ctermfg=111 guifg=#87afff +hi Structure ctermfg=111 ctermbg=233 guifg=#87afff guibg=#121212 + +hi Special ctermfg=220 guifg=#ffd700 +hi SpecialComment ctermfg=228 ctermbg=16 guifg=#ffff87 guibg=#000000 + 
+hi Error ctermfg=196 ctermbg=52 guifg=#ff0000 guibg=#5f0000 +hi Todo ctermfg=46 ctermbg=22 guifg=#00ff00 guibg=#005f00 + +" Diff +hi diffAdd ctermfg=bg ctermbg=151 guifg=#afd787 +hi diffDelete ctermfg=bg ctermbg=246 guifg=#d78787 +hi diffChange ctermfg=bg ctermbg=181 guifg=#000000 guibg=#afd7af +hi diffText ctermfg=bg ctermbg=174 cterm=bold guifg=#000000 guibg=#949494 + +" Ruby +hi rubyBlockParameter ctermfg=27 guifg=#005fff +hi rubyClass ctermfg=75 guifg=#5fafff +hi rubyConstant ctermfg=167 guifg=#d75f5f +hi rubyInterpolation ctermfg=107 guifg=#87af5f +hi rubyLocalVariableOrMethod ctermfg=189 guifg=#d7d7ff +hi rubyPredefinedConstant ctermfg=167 guifg=#d75f5f +hi rubyPseudoVariable ctermfg=221 guifg=#ffd75f +hi rubyStringDelimiter ctermfg=143 guifg=#afaf5f + diff --git a/.vim/colors/liquidcarbon.vim b/.vim/colors/liquidcarbon.vim new file mode 100644 index 0000000..d5164f1 --- /dev/null +++ b/.vim/colors/liquidcarbon.vim @@ -0,0 +1,126 @@ +" File: liquidcarbon.vim +" Author: Jeet Sukumaran +" Description: Vim color file +" Last Modified: October 06, 2010 + +" Initialization and Setup {{{1 +" ============================================================================ +set background=dark +highlight clear +if exists("syntax_on") + syntax reset +endif +let colors_name = "liquidcarbon" +" 1}}} + +" Normal Color {{{1 +" ============================================================================ +hi Normal guifg=#bdcdcd guibg=#303030 +" 1}}} + +" Core Highlights {{{1 +" ============================================================================ +hi ColorColumn guifg=NONE guibg=#3A3A3A +hi Cursor guifg=bg guibg=fg gui=NONE +hi CursorIM guifg=bg guibg=fg gui=NONE +hi CursorLine guifg=NONE guibg=#303030 gui=NONE +hi lCursor guifg=bg guibg=fg gui=NONE +hi DiffAdd guifg=#000000 guibg=#3cb371 gui=NONE +hi DiffChange guifg=#000000 guibg=#4f94cd gui=NONE +hi DiffDelete guifg=#000000 guibg=#8b3626 gui=NONE +hi DiffText guifg=#000000 guibg=#8ee5ee gui=NONE +hi Directory 
guifg=#1e90ff guibg=bg gui=NONE +hi ErrorMsg guifg=#ff6a6a guibg=NONE gui=bold +hi FoldColumn guifg=#68838b guibg=#4B4B4B gui=bold +hi Folded guifg=#68838b guibg=#4B4B4B gui=NONE +hi IncSearch guifg=#ffffff guibg=#ff4500 gui=bold +hi LineNr guifg=#767676 guibg=#222222 gui=NONE +hi MatchParen guifg=#fff000 guibg=#000000 gui=bold +hi ModeMsg guifg=#000000 guibg=#00ff00 gui=bold +hi MoreMsg guifg=#2e8b57 guibg=bg gui=bold +hi NonText guifg=#9ac0cd guibg=bg gui=NONE +hi Pmenu guifg=#0000ff guibg=#c0c8cf gui=bold +hi PmenuSel guifg=#c0c8cf guibg=#0000ff gui=bold +hi PmenuSbar guifg=#ffffff guibg=#c1cdc1 gui=NONE +hi PmenuThumb guifg=#ffffff guibg=#838b83 gui=NONE +hi Question guifg=#00ee00 guibg=NONE gui=bold +hi Search guifg=#000000 guibg=#fff68f gui=bold +hi SignColumn guifg=#ffffff guibg=#cdcdb4 gui=NONE +hi SpecialKey guifg=#666666 guibg=NONE gui=NONE +hi SpellBad guisp=#ee2c2c gui=undercurl +hi SpellCap guisp=#0000ff gui=undercurl +hi SpellLocal guisp=#008b8b gui=undercurl +hi SpellRare guisp=#ff00ff gui=undercurl +hi StatusLine guifg=#ddeeff guibg=#445566 gui=NONE +hi StatusLineNC guifg=#999999 guibg=#445566 gui=italic +hi TabLine guifg=fg guibg=#d3d3d3 gui=underline +hi TabLineFill guifg=fg guibg=bg gui=reverse +hi TabLineSel guifg=fg guibg=bg gui=bold +hi Title guifg=#009acd guibg=bg gui=bold +hi VertSplit guifg=#445566 guibg=#445566 +hi Visual guifg=#000000 guibg=#90ccff gui=NONE +hi WarningMsg guifg=#ee9a00 guibg=bg gui=NONE +hi WildMenu guifg=#000000 guibg=#87ceeb gui=NONE +" 1}}} + +" Syntax {{{1 +" ============================================================================ + +" General {{{2 +" ----------------------------------------------------------------------------- +hi Comment guifg=#809090 guibg=NONE gui=italic +hi Constant guifg=#cdad00 guibg=NONE gui=NONE +hi String guifg=#559b70 guibg=NONE gui=NONE +hi Boolean guifg=#cd69c9 guibg=NONE gui=NONE +hi Identifier guifg=#9f79ee guibg=NONE gui=NONE +hi Function guifg=#92a5de guibg=NONE gui=NONE +hi 
Statement guifg=#009acd guibg=NONE gui=NONE +hi PreProc guifg=#009acd guibg=NONE gui=NONE +hi Keyword guifg=#7ac5cd guibg=NONE gui=NONE +hi Type guifg=#4169e1 guibg=NONE gui=NONE +hi Special guifg=#7f9f44 guibg=NONE gui=NONE +hi Ignore guifg=bg guibg=NONE gui=NONE +hi Error guifg=#ff3030 guibg=NONE gui=underline +hi Todo guifg=#ff88ee guibg=NONE gui=bold +" 2}}} + +" Vim {{{2 +" ----------------------------------------------------------------------------- +hi VimError guifg=#ff0000 guibg=#000000 gui=bold +hi VimCommentTitle guifg=#528b8b guibg=bg gui=bold,italic +" 2}}} + +" QuickFix {{{2 +" ----------------------------------------------------------------------------- +hi qfFileName guifg=#607b8b guibg=NONE gui=italic +hi qfLineNr guifg=#0088aa guibg=NONE gui=bold +hi qfError guifg=#ff0000 guibg=NONE gui=bold +" 2}}} + +" Python {{{2 +" ----------------------------------------------------------------------------- +hi pythonDecorator guifg=#cd8500 guibg=NONE gui=NONE +hi link pythonDecoratorFunction pythonDecorator +" 2}}} + +" Diff {{{2 +" ----------------------------------------------------------------------------- +hi diffOldFile guifg=#da70d6 guibg=NONE gui=italic +hi diffNewFile guifg=#ffff00 guibg=NONE gui=italic +hi diffFile guifg=#ffa500 guibg=NONE gui=italic +hi diffLine guifg=#ff00ff guibg=NONE gui=italic +hi link diffOnly Constant +hi link diffIdentical Constant +hi link diffDiffer Constant +hi link diffBDiffer Constant +hi link diffIsA Constant +hi link diffNoEOL Constant +hi link diffCommon Constant +hi diffRemoved guifg=#cd5555 guibg=NONE gui=NONE +hi diffChanged guifg=#4f94cd guibg=NONE gui=NONE +hi diffAdded guifg=#00cd00 guibg=NONE gui=NONE +hi link diffSubname diffLine +hi link diffComment Comment +" 2}}} + +" 1}}} diff --git a/.vim/colors/lucius.vim b/.vim/colors/lucius.vim index 10a53fd..333a0e3 100644 --- a/.vim/colors/lucius.vim +++ b/.vim/colors/lucius.vim @@ -1,353 +1,253 @@ -" Vim color file -" Maintainer: Jonathan Filip <[email protected]> 
-" Last Modified: Mon Apr 19, 2010 10:24AM -" Version: 3.5 -" -" GUI / 256 color terminal -" -" I started out trying to combine my favorite parts of other schemes and ended -" up with this (oceandeep, moria, peaksea, wombat, zenburn). -" -" This file also tries to have descriptive comments for each higlighting group -" so it is easy to understand what each part does. +" Lucius vim color file +" Maintainer: Jonathan Filip <[email protected]> +" Version: 6.02 - -set background=dark hi clear if exists("syntax_on") syntax reset endif let colors_name="lucius" -" Some other colors to save -" blue: 3eb8e5 -" green: 92d400 -" c green: d5f876, cae682 -" new blue: 002D62 -" new gray: CCCCCC - - -" Base color -" ---------- -hi Normal guifg=#e0e0e0 guibg=#202020 -hi Normal ctermfg=253 ctermbg=235 - - -" Comment Group -" ------------- -" any comment -hi Comment guifg=#606060 gui=none -hi Comment ctermfg=240 cterm=none - - -" Constant Group -" -------------- -" any constant -hi Constant guifg=#8cd0d3 gui=none -hi Constant ctermfg=116 cterm=none -" strings -hi String guifg=#80c0d9 gui=none -hi String ctermfg=110 cterm=none -" character constant -hi Character guifg=#80c0d9 gui=none -hi Character ctermfg=110 cterm=none -" numbers decimal/hex -hi Number guifg=#8cd0d3 gui=none -hi Number ctermfg=116 cterm=none -" true, false -hi Boolean guifg=#8cd0d3 gui=none -hi Boolean ctermfg=116 cterm=none -" float -hi Float guifg=#8cd0d3 gui=none -hi Float ctermfg=116 cterm=none - - -" Identifier Group -" ---------------- -" any variable name -hi Identifier guifg=#e6c080 gui=none -hi Identifier ctermfg=180 cterm=none -" function, method, class -hi Function guifg=#e6c080 gui=none -hi Function ctermfg=180 cterm=none - - -" Statement Group -" --------------- -" any statement -hi Statement guifg=#b3d38c gui=none -hi Statement ctermfg=150 cterm=none -" if, then, else -hi Conditional guifg=#b3d38c gui=none -hi Conditional ctermfg=150 cterm=none -" try, catch, throw, raise -hi Exception guifg=#b3d38c 
gui=none -hi Exception ctermfg=150 cterm=none -" for, while, do -hi Repeat guifg=#b3d38c gui=none -hi Repeat ctermfg=150 cterm=none -" case, default -hi Label guifg=#b3d38c gui=none -hi Label ctermfg=150 cterm=none -" sizeof, +, * -hi Operator guifg=#b3d38c gui=none -hi Operator ctermfg=150 cterm=none -" any other keyword -hi Keyword guifg=#b3d38c gui=none -hi Keyword ctermfg=150 cterm=none - - -" Preprocessor Group -" ------------------ -" generic preprocessor -hi PreProc guifg=#e9dfaf gui=none -hi PreProc ctermfg=187 cterm=none -" #include -hi Include guifg=#e9dfaf gui=none -hi Include ctermfg=187 cterm=none -" #define -hi Define guifg=#e9dfaf gui=none -hi Define ctermfg=187 cterm=none -" same as define -hi Macro guifg=#e9dfaf gui=none -hi Macro ctermfg=187 cterm=none -" #if, #else, #endif -hi PreCondit guifg=#e9dfaf gui=none -hi PreCondit ctermfg=187 cterm=none - - -" Type Group -" ---------- -" int, long, char -hi Type guifg=#93d6a9 gui=none -hi Type ctermfg=115 cterm=none -" static, register, volative -hi StorageClass guifg=#93d6a9 gui=none -hi StorageClass ctermfg=115 cterm=none -" struct, union, enum -hi Structure guifg=#93d6a9 gui=none -hi Structure ctermfg=115 cterm=none -" typedef -hi Typedef guifg=#93d6a9 gui=none -hi Typedef ctermfg=115 cterm=none - - -" Special Group -" ------------- -" any special symbol -hi Special guifg=#cca3b3 gui=none -hi Special ctermfg=181 cterm=none -" special character in a constant -hi SpecialChar guifg=#cca3b3 gui=none -hi SpecialChar ctermfg=181 cterm=none -" things you can CTRL-] -hi Tag guifg=#cca3b3 gui=none -hi Tag ctermfg=181 cterm=none -" character that needs attention -hi Delimiter guifg=#cca3b3 gui=none -hi Delimiter ctermfg=181 cterm=none -" special things inside a comment -hi SpecialComment guifg=#cca3b3 gui=none -hi SpecialComment ctermfg=181 cterm=none -" debugging statements -hi Debug guifg=#cca3b3 guibg=NONE gui=none -hi Debug ctermfg=181 ctermbg=NONE cterm=none - - -" Underlined Group -" ---------------- -" 
text that stands out, html links -hi Underlined guifg=fg gui=underline -hi Underlined ctermfg=fg cterm=underline - - -" Ignore Group -" ------------ -" left blank, hidden -hi Ignore guifg=bg -hi Ignore ctermfg=bg - - -" Error Group -" ----------- -" any erroneous construct -hi Error guifg=#e37170 guibg=#432323 gui=none -hi Error ctermfg=167 ctermbg=52 cterm=none - - -" Todo Group -" ---------- -" todo, fixme, note, xxx -hi Todo guifg=#efef8f guibg=NONE gui=underline -hi Todo ctermfg=228 ctermbg=NONE cterm=underline - - -" Spelling -" -------- -" word not recognized -hi SpellBad guisp=#ee0000 gui=undercurl -hi SpellBad ctermbg=196 cterm=undercurl -" word not capitalized -hi SpellCap guisp=#eeee00 gui=undercurl -hi SpellCap ctermbg=226 cterm=undercurl -" rare word -hi SpellRare guisp=#ffa500 gui=undercurl -hi SpellRare ctermbg=214 cterm=undercurl -" wrong spelling for selected region -hi SpellLocal guisp=#ffa500 gui=undercurl -hi SpellLocal ctermbg=214 cterm=undercurl - - -" Cursor -" ------ -" character under the cursor -hi Cursor guifg=bg guibg=#a3e3ed -hi Cursor ctermfg=bg ctermbg=153 -" like cursor, but used when in IME mode -hi CursorIM guifg=bg guibg=#96cdcd -hi CursorIM ctermfg=bg ctermbg=116 -" cursor column -hi CursorColumn guifg=NONE guibg=#404448 gui=none -hi CursorColumn ctermfg=NONE ctermbg=236 cterm=none -" cursor line/row -hi CursorLine gui=NONE guibg=#404448 gui=none -hi CursorLine cterm=NONE ctermbg=236 cterm=none - - -" Misc -" ---- -" directory names and other special names in listings -hi Directory guifg=#c0e0b0 gui=none -hi Directory ctermfg=151 cterm=none -" error messages on the command line -hi ErrorMsg guifg=#ee0000 guibg=NONE gui=none -hi ErrorMsg ctermfg=196 ctermbg=NONE cterm=none -" column separating vertically split windows -hi VertSplit guifg=#777777 guibg=#363946 gui=none -hi VertSplit ctermfg=242 ctermbg=237 cterm=none -" columns where signs are displayed (used in IDEs) -hi SignColumn guifg=#9fafaf guibg=#181818 gui=none -hi 
SignColumn ctermfg=145 ctermbg=233 cterm=none -" line numbers -hi LineNr guifg=#818698 guibg=#363946 -hi LineNr ctermfg=245 ctermbg=237 -" match parenthesis, brackets -hi MatchParen guifg=#00ff00 guibg=NONE gui=bold -hi MatchParen ctermfg=46 ctermbg=NONE cterm=bold -" the 'more' prompt when output takes more than one line -hi MoreMsg guifg=#2e8b57 gui=none -hi MoreMsg ctermfg=29 cterm=none -" text showing what mode you are in -hi ModeMsg guifg=#76d5f8 guibg=NONE gui=none -hi ModeMsg ctermfg=117 ctermbg=NONE cterm=none -" the '~' and '@' and showbreak, '>' double wide char doesn't fit on line -hi NonText guifg=#404040 gui=none -hi NonText ctermfg=238 cterm=none -" the hit-enter prompt (show more output) and yes/no questions -hi Question guifg=fg gui=none -hi Question ctermfg=fg cterm=none -" meta and special keys used with map, unprintable characters -hi SpecialKey guifg=#405060 -hi SpecialKey ctermfg=239 -" titles for output from :set all, :autocmd, etc -hi Title guifg=#62bdde gui=none -hi Title ctermfg=74 cterm=none -"hi Title guifg=#5ec8e5 gui=none -" warning messages -hi WarningMsg guifg=#e5786d gui=none -hi WarningMsg ctermfg=173 cterm=none -" current match in the wildmenu completion -hi WildMenu guifg=#cae682 guibg=#363946 gui=bold,underline -hi WildMenu ctermfg=16 ctermbg=186 cterm=bold -" color column highlighting -hi ColorColumn guifg=NONE guibg=#403630 gui=none -hi ColorColumn ctermfg=NONE ctermbg=94 cterm=none - - -" Diff -" ---- -" added line -hi DiffAdd guifg=#80a090 guibg=#313c36 gui=none -hi DiffAdd ctermfg=fg ctermbg=22 cterm=none -" changed line -hi DiffChange guifg=NONE guibg=#4a343a gui=none -hi DiffChange ctermfg=fg ctermbg=52 cterm=none -" deleted line -hi DiffDelete guifg=#6c6661 guibg=#3c3631 gui=none -hi DiffDelete ctermfg=fg ctermbg=58 cterm=none -" changed text within line -hi DiffText guifg=#f05060 guibg=#4a343a gui=bold -hi DiffText ctermfg=203 ctermbg=52 cterm=bold - - -" Folds -" ----- -" line used for closed folds -"hi Folded 
guifg=#91d6f8 guibg=#363946 gui=none -"hi Folded ctermfg=117 ctermbg=238 cterm=none -hi Folded guifg=#d0e0f0 guibg=#202020 gui=none -hi Folded ctermfg=117 ctermbg=235 cterm=none -" column on side used to indicated open and closed folds -hi FoldColumn guifg=#91d6f8 guibg=#363946 gui=none -hi FoldColumn guifg=#c0c0d0 guibg=#363946 gui=none -hi FoldColumn ctermfg=117 ctermbg=238 cterm=none - - -" Search -" ------ -" highlight incremental search text; also highlight text replaced with :s///c -hi IncSearch guifg=#66ffff gui=reverse -hi IncSearch ctermfg=87 cterm=reverse -" hlsearch (last search pattern), also used for quickfix -hi Search guibg=#ffaa33 gui=none -hi Search ctermbg=214 cterm=none - - -" Popup Menu -" ---------- -" normal item in popup -hi Pmenu guifg=#e0e0e0 guibg=#303840 gui=none -hi Pmenu ctermfg=253 ctermbg=233 cterm=none -" selected item in popup -hi PmenuSel guifg=#cae682 guibg=#505860 gui=none -hi PmenuSel ctermfg=186 ctermbg=237 cterm=none -" scrollbar in popup -hi PMenuSbar guibg=#505860 gui=none -hi PMenuSbar ctermbg=59 cterm=none -" thumb of the scrollbar in the popup -hi PMenuThumb guibg=#808890 gui=none -hi PMenuThumb ctermbg=102 cterm=none - - -" Status Line -" ----------- -" status line for current window -hi StatusLine guifg=#e0e0e0 guibg=#363946 gui=bold -hi StatusLine ctermfg=254 ctermbg=237 cterm=bold -" status line for non-current windows -hi StatusLineNC guifg=#767986 guibg=#363946 gui=none -hi StatusLineNC ctermfg=244 ctermbg=237 cterm=none - - -" Tab Lines -" --------- -" tab pages line, not active tab page label -hi TabLine guifg=#b6bf98 guibg=#363946 gui=none -hi TabLine ctermfg=244 ctermbg=236 cterm=none -" tab pages line, where there are no labels -hi TabLineFill guifg=#cfcfaf guibg=#363946 gui=none -hi TabLineFill ctermfg=187 ctermbg=236 cterm=none -" tab pages line, active tab page label -hi TabLineSel guifg=#efefef guibg=#414658 gui=bold -hi TabLineSel ctermfg=254 ctermbg=236 cterm=bold - - -" Visual -" ------ -" visual mode 
selection -hi Visual guifg=NONE guibg=#364458 -hi Visual ctermfg=NONE ctermbg=24 -" visual mode selection when vim is not owning the selection (x11 only) -hi VisualNOS guifg=fg gui=underline -hi VisualNOS ctermfg=fg cterm=underline +" Summary: +" Color scheme with dark and light versions (GUI and 256 color terminal). + +" Description: +" This color scheme was originally created by combining my favorite parts of +" the following color schemes: +" +" * oceandeep (vimscript #368) +" * peaksea (vimscript #760) +" * wombat (vimscript #1778) +" * moria (vimscript #1464) +" * zenburn (vimscript #415) +" +" Version 6+ has been revamped a bit from the original color scheme. If you +" prefer the old style, or the 'blue' version, use the 5Final release. Version +" 6+ only has a light and dark version. The new version tries to unify some of +" the colors and also adds more contrast between text and interface. +" +" The color scheme is dark, by default. You can change this by setting the +" g:lucius_style variable to "light" or "dark". Once the color scheme is +" loaded, you can use the commands "LuciusLight" or "LuciusDark" to change +" schemes quickly. +" +" Screeshots of the new version (6+): +" +" * Dark: http://i.imgur.com/IzYcB.png +" * Light: http://i.imgur.com/kfJcm.png +" +" Screenshots of the old versions (5Final): +" +" * Dark: http://i.imgur.com/z0bDr.png +" * Light: http://i.imgur.com/BXDiv.png +" * Blue: http://i.imgur.com/Ea1Gq.png +" +" colorsupport.vim (vimscript #2682) is used to help with mapping the GUI +" settings to the 256 terminal colors. +" +" This color scheme also has custom colors defined for the following plugins: +" +" * vimwiki (vimscript #2226) +" * tagbar (vimscript #3465) +" +" Installation: +" Copy the file to your vim colors directory and then do :colorscheme lucius. 
+ +set background=dark +if exists("g:lucius_style") + if g:lucius_style == "light" + set background=light + endif +else + let g:lucius_style="dark" +endif + +" set colorcolumn=21,37,53,68,86,100 + +if g:lucius_style == "dark" + + + hi Normal guifg=#e8e8e8 guibg=#202020 ctermfg=253 ctermbg=234 gui=none cterm=none + + hi Comment guifg=#606060 guibg=NONE ctermfg=240 ctermbg=NONE gui=none cterm=none + + hi Constant guifg=#b0d090 guibg=NONE ctermfg=150 ctermbg=NONE gui=none cterm=none + hi BConstant guifg=#b0d090 guibg=NONE ctermfg=150 ctermbg=NONE gui=bold cterm=bold + + hi Identifier guifg=#90d0c0 guibg=NONE ctermfg=115 ctermbg=NONE gui=none cterm=none + hi BIdentifier guifg=#90d0c0 guibg=NONE ctermfg=115 ctermbg=NONE gui=bold cterm=bold + + hi Statement guifg=#70c0e0 guibg=NONE ctermfg=74 ctermbg=NONE gui=none cterm=none + hi BStatement guifg=#70c0e0 guibg=NONE ctermfg=74 ctermbg=NONE gui=bold cterm=bold + + hi PreProc guifg=#e0e0b0 guibg=NONE ctermfg=187 ctermbg=NONE gui=none cterm=none + hi BPreProc guifg=#e0e0b0 guibg=NONE ctermfg=187 ctermbg=NONE gui=bold cterm=bold + + hi Type guifg=#90c0d0 guibg=NONE ctermfg=116 ctermbg=NONE gui=none cterm=none + hi BType guifg=#90c0d0 guibg=NONE ctermfg=116 ctermbg=NONE gui=bold cterm=bold + + hi Special guifg=#b0a0c0 guibg=NONE ctermfg=182 ctermbg=NONE gui=none cterm=none + hi BSpecial guifg=#b0a0c0 guibg=NONE ctermfg=182 ctermbg=NONE gui=bold cterm=bold + + " == Text Markup == + hi Underlined guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=underline cterm=underline + hi Error guifg=#e07070 guibg=#402020 ctermfg=167 ctermbg=236 gui=none cterm=none + hi Todo guifg=#e0e090 guibg=#404000 ctermfg=186 ctermbg=NONE gui=none cterm=none + hi MatchParen guifg=bg guibg=#d0f080 ctermfg=bg ctermbg=192 gui=none cterm=bold + hi NonText guifg=#405060 guibg=NONE ctermfg=24 ctermbg=NONE gui=none cterm=none + hi SpecialKey guifg=#406050 guibg=NONE ctermfg=23 ctermbg=NONE gui=none cterm=none + hi Title guifg=#60c0e0 guibg=NONE ctermfg=74 
ctermbg=NONE gui=bold cterm=bold + + " == Text Selection == + hi Cursor guifg=bg guibg=fg ctermfg=bg ctermbg=fg gui=none cterm=none + hi CursorIM guifg=bg guibg=fg ctermfg=bg ctermbg=fg gui=none cterm=none + hi CursorColumn guifg=NONE guibg=#404040 ctermfg=NONE ctermbg=237 gui=none cterm=none + hi CursorLine guifg=NONE guibg=#404040 ctermfg=NONE ctermbg=237 gui=none cterm=none + hi Visual guifg=NONE guibg=#304050 ctermfg=NONE ctermbg=24 gui=none cterm=none + hi VisualNOS guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=underline cterm=underline + hi IncSearch guifg=bg guibg=#60e0e0 ctermfg=bg ctermbg=116 gui=none cterm=none + hi Search guifg=bg guibg=#f0b030 ctermfg=bg ctermbg=214 gui=none cterm=none + + " == UI == + hi Pmenu guifg=bg guibg=#c0c0c0 ctermfg=bg ctermbg=252 gui=none cterm=none + hi PmenuSel guifg=#e0e0e0 guibg=#304050 ctermfg=fg ctermbg=24 gui=none cterm=none + hi PMenuSbar guifg=#202020 guibg=#d0d0d0 ctermfg=bg ctermbg=254 gui=none cterm=none + hi PMenuThumb guifg=NONE guibg=#808080 ctermfg=fg ctermbg=244 gui=none cterm=none + hi StatusLine guifg=#202020 guibg=#c0c0c0 ctermfg=bg ctermbg=252 gui=bold cterm=bold + hi StatusLineNC guifg=#404040 guibg=#c0c0c0 ctermfg=240 ctermbg=252 gui=none cterm=none + hi TabLine guifg=#202020 guibg=#e0e0e0 ctermfg=bg ctermbg=252 gui=none cterm=none + hi TabLineFill guifg=#404040 guibg=#e0e0e0 ctermfg=240 ctermbg=252 gui=none cterm=none + hi TabLineSel guifg=#e0e0e0 guibg=#304050 ctermfg=fg ctermbg=24 gui=bold cterm=bold + hi VertSplit guifg=#606060 guibg=#c0c0c0 ctermfg=245 ctermbg=252 gui=none cterm=none + hi Folded guifg=#202020 guibg=#808080 ctermfg=bg ctermbg=246 gui=none cterm=none + hi FoldColumn guifg=#202020 guibg=#808080 ctermfg=bg ctermbg=246 gui=none cterm=none + + " == Spelling == + hi SpellBad guisp=#ee0000 ctermfg=fg ctermbg=160 gui=undercurl cterm=undercurl + hi SpellCap guisp=#eeee00 ctermfg=bg ctermbg=226 gui=undercurl cterm=undercurl + hi SpellRare guisp=#ffa500 ctermfg=bg ctermbg=214 gui=undercurl 
cterm=undercurl + hi SpellLocal guisp=#ffa500 ctermfg=bg ctermbg=214 gui=undercurl cterm=undercurl + + " == Diff == + hi DiffAdd guifg=fg guibg=#304030 ctermfg=fg ctermbg=22 gui=none cterm=none + hi DiffChange guifg=fg guibg=#504030 ctermfg=fg ctermbg=58 gui=none cterm=none + hi DiffDelete guifg=fg guibg=#403030 ctermfg=fg ctermbg=52 gui=none cterm=none + hi DiffText guifg=#d0c060 guibg=#504030 ctermfg=220 ctermbg=58 gui=bold cterm=bold + + " == Misc == + hi Directory guifg=#c0e0b0 guibg=NONE ctermfg=151 ctermbg=NONE gui=none cterm=none + hi ErrorMsg guifg=#ee0000 guibg=NONE ctermfg=196 ctermbg=NONE gui=none cterm=none + hi SignColumn guifg=#a0b0b0 guibg=#282828 ctermfg=145 ctermbg=233 gui=none cterm=none + hi LineNr guifg=#202020 guibg=#808080 ctermfg=bg ctermbg=246 gui=none cterm=none + hi MoreMsg guifg=#70d0f0 guibg=NONE ctermfg=117 ctermbg=NONE gui=none cterm=none + hi ModeMsg guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=none cterm=none + hi Question guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=none cterm=none + hi WarningMsg guifg=#e87870 guibg=NONE ctermfg=173 ctermbg=NONE gui=none cterm=none + hi WildMenu guifg=NONE guibg=#304050 ctermfg=NONE ctermbg=24 gui=none cterm=none + hi ColorColumn guifg=NONE guibg=#403630 ctermfg=NONE ctermbg=101 gui=none cterm=none + hi Ignore guifg=bg ctermfg=bg + + +elseif g:lucius_style == "light" + + + hi Normal guifg=#000000 guibg=#ffffff ctermfg=16 ctermbg=231 gui=none cterm=none + + hi Comment guifg=#909090 guibg=NONE ctermfg=246 ctermbg=NONE gui=none cterm=none + + hi Constant guifg=#007000 guibg=NONE ctermfg=22 ctermbg=NONE gui=none cterm=none + hi BConstant guifg=#007000 guibg=NONE ctermfg=22 ctermbg=NONE gui=none cterm=bold + + hi Identifier guifg=#008080 guibg=NONE ctermfg=30 ctermbg=NONE gui=none cterm=none + hi BIdentifier guifg=#008080 guibg=NONE ctermfg=30 ctermbg=NONE gui=none cterm=bold + + hi Statement guifg=#0030b0 guibg=NONE ctermfg=19 ctermbg=NONE gui=none cterm=none + hi BStatement guifg=#0030b0 
guibg=NONE ctermfg=19 ctermbg=NONE gui=none cterm=bold + + hi PreProc guifg=#a06000 guibg=NONE ctermfg=130 ctermbg=NONE gui=none cterm=none + hi BPreProc guifg=#a06000 guibg=NONE ctermfg=130 ctermbg=NONE gui=none cterm=bold + + hi Type guifg=#0070a0 guibg=NONE ctermfg=25 ctermbg=NONE gui=none cterm=none + hi BType guifg=#0070a0 guibg=NONE ctermfg=25 ctermbg=NONE gui=none cterm=bold + + hi Special guifg=#703080 guibg=NONE ctermfg=5 ctermbg=NONE gui=none cterm=none + hi BSpecial guifg=#703080 guibg=NONE ctermfg=5 ctermbg=NONE gui=none cterm=bold + + " == Text Markup == + hi Underlined guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=underline cterm=underline + hi Error guifg=#c02620 guibg=#f0c6c0 ctermfg=1 ctermbg=181 gui=none cterm=none + hi Todo guifg=#504000 guibg=#f6f080 ctermfg=58 ctermbg=228 gui=none cterm=none + hi MatchParen guifg=NONE guibg=#d0f080 ctermfg=NONE ctermbg=192 gui=none cterm=none + hi NonText guifg=#b0c0d0 guibg=NONE ctermfg=146 ctermbg=NONE gui=none cterm=none + hi SpecialKey guifg=#b0d0c0 guibg=NONE ctermfg=151 ctermbg=NONE gui=none cterm=none + hi Title guifg=#0080e0 guibg=NONE ctermfg=26 ctermbg=NONE gui=bold cterm=bold + + " == Text Selection == + hi Cursor guifg=bg guibg=#505050 ctermfg=bg ctermbg=239 gui=none cterm=none + hi CursorIM guifg=bg guibg=#505050 ctermfg=bg ctermbg=239 gui=none cterm=none + hi CursorColumn guifg=NONE guibg=#e8e8e8 ctermfg=NONE ctermbg=254 gui=none cterm=none + hi CursorLine guifg=NONE guibg=#e8e8e8 ctermfg=NONE ctermbg=254 gui=none cterm=none + hi Visual guifg=NONE guibg=#d0e0f0 ctermfg=NONE ctermbg=153 gui=none cterm=none + hi VisualNOS guifg=fg guibg=NONE ctermfg=fg ctermbg=NONE gui=underline cterm=underline + hi IncSearch guifg=#000000 guibg=#90d0d0 ctermfg=fg ctermbg=116 gui=none cterm=none + hi Search guifg=#000000 guibg=#f0b060 ctermfg=fg ctermbg=215 gui=none cterm=none + + " == UI == + hi Pmenu guifg=#ffffff guibg=#505050 ctermfg=231 ctermbg=239 gui=none cterm=none + hi PmenuSel guifg=#000000 guibg=#d0e0f0 
ctermfg=16 ctermbg=153 gui=none cterm=none + hi PMenuSbar guifg=#ffffff guibg=#404040 ctermfg=231 ctermbg=238 gui=none cterm=none + hi PMenuThumb guifg=#000000 guibg=#a0a0a0 ctermfg=16 ctermbg=247 gui=none cterm=none + hi StatusLine guifg=#ffffff guibg=#505050 ctermfg=231 ctermbg=239 gui=bold cterm=bold + hi StatusLineNC guifg=#e0e0e0 guibg=#505050 ctermfg=254 ctermbg=239 gui=none cterm=none + hi TabLine guifg=#ffffff guibg=#505050 ctermfg=231 ctermbg=239 gui=none cterm=none + hi TabLineFill guifg=#a0a0a0 guibg=#505050 ctermfg=247 ctermbg=239 gui=none cterm=none + hi TabLineSel guifg=#000000 guibg=#d0e0f0 ctermfg=16 ctermbg=153 gui=none cterm=none + hi VertSplit guifg=#868686 guibg=#505050 ctermfg=102 ctermbg=239 gui=none cterm=none + hi Folded guifg=#ffffff guibg=#a0a0a0 ctermfg=231 ctermbg=247 gui=none cterm=none + hi FoldColumn guifg=#ffffff guibg=#a0a0a0 ctermfg=231 ctermbg=247 gui=none cterm=none + + " == Spelling == + hi SpellBad guisp=#ee0000 ctermbg=210 gui=undercurl cterm=undercurl + hi SpellCap guisp=#eeee00 ctermbg=227 gui=undercurl cterm=undercurl + hi SpellRare guisp=#ffa500 ctermbg=221 gui=undercurl cterm=undercurl + hi SpellLocal guisp=#ffa500 ctermbg=221 gui=undercurl cterm=undercurl + + " == Diff == + hi DiffAdd guifg=fg guibg=#d0e0d0 ctermfg=fg ctermbg=151 gui=none cterm=none + hi DiffChange guifg=fg guibg=#e0d6c0 ctermfg=fg ctermbg=187 gui=none cterm=none + hi DiffDelete guifg=fg guibg=#f0d0d0 ctermfg=fg ctermbg=181 gui=none cterm=none + hi DiffText guifg=#d05000 guibg=#e0d6c0 ctermfg=160 ctermbg=187 gui=bold cterm=bold + + " == Misc == + hi Directory guifg=#009040 guibg=NONE ctermfg=29 ctermbg=NONE gui=none cterm=none + hi ErrorMsg guifg=#a00000 guibg=NONE ctermfg=124 ctermbg=NONE gui=none cterm=none + hi SignColumn guifg=#708090 guibg=#f8f8f8 ctermfg=66 ctermbg=231 gui=none cterm=none + hi LineNr guifg=#ffffff guibg=#a0a0a0 ctermfg=231 ctermbg=247 gui=none cterm=none + hi MoreMsg guifg=#2060c0 guibg=NONE ctermfg=4 ctermbg=NONE gui=none 
cterm=none + hi ModeMsg guifg=#000000 guibg=NONE ctermfg=16 ctermbg=NONE gui=none cterm=none + hi Question guifg=fg guibg=NONE ctermfg=NONE ctermbg=NONE gui=none cterm=none + hi WarningMsg guifg=#d04020 guibg=NONE ctermfg=9 ctermbg=NONE gui=none cterm=none + hi WildMenu guifg=#000000 guibg=#d0e0f0 ctermfg=16 ctermbg=153 gui=none cterm=none + hi ColorColumn guifg=NONE guibg=#f0f0e0 ctermfg=NONE ctermbg=230 gui=none cterm=none + hi Ignore guifg=bg ctermfg=bg + + +endif + +" == Vimwiki Colors == +hi link VimwikiHeader1 BConstant +hi link VimwikiHeader2 BIdentifier +hi link VimwikiHeader3 BStatement +hi link VimwikiHeader4 BSpecial +hi link VimwikiHeader5 BPreProc +hi link VimwikiHeader6 BType + +" == Tagbar Colors == +hi link TagbarAccessPublic Constant +hi link TagbarAccessProtected Type +hi link TagbarAccessPrivate PreProc + +" == Commands == +command! LuciusLight let g:lucius_style = "light" | colorscheme lucius +command! LuciusDark let g:lucius_style = "dark" | colorscheme lucius diff --git a/.vim/colors/pigraph.vim b/.vim/colors/pigraph.vim new file mode 100644 index 0000000..fc52551 --- /dev/null +++ b/.vim/colors/pigraph.vim @@ -0,0 +1,73 @@ +"pigraph theme based on blackdust +"[email protected] + + +set background=dark +hi clear + +if exists("syntax_on") + syntax reset +endif + +hi Boolean guifg=#eee689 +hi Character guifg=#eee689 +hi Comment guifg=#7f7f7f +hi Condtional guifg=#8fffff +hi Constant guifg=#eee689 gui=none +hi Cursor guifg=#000000 guibg=#aeaeae +hi Debug guifg=#eee689 +hi Define guifg=#83b1d4 +hi Delimiter guifg=#8f8f8f +hi DiffAdd guibg=#613c46 +hi DiffChange guibg=#333333 +hi DiffDelete guifg=#333333 guibg=#464646 gui=none +hi DiffText guifg=#ffffff guibg=#1f1f1f +hi Directory guifg=#ffffff +hi Error guifg=#000000 guibg=#00ffff +hi ErrorMsg guifg=#000000 guibg=#00c0cf +hi Exception guifg=#8fffff gui=underline +hi Float guifg=#9c93b3 +hi FoldColumn guifg=#eee689 guibg=#464646 +hi Folded guifg=#eee689 guibg=#333333 +hi Function guifg=#d38e63 +hi 
Identifier guifg=#ffffff +hi Include guifg=#ee8a37 +hi IncSearch guifg=#000000 guibg=#b1d631 +hi Keyword guifg=#ffffff +hi Label guifg=#8fffff gui=underline +hi Macro guifg=#ee8a37 +hi MatchParen guifg=#d0ffc0 guibg=#202020 ctermfg=157 ctermbg=237 cterm=bold +hi ModeMsg guifg=#eee689 +hi MoreMsg guifg=#ffffff +hi NonText guifg=#1f1f1f +hi LineNr guifg=#7f7f7f guibg=#343a3f +hi Normal guifg=#d6dbdf guibg=#2c3237 gui=none +hi Number guifg=#aca0a3 +hi Operator guifg=#ffffff +hi Pmenu guifg=#ffffff guibg=#202020 ctermfg=255 ctermbg=238 +hi PmenuSel guifg=#000000 guibg=#b1d631 ctermfg=0 ctermbg=148 +hi PreCondit guifg=#dfaf8f +hi PreProc guifg=#ee8a37 +hi Question guifg=#ffffff +hi Repeat guifg=#8fffff gui=underline +hi Search guifg=#000000 guibg=#b1d631 +hi SpecialChar guifg=#eee689 +hi SpecialComment guifg=#eee689 +hi Special guifg=#7f7f7f +hi SpecialKey guifg=#7e7e7e +hi Statement guifg=#8fffff +hi StatusLine guifg=#b1d631 guibg=#000000 +hi StatusLineNC guifg=#333333 guibg=#cccccc +hi StorageClass guifg=#ffffff +hi String guifg=#dbf0b3 +hi Structure guifg=#ffffff gui=underline +hi Tag guifg=#eee689 +hi Title guifg=#ffffff guibg=#333333 +hi Todo guifg=#ffffff guibg=#000000 +hi Typedef guifg=#ffffff gui=underline +hi Type guifg=#ffffff +hi VertSplit guifg=#444444 guibg=#303030 gui=none ctermfg=238 ctermbg=238 +hi Visual guifg=#000000 guibg=#b1d631 +hi VisualNOS guifg=#343a3f guibg=#f18c96 gui=underline +hi WarningMsg guifg=#ffffff guibg=#333333 +hi WildMenu guifg=#000000 guibg=#eee689 diff --git a/.vim/colors/solarized.vim b/.vim/colors/solarized.vim new file mode 100644 index 0000000..81c2c99 --- /dev/null +++ b/.vim/colors/solarized.vim @@ -0,0 +1,964 @@ +" Name: Solarized vim colorscheme +" Author: Ethan Schoonover <[email protected]> +" URL: http://ethanschoonover.com/solarized +" (see this url for latest release & screenshots) +" License: OSI approved MIT license (see end of this file) +" +" Usage "{{{ +" +" 
--------------------------------------------------------------------- +" ABOUT: +" --------------------------------------------------------------------- +" Solarized is a carefully designed selective contrast colorscheme with dual +" light and dark modes that runs in both GUI, 256 and 16 color modes. +" +" See the homepage above for screenshots and details. +" +" --------------------------------------------------------------------- +" INSTALLATION: +" --------------------------------------------------------------------- +" +" Two options for installation: manual or pathogen +" +" MANUAL INSTALLATION OPTION: +" --------------------------------------------------------------------- +" +" 1. Put the files in the right place! +" 2. Move `solarized.vim` to your `.vim/colors` directory. +" +" RECOMMENDED PATHOGEN INSTALLATION OPTION: +" --------------------------------------------------------------------- +" +" 1. Download and install Tim Pope's Pathogen from: +" https://github.com/tpope/vim-pathogen +" +" 2. Next, move or clone the `vim-colors-solarized` directory so that it is +" a subdirectory of the `.vim/bundle` directory. +" +" a. **clone with git:** +" +" $ cd ~/.vim/bundle +" $ git clone git://github.com/altercation/vim-colors-solarized.git +" +" b. **or move manually into the pathogen bundle directory:** +" In the parent directory of vim-colors-solarized: +" +" $ mv vim-colors-solarized ~/.vim/bundle/ +" +" MODIFY VIMRC: +" +" After either Option 1 or Option 2 above, put the following two lines in your +" .vimrc: +" +" set background=dark +" colorscheme solarized +" +" or, for the light background mode of Solarized: +" +" set background=light +" colorscheme solarized +" +" I like to have a different background in GUI and terminal modes, so I can use +" the following if-then. However, I find vim's background autodetection to be +" pretty good and, at least with MacVim, I can leave this background value +" assignment out entirely and get the same results. 
+" +" if has('gui_running') +" set background=light +" else +" set background=dark +" endif +" +" See the Solarized homepage at http://ethanschoonover.com/solarized for +" screenshots which will help you select either the light or dark background. +" +" Other options are detailed below. +" +" IMPORTANT NOTE FOR TERMINAL USERS: +" +" If you are going to use Solarized in Terminal mode (i.e. not in a GUI +" version like gvim or macvim), **please please please** consider setting your +" terminal emulator's colorscheme to used the Solarized palette. I've included +" palettes for some popular terminal emulator as well as Xdefaults in the +" official Solarized download available from: +" +" http://ethanschoonover.com/solarized +" +" If you use Solarized without these colors, Solarized will by default use an +" approximate set of 256 colors. It isn't bad looking and has been extensively +" tweaked, but it's still not quite the real thing. +" +" If you do use the custom terminal colors, simply add the following line +" *before* the `colorschem solarized` line: +" +" let g:solarized_termcolors=16 +" +" --------------------------------------------------------------------- +" TOGGLE BACKGROUND FUNCTION +" --------------------------------------------------------------------- +" Here's a quick script that toggles the background color, using F5 in this +" example. You can drop this into .vimrc: +" +" function! ToggleBackground() +" if (g:solarized_style=="dark") +" let g:solarized_style="light" +" colorscheme solarized +" else +" let g:solarized_style="dark" +" colorscheme solarized +" endif +" endfunction +" command! 
Togbg call ToggleBackground() +" nnoremap <F5> :call ToggleBackground()<CR> +" inoremap <F5> <ESC>:call ToggleBackground()<CR>a +" vnoremap <F5> <ESC>:call ToggleBackground()<CR> +" +" --------------------------------------------------------------------- +" OPTIONS +" --------------------------------------------------------------------- +" +" Set these in your vimrc file prior to calling the colorscheme. +" +" option name default optional +" ------------------------------------------------ +" g:solarized_termcolors= 256 | 16 +" g:solarized_termtrans = 0 | 1 +" g:solarized_degrade = 0 | 1 +" g:solarized_bold = 1 | 0 +" g:solarized_underline = 1 | 0 +" g:solarized_italic = 1 | 0 +" g:solarized_style = "dark" | "light" +" g:solarized_contrast = "normal"| "high" or "low" +" ------------------------------------------------ +" +" OPTION DETAILS +" +" ------------------------------------------------ +" g:solarized_termcolors= 256 | 16 +" ------------------------------------------------ +" The most important option if you are using vim in terminal (non gui) mode! +" This tells Solarized to use the 256 degraded color mode if running in a 256 +" color capable terminal. Otherwise, if set to `16` it will use the terminal +" emulators colorscheme (best option as long as you've set the emulators colors +" to the Solarized palette). +" +" If you are going to use Solarized in Terminal mode (i.e. not in a GUI +" version like gvim or macvim), **please please please** consider setting your +" terminal emulator's colorscheme to used the Solarized palette. I've included +" palettes for some popular terminal emulator as well as Xdefaults in the +" official Solarized download available from: +" http://ethanschoonover.com/solarized . If you use Solarized without these +" colors, Solarized will by default use an approximate set of 256 colors. It +" isn't bad looking and has been extensively tweaked, but it's still not quite +" the real thing. 
+" +" ------------------------------------------------ +" g:solarized_termtrans = 0 | 1 +" ------------------------------------------------ +" If you use a terminal emulator with a transparent background and Solarized +" isn't displaying the background color transparently, set this to 1 and +" Solarized will use the default (transparent) background of the terminal +" emulator. *urxvt* required this in my testing; Terminal.app/iTerm2 did not. +" +" ------------------------------------------------ +" g:solarized_degrade = 0 | 1 +" ------------------------------------------------ +" For test purposes only; forces Solarized to use the 256 degraded color mode +" to test the approximate color values for accuracy. +" +" ------------------------------------------------ +" g:solarized_bold = 1 | 0 +" ------------------------------------------------ +" ------------------------------------------------ +" g:solarized_underline = 1 | 0 +" ------------------------------------------------ +" ------------------------------------------------ +" g:solarized_italic = 1 | 0 +" ------------------------------------------------ +" If you wish to stop Solarized from displaying bold, underlined or +" italicized typefaces, simply assign a zero value to the appropriate +" variable, for example: `let g:solarized_italic=0` +" +" ------------------------------------------------ +" g:solarized_style = "dark" | "light" +" ------------------------------------------------ +" Simply another way to force Solarized to use a dark or light background. +" It's better to use `set background=dark` or `set background=light` in your +" .vimrc file. This option is mostly used in scripts (quick background color +" change) or for testing. Note that, if set, g:solarized_style overrides the +" setting for "background". +" +" ------------------------------------------------ +" g:solarized_contrast = "normal"| "high" or "low" +" ------------------------------------------------ +" Stick with normal! 
It's been carefully tested. Setting this option to high +" or low does use the same Solarized palette but simply shifts some values up +" or down in order to expand or compress the tonal range displayed. +" +" --------------------------------------------------------------------- +" COLOR VALUES +" --------------------------------------------------------------------- +" Download palettes and files from: http://ethanschoonover.com/solarized +" +" L\*a\*b values are canonical (White D65, Reference D50), other values are +" matched in sRGB space. +" +" SOLARIZED HEX 16/8 TERMCOL XTERM/HEX L*A*B sRGB HSB +" --------- ------- ---- ------- ----------- ---------- ----------- ----------- +" base03 #002b36 8/4 brblack 234 #1c1c1c 15 -12 -12 0 43 54 193 100 21 +" base02 #073642 0/4 black 235 #262626 20 -12 -12 7 54 66 192 90 26 +" base01 #586e75 10/7 brgreen 240 #4e4e4e 45 -07 -07 88 110 117 194 25 46 +" base00 #657b83 11/7 bryellow 241 #585858 50 -07 -07 101 123 131 195 23 51 +" base0 #839496 12/6 brblue 244 #808080 60 -06 -03 131 148 150 186 13 59 +" base1 #93a1a1 14/4 brcyan 245 #8a8a8a 65 -05 -02 147 161 161 180 9 63 +" base2 #eee8d5 7/7 white 254 #d7d7af 92 -00 10 238 232 213 44 11 93 +" base3 #fdf6e3 15/7 brwhite 230 #ffffd7 97 00 10 253 246 227 44 10 99 +" yellow #b58900 3/3 yellow 136 #af8700 60 10 65 181 137 0 45 100 71 +" orange #cb4b16 9/3 brred 166 #d75f00 50 50 55 203 75 22 18 89 80 +" red #dc322f 1/1 red 160 #d70000 50 65 45 220 50 47 1 79 86 +" magenta #d33682 5/5 magenta 125 #af005f 50 65 -05 211 54 130 331 74 83 +" violet #6c71c4 13/5 brmagenta 61 #5f5faf 50 15 -45 108 113 196 237 45 77 +" blue #268bd2 4/4 blue 33 #0087ff 55 -10 -45 38 139 210 205 82 82 +" cyan #2aa198 6/6 cyan 37 #00afaf 60 -35 -05 42 161 152 175 74 63 +" green #859900 2/2 green 64 #5f8700 60 -20 65 133 153 0 68 100 60 +" +" --------------------------------------------------------------------- +" COLORSCHEME HACKING +" --------------------------------------------------------------------- +" 
+" Useful commands for testing colorschemes: +" :source $VIMRUNTIME/syntax/hitest.vim +" :help highlight-groups +" :help cterm-colors +" :help group-name +" +" Useful links for developing colorschemes: +" http://www.vim.org/scripts/script.php?script_id=2937 +" http://vimcasts.org/episodes/creating-colorschemes-for-vim/ +" http://www.frexx.de/xterm-256-notes/" +" +" +" }}} +" Default option values"{{{ +" --------------------------------------------------------------------- +if !exists("g:solarized_termtrans") + let g:solarized_termtrans = 0 +endif +if !exists("g:solarized_degrade") + let g:solarized_degrade = 0 +endif +if !exists("g:solarized_bold") + let g:solarized_bold = 1 +endif +if !exists("g:solarized_underline") + let g:solarized_underline = 1 +endif +if !exists("g:solarized_italic") + let g:solarized_italic = 1 +endif +if !exists("g:solarized_termcolors") + let g:solarized_termcolors = 256 +endif +if !exists("g:solarized_style") && !exists("g:solarized_style") + let g:solarized_style = &background +endif +if !exists("g:solarized_contrast") + let g:solarized_contrast = "normal" +endif +"}}} +" Colorscheme basic settings"{{{ +" --------------------------------------------------------------------- +if g:solarized_style == "dark" + set background=dark +elseif g:solarized_style == "light" + set background=light +else + let g:solarized_style = &background +endif + +hi clear +if exists("syntax_on") + syntax reset +endif + +let colors_name = "solarized" +"let colors_name = &background +"if background == "light" +" let colors_name = &background +"else +" let colors_name = &background +"endif +"}}} +" GUI & CSApprox hexadecimal palettes"{{{ +" --------------------------------------------------------------------- +" +" Set both gui and terminal color values in separate conditional statements +" Due to possibility that CSApprox is running (though I suppose we could just +" leave the hex values out entirely in that case and include only cterm colors) +" We also check to 
see if user has set solarized (force use of the +" neutral gray monotone palette component) +if has("gui_running") && g:solarized_degrade == 0 + let s:g_back = "#002b36" + let s:g_base03 = "#002b36" + let s:g_base02 = "#073642" + let s:g_base01 = "#586e75" + let s:g_base00 = "#657b83" + let s:g_base0 = "#839496" + let s:g_base1 = "#93a1a1" + let s:g_base2 = "#eee8d5" + let s:g_base3 = "#fdf6e3" + let s:g_yellow = "#b58900" + let s:g_orange = "#cb4b16" + let s:g_red = "#dc322f" + let s:g_magenta = "#d33682" + let s:g_violet = "#6c71c4" + let s:g_blue = "#268bd2" + let s:g_cyan = "#2aa198" + let s:g_green = "#859900" +else + " these colors are for non-gui vim when CSApprox is installed. CSApprox + " degrades the base colors poorly (bright blues instead of muted gray + " blues) so we set all hex values here to ones which CSApprox will not + " change and which we approve of. Perhaps I should just can the hex values + " and use just the color table values, leaving these blank. Not much + " difference either way and I'd rather be thorough about it. + " They can also be used by setting g:solarized_degrade to 1 in vimrc + let s:g_back = "#1c1c1c" + let s:g_base03 = "#1c1c1c" + let s:g_base02 = "#262626" + let s:g_base01 = "#4e4e4e" + let s:g_base00 = "#585858" + let s:g_base0 = "#808080" + let s:g_base1 = "#8a8a8a" + let s:g_base2 = "#d7d7af" + let s:g_base3 = "#ffffd7" + let s:g_yellow = "#af8700" + let s:g_orange = "#d75f00" + let s:g_red = "#af0000" + let s:g_magenta = "#af005f" + let s:g_violet = "#5f5faf" + let s:g_blue = "#0087ff" + let s:g_cyan = "#00afaf" + let s:g_green = "#5f8700" +endif +"}}} +" 256 Terminal (no CSApprox) and 16 color fallback palettes"{{{ +" --------------------------------------------------------------------- +" We also set this if gui is running as we use the optional formatting +" values that get set here (ou==optional underline, ob==opt bold). 
+if (has("gui_running") || &t_Co == 256) && g:solarized_termcolors != 16 + let s:c_back = "234" + let s:c_base03 = "234" + let s:c_base02 = "235" + let s:c_base01 = "239" + let s:c_base00 = "240" + let s:c_base0 = "244" + let s:c_base1 = "245" + let s:c_base2 = "187" + let s:c_base3 = "230" + let s:c_yellow = "136" + let s:c_orange = "166" + let s:c_red = "124" + let s:c_magenta = "125" + let s:c_violet = "61" + let s:c_blue = "33" + let s:c_cyan = "37" + let s:c_green = "64" + let s:ou = "" + let s:ob = "" +elseif &t_Co > 8 || g:solarized_termcolors == 16 + " NOTE: this requires terminal colors to be set to solarized standard + " 16 colors (see top of this file for details) + let s:c_back = "NONE" + let s:c_base03 = "8" + let s:c_base02 = "0" + let s:c_base01 = "10" + let s:c_base00 = "11" + let s:c_base0 = "12" + let s:c_base1 = "14" + let s:c_base2 = "7" + let s:c_base3 = "15" + let s:c_green = "2" + let s:c_yellow = "3" + let s:c_orange = "9" + let s:c_red = "1" + let s:c_magenta = "5" + let s:c_violet = "13" + let s:c_blue = "4" + let s:c_cyan = "6" + let s:ou = "" + let s:ob = "" +else " must be in an 8 color or less terminal + let s:c_back = "NONE" + let s:c_base03 = "4" + let s:c_base02 = "darkgrey" + let s:c_base01 = "grey" + let s:c_base00 = "darkgrey" + let s:c_base0 = "6" + let s:c_base1 = "4" + let s:c_base2 = "7" + let s:c_base3 = "7" + let s:c_green = "2" + let s:c_yellow = "3" + let s:c_orange = "3" + let s:c_red = "1" + let s:c_magenta = "5" + let s:c_violet = "5" + let s:c_blue = "4" + let s:c_cyan = "6" + let s:ou = ",underline" + let s:ob = ",bold" +endif +"}}} +" Formatting options and null values for passthrough effect"{{{ +" --------------------------------------------------------------------- +let s:g_none = "NONE" +let s:c_none = "NONE" +let s:t_none = "NONE" +let s:n = "NONE" +let s:c = ",undercurl" +let s:r = ",reverse" +let s:s = ",standout" +"}}} +" Alternate light scheme "{{{ +" 
--------------------------------------------------------------------- +if g:solarized_style == "light" + let s:c_temp03 = s:c_base03 + let s:c_temp02 = s:c_base02 + let s:c_temp01 = s:c_base01 + let s:c_temp00 = s:c_base00 + let s:c_base03 = s:c_base3 + let s:c_base02 = s:c_base2 + let s:c_base01 = s:c_base1 + let s:c_base00 = s:c_base0 + let s:c_base0 = s:c_temp00 + let s:c_base1 = s:c_temp01 + let s:c_base2 = s:c_temp02 + let s:c_base3 = s:c_temp03 + let s:c_back = s:c_base03 + let s:g_temp03 = s:g_base03 + let s:g_temp02 = s:g_base02 + let s:g_temp01 = s:g_base01 + let s:g_temp00 = s:g_base00 + let s:g_base03 = s:g_base3 + let s:g_base02 = s:g_base2 + let s:g_base01 = s:g_base1 + let s:g_base00 = s:g_base0 + let s:g_base0 = s:g_temp00 + let s:g_base1 = s:g_temp01 + let s:g_base2 = s:g_temp02 + let s:g_base3 = s:g_temp03 + let s:g_back = s:g_base03 +endif +"}}} +" Alternate inverted background scheme "{{{ +" --------------------------------------------------------------------- +if g:solarized_style == "inverted" + let s:c_temp03 = s:c_base03 + let s:c_temp02 = s:c_base02 + let s:c_base03 = s:c_temp02 + let s:c_base02 = s:c_temp03 + let s:c_back = s:c_base03 + let s:g_temp03 = s:g_base03 + let s:g_temp02 = s:g_base02 + let s:g_base03 = s:g_temp02 + let s:g_base02 = s:g_temp03 + let s:g_back = s:g_base03 +endif +"}}} +" Optional contrast schemes "{{{ +" --------------------------------------------------------------------- +if g:solarized_contrast == "high" + let s:g_base03 = s:g_base03 + let s:g_base02 = s:g_base02 + let s:g_base01 = s:g_base00 + let s:g_base00 = s:g_base0 + let s:g_base0 = s:g_base1 + let s:g_base1 = s:g_base2 + let s:g_base2 = s:g_base3 + let s:g_base3 = s:g_base3 + let s:g_back = s:g_back +endif +if g:solarized_contrast == "low" + let s:g_back = s:g_base02 + let s:ou = ",underline" +endif +"}}} +" Overrides dependent on user specified values"{{{ +" --------------------------------------------------------------------- +if g:solarized_termtrans == 
1 + let s:c_back = "NONE" +endif + +if g:solarized_bold == 1 + let s:b = ",bold" +else + let s:b = "" +endif + +if g:solarized_underline == 1 + let s:u = ",underline" +else + let s:u = "" +endif + +if g:solarized_italic == 1 + let s:i = ",italic" +else + let s:i = "" +endif +"}}} +" Highlighting primitives"{{{ +" --------------------------------------------------------------------- + +exe "let s:bg_none = ' ctermbg=".s:c_none ." guibg=".s:g_none ."'" +exe "let s:bg_back = ' ctermbg=".s:c_back ." guibg=".s:g_back ."'" +exe "let s:bg_base03 = ' ctermbg=".s:c_base03 ." guibg=".s:g_base03 ."'" +exe "let s:bg_base02 = ' ctermbg=".s:c_base02 ." guibg=".s:g_base02 ."'" +exe "let s:bg_base01 = ' ctermbg=".s:c_base01 ." guibg=".s:g_base01 ."'" +exe "let s:bg_base00 = ' ctermbg=".s:c_base00 ." guibg=".s:g_base00 ."'" +exe "let s:bg_base0 = ' ctermbg=".s:c_base0 ." guibg=".s:g_base0 ."'" +exe "let s:bg_base1 = ' ctermbg=".s:c_base1 ." guibg=".s:g_base1 ."'" +exe "let s:bg_base2 = ' ctermbg=".s:c_base2 ." guibg=".s:g_base2 ."'" +exe "let s:bg_base3 = ' ctermbg=".s:c_base3 ." guibg=".s:g_base3 ."'" +exe "let s:bg_green = ' ctermbg=".s:c_green ." guibg=".s:g_green ."'" +exe "let s:bg_yellow = ' ctermbg=".s:c_yellow ." guibg=".s:g_yellow ."'" +exe "let s:bg_orange = ' ctermbg=".s:c_orange ." guibg=".s:g_orange ."'" +exe "let s:bg_red = ' ctermbg=".s:c_red ." guibg=".s:g_red ."'" +exe "let s:bg_magenta = ' ctermbg=".s:c_magenta." guibg=".s:g_magenta."'" +exe "let s:bg_violet = ' ctermbg=".s:c_violet ." guibg=".s:g_violet ."'" +exe "let s:bg_blue = ' ctermbg=".s:c_blue ." guibg=".s:g_blue ."'" +exe "let s:bg_cyan = ' ctermbg=".s:c_cyan ." guibg=".s:g_cyan ."'" + +exe "let s:fg_none = ' ctermfg=".s:c_none ." guifg=".s:g_none ."'" +exe "let s:fg_back = ' ctermfg=".s:c_back ." guifg=".s:g_back ."'" +exe "let s:fg_base03 = ' ctermfg=".s:c_base03 ." guifg=".s:g_base03 ."'" +exe "let s:fg_base02 = ' ctermfg=".s:c_base02 ." 
guifg=".s:g_base02 ."'" +exe "let s:fg_base01 = ' ctermfg=".s:c_base01 ." guifg=".s:g_base01 ."'" +exe "let s:fg_base00 = ' ctermfg=".s:c_base00 ." guifg=".s:g_base00 ."'" +exe "let s:fg_base0 = ' ctermfg=".s:c_base0 ." guifg=".s:g_base0 ."'" +exe "let s:fg_base1 = ' ctermfg=".s:c_base1 ." guifg=".s:g_base1 ."'" +exe "let s:fg_base2 = ' ctermfg=".s:c_base2 ." guifg=".s:g_base2 ."'" +exe "let s:fg_base3 = ' ctermfg=".s:c_base3 ." guifg=".s:g_base3 ."'" +exe "let s:fg_green = ' ctermfg=".s:c_green ." guifg=".s:g_green ."'" +exe "let s:fg_yellow = ' ctermfg=".s:c_yellow ." guifg=".s:g_yellow ."'" +exe "let s:fg_orange = ' ctermfg=".s:c_orange ." guifg=".s:g_orange ."'" +exe "let s:fg_red = ' ctermfg=".s:c_red ." guifg=".s:g_red ."'" +exe "let s:fg_magenta = ' ctermfg=".s:c_magenta." guifg=".s:g_magenta."'" +exe "let s:fg_violet = ' ctermfg=".s:c_violet ." guifg=".s:g_violet ."'" +exe "let s:fg_blue = ' ctermfg=".s:c_blue ." guifg=".s:g_blue ."'" +exe "let s:fg_cyan = ' ctermfg=".s:c_cyan ." guifg=".s:g_cyan ."'" + +exe "let s:sp_none = ' guisp=".s:g_none ."'" +exe "let s:sp_back = ' guisp=".s:g_back ."'" +exe "let s:sp_base03 = ' guisp=".s:g_base03 ."'" +exe "let s:sp_base02 = ' guisp=".s:g_base02 ."'" +exe "let s:sp_base01 = ' guisp=".s:g_base01 ."'" +exe "let s:sp_base00 = ' guisp=".s:g_base00 ."'" +exe "let s:sp_base0 = ' guisp=".s:g_base0 ."'" +exe "let s:sp_base1 = ' guisp=".s:g_base1 ."'" +exe "let s:sp_base2 = ' guisp=".s:g_base2 ."'" +exe "let s:sp_base3 = ' guisp=".s:g_base3 ."'" +exe "let s:sp_green = ' guisp=".s:g_green ."'" +exe "let s:sp_yellow = ' guisp=".s:g_yellow ."'" +exe "let s:sp_orange = ' guisp=".s:g_orange ."'" +exe "let s:sp_red = ' guisp=".s:g_red ."'" +exe "let s:sp_magenta = ' guisp=".s:g_magenta."'" +exe "let s:sp_violet = ' guisp=".s:g_violet ."'" +exe "let s:sp_blue = ' guisp=".s:g_blue ."'" +exe "let s:sp_cyan = ' guisp=".s:g_cyan ."'" + +exe "let s:fmt_none = ' cterm=NONE". " gui=NONE". " term=NONE". 
"'" +exe "let s:fmt_bold = ' cterm=NONE".s:b." gui=NONE".s:b." term=NONE".s:b."'" +exe "let s:fmt_bldi = ' cterm=NONE".s:b." gui=NONE".s:b.s:i." term=NONE".s:b."'" +exe "let s:fmt_undr = ' cterm=NONE".s:u." gui=NONE".s:u." term=NONE".s:u."'" +exe "let s:fmt_undb = ' cterm=NONE".s:u.s:b." gui=NONE".s:u.s:b. + \" term=NONE".s:u.s:b."'" +exe "let s:fmt_undi = ' cterm=NONE".s:u." gui=NONE".s:u.s:i. + \" term=NONE".s:u."'" +exe "let s:fmt_uopt = ' cterm=NONE".s:ou." gui=NONE".s:ou. + \" term=NONE".s:ou."'" +exe "let s:fmt_bopt = ' cterm=NONE".s:ob." gui=NONE".s:ob. + \" term=NONE".s:ob."'" +exe "let s:fmt_curl = ' cterm=NONE".s:c." gui=NONE".s:c." term=NONE".s:c."'" +exe "let s:fmt_ital = ' cterm=NONE". " gui=NONE".s:i." term=NONE". "'" +exe "let s:fmt_revr = ' cterm=NONE".s:r." gui=NONE".s:r." term=NONE".s:r."'" +exe "let s:fmt_stnd = ' cterm=NONE".s:s." gui=NONE".s:s." term=NONE".s:s."'" +"}}} +" Basic highlighting"{{{ +" --------------------------------------------------------------------- +" note that link syntax to avoid duplicate configuration doesn't work with the +" exe compiled formats + +exe "hi Normal" . s:fg_base0 .s:bg_back .s:fmt_none + +exe "hi Comment" . s:fg_base01 .s:bg_none .s:fmt_ital +" *Comment any comment + +exe "hi Constant" . s:fg_cyan .s:bg_none .s:fmt_none +"exe "hi String" . s:fg_yellow .s:bg_none .s:fmt_none +" *Constant any constant +" String a string constant: "this is a string" +" Character a character constant: 'c', '\n' +" Number a number constant: 234, 0xff +" Boolean a boolean constant: TRUE, false +" Float a floating point constant: 2.3e10 + +exe "hi Identifier" . s:fg_blue .s:bg_none .s:fmt_none +" *Identifier any variable name +" Function function name (also: methods for classes) +" +exe "hi Statement" . s:fg_green .s:bg_none .s:fmt_none +" *Statement any statement +" Conditional if, then, else, endif, switch, etc. +" Repeat for, do, while, etc. +" Label case, default, etc. +" Operator "sizeof", "+", "*", etc. 
+" Keyword any other keyword +" Exception try, catch, throw + +exe "hi PreProc" . s:fg_orange .s:bg_none .s:fmt_none +" *PreProc generic Preprocessor +" Include preprocessor #include +" Define preprocessor #define +" Macro same as Define +" PreCondit preprocessor #if, #else, #endif, etc. + +exe "hi Type" . s:fg_yellow .s:bg_none .s:fmt_none +" *Type int, long, char, etc. +" StorageClass static, register, volatile, etc. +" Structure struct, union, enum, etc. +" Typedef A typedef + +exe "hi Special" . s:fg_red .s:bg_none .s:fmt_none +" *Special any special symbol +" SpecialChar special character in a constant +" Tag you can use CTRL-] on this +" Delimiter character that needs attention +" SpecialComment special things inside a comment +" Debug debugging statements + +exe "hi Underlined" . s:fg_violet .s:bg_none .s:fmt_none +" *Underlined text that stands out, HTML links + +exe "hi Ignore" . s:fg_none .s:bg_none .s:fmt_none +" *Ignore left blank, hidden |hl-Ignore| + +exe "hi Error" . s:fg_red .s:bg_none .s:fmt_bold +" *Error any erroneous construct + +exe "hi Todo" . s:fg_magenta.s:bg_none .s:fmt_bold +" *Todo anything that needs extra attention; mostly the +" keywords TODO FIXME and XXX +" +"Highlighting groups for various occasions +"----------------------------------------- +exe "hi SpecialKey" . s:fg_base02 .s:bg_none .s:fmt_none +exe "hi NonText" . s:fg_base02 .s:bg_none .s:fmt_bold +exe "hi Directory" . s:fg_blue .s:bg_none .s:fmt_none +exe "hi ErrorMsg" . s:fg_red .s:bg_none .s:fmt_revr +exe "hi IncSearch" . s:fg_yellow .s:bg_none .s:fmt_revr +exe "hi Search" . s:fg_yellow .s:bg_none .s:fmt_stnd +exe "hi MoreMsg" . s:fg_blue .s:bg_none .s:fmt_none +exe "hi ModeMsg" . s:fg_blue .s:bg_none .s:fmt_none +exe "hi LineNr" . s:fg_base01 .s:bg_base02 .s:fmt_none +exe "hi Question" . s:fg_cyan .s:bg_none .s:fmt_bold +exe "hi StatusLine" . s:fg_base0 .s:bg_base02 .s:fmt_none +exe "hi StatusLineNC" . s:fg_base1 .s:bg_base02 .s:fmt_none +exe "hi VertSplit" . 
s:fg_base0 .s:bg_base02 .s:fmt_none +exe "hi Title" . s:fg_orange .s:bg_none .s:fmt_bold +exe "hi Visual" . s:fg_none .s:bg_base02 .s:fmt_stnd +exe "hi VisualNOS" . s:fg_none .s:bg_base02 .s:fmt_stnd +exe "hi WarningMsg" . s:fg_red .s:bg_none .s:fmt_bold +exe "hi WildMenu" . s:fg_base1 .s:bg_base02 .s:fmt_none +exe "hi Folded" . s:fg_base0 .s:bg_base02 .s:fmt_undr .s:sp_base03 +exe "hi FoldColumn" . s:fg_base0 .s:bg_base02 .s:fmt_bold +exe "hi DiffAdd" . s:fg_green .s:bg_none .s:fmt_revr +exe "hi DiffChange" . s:fg_yellow .s:bg_none .s:fmt_revr +exe "hi DiffDelete" . s:fg_red .s:bg_none .s:fmt_revr +exe "hi DiffText" . s:fg_blue .s:bg_none .s:fmt_revr +exe "hi SignColumn" . s:fg_base0 .s:bg_base02 .s:fmt_none +exe "hi Conceal" . s:fg_blue .s:bg_none .s:fmt_none +exe "hi SpellBad" . s:fg_none .s:bg_none .s:fmt_curl .s:sp_red +exe "hi SpellCap" . s:fg_none .s:bg_none .s:fmt_curl .s:sp_violet +exe "hi SpellRare" . s:fg_none .s:bg_none .s:fmt_curl .s:sp_cyan +exe "hi SpellLocal" . s:fg_none .s:bg_none .s:fmt_curl .s:sp_yellow +exe "hi Pmenu" . s:fg_base0 .s:bg_base02 .s:fmt_none +exe "hi PmenuSel" . s:fg_base2 .s:bg_base01 .s:fmt_none +exe "hi PmenuSbar" . s:fg_base0 .s:bg_base2 .s:fmt_none +exe "hi PmenuThumb" . s:fg_base03 .s:bg_base0 .s:fmt_none +exe "hi TabLine" . s:fg_base0 .s:bg_base02 .s:fmt_undr .s:sp_base0 +exe "hi TabLineSel" . s:fg_base2 .s:bg_base01 .s:fmt_undr .s:sp_base0 +exe "hi TabLineFill" . s:fg_base0 .s:bg_base02 .s:fmt_undr .s:sp_base0 +exe "hi CursorColumn" . s:fg_none .s:bg_base02 .s:fmt_none +exe "hi CursorLine" . s:fg_none .s:bg_base02 .s:fmt_uopt .s:sp_base1 +exe "hi ColorColumn" . s:fg_none .s:bg_base02 .s:fmt_none +exe "hi Cursor" . s:fg_none .s:bg_none .s:fmt_revr +exe "hi lCursor" . s:fg_none .s:bg_none .s:fmt_stnd +exe "hi MatchParen" . 
s:fg_red .s:bg_base01 .s:fmt_bold + +"}}} +" Extended highlighting "{{{ +" --------------------------------------------------------------------- +"}}} +" vim syntax highlighting "{{{ +" --------------------------------------------------------------------- +exe "hi vimLineComment" . s:fg_base01 .s:bg_none .s:fmt_ital +exe "hi vimCommentString".s:fg_violet .s:bg_none .s:fmt_none +hi link vimVar Identifier +hi link vimFunc Function +hi link vimUserFunc Function +exe "hi vimCommand" . s:fg_yellow .s:bg_none .s:fmt_none +exe "hi vimCmdSep" . s:fg_blue .s:bg_none .s:fmt_bold +exe "hi helpExample" . s:fg_base1 .s:bg_none .s:fmt_none +hi link helpSpecial Special +"exe "hi helpSpecial" . s:fg_yellow .s:bg_none .s:fmt_none +exe "hi helpOption" . s:fg_cyan .s:bg_none .s:fmt_none +exe "hi helpNote" . s:fg_magenta.s:bg_none .s:fmt_none +exe "hi helpVim" . s:fg_magenta.s:bg_none .s:fmt_none +exe "hi helpHyperTextJump".s:fg_blue .s:bg_none .s:fmt_undr +exe "hi helpHyperTextEntry".s:fg_green .s:bg_none .s:fmt_none +exe "hi vimIsCommand" . s:fg_base00 .s:bg_none .s:fmt_none +exe "hi vimSynMtchOpt". s:fg_yellow .s:bg_none .s:fmt_none +exe "hi vimSynType" . s:fg_cyan .s:bg_none .s:fmt_none +exe "hi vimHiLink" . s:fg_blue .s:bg_none .s:fmt_none +exe "hi vimHiGroup" . s:fg_blue .s:bg_none .s:fmt_none +exe "hi vimGroup" . s:fg_blue .s:bg_none .s:fmt_undb +"}}} +" html highlighting "{{{ +" --------------------------------------------------------------------- +exe "hi htmlTag" . s:fg_base01 .s:bg_none .s:fmt_none +exe "hi htmlEndTag" . s:fg_base01 .s:bg_none .s:fmt_none +exe "hi htmlTagN" . s:fg_base1 .s:bg_none .s:fmt_bold +exe "hi htmlTagName" . s:fg_blue .s:bg_none .s:fmt_bold +exe "hi htmlSpecialTagName". s:fg_blue .s:bg_none .s:fmt_ital +exe "hi htmlArg" . s:fg_base00 .s:bg_none .s:fmt_none +exe "hi javaScript" . s:fg_yellow .s:bg_none .s:fmt_none +"}}} +" perl highlighting "{{{ +" --------------------------------------------------------------------- +exe "hi perlHereDoc" . 
s:fg_base1 .s:bg_back .s:fmt_none +exe "hi perlVarPlain" . s:fg_yellow .s:bg_back .s:fmt_none +exe "hi perlStatementFileDesc" . s:fg_cyan .s:bg_back .s:fmt_none + +"}}} +" tex highlighting "{{{ +" --------------------------------------------------------------------- +exe "hi texStatement" . s:fg_cyan .s:bg_back .s:fmt_none +exe "hi texMathZoneX" . s:fg_yellow .s:bg_back .s:fmt_none +exe "hi texMathMatcher" . s:fg_yellow .s:bg_back .s:fmt_none +exe "hi texMathMatcher" . s:fg_yellow .s:bg_back .s:fmt_none +exe "hi texRefLabel" . s:fg_yellow .s:bg_back .s:fmt_none +"}}} +" ruby highlighting "{{{ +" --------------------------------------------------------------------- +exe "hi rubyDefine" . s:fg_base1 .s:bg_back .s:fmt_bold +"rubyInclude +"rubySharpBang +"rubyAccess +"rubyPredefinedVariable +"rubyBoolean +"rubyClassVariable +"rubyBeginEnd +"rubyRepeatModifier +"hi link rubyArrayDelimiter Special " [ , , ] +"rubyCurlyBlock { , , } + +"hi link rubyClass Keyword +"hi link rubyModule Keyword +"hi link rubyKeyword Keyword +"hi link rubyOperator Operator +"hi link rubyIdentifier Identifier +"hi link rubyInstanceVariable Identifier +"hi link rubyGlobalVariable Identifier +"hi link rubyClassVariable Identifier +"hi link rubyConstant Type +"}}} +" haskell syntax highlighting"{{{ +" --------------------------------------------------------------------- +" For use with syntax/haskell.vim : Haskell Syntax File +" http://www.vim.org/scripts/script.php?script_id=3034 +" See also Steffen Siering's github repository: +" http://github.com/urso/dotrc/blob/master/vim/syntax/haskell.vim +" --------------------------------------------------------------------- +" +" Treat True and False specially, see the plugin referenced above +let hs_highlight_boolean=1 +" highlight delims, see the plugin referenced above +let hs_highlight_delimiters=1 + +exe "hi cPreCondit". s:fg_orange.s:bg_none .s:fmt_none + +exe "hi VarId" . s:fg_blue .s:bg_none .s:fmt_none +exe "hi ConId" . 
s:fg_yellow .s:bg_none .s:fmt_none +exe "hi hsImport" . s:fg_magenta.s:bg_none .s:fmt_none +exe "hi hsString" . s:fg_base00 .s:bg_none .s:fmt_none + +exe "hi hsStructure" . s:fg_cyan .s:bg_none .s:fmt_none +exe "hi hs_hlFunctionName" . s:fg_blue .s:bg_none +exe "hi hsStatement" . s:fg_cyan .s:bg_none .s:fmt_none +exe "hi hsImportLabel" . s:fg_cyan .s:bg_none .s:fmt_none +exe "hi hs_OpFunctionName" . s:fg_yellow .s:bg_none .s:fmt_none +exe "hi hs_DeclareFunction" . s:fg_orange .s:bg_none .s:fmt_none +exe "hi hsVarSym" . s:fg_cyan .s:bg_none .s:fmt_none +exe "hi hsType" . s:fg_yellow .s:bg_none .s:fmt_none +exe "hi hsTypedef" . s:fg_cyan .s:bg_none .s:fmt_none +exe "hi hsModuleName" . s:fg_green .s:bg_none .s:fmt_undr +exe "hi hsModuleStartLabel" . s:fg_magenta.s:bg_none .s:fmt_none +hi link hsImportParams Delimiter +hi link hsDelimTypeExport Delimiter +hi link hsModuleStartLabel hsStructure +hi link hsModuleWhereLabel hsModuleStartLabel + +" following is for the haskell-conceal plugin +" the first two items don't have an impact, but better safe +exe "hi hsNiceOperator" . s:fg_cyan .s:bg_none .s:fmt_none +exe "hi hsniceoperator" . 
s:fg_cyan .s:bg_none .s:fmt_none + +"}}} +" pandoc markdown syntax highlighting "{{{ +" --------------------------------------------------------------------- + +"PandocHiLink pandocNormalBlock +exe "hi pandocTitleBlock" .s:fg_blue .s:bg_none .s:fmt_none +exe "hi pandocTitleBlockTitle" .s:fg_blue .s:bg_none .s:fmt_bold +exe "hi pandocTitleComment" .s:fg_blue .s:bg_none .s:fmt_bold +exe "hi pandocComment" .s:fg_base01 .s:bg_none .s:fmt_ital +exe "hi pandocVerbatimBlock" .s:fg_yellow .s:bg_none .s:fmt_none +hi link pandocVerbatimBlockDeep pandocVerbatimBlock +hi link pandocCodeBlock pandocVerbatimBlock +hi link pandocCodeBlockDelim pandocVerbatimBlock +exe "hi pandocBlockQuote" .s:fg_blue .s:bg_none .s:fmt_none +exe "hi pandocBlockQuoteLeader1" .s:fg_blue .s:bg_none .s:fmt_none +exe "hi pandocBlockQuoteLeader2" .s:fg_cyan .s:bg_none .s:fmt_none +exe "hi pandocBlockQuoteLeader3" .s:fg_yellow .s:bg_none .s:fmt_none +exe "hi pandocBlockQuoteLeader4" .s:fg_red .s:bg_none .s:fmt_none +exe "hi pandocBlockQuoteLeader5" .s:fg_base0 .s:bg_none .s:fmt_none +exe "hi pandocBlockQuoteLeader6" .s:fg_base01 .s:bg_none .s:fmt_none +exe "hi pandocListMarker" .s:fg_magenta.s:bg_none .s:fmt_none +exe "hi pandocListReference" .s:fg_magenta.s:bg_none .s:fmt_undr + +" Definitions +" --------------------------------------------------------------------- +let s:fg_pdef = s:fg_violet +exe "hi pandocDefinitionBlock" .s:fg_pdef .s:bg_none .s:fmt_none +exe "hi pandocDefinitionTerm" .s:fg_pdef .s:bg_none .s:fmt_stnd +exe "hi pandocDefinitionIndctr" .s:fg_pdef .s:bg_none .s:fmt_bold +exe "hi pandocEmphasisDefinition" .s:fg_pdef .s:bg_none .s:fmt_ital +exe "hi pandocEmphasisNestedDefinition" .s:fg_pdef .s:bg_none .s:fmt_bldi +exe "hi pandocStrongEmphasisDefinition" .s:fg_pdef .s:bg_none .s:fmt_bold +exe "hi pandocStrongEmphasisNestedDefinition" .s:fg_pdef.s:bg_none.s:fmt_bldi +exe "hi pandocStrongEmphasisEmphasisDefinition" .s:fg_pdef.s:bg_none.s:fmt_bldi +exe "hi pandocStrikeoutDefinition" 
.s:fg_pdef .s:bg_none .s:fmt_revr +exe "hi pandocVerbatimInlineDefinition" .s:fg_pdef .s:bg_none .s:fmt_none +exe "hi pandocSuperscriptDefinition" .s:fg_pdef .s:bg_none .s:fmt_none +exe "hi pandocSubscriptDefinition" .s:fg_pdef .s:bg_none .s:fmt_none + +" Tables +" --------------------------------------------------------------------- +let s:fg_ptable = s:fg_blue +exe "hi pandocTable" .s:fg_ptable.s:bg_none .s:fmt_none +exe "hi pandocTableStructure" .s:fg_ptable.s:bg_none .s:fmt_none +hi link pandocTableStructureTop pandocTableStructre +hi link pandocTableStructureEnd pandocTableStructre +exe "hi pandocTableZebraLight" .s:fg_ptable.s:bg_base03.s:fmt_none +exe "hi pandocTableZebraDark" .s:fg_ptable.s:bg_base02.s:fmt_none +exe "hi pandocEmphasisTable" .s:fg_ptable.s:bg_none .s:fmt_ital +exe "hi pandocEmphasisNestedTable" .s:fg_ptable.s:bg_none .s:fmt_bldi +exe "hi pandocStrongEmphasisTable" .s:fg_ptable.s:bg_none .s:fmt_bold +exe "hi pandocStrongEmphasisNestedTable" .s:fg_ptable.s:bg_none .s:fmt_bldi +exe "hi pandocStrongEmphasisEmphasisTable" .s:fg_ptable.s:bg_none .s:fmt_bldi +exe "hi pandocStrikeoutTable" .s:fg_ptable.s:bg_none .s:fmt_revr +exe "hi pandocVerbatimInlineTable" .s:fg_ptable.s:bg_none .s:fmt_none +exe "hi pandocSuperscriptTable" .s:fg_ptable.s:bg_none .s:fmt_none +exe "hi pandocSubscriptTable" .s:fg_ptable.s:bg_none .s:fmt_none + +" Headings +" --------------------------------------------------------------------- +let s:fg_phead = s:fg_orange +exe "hi pandocHeading" .s:fg_phead .s:bg_none.s:fmt_bold +exe "hi pandocHeadingMarker" .s:fg_yellow.s:bg_none.s:fmt_bold +exe "hi pandocEmphasisHeading" .s:fg_phead .s:bg_none.s:fmt_bldi +exe "hi pandocEmphasisNestedHeading" .s:fg_phead .s:bg_none.s:fmt_bldi +exe "hi pandocStrongEmphasisHeading" .s:fg_phead .s:bg_none.s:fmt_bold +exe "hi pandocStrongEmphasisNestedHeading" .s:fg_phead .s:bg_none.s:fmt_bldi +exe "hi pandocStrongEmphasisEmphasisHeading".s:fg_phead .s:bg_none.s:fmt_bldi +exe "hi 
pandocStrikeoutHeading" .s:fg_phead .s:bg_none.s:fmt_revr +exe "hi pandocVerbatimInlineHeading" .s:fg_phead .s:bg_none.s:fmt_bold +exe "hi pandocSuperscriptHeading" .s:fg_phead .s:bg_none.s:fmt_bold +exe "hi pandocSubscriptHeading" .s:fg_phead .s:bg_none.s:fmt_bold + +" Links +" --------------------------------------------------------------------- +exe "hi pandocLinkDelim" .s:fg_base01 .s:bg_none .s:fmt_none +exe "hi pandocLinkLabel" .s:fg_blue .s:bg_none .s:fmt_undr +exe "hi pandocLinkText" .s:fg_blue .s:bg_none .s:fmt_undb +exe "hi pandocLinkURL" .s:fg_base00 .s:bg_none .s:fmt_undr +exe "hi pandocLinkTitle" .s:fg_base00 .s:bg_none .s:fmt_undi +exe "hi pandocLinkTitleDelim" .s:fg_base01 .s:bg_none .s:fmt_undi .s:sp_base00 +exe "hi pandocLinkDefinition" .s:fg_cyan .s:bg_none .s:fmt_undr .s:sp_base00 +exe "hi pandocLinkDefinitionID" .s:fg_blue .s:bg_none .s:fmt_bold +exe "hi pandocImageCaption" .s:fg_violet .s:bg_none .s:fmt_undb +exe "hi pandocFootnoteLink" .s:fg_green .s:bg_none .s:fmt_undr +exe "hi pandocFootnoteDefLink" .s:fg_green .s:bg_none .s:fmt_bold +exe "hi pandocFootnoteInline" .s:fg_green .s:bg_none .s:fmt_undb +exe "hi pandocFootnote" .s:fg_green .s:bg_none .s:fmt_none +exe "hi pandocCitationDelim" .s:fg_magenta.s:bg_none .s:fmt_none +exe "hi pandocCitation" .s:fg_magenta.s:bg_none .s:fmt_none +exe "hi pandocCitationID" .s:fg_magenta.s:bg_none .s:fmt_undr +exe "hi pandocCitationRef" .s:fg_magenta.s:bg_none .s:fmt_none + +" Main Styles +" --------------------------------------------------------------------- +exe "hi pandocStyleDelim" .s:fg_base01 .s:bg_none .s:fmt_none +exe "hi pandocEmphasis" .s:fg_base0 .s:bg_none .s:fmt_ital +exe "hi pandocEmphasisNested" .s:fg_base0 .s:bg_none .s:fmt_bldi +exe "hi pandocStrongEmphasis" .s:fg_base0 .s:bg_none .s:fmt_bold +exe "hi pandocStrongEmphasisNested" .s:fg_base0 .s:bg_none .s:fmt_bldi +exe "hi pandocStrongEmphasisEmphasis" .s:fg_base0 .s:bg_none .s:fmt_bldi +exe "hi pandocStrikeout" .s:fg_base01 .s:bg_none 
.s:fmt_revr +exe "hi pandocVerbatimInline" .s:fg_yellow .s:bg_none .s:fmt_none +exe "hi pandocSuperscript" .s:fg_violet .s:bg_none .s:fmt_none +exe "hi pandocSubscript" .s:fg_violet .s:bg_none .s:fmt_none + +exe "hi pandocRule" .s:fg_blue .s:bg_none .s:fmt_bold +exe "hi pandocRuleLine" .s:fg_blue .s:bg_none .s:fmt_bold +exe "hi pandocEscapePair" .s:fg_red .s:bg_none .s:fmt_bold +exe "hi pandocCitationRef" .s:fg_magenta.s:bg_none .s:fmt_none +exe "hi pandocNonBreakingSpace" . s:fg_red .s:bg_none .s:fmt_revr +hi link pandocEscapedCharacter pandocEscapePair +hi link pandocLineBreak pandocEscapePair + +" Embedded Code +" --------------------------------------------------------------------- +exe "hi pandocMetadataDelim" .s:fg_base01 .s:bg_none .s:fmt_none +exe "hi pandocMetadata" .s:fg_blue .s:bg_none .s:fmt_none +exe "hi pandocMetadataKey" .s:fg_blue .s:bg_none .s:fmt_none +exe "hi pandocMetadata" .s:fg_blue .s:bg_none .s:fmt_bold +hi link pandocMetadataTitle pandocMetadata + +"}}} +" License "{{{ +" --------------------------------------------------------------------- +" +" Copyright (c) 2011 Ethan Schoonover +" +" Permission is hereby granted, free of charge, to any person obtaining a copy +" of this software and associated documentation files (the "Software"), to deal +" in the Software without restriction, including without limitation the rights +" to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +" copies of the Software, and to permit persons to whom the Software is +" furnished to do so, subject to the following conditions: +" +" The above copyright notice and this permission notice shall be included in +" all copies or substantial portions of the Software. +" +" THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +" IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +" FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +" AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +" LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +" OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +" THE SOFTWARE. +" +"}}} diff --git a/.vim/colors/terse.vim b/.vim/colors/terse.vim new file mode 100644 index 0000000..53b1567 --- /dev/null +++ b/.vim/colors/terse.vim @@ -0,0 +1,126 @@ +" Terse Verses Of Utter Control +" A Vim Color Theme +" Author: Jevgeni Tarasov <[email protected]> +" Version: 1 +" +" A terse color theme. No candy colors, no wild contrasts, no green on black. +" N.B! The cterm version is woefully underdeveloped in this version. Don't use +" it. + +"-- Init ---------------------------------------------------------------------- +hi clear +set background=light +if version > 580 + hi clear + if exists("syntax_on") + syntax reset + endif +endif +let g:colors_name="terse" + + +"-- Text Elements ------------------------------------------------------------- +hi Normal gui=NONE guifg=#00011f guibg=#ecebe7 +\ cterm=NONE ctermfg=black ctermbg=white + +hi Statement gui=bold guifg=#00011f guibg=NONE +\ cterm=bold ctermfg=black ctermbg=white + +hi Comment gui=italic guifg=#38362b guibg=NONE +\ cterm=italic ctermfg=black ctermbg=white + +hi Type gui=NONE guifg=NONE guibg=NONE +\ cterm=NONE ctermfg=black ctermbg=white + +hi Special gui=bold guifg=NONE guibg=NONE +\ cterm=bold ctermfg=black ctermbg=white + +hi Identifier gui=underline guifg=NONE guibg=NONE +\ cterm=NONE ctermfg=black ctermbg=white + +hi PreProc gui=bold,italic guifg=NONE guibg=NONE +\ cterm=NONE ctermfg=black ctermbg=white + +hi Constant gui=italic guifg=NONE guibg=NONE +\ cterm=NONE ctermfg=black ctermbg=white + +hi String gui=italic guifg=NONE guibg=#e2e1dd +\ cterm=NONE ctermfg=black ctermbg=white + +hi StatusLine gui=italic guifg=#ecebe7 guibg=#00011f +\ cterm=NONE ctermfg=white ctermbg=grey +hi StatusLineNC gui=italic guifg=#ecebe7 
guibg=#00011f +\ cterm=NONE ctermfg=white ctermbg=grey + + +hi Error gui=NONE guifg=white guibg=#c80f3f +\ cterm=NONE ctermfg=black ctermbg=white + +hi Todo gui=bold,italic guifg=#e4115b guibg=#e2e1dd +\ cterm=NONE ctermfg=black ctermbg=white + +hi Underlined gui=underline guifg=NONE guibg=NONE +\ cterm=underline ctermfg=black ctermbg=white + + +"-- Non-Text Elements --------------------------------------------------------- +hi NonText gui=NONE guifg=#58595b guibg=#e2e1dd +\ cterm=NONE ctermfg=grey ctermbg=black + +hi Search gui=NONE guifg=NONE guibg=#fbf285 +\ cterm=NONE ctermfg=black ctermbg=yellow +hi IncSearch gui=NONE guifg=NONE guibg=#fbf285 +\ cterm=NONE ctermfg=black ctermbg=yellow + +hi Directory gui=bold guifg=NONE guibg=NONE +\ cterm=NONE ctermfg=black ctermbg=white + +hi MoreMsg gui=italic guifg=NONE guibg=#e2e1dd +\ cterm=NONE ctermfg=black ctermbg=white + +hi LineNr gui=italic guifg=#58595b guibg=#e2e1dd +\ cterm=NONE ctermfg=grey ctermbg=black + +hi VertSplit gui=italic guifg=#00011f guibg=#00011f +\ cterm=NONE ctermfg=grey ctermbg=grey + +hi Question gui=NONE guifg=NONE guibg=NONE +\ cterm=NONE ctermfg=black ctermbg=white + +hi Title gui=bold,italic guifg=NONE guibg=NONE +\ cterm=NONE ctermfg=black ctermbg=white + +hi WarningMsg gui=NONE guifg=white guibg=#c80f3f +\ cterm=NONE ctermfg=black ctermbg=white + +hi Folded gui=NONE guifg=NONE guibg=#e2e1dd +\ cterm=NONE ctermfg=black ctermbg=white +hi FoldColumn gui=NONE guifg=NONE guibg=#e2e1dd +\ cterm=NONE ctermfg=black ctermbg=white + +hi DiffAdd gui=NONE guifg=NONE guibg=#acd58e +\ cterm=NONE ctermfg=black ctermbg=white +hi DiffChange gui=NONE guifg=#00254f guibg=#93d2f3 +\ cterm=NONE ctermfg=black ctermbg=white +hi DiffDelete gui=NONE guifg=#820056 guibg=#edb2d1 +\ cterm=NONE ctermfg=black ctermbg=white +hi DiffText gui=NONE guifg=#00254f guibg=#93d2f3 +\ cterm=NONE ctermfg=black ctermbg=white + +hi Cursor gui=NONE guifg=white guibg=black +\ cterm=NONE ctermfg=white ctermbg=black + +hi Visual 
gui=NONE guifg=NONE guibg=#efbf28 +\ cterm=NONE ctermfg=black ctermbg=white + +hi MatchParen gui=bold guifg=#c80f3f guibg=NONE +\ cterm=NONE ctermfg=white ctermbg=black + + +" MacVim was changing the color of Visual everytime focus was lost +" The following code snippet was stolen from macvim.vim, to counter that effect. +if has("gui_macvim") && !exists("s:augroups_defined") + au FocusLost * if exists("colors_name") && colors_name == "terse" | hi Visual guibg=#efbf285 | endif + au FocusGained * if exists("colors_name") && colors_name == "terse" | hi Visual guibg=#efbf285 | endif + let s:augroups_defined = 1 +endif diff --git a/.vim/colors/vydark.vim b/.vim/colors/vydark.vim new file mode 100644 index 0000000..549c189 --- /dev/null +++ b/.vim/colors/vydark.vim @@ -0,0 +1,80 @@ +" +" Vim colour file +" +" Maintainer: Vy-Shane Sin Fat <[email protected]> +" Version: 1.0 +" +" This colour file is meant for GUI use. +" + +set background=dark +hi clear +if exists("syntax_on") + syntax reset +endif +let g:colors_name="vydark" + + +hi Normal guifg=#bbbbbb guibg=#282828 +hi Title guifg=white +hi Cursor guibg=#ffffff +hi LineNr guifg=#444455 guibg=#292929 +hi Visual guibg=#555555 +hi NonText guifg=#292929 guibg=#292929 +hi StatusLine guifg=#bbbbbb guibg=#353535 gui=none +hi StatusLineNC guifg=#777777 guibg=#353535 gui=none +hi VertSplit guifg=#353535 guibg=#353535 gui=none +hi ModeMsg guifg=#99dd99 guibg=#394439 gui=none +hi ErrorMsg guifg=#222222 guibg=#ff8888 gui=none +hi Error guifg=#ffaaaa guibg=#333333 gui=none + + +" Vim 7.x specific +if version >= 700 + hi MatchParen guibg=#364836 gui=none + hi Pmenu guifg=#bbbbbb guibg=#444444 gui=none + hi PmenuSel guifg=#222222 guibg=#99bbdd gui=none + hi PmenuSbar guifg=#494949 guibg=#494949 gui=bold + hi PmenuThumb guifg=#666666 guibg=#666666 gui=bold + hi Search guifg=#dddd99 guibg=#444433 gui=none + hi IncSearch guifg=#eeeeaa guibg=#666633 gui=bold + hi CursorLine guibg=#353535 gui=none + hi ColorColumn guibg=#292929 +endif + + 
+" Syntax highlighting +hi Comment guifg=#666677 gui=none +hi Todo guifg=#8888aa guibg=#303030 gui=italic +hi Operator guifg=#bbbbbb gui=none +hi Identifier guifg=#bbbbbb gui=none +hi Statement guifg=#bbbbbb gui=none +hi Type guifg=#99bbcc gui=none +hi Constant guifg=#88cc99 gui=none +hi Conditional guifg=#99bbcc gui=none +hi Delimiter guifg=#99bbdd gui=none +hi PreProc guifg=#88ddcc gui=none +hi Special guifg=#99dd99 gui=bold +hi Keyword guifg=#bbbbbb gui=none + +hi link Function Normal +hi link Character Constant +hi link String Constant +hi link Boolean Constant +hi link Number Constant +hi link Float Number +hi link Repeat Conditional +hi link Label Statement +hi link Exception Statement +hi link Include Normal +hi link Define Type +hi link Macro PreProc +hi link PreCondit PreProc +hi link StorageClass Type +hi link Structure Type +hi link Typedef Type +hi link Tag Special +hi link SpecialChar Special +hi link SpecialComment Special +hi link Debug Special + diff --git a/.vim/colors/werks.vim b/.vim/colors/werks.vim new file mode 100644 index 0000000..b789837 --- /dev/null +++ b/.vim/colors/werks.vim @@ -0,0 +1,75 @@ +" Vim color file +" inspired by the desert color scheme by Hans Fugal <hans[AT]fugal[DOT]net> +" Maintainer: Bidit Mazumder <biditm[AT]users[DOT]sf[DOT]net> +" Last Change: December 5, 2009 + +" background +set background=dark +let g:colors_name="werks" + + +" highlight normal +hi Normal guifg=#F5E8D3 guibg=#333333 + + +" gui highlight cursor +hi iCursor guifg=#000000 guibg=#F5E8D3 +hi Cursor guifg=#FFFFFF guibg=#DAA520 + +set guicursor+=n-c:blinkwait800-iCursor + +set guicursor+=i-v:ver10-Cursor +set guicursor+=r:hor10-Cursor + +set guicursor+=ci:ver10-iCursor +set guicursor+=cr:hor10-iCursor + +set guicursor+=sm:iCursor +set guicursor+=o:hor50-Error + + +" highlight groups +hi DiffAdd guifg=#FAEBD7 guibg=#483D8B +hi DiffChange guifg=#FAEBD7 guibg=#B03060 +hi DiffDelete guifg=#FAEBD7 guibg=#00595A +hi DiffText guifg=#FAEBD7 guibg=#D2691E +hi 
Directory guifg=#00FFFF +hi ErrorMsg guifg=#F5F5F5 guibg=#8F0000 +hi FoldColumn guifg=#FFEC8B guibg=#4D4D4D +hi Folded guifg=#DAA520 guibg=#4D4D4D +hi IncSearch guifg=#708090 guibg=#F0E68C +hi LineNr guifg=#F3F2AE guibg=#4D4D4D +hi ModeMsg guifg=#FF4500 +hi MoreMsg guifg=#2E8B57 +hi NonText guifg=#4D4D4D +hi Question guifg=#90EE90 +hi Search guifg=#F5E8D3 guibg=#708090 +hi SignColumn guifg=#2F4F4F guibg=#8FBC8F +hi SpecialKey guifg=#C080D0 +hi StatusLine guifg=#000000 guibg=#C2BFA5 gui=none +hi StatusLineNC guifg=#666666 guibg=#C2BFA5 gui=none +hi Title guifg=#F5E8D3 +hi VertSplit guifg=#666666 guibg=#C2BFA5 gui=none +hi Visual guifg=#FAEBD7 guibg=#74873B gui=none +hi VisualNOS guifg=#FFFFFF +hi WarningMsg guifg=#DC143C +hi WildMenu guifg=#191970 guibg=#BBBB00 +"hi CursorIM +"hi Menu +"hi Scrollbar +"hi Tooltip +"hi Pmenu +"hi PmenuSel +"hi PmenuSbar +"hi PmenuThumb +"hi TabLine +"hi TabLineSel +"hi TabLineFill +"hi CursorColumn +"hi CursorLine + +" syntax highlighting groups +hi Error guifg=#F5F5F5 guibg=#8F0000 +hi Ignore guifg=#666666 +hi PreProc guifg=#CD5C5C +hi Todo guifg=#F0E68C guibg=#9400D3 diff --git a/.vim/colors/wombat.vim b/.vim/colors/wombat.vim new file mode 100644 index 0000000..9ad1e56 --- /dev/null +++ b/.vim/colors/wombat.vim @@ -0,0 +1,51 @@ +" Maintainer: Lars H. 
Nielsen ([email protected]) +" Last Change: January 22 2007 + +set background=dark + +hi clear + +if exists("syntax_on") + syntax reset +endif + +let colors_name = "wombat" + + +" Vim >= 7.0 specific colors +if version >= 700 + hi CursorLine guibg=#2d2d2d + hi CursorColumn guibg=#2d2d2d + hi MatchParen guifg=#f6f3e8 guibg=#857b6f gui=bold + hi Pmenu guifg=#f6f3e8 guibg=#444444 + hi PmenuSel guifg=#000000 guibg=#cae682 +endif + +" General colors +hi Cursor guifg=NONE guibg=#656565 gui=none +hi Normal guifg=#f6f3e8 guibg=#242424 gui=none +hi NonText guifg=#808080 guibg=#303030 gui=none +hi LineNr guifg=#857b6f guibg=#000000 gui=none +hi StatusLine guifg=#f6f3e8 guibg=#444444 gui=italic +hi StatusLineNC guifg=#857b6f guibg=#444444 gui=none +hi VertSplit guifg=#444444 guibg=#444444 gui=none +hi Folded guibg=#384048 guifg=#a0a8b0 gui=none +hi Title guifg=#f6f3e8 guibg=NONE gui=bold +hi Visual guifg=#f6f3e8 guibg=#444444 gui=none +hi SpecialKey guifg=#808080 guibg=#343434 gui=none + +" Syntax highlighting +hi Comment guifg=#99968b gui=italic +hi Todo guifg=#8f8f8f gui=italic +hi Constant guifg=#e5786d gui=none +hi String guifg=#95e454 gui=italic +hi Identifier guifg=#cae682 gui=none +hi Function guifg=#cae682 gui=none +hi Type guifg=#cae682 gui=none +hi Statement guifg=#8ac6f2 gui=none +hi Keyword guifg=#8ac6f2 gui=none +hi PreProc guifg=#e5786d gui=none +hi Number guifg=#e5786d gui=none +hi Special guifg=#e7f6da gui=none + + diff --git a/.vim/colors/xorium.vim b/.vim/colors/xorium.vim new file mode 100644 index 0000000..49c089b --- /dev/null +++ b/.vim/colors/xorium.vim @@ -0,0 +1,118 @@ +" Vim color file +" +" Name: xorium.vim +" Version: 1.0 +" Maintainer: AUAnonymous <[email protected]> +" +" Based off of Dmitriy Y. Zotikov's (xio, <[email protected]>) xoria256 +" +" Should work in recent 256 color terminals. 88-color terms like urxvt are +" NOT supported. +" +" Don't forget to install 'ncurses-term' and set TERM to xterm-256color or +" similar value. 
+" +" Color numbers (0-255) see: +" http://www.calmar.ws/vim/256-xterm-24bit-rgb-color-chart.html +" +" For a specific filetype highlighting rules issue :syntax list when a file of +" that type is opened. +" +" Initialization {{{ +if &t_Co != 256 && ! has("gui_running") + echomsg "" + echomsg "err: please use GUI or a 256-color terminal (so that t_Co=256 could be set)" + echomsg "" + finish +endif + +set background=dark + +hi clear + +if exists("syntax_on") + syntax reset +endif + +let colors_name = "xorium" + +" Group ctermfg guifg ctermbg guibg cterm gui +hi Normal ctermfg=252 guifg=#d0d0d0 ctermbg=234 guibg=#1c1c1c cterm=none gui=none +hi Cursor ctermbg=214 guibg=#ffaf00 +hi CursorColumn ctermbg=236 guibg=#2c2c2c +hi CursorLine ctermbg=236 guibg=#2c2c2c cterm=none gui=none +hi Error ctermfg=15 guifg=#ffffff ctermbg=88 guibg=#800000 +hi ErrorMsg ctermfg=15 guifg=#ffffff ctermbg=88 guibg=#800000 +hi FoldColumn ctermfg=247 guifg=#9e9e9e ctermbg=233 guibg=#121212 +hi Folded ctermfg=255 guifg=#eeeeee ctermbg=60 guibg=#5f5f87 +hi IncSearch ctermfg=0 guifg=#000000 ctermbg=223 guibg=#ffdfaf cterm=none gui=none +hi LineNr ctermfg=247 guifg=#9e9e9e ctermbg=233 guibg=#121212 +hi MatchParen ctermfg=188 guifg=#dfdfdf ctermbg=68 guibg=#5f87df cterm=none gui=none +" TODO +" hi MoreMsg +hi NonText ctermfg=236 guifg=#2c2c2c cterm=none gui=none +hi Pmenu ctermfg=0 guifg=#000000 ctermbg=250 guibg=#bcbcbc +hi PmenuSel ctermfg=255 guifg=#eeeeee ctermbg=243 guibg=#767676 +hi PmenuSbar ctermbg=252 guibg=#d0d0d0 +hi PmenuThumb ctermfg=243 guifg=#767676 +hi Search ctermfg=0 guifg=#000000 ctermbg=149 guibg=#afdf5f ctermbg=149 +hi SignColumn ctermfg=248 guifg=#a8a8a8 +hi SpecialKey ctermfg=237 guifg=#3d3d3d +hi SpellBad ctermfg=160 guifg=fg ctermbg=bg cterm=underline guisp=#df0000 +hi SpellCap ctermfg=189 guifg=#dfdfff ctermbg=bg guibg=bg cterm=underline gui=underline +hi SpellRare ctermfg=168 guifg=#df5f87 ctermbg=bg guibg=bg cterm=underline gui=underline +hi SpellLocal ctermfg=98 
guifg=#875fdf ctermbg=bg guibg=bg cterm=underline gui=underline +hi StatusLine ctermfg=15 guifg=#ffffff ctermbg=239 guibg=#4e4e4e cterm=bold gui=none +hi StatusLineNC ctermfg=249 guifg=#b2b2b2 ctermbg=237 guibg=#3a3a3a cterm=none gui=none +hi TabLine ctermfg=fg guifg=fg ctermbg=242 guibg=#666666 cterm=none gui=none +hi TabLineFill ctermfg=fg guifg=fg ctermbg=237 guibg=#3a3a3a cterm=none gui=none +" FIXME +hi Title ctermfg=225 guifg=#ffdfff +hi TODO ctermfg=0 guifg=#000000 ctermbg=220 guibg=#ffc400 +hi Underlined ctermfg=39 guifg=#00afff cterm=underline gui=underline +hi VertSplit ctermfg=237 guifg=#3a3a3a ctermbg=237 guibg=#3a3a3a cterm=none gui=none +hi Visual ctermfg=234 guifg=#202020 ctermbg=220 guibg=#FFD000 +hi VisualNOS ctermfg=255 guifg=#eeeeee ctermbg=60 guibg=#5f5f87 +hi WildMenu ctermfg=0 guifg=#000000 ctermbg=150 guibg=#afdf87 cterm=bold gui=bold + +"" Syntax highlighting {{{2 +hi comment ctermfg=66 guifg=#5f7682 +hi constant ctermfg=111 guifg=#94bbff +hi Identifier ctermfg=212 guifg=#f786ea cterm=none +hi Ignore ctermfg=238 guifg=#444444 +hi number ctermfg=028 guifg=#ed7715 +hi preproc ctermfg=149 guifg=#a1de6f +hi Special ctermfg=174 guifg=#eb8181 +hi Statement ctermfg=227 guifg=#ffff70 cterm=none gui=none +hi type ctermfg=104 guifg=#9b9bde cterm=none gui=none + +"" Special {{{2 +""" .diff {{{3 +hi diffAdded ctermfg=150 guifg=#afdf87 +hi diffRemoved ctermfg=174 guifg=#df8787 +""" vimdiff {{{3 +hi diffAdd ctermfg=bg guifg=bg ctermbg=151 guibg=#afdfaf +hi diffDelete ctermfg=bg guifg=bg ctermbg=246 guibg=#949494 cterm=none gui=none +hi diffChange ctermfg=bg guifg=bg ctermbg=181 guibg=#dfafaf +hi diffText ctermfg=bg guifg=bg ctermbg=174 guibg=#df8787 cterm=none gui=none +""" HTML {{{3 +hi htmlTag ctermfg=244 +hi htmlEndTag ctermfg=244 +hi htmlArg ctermfg=182 guifg=#dfafdf +hi htmlValue ctermfg=187 guifg=#dfdfaf +hi htmlTitle ctermfg=254 ctermbg=95 +""" django {{{3 +hi djangoVarBlock ctermfg=180 +hi djangoTagBlock ctermfg=150 +hi djangoStatement ctermfg=146 
+hi djangoFilter ctermfg=174 +""" python {{{3 +hi pythonExceptions ctermfg=174 +""" NERDTree {{{3 +hi Directory ctermfg=110 guifg=#87afdf +hi treeCWD ctermfg=180 guifg=#dfaf87 +hi treeClosable ctermfg=174 guifg=#df8787 +hi treeOpenable ctermfg=150 guifg=#afdf87 +hi treePart ctermfg=244 guifg=#808080 +hi treeDirSlash ctermfg=244 guifg=#808080 +hi treeLink ctermfg=182 guifg=#dfafdf diff --git a/.vim/colors/zazen.vim b/.vim/colors/zazen.vim new file mode 100644 index 0000000..394b9d0 --- /dev/null +++ b/.vim/colors/zazen.vim @@ -0,0 +1,164 @@ +" ============================================================================= +" File: zazen.vim +" Description: Vim color scheme file +" Maintainer: Zoltan Dezso +" ============================================================================= +set background=dark +highlight clear +if exists("syntax_on") + syntax reset +endif +let colors_name = "zazen" + +hi Normal guifg=#cccccc guibg=#000000 gui=NONE +hi CursorLine guifg=NONE guibg=#555555 gui=NONE +hi CursorColumn guifg=NONE guibg=#1a1a1a gui=NONE +hi LineNr guifg=#555555 guibg=NONE gui=NONE +hi Statement guifg=#a6a6a6 guibg=NONE gui=bold +hi Function guifg=#eeeeee guibg=NONE gui=bold +hi String guifg=#838383 guibg=NONE gui=NONE +hi Type guifg=#eeeeee guibg=NONE gui=bold +hi Conditional guifg=#787878 guibg=NONE gui=bold +hi Todo guifg=#ff0000 guibg=#220000 gui=underline +hi Comment guifg=#777777 guibg=NONE gui=NONE +hi PmenuSel guifg=#000000 guibg=#a9a9aa gui=bold +hi Special guifg=#868585 guibg=NONE gui=NONE +hi Identifier guifg=#ffffff guibg=NONE gui=bold +hi Keyword guifg=#666666 guibg=NONE gui=bold +hi PreProc guifg=#6b6b6b guibg=NONE gui=bold +hi Include guifg=#e0e0e0 guibg=NONE gui=NONE +hi Constant guifg=#838383 guibg=#222222 gui=bold +hi Delimiter guifg=#838383 guibg=NONE gui=NONE + +hi Visual guifg=#ffffff guibg=#515151 gui=NONE +hi ColorColumn guifg=NONE guibg=#222222 +hi Cursor guifg=bg guibg=fg gui=NONE +hi CursorIM guifg=bg guibg=fg gui=NONE +hi lCursor guifg=bg 
guibg=fg gui=NONE + +hi DiffAdd guifg=#00cc00 guibg=#002200 gui=NONE +hi DiffChange guifg=#ff9955 guibg=#220000 gui=NONE +hi DiffDelete guifg=#ff0000 guibg=#220000 gui=NONE +hi DiffText guifg=#ff0000 guibg=#220000 gui=NONE + +hi Directory guifg=#929292 guibg=bg gui=NONE +hi ErrorMsg guifg=#6f6f6f guibg=NONE gui=NONE +hi FoldColumn guifg=#555555 guibg=#414141 gui=bold +hi Folded guifg=#828282 guibg=#212121 gui=italic + +hi IncSearch guifg=#000000 guibg=#adadad gui=NONE +hi Search guifg=#000000 guibg=#c5c3c3 gui=NONE +hi MatchParen guifg=#000000 guibg=#a8a8a8 gui=bold +hi ModeMsg guifg=#ffffff guibg=#767676 gui=bold +hi MoreMsg guifg=#7c7c7c guibg=bg gui=bold +hi NonText guifg=#7e7e7e guibg=bg gui=bold + +hi Pmenu guifg=#656565 guibg=#3f3f3f gui=NONE +hi PmenuSbar guifg=fg guibg=#5d5d5d gui=NONE +hi PmenuThumb guifg=fg guibg=#777777 gui=NONE + +hi Question guifg=#454545 guibg=bg gui=bold +hi SignColumn guifg=#ffffff guibg=#696969 gui=NONE + +hi SpecialKey guifg=#ffffff guibg=#696969 gui=NONE + +hi SpellBad guisp=#ffffff guibg=#000000 gui=undercurl guisp=#ffc0c0 +hi SpellCap guisp=#5d5d5d gui=undercurl +hi SpellLocal guisp=#434343 gui=undercurl +hi SpellRare guisp=#7d7d7d gui=undercurl +hi StatusLine guifg=#000000 guibg=#727272 gui=bold +hi StatusLineNC guifg=#5a5959 guibg=#222222 gui=italic +hi TabLine guifg=fg guibg=#757575 gui=underline +hi TabLineFill guifg=fg guibg=bg gui=reverse +hi TabLineSel guifg=fg guibg=bg gui=bold +hi Title guifg=#6d6d6d guibg=bg gui=bold +hi VertSplit guifg=#222222 guibg=#222222 +hi WarningMsg guifg=#cfcfcf guibg=#5b5b5b gui=NONE +hi WildMenu guifg=#000000 guibg=#828282 gui=NONE +hi Boolean guifg=#616060 guibg=NONE gui=bold +hi Ignore guifg=bg guibg=NONE gui=NONE +hi Error guifg=#ff7272 guibg=NONE gui=undercurl guisp=#ff0000 +" ----------------------------------------------------------------------------- +hi VimError guifg=#b6b6b6 guibg=#313131 gui=bold +hi VimCommentTitle guifg=#5c5c5c guibg=bg gui=bold,italic +hi qfFileName 
guifg=#6a6a6a guibg=NONE gui=italic +hi qfLineNr guifg=fg guibg=NONE gui=NONE +hi qfError guifg=fg guibg=#000000 gui=undercurl + +" 256-color Terminal support +if &t_Co > 255 + hi Normal ctermfg=251 ctermbg=0 cterm=NONE + hi CursorLine ctermfg=NONE ctermbg=240 cterm=NONE + hi CursorColumn ctermfg=NONE ctermbg=234 cterm=NONE + hi LineNr ctermfg=240 ctermbg=NONE cterm=NONE + hi Statement ctermfg=248 ctermbg=NONE cterm=bold + hi Function ctermfg=255 ctermbg=NONE cterm=bold + hi String ctermfg=244 ctermbg=NONE cterm=NONE + hi Type ctermfg=255 ctermbg=NONE cterm=bold + hi Conditional ctermfg=243 ctermbg=NONE cterm=bold + hi Todo ctermfg=9 ctermbg=NONE cterm=underline + hi Comment ctermfg=243 ctermbg=NONE cterm=NONE + hi PmenuSel ctermfg=0 ctermbg=248 cterm=bold + hi Special ctermfg=244 ctermbg=NONE cterm=NONE + hi Identifier ctermfg=15 ctermbg=NONE cterm=bold + hi Keyword ctermfg=242 ctermbg=NONE cterm=bold + hi PreProc ctermfg=242 ctermbg=NONE cterm=bold + hi Include ctermfg=254 ctermbg=NONE cterm=NONE + hi Constant ctermfg=244 ctermbg=235 cterm=bold + hi Delimiter ctermfg=244 ctermbg=NONE cterm=NONE + + hi Visual ctermfg=15 ctermbg=239 cterm=NONE + hi ColorColumn ctermfg=NONE ctermbg=234 + hi Cursor ctermfg=bg ctermbg=fg cterm=NONE + hi CursorIM ctermfg=bg ctermbg=fg cterm=NONE + hi lCursor ctermfg=bg ctermbg=fg cterm=NONE + + hi DiffAdd ctermfg=40 ctermbg=22 cterm=NONE + hi DiffChange ctermfg=209 ctermbg=52 cterm=NONE + hi DiffDelete ctermfg=9 ctermbg=52 cterm=NONE + hi DiffText ctermfg=9 ctermbg=52 cterm=NONE + + hi Directory ctermfg=246 ctermbg=bg cterm=NONE + hi ErrorMsg ctermfg=242 ctermbg=NONE cterm=NONE + hi FoldColumn ctermfg=240 ctermbg=237 cterm=bold + hi Folded ctermfg=244 ctermbg=234 cterm=italic + + hi IncSearch ctermfg=0 ctermbg=248 cterm=NONE + hi Search ctermfg=0 ctermbg=251 cterm=NONE + hi MatchParen ctermfg=0 ctermbg=248 cterm=bold + hi ModeMsg ctermfg=15 ctermbg=243 cterm=bold + hi MoreMsg ctermfg=243 ctermbg=bg cterm=bold + hi NonText ctermfg=244 
ctermbg=bg cterm=bold + + hi Pmenu ctermfg=242 ctermbg=237 cterm=NONE + hi PmenuSbar ctermfg=fg ctermbg=241 cterm=NONE + hi PmenuThumb ctermfg=fg ctermbg=243 cterm=NONE + + hi Question ctermfg=238 ctermbg=bg cterm=bold + hi SignColumn ctermfg=15 ctermbg=242 cterm=NONE + + hi SpecialKey ctermfg=15 ctermbg=242 cterm=NONE + + hi SpellBad ctermbg=0 cterm=NONE + hi SpellCap ctermbg=0 cterm=NONE + hi SpellLocal ctermbg=0 cterm=NONE + hi SpellRare ctermbg=0 cterm=NONE + hi StatusLine ctermfg=0 ctermbg=243 cterm=bold + hi StatusLineNC ctermfg=241 ctermbg=235 cterm=italic + hi TabLine ctermfg=fg ctermbg=243 cterm=underline + hi TabLineFill ctermfg=fg ctermbg=bg cterm=reverse + hi TabLineSel ctermfg=fg ctermbg=bg cterm=bold + hi Title ctermfg=242 ctermbg=bg cterm=bold + hi VertSplit ctermfg=234 ctermbg=234 + hi WarningMsg ctermfg=252 ctermbg=240 cterm=NONE + hi WildMenu ctermfg=0 ctermbg=244 cterm=NONE + hi Boolean ctermfg=241 ctermbg=NONE cterm=bold + hi Ignore ctermfg=bg ctermbg=NONE cterm=NONE + hi Error ctermfg=210 ctermbg=NONE cterm=underline + + hi VimError ctermfg=250 ctermbg=236 cterm=bold + hi VimCommentTitle ctermfg=240 ctermbg=bg cterm=bold,italic + hi qfFileName ctermfg=242 ctermbg=NONE cterm=italic + hi qfLineNr ctermfg=fg ctermbg=NONE cterm=NONE + hi qfError ctermfg=fg ctermbg=0 cterm=underline +end diff --git a/.vimrc b/.vimrc index 22656b4..0582f79 100644 --- a/.vimrc +++ b/.vimrc @@ -1,605 +1,609 @@ " http://github.com/mitechie/pyvim " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - 
yank current line to clipboard " ,p - paste from clipboard " ,q - reformat text paragraph " ,s - toggle spellcheck " ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " ,t - collapse/fold html tag " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ " bootstrap the pathogen part of the config right away filetype off call pathogen#runtime_append_all_bundles() call pathogen#helptags() " Highlight end of line whitespace. 
" match WhitespaceEOL /\s\+$/ au InsertEnter * match WhitespaceEOL /\s\+$/ au InsertLeave * match WhitespaceEOL /\s\+$/ " make sure our whitespace matching is setup before we do colorscheme tricks autocmd ColorScheme * highlight WhitespaceEOL ctermbg=red guibg=red " now proceed as usual syntax on " syntax highlighing filetype on " try to detect filetypes filetype plugin indent on " enable loading indent file for filetype " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 7" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme void - colorscheme lucius colorscheme diablo3 + colorscheme lucius + colorscheme aldmeris " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme lucius set t_Co=256 endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=79 " Try this out to see how textwidth helps set ch=1 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages 
set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... set statusline=%F%m%r%h%w\ [FORMAT=%{&ff}]\ [TYPE=%Y]\ [ASCII=\%03.3b]\ [HEX=\%02.2B]\ [POS=%04l,%04v][%p%%]\ [LEN=%L] +hi StatusLine guifg=#fcf4ba guibg=#333333 +hi StatusLineNC guifg=#808080 guibg=#333333 " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Config Specific Settings " ================================================== " If we're running in vimdiff then tweak out settings a bit if &diff set nospell endif " ================================================== " Basic Maps " ================================================== - +" " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR>:cw<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting 
from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " setup a custom dict for spelling " zg = add word to dict " zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> nmap <leader>l :lopen<CR> nmap <leader>ll :lclose<CR> nmap <leader>ln :lN<CR> nmap <leader>lp :lN<CR> " for when we forget to use sudo to open/edit a file cmap w!! w !sudo tee % >/dev/null nnoremap <leader>q gqap " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " Hints for other movements " <c-w><c-r> rotate window to next spot " <c-w><c-x> swap window with current one " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode " function! ScreenMovement(movement) " if &wrap " return "g" . 
a:movement " else " return a:movement " endif " endfunction " onoremap <silent> <expr> j ScreenMovement("j") " onoremap <silent> <expr> k ScreenMovement("k") " onoremap <silent> <expr> 0 ScreenMovement("0") " onoremap <silent> <expr> ^ ScreenMovement("^") " onoremap <silent> <expr> $ ScreenMovement("$") " nnoremap <silent> <expr> j ScreenMovement("j") " nnoremap <silent> <expr> k ScreenMovement("k") " nnoremap <silent> <expr> 0 ScreenMovement("0") " nnoremap <silent> <expr> ^ ScreenMovement("^") " nnoremap <silent> <expr> $ ScreenMovement("$") " " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching set smartcase " if searching and search contains upper case, make case sensitive search +nmap <silent> <C-N> :silent noh<CR> nmap <silent> <C-N> :silent noh<CR> " Search for potentially strange non-ascii characters map <leader>u :match Error /[\x7f-\xff]/<CR> " Clean all end of line extra whitespace with ,S " Credit: voyeg3r https://github.com/mitechie/pyvim/issues/#issue/1 " deletes excess space but maintains the list of jumps unchanged " for more details see: h keepjumps fun! CleanExtraSpaces() let save_cursor = getpos(".") let old_query = getreg('/') :%s/\s\+$//e call setpos('.', save_cursor) call setreg('/', old_query) endfun map <silent><leader>S <esc>:keepjumps call CleanExtraSpaces()<cr> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t set completeopt+=menuone,longest let g:SuperTabDefaultCompletionType = "context" let g:SuperTabContextDefaultCompletionType = "<c-n>" let g:SuperTabLongestHighlight = 1 let g:SuperTabMidWordCompletion = 1 " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufReadPost quickfix map <buffer> <silent> <CR> :.cc <CR> :ccl " au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py compiler nose au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead *.js set errorformat=%-P%f, \%-G/*jslint\ %.%#*/, \%*[\ ]%n\ %l\\,%c:\ %m, \%-G\ \ \ \ %.%#, \%-GNo\ errors\ found., \%-Q " ================================================== " CSS " ================================================== au BufRead *.css set makeprg=csslint\ % au BufRead *.css set errorformat=%A%f:,%C%n:\ warning\ at\ line\ %l\\,\ col\ %c,%C%m,%C%.%# " au BufRead *.css set errorformat=%-Gcsslint:\ There%.%#,%A%f:,%C%n:\ %t%\\w%\\+\ at\ line %l\,\ col\ %c,%Z%m,%A%f:,%C%n:\ %t%\\w%\\+\ at\ line %l\,\ col\ %c,%C%m,%-Z%.%#,%-G%.%# " format " bookie.css: " 1: warning " Too many font-size declarations (13), abstraction needed. " bookie.css: 1: warning Too many font-size declarations (13), abstraction needed. " bookie.css: 2: warning at line 2, col 2 Rule is empty. 
BODY { " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " enable html tag folding with ,f nnoremap <leader>f Vatzf " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " mako.vim " http://www.vim.org/scripts/script.php?script_id=2663 " syntax support for mako code " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>a :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 autocmd FileType python map <buffer> <leader>M :call Pep8()<CR>:cw<CR> " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " bundle/snipmate/after/plugin/snipmate ino <silent> <leader>, <c-r>=TriggerSnippet()<cr> snor <silent> 
<leader>, <esc>i<right><c-r>=TriggerSnippet()<cr> ino <silent> <leader>\< <c-r>=BackwardsSnippet()<cr> snor <silent> <leader>\< <esc>i<right><c-r>=BackwardsSnippet()<cr> ino <silent> <leader>n <c-r>=ShowAvailableSnips()<cr> " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pyflakes " http://www.vim.org/scripts/script.php?script_id=3161 " default config for underlines of syntax errors in gvim let g:pyflakes_use_quickfix = 0 " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist -l " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter "source ~/.vim/twitvim.vim " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/share/vim/vimfiles/plugin/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 " Tagbar " https://github.com/majutsushi/tagbar/ " Show ctags info in the sidebar nmap <silent> <leader>l :TagbarToggle<CR> " function! CustomCodeAssistInsertMode() " call RopeCodeAssistInsertMode() " if pumvisible() " return "\<C-L>\<Down>" " else " return '' " endif " endfunction " " function! 
TabWrapperComplete() " let cursyn = synID(line('.'), col('.') - 1, 1) " if pumvisible() " return "\<C-Y>" " endif " if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 " return "\<Tab>" " else " return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" " endif " endfunction " " inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " vim-makegreen && vim-nosecompiler " unit testing python code in during editing " I use files in the same dir test_xxxx.* " if we're already on the test_xxx.py file, just rerun current test file " function MakeArgs() " if empty(matchstr(expand('%'), 'test_')) " " if no test_ on the filename, then add it to run tests " let make_args = 'test_%' " else " let make_args = '%' " endif " " :call MakeGreen(make_args) " endfunction " " autocmd FileType python map <buffer> <leader>t :call MakeArgs()<CR> " " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>) " javascript folding function! JavaScriptFold() setl foldmethod=syntax setl foldlevelstart=1 syn region foldBraces start=/{/ end=/}/ transparent fold keepend extend function! FoldText() return substitute(getline(v:foldstart), '{.*', '{...}', '') endfunction setl foldtext=FoldText() endfunction
mitechie/pyvim
dd2537769e8b36ddb098169fe721599990a95c0a
Add mail snippet for into bookie email
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index 3578226..b66b45b 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,5 +1,9 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =3 +let g:netrw_dirhist_cnt =7 let g:netrw_dirhist_1='/home/rharding/configs/pyvim/.vim/bundle/Vim-nosecompiler' let g:netrw_dirhist_2='/home/rharding/configs/pyvim/.vim/bundle/vim-makegreen' let g:netrw_dirhist_3='/home/rharding/configs/pyvim/.vim/bundle/Vim-nosecompiler' +let g:netrw_dirhist_4='/home/rharding/.config/awesome' +let g:netrw_dirhist_5='/home/rharding/configs/dotfiles/awesome/autostart' +let g:netrw_dirhist_6='/var/lib/postgres/data' +let g:netrw_dirhist_7='/home/rharding/Dropbox/docs/mug_loco' diff --git a/.vim/after/syntax/css.vim b/.vim/after/syntax/css.vim deleted file mode 100644 index 09954a3..0000000 --- a/.vim/after/syntax/css.vim +++ /dev/null @@ -1,318 +0,0 @@ -" Language: Colored CSS Color Preview -" Maintainer: Niklas Hofer <[email protected]> -" URL: svn://lanpartei.de/vimrc/after/syntax/css.vim -" Last Change: 2008 Feb 12 -" Licence: No Warranties. Do whatever you want with this. But please tell me! -" Version: 0.6 - -function! s:FGforBG(bg) - " takes a 6hex color code and returns a matching color that is visible - let pure = substitute(a:bg,'^#','','') - let r = eval('0x'.pure[0].pure[1]) - let g = eval('0x'.pure[2].pure[3]) - let b = eval('0x'.pure[4].pure[5]) - if r*30 + g*59 + b*11 > 12000 - return '#000000' - else - return '#ffffff' - end -endfunction - -function! s:SetMatcher(clr,pat) - let group = 'cssColor'.substitute(a:clr,'^#','','') - redir => s:currentmatch - silent! exe 'syn list '.group - redir END - if s:currentmatch !~ a:pat.'\/' - exe 'syn match '.group.' /'.a:pat.'\>/ contained' - exe 'syn cluster cssColors add='.group - if has('gui_running') - exe 'hi '.group.' guifg='.s:FGforBG(a:clr) - exe 'hi '.group.' guibg='.a:clr - elseif &t_Co == 256 - exe 'hi '.group.' ctermfg='.s:Rgb2xterm(s:FGforBG(a:clr)) - exe 'hi '.group.' 
ctermbg='.s:Rgb2xterm(a:clr) - endif - return 1 - else - return 0 - endif -endfunction - -"" the 6 value iterations in the xterm color cube -let s:valuerange = [ 0x00, 0x5F, 0x87, 0xAF, 0xD7, 0xFF ] -" -"" 16 basic colors -let s:basic16 = [ [ 0x00, 0x00, 0x00 ], [ 0xCD, 0x00, 0x00 ], [ 0x00, 0xCD, 0x00 ], [ 0xCD, 0xCD, 0x00 ], [ 0x00, 0x00, 0xEE ], [ 0xCD, 0x00, 0xCD ], [ 0x00, 0xCD, 0xCD ], [ 0xE5, 0xE5, 0xE5 ], [ 0x7F, 0x7F, 0x7F ], [ 0xFF, 0x00, 0x00 ], [ 0x00, 0xFF, 0x00 ], [ 0xFF, 0xFF, 0x00 ], [ 0x5C, 0x5C, 0xFF ], [ 0xFF, 0x00, 0xFF ], [ 0x00, 0xFF, 0xFF ], [ 0xFF, 0xFF, 0xFF ] ] -: -function! s:Xterm2rgb(color) - " 16 basic colors - let r=0 - let g=0 - let b=0 - if a:color<16 - let r = s:basic16[a:color][0] - let g = s:basic16[a:color][1] - let b = s:basic16[a:color][2] - endif - - " color cube color - if a:color>=16 && a:color<=232 - let color=a:color-16 - let r = s:valuerange[(color/36)%6] - let g = s:valuerange[(color/6)%6] - let b = s:valuerange[color%6] - endif - - " gray tone - if a:color>=233 && a:color<=253 - let r=8+(a:color-232)*0x0a - let g=r - let b=r - endif - let rgb=[r,g,b] - return rgb -endfunction - -function! s:pow(x, n) - let x = a:x - for i in range(a:n-1) - let x = x*a:x - return x -endfunction - -let s:colortable=[] -for c in range(0, 254) - let color = s:Xterm2rgb(c) - call add(s:colortable, color) -endfor - -" selects the nearest xterm color for a rgb value like #FF0000 -function! s:Rgb2xterm(color) - let best_match=0 - let smallest_distance = 10000000000 - let r = eval('0x'.a:color[1].a:color[2]) - let g = eval('0x'.a:color[3].a:color[4]) - let b = eval('0x'.a:color[5].a:color[6]) - for c in range(0,254) - let d = s:pow(s:colortable[c][0]-r,2) + s:pow(s:colortable[c][1]-g,2) + s:pow(s:colortable[c][2]-b,2) - if d<smallest_distance - let smallest_distance = d - let best_match = c - endif - endfor - return best_match -endfunction - -function! 
s:SetNamedColor(clr,name) - let group = 'cssColor'.substitute(a:clr,'^#','','') - exe 'syn keyword '.group.' '.a:name.' contained' - exe 'syn cluster cssColors add='.group - if has('gui_running') - exe 'hi '.group.' guifg='.s:FGforBG(a:clr) - exe 'hi '.group.' guibg='.a:clr - elseif &t_Co == 256 - exe 'hi '.group.' ctermfg='.s:Rgb2xterm(s:FGforBG(a:clr)) - exe 'hi '.group.' ctermbg='.s:Rgb2xterm(a:clr) - endif - return 23 -endfunction - -function! s:PreviewCSSColorInLine(where) - " TODO use cssColor matchdata - let foundcolor = matchstr( getline(a:where), '#[0-9A-Fa-f]\{3,6\}\>' ) - let color = '' - if foundcolor != '' - if foundcolor =~ '#\x\{6}$' - let color = foundcolor - elseif foundcolor =~ '#\x\{3}$' - let color = substitute(foundcolor, '\(\x\)\(\x\)\(\x\)', '\1\1\2\2\3\3', '') - else - let color = '' - endif - if color != '' - return s:SetMatcher(color,foundcolor) - else - return 0 - endif - else - return 0 - endif -endfunction - -if has("gui_running") || &t_Co==256 - " HACK modify cssDefinition to add @cssColors to its contains - redir => s:olddef - silent! 
syn list cssDefinition - redir END - if s:olddef != '' - let s:b = strridx(s:olddef,'matchgroup') - if s:b != -1 - exe 'syn region cssDefinition '.strpart(s:olddef,s:b).',@cssColors' - endif - endif - - " w3c Colors - let i = s:SetNamedColor('#800000', 'maroon') - let i = s:SetNamedColor('#ff0000', 'red') - let i = s:SetNamedColor('#ffA500', 'orange') - let i = s:SetNamedColor('#ffff00', 'yellow') - let i = s:SetNamedColor('#808000', 'olive') - let i = s:SetNamedColor('#800080', 'purple') - let i = s:SetNamedColor('#ff00ff', 'fuchsia') - let i = s:SetNamedColor('#ffffff', 'white') - let i = s:SetNamedColor('#00ff00', 'lime') - let i = s:SetNamedColor('#008000', 'green') - let i = s:SetNamedColor('#000080', 'navy') - let i = s:SetNamedColor('#0000ff', 'blue') - let i = s:SetNamedColor('#00ffff', 'aqua') - let i = s:SetNamedColor('#008080', 'teal') - let i = s:SetNamedColor('#000000', 'black') - let i = s:SetNamedColor('#c0c0c0', 'silver') - let i = s:SetNamedColor('#808080', 'gray') - - " extra colors - let i = s:SetNamedColor('#F0F8FF','AliceBlue') - let i = s:SetNamedColor('#FAEBD7','AntiqueWhite') - let i = s:SetNamedColor('#7FFFD4','Aquamarine') - let i = s:SetNamedColor('#F0FFFF','Azure') - let i = s:SetNamedColor('#F5F5DC','Beige') - let i = s:SetNamedColor('#FFE4C4','Bisque') - let i = s:SetNamedColor('#FFEBCD','BlanchedAlmond') - let i = s:SetNamedColor('#8A2BE2','BlueViolet') - let i = s:SetNamedColor('#A52A2A','Brown') - let i = s:SetNamedColor('#DEB887','BurlyWood') - let i = s:SetNamedColor('#5F9EA0','CadetBlue') - let i = s:SetNamedColor('#7FFF00','Chartreuse') - let i = s:SetNamedColor('#D2691E','Chocolate') - let i = s:SetNamedColor('#FF7F50','Coral') - let i = s:SetNamedColor('#6495ED','CornflowerBlue') - let i = s:SetNamedColor('#FFF8DC','Cornsilk') - let i = s:SetNamedColor('#DC143C','Crimson') - let i = s:SetNamedColor('#00FFFF','Cyan') - let i = s:SetNamedColor('#00008B','DarkBlue') - let i = s:SetNamedColor('#008B8B','DarkCyan') - let i = 
s:SetNamedColor('#B8860B','DarkGoldenRod') - let i = s:SetNamedColor('#A9A9A9','DarkGray') - let i = s:SetNamedColor('#A9A9A9','DarkGrey') - let i = s:SetNamedColor('#006400','DarkGreen') - let i = s:SetNamedColor('#BDB76B','DarkKhaki') - let i = s:SetNamedColor('#8B008B','DarkMagenta') - let i = s:SetNamedColor('#556B2F','DarkOliveGreen') - let i = s:SetNamedColor('#FF8C00','Darkorange') - let i = s:SetNamedColor('#9932CC','DarkOrchid') - let i = s:SetNamedColor('#8B0000','DarkRed') - let i = s:SetNamedColor('#E9967A','DarkSalmon') - let i = s:SetNamedColor('#8FBC8F','DarkSeaGreen') - let i = s:SetNamedColor('#483D8B','DarkSlateBlue') - let i = s:SetNamedColor('#2F4F4F','DarkSlateGray') - let i = s:SetNamedColor('#2F4F4F','DarkSlateGrey') - let i = s:SetNamedColor('#00CED1','DarkTurquoise') - let i = s:SetNamedColor('#9400D3','DarkViolet') - let i = s:SetNamedColor('#FF1493','DeepPink') - let i = s:SetNamedColor('#00BFFF','DeepSkyBlue') - let i = s:SetNamedColor('#696969','DimGray') - let i = s:SetNamedColor('#696969','DimGrey') - let i = s:SetNamedColor('#1E90FF','DodgerBlue') - let i = s:SetNamedColor('#B22222','FireBrick') - let i = s:SetNamedColor('#FFFAF0','FloralWhite') - let i = s:SetNamedColor('#228B22','ForestGreen') - let i = s:SetNamedColor('#DCDCDC','Gainsboro') - let i = s:SetNamedColor('#F8F8FF','GhostWhite') - let i = s:SetNamedColor('#FFD700','Gold') - let i = s:SetNamedColor('#DAA520','GoldenRod') - let i = s:SetNamedColor('#808080','Grey') - let i = s:SetNamedColor('#ADFF2F','GreenYellow') - let i = s:SetNamedColor('#F0FFF0','HoneyDew') - let i = s:SetNamedColor('#FF69B4','HotPink') - let i = s:SetNamedColor('#CD5C5C','IndianRed') - let i = s:SetNamedColor('#4B0082','Indigo') - let i = s:SetNamedColor('#FFFFF0','Ivory') - let i = s:SetNamedColor('#F0E68C','Khaki') - let i = s:SetNamedColor('#E6E6FA','Lavender') - let i = s:SetNamedColor('#FFF0F5','LavenderBlush') - let i = s:SetNamedColor('#7CFC00','LawnGreen') - let i = 
s:SetNamedColor('#FFFACD','LemonChiffon') - let i = s:SetNamedColor('#ADD8E6','LightBlue') - let i = s:SetNamedColor('#F08080','LightCoral') - let i = s:SetNamedColor('#E0FFFF','LightCyan') - let i = s:SetNamedColor('#FAFAD2','LightGoldenRodYellow') - let i = s:SetNamedColor('#D3D3D3','LightGray') - let i = s:SetNamedColor('#D3D3D3','LightGrey') - let i = s:SetNamedColor('#90EE90','LightGreen') - let i = s:SetNamedColor('#FFB6C1','LightPink') - let i = s:SetNamedColor('#FFA07A','LightSalmon') - let i = s:SetNamedColor('#20B2AA','LightSeaGreen') - let i = s:SetNamedColor('#87CEFA','LightSkyBlue') - let i = s:SetNamedColor('#778899','LightSlateGray') - let i = s:SetNamedColor('#778899','LightSlateGrey') - let i = s:SetNamedColor('#B0C4DE','LightSteelBlue') - let i = s:SetNamedColor('#FFFFE0','LightYellow') - let i = s:SetNamedColor('#32CD32','LimeGreen') - let i = s:SetNamedColor('#FAF0E6','Linen') - let i = s:SetNamedColor('#FF00FF','Magenta') - let i = s:SetNamedColor('#66CDAA','MediumAquaMarine') - let i = s:SetNamedColor('#0000CD','MediumBlue') - let i = s:SetNamedColor('#BA55D3','MediumOrchid') - let i = s:SetNamedColor('#9370D8','MediumPurple') - let i = s:SetNamedColor('#3CB371','MediumSeaGreen') - let i = s:SetNamedColor('#7B68EE','MediumSlateBlue') - let i = s:SetNamedColor('#00FA9A','MediumSpringGreen') - let i = s:SetNamedColor('#48D1CC','MediumTurquoise') - let i = s:SetNamedColor('#C71585','MediumVioletRed') - let i = s:SetNamedColor('#191970','MidnightBlue') - let i = s:SetNamedColor('#F5FFFA','MintCream') - let i = s:SetNamedColor('#FFE4E1','MistyRose') - let i = s:SetNamedColor('#FFE4B5','Moccasin') - let i = s:SetNamedColor('#FFDEAD','NavajoWhite') - let i = s:SetNamedColor('#FDF5E6','OldLace') - let i = s:SetNamedColor('#6B8E23','OliveDrab') - let i = s:SetNamedColor('#FF4500','OrangeRed') - let i = s:SetNamedColor('#DA70D6','Orchid') - let i = s:SetNamedColor('#EEE8AA','PaleGoldenRod') - let i = s:SetNamedColor('#98FB98','PaleGreen') - let i = 
s:SetNamedColor('#AFEEEE','PaleTurquoise') - let i = s:SetNamedColor('#D87093','PaleVioletRed') - let i = s:SetNamedColor('#FFEFD5','PapayaWhip') - let i = s:SetNamedColor('#FFDAB9','PeachPuff') - let i = s:SetNamedColor('#CD853F','Peru') - let i = s:SetNamedColor('#FFC0CB','Pink') - let i = s:SetNamedColor('#DDA0DD','Plum') - let i = s:SetNamedColor('#B0E0E6','PowderBlue') - let i = s:SetNamedColor('#BC8F8F','RosyBrown') - let i = s:SetNamedColor('#4169E1','RoyalBlue') - let i = s:SetNamedColor('#8B4513','SaddleBrown') - let i = s:SetNamedColor('#FA8072','Salmon') - let i = s:SetNamedColor('#F4A460','SandyBrown') - let i = s:SetNamedColor('#2E8B57','SeaGreen') - let i = s:SetNamedColor('#FFF5EE','SeaShell') - let i = s:SetNamedColor('#A0522D','Sienna') - let i = s:SetNamedColor('#87CEEB','SkyBlue') - let i = s:SetNamedColor('#6A5ACD','SlateBlue') - let i = s:SetNamedColor('#708090','SlateGray') - let i = s:SetNamedColor('#708090','SlateGrey') - let i = s:SetNamedColor('#FFFAFA','Snow') - let i = s:SetNamedColor('#00FF7F','SpringGreen') - let i = s:SetNamedColor('#4682B4','SteelBlue') - let i = s:SetNamedColor('#D2B48C','Tan') - let i = s:SetNamedColor('#D8BFD8','Thistle') - let i = s:SetNamedColor('#FF6347','Tomato') - let i = s:SetNamedColor('#40E0D0','Turquoise') - let i = s:SetNamedColor('#EE82EE','Violet') - let i = s:SetNamedColor('#F5DEB3','Wheat') - let i = s:SetNamedColor('#F5F5F5','WhiteSmoke') - let i = s:SetNamedColor('#9ACD32','YellowGreen') - - - - let i = 1 - while i <= line("$") - call s:PreviewCSSColorInLine(i) - let i = i+1 - endwhile - unlet i - - autocmd CursorHold * silent call s:PreviewCSSColorInLine('.') - autocmd CursorHoldI * silent call s:PreviewCSSColorInLine('.') - set ut=100 -endif " has("gui_running") diff --git a/.vimrc b/.vimrc index 7f69e9c..22656b4 100644 --- a/.vimrc +++ b/.vimrc @@ -1,574 +1,605 @@ " http://github.com/mitechie/pyvim " ================================================== " Dependencies " 
================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,q - reformat text paragraph " ,s - toggle spellcheck " ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " ,t - collapse/fold html tag " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... 
- post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ " bootstrap the pathogen part of the config right away filetype off call pathogen#runtime_append_all_bundles() call pathogen#helptags() " Highlight end of line whitespace. " match WhitespaceEOL /\s\+$/ au InsertEnter * match WhitespaceEOL /\s\+$/ au InsertLeave * match WhitespaceEOL /\s\+$/ " make sure our whitespace matching is setup before we do colorscheme tricks autocmd ColorScheme * highlight WhitespaceEOL ctermbg=red guibg=red " now proceed as usual syntax on " syntax highlighing filetype on " try to detect filetypes filetype plugin indent on " enable loading indent file for filetype " In GVIM if has("gui_running") - set guifont=Liberation\ Mono\ 8" use this font + set guifont=Liberation\ Mono\ 7" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide - colorscheme underwater-mod colorscheme void + colorscheme lucius + colorscheme diablo3 " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme lucius set t_Co=256 endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=79 " Try this out to see how textwidth helps set ch=1 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set 
autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... set statusline=%F%m%r%h%w\ [FORMAT=%{&ff}]\ [TYPE=%Y]\ [ASCII=\%03.3b]\ [HEX=\%02.2B]\ [POS=%04l,%04v][%p%%]\ [LEN=%L] " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Config Specific Settings " ================================================== " If we're running in vimdiff then tweak out settings a bit if &diff set nospell endif " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m -map 
<silent> <leader>m :make<CR> +map <silent> <leader>m :make<CR>:cw<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " setup a custom dict for spelling " zg = add word to dict " zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> nmap <leader>l :lopen<CR> nmap <leader>ll :lclose<CR> nmap <leader>ln :lN<CR> nmap <leader>lp :lN<CR> " for when we forget to use sudo to open/edit a file cmap w!! w !sudo tee % >/dev/null nnoremap <leader>q gqap " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " Hints for other movements " <c-w><c-r> rotate window to next spot " <c-w><c-x> swap window with current one " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode -function! ScreenMovement(movement) - if &wrap - return "g" . 
a:movement - else - return a:movement - endif -endfunction -onoremap <silent> <expr> j ScreenMovement("j") -onoremap <silent> <expr> k ScreenMovement("k") -onoremap <silent> <expr> 0 ScreenMovement("0") -onoremap <silent> <expr> ^ ScreenMovement("^") -onoremap <silent> <expr> $ ScreenMovement("$") -nnoremap <silent> <expr> j ScreenMovement("j") -nnoremap <silent> <expr> k ScreenMovement("k") -nnoremap <silent> <expr> 0 ScreenMovement("0") -nnoremap <silent> <expr> ^ ScreenMovement("^") -nnoremap <silent> <expr> $ ScreenMovement("$") - +" function! ScreenMovement(movement) +" if &wrap +" return "g" . a:movement +" else +" return a:movement +" endif +" endfunction +" onoremap <silent> <expr> j ScreenMovement("j") +" onoremap <silent> <expr> k ScreenMovement("k") +" onoremap <silent> <expr> 0 ScreenMovement("0") +" onoremap <silent> <expr> ^ ScreenMovement("^") +" onoremap <silent> <expr> $ ScreenMovement("$") +" nnoremap <silent> <expr> j ScreenMovement("j") +" nnoremap <silent> <expr> k ScreenMovement("k") +" nnoremap <silent> <expr> 0 ScreenMovement("0") +" nnoremap <silent> <expr> ^ ScreenMovement("^") +" nnoremap <silent> <expr> $ ScreenMovement("$") +" " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching set smartcase " if searching and search contains upper case, make case sensitive search nmap <silent> <C-N> :silent noh<CR> " Search for potentially strange non-ascii characters map <leader>u :match Error /[\x7f-\xff]/<CR> " Clean all end of line extra whitespace with ,S " Credit: voyeg3r https://github.com/mitechie/pyvim/issues/#issue/1 " deletes excess space but maintains the list of jumps unchanged " for more details see: h keepjumps fun! 
CleanExtraSpaces() let save_cursor = getpos(".") let old_query = getreg('/') :%s/\s\+$//e call setpos('.', save_cursor) call setreg('/', old_query) endfun map <silent><leader>S <esc>:keepjumps call CleanExtraSpaces()<cr> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t set completeopt+=menuone,longest let g:SuperTabDefaultCompletionType = "context" let g:SuperTabContextDefaultCompletionType = "<c-n>" let g:SuperTabLongestHighlight = 1 let g:SuperTabMidWordCompletion = 1 " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufReadPost quickfix map <buffer> <silent> <CR> :.cc <CR> :ccl " au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py compiler nose au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % +au BufRead *.js set errorformat=%-P%f, + \%-G/*jslint\ %.%#*/, + \%*[\ ]%n\ %l\\,%c:\ %m, + \%-G\ \ \ \ %.%#, + \%-GNo\ errors\ found., + \%-Q + +" ================================================== +" CSS +" ================================================== + +au BufRead *.css set makeprg=csslint\ % +au BufRead *.css set errorformat=%A%f:,%C%n:\ warning\ at\ line\ %l\\,\ col\ %c,%C%m,%C%.%# +" au BufRead *.css set errorformat=%-Gcsslint:\ There%.%#,%A%f:,%C%n:\ %t%\\w%\\+\ at\ line %l\,\ col\ %c,%Z%m,%A%f:,%C%n:\ %t%\\w%\\+\ at\ line %l\,\ col\ %c,%C%m,%-Z%.%#,%-G%.%# + + +" format +" bookie.css: +" 1: warning +" Too many font-size declarations (13), abstraction needed. +" bookie.css: 1: warning Too many font-size declarations (13), abstraction needed. +" bookie.css: 2: warning at line 2, col 2 Rule is empty. 
BODY { + " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " enable html tag folding with ,f nnoremap <leader>f Vatzf " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " mako.vim " http://www.vim.org/scripts/script.php?script_id=2663 " syntax support for mako code " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>a :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 -autocmd FileType python map <buffer> <leader>M :call Pep8()<CR> +autocmd FileType python map <buffer> <leader>M :call Pep8()<CR>:cw<CR> " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " bundle/snipmate/after/plugin/snipmate 
ino <silent> <leader>, <c-r>=TriggerSnippet()<cr> snor <silent> <leader>, <esc>i<right><c-r>=TriggerSnippet()<cr> ino <silent> <leader>\< <c-r>=BackwardsSnippet()<cr> snor <silent> <leader>\< <esc>i<right><c-r>=BackwardsSnippet()<cr> ino <silent> <leader>n <c-r>=ShowAvailableSnips()<cr> " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pyflakes " http://www.vim.org/scripts/script.php?script_id=3161 " default config for underlines of syntax errors in gvim let g:pyflakes_use_quickfix = 0 " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) +" :Gist -l " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter "source ~/.vim/twitvim.vim " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope -source /usr/local/ropevim.vim +source /usr/share/vim/vimfiles/plugin/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 +" Tagbar +" https://github.com/majutsushi/tagbar/ +" Show ctags info in the sidebar +nmap <silent> <leader>l :TagbarToggle<CR> + + " function! CustomCodeAssistInsertMode() " call RopeCodeAssistInsertMode() " if pumvisible() " return "\<C-L>\<Down>" " else " return '' " endif " endfunction " " function! 
TabWrapperComplete() " let cursyn = synID(line('.'), col('.') - 1, 1) " if pumvisible() " return "\<C-Y>" " endif " if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 " return "\<Tab>" " else " return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" " endif " endfunction " " inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " vim-makegreen && vim-nosecompiler " unit testing python code in during editing " I use files in the same dir test_xxxx.* " if we're already on the test_xxx.py file, just rerun current test file " function MakeArgs() " if empty(matchstr(expand('%'), 'test_')) " " if no test_ on the filename, then add it to run tests " let make_args = 'test_%' " else " let make_args = '%' " endif -" +" " :call MakeGreen(make_args) " endfunction -" +" " autocmd FileType python map <buffer> <leader>t :call MakeArgs()<CR> -" +" " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>) " javascript folding function! JavaScriptFold() setl foldmethod=syntax setl foldlevelstart=1 syn region foldBraces start=/{/ end=/}/ transparent fold keepend extend function! 
FoldText() return substitute(getline(v:foldstart), '{.*', '{...}', '') endfunction setl foldtext=FoldText() endfunction diff --git a/bundle_list b/bundle_list index 8cb0636..9879a4b 100644 --- a/bundle_list +++ b/bundle_list @@ -1,20 +1,24 @@ http://github.com/sukima/xmledit.git http://github.com/vim-scripts/mako.vim.git http://github.com/vim-scripts/LustyJuggler.git http://github.com/tomtom/tcomment_vim.git http://github.com/vim-scripts/pep8--Driessen.git http://github.com/vim-scripts/jpythonfold.vim.git http://github.com/vim-scripts/pydoc.vim.git http://github.com/tsaleh/vim-supertab.git http://github.com/msanders/snipmate.vim.git http://github.com/vim-scripts/surround.vim.git http://github.com/vim-scripts/Gist.vim.git http://github.com/scrooloose/nerdtree.git #http://github.com/vim-scripts/pylint.vim.git http://github.com/vim-scripts/nginx.vim.git git://github.com/mitechie/pyflakes-pathogen.git #http://github.com/kevinw/pyflakes-vim.git http://github.com/tomtom/tlib_vim.git https://github.com/tpope/vim-fugitive.git https://github.com/reinh/vim-makegreen.git https://github.com/olethanh/Vim-nosecompiler.git +https://github.com/ap/vim-css-color.git +#https://github.com/vim-scripts/taglist.vim.git +https://github.com/Bogdanp/quicksilver.vim.git +git://github.com/majutsushi/tagbar diff --git a/custom_snippets/mail-custom.snippets b/custom_snippets/mail-custom.snippets new file mode 100644 index 0000000..25c5d56 --- /dev/null +++ b/custom_snippets/mail-custom.snippets @@ -0,0 +1,41 @@ +snippet bmarkuser + Welcome to Bookie on https://bmark.us + + Account details: + username: ${1|username} + password: ${2|password} + api key: ${3|api} + api url: https://bmark.us/${4|username} + + We also encourage you to sign up for our mailing list at: + https://groups.google.com/forum/#!forum/bookie_bookmarks + + and our Twitter account: + http://twitter.com/BookieBmarks + + We post weekly updates and encourage discussion of feature ideas via those + channels. 
+ + All calls to the site are over https. Please don't share your api key with + anyone. It's used to verify you're you via the Chrome plugin and other + automated calls. + + The database is based up twice a day and copied to S3 in case of disaster + or my own stupidity. + + The readable parsing of your bookmarks takes place via a cron job at 4am + very morning. If you've recently imported you should see the readable + content (if available) the next morning. + + We currently support importing from Google Bookmarks and Delicious exports. + Importing from a Chrome or Firefox export does work, however it reads the + folder names in as tags. So be aware of that. + + Documentation on getting started is at: + http://docs.bmark.us + + The Chrome latest development Chrome extension is at: + http://docs.bmark.us/bookie_chrome.crx + + If you have any issues feel free to join #bookie on freenode.net or report + the issue or idea on http://github.com/mitechie/Bookie/issues. diff --git a/custom_snippets/python-custom.snippets b/custom_snippets/python-custom.snippets index d230964..7dea522 100644 --- a/custom_snippets/python-custom.snippets +++ b/custom_snippets/python-custom.snippets @@ -1,27 +1,30 @@ # dump out via log.debug snippet dmp log.debug(${1|somevar}) snippet testfile """${1|Dockblock}""" from unittest import TestCase class Test${2|class}(TestCase): """${3|TestGroup}""" def setUp(self): """Setup Tests""" pass def tearDown(self): """Tear down each test""" pass snippet tst def test_${1|method}(self): """${2|description}""" assert ${3|Something}, "${4|Output}" snippet ass assert ${1|condition}, "${2|explanation}" + +snippet pdb + import vimpdb; vimpdb.set_trace()
mitechie/pyvim
4ee62f783a814f2db59b3cf225fa1410e065adad
Correct url for makegreen fork
diff --git a/bundle_list b/bundle_list index 9fa8f9e..c56959b 100644 --- a/bundle_list +++ b/bundle_list @@ -1,20 +1,20 @@ http://github.com/sukima/xmledit.git http://github.com/vim-scripts/mako.vim.git http://github.com/vim-scripts/LustyJuggler.git http://github.com/tomtom/tcomment_vim.git http://github.com/vim-scripts/pep8--Driessen.git http://github.com/vim-scripts/jpythonfold.vim.git http://github.com/vim-scripts/pydoc.vim.git http://github.com/tsaleh/vim-supertab.git http://github.com/msanders/snipmate.vim.git http://github.com/vim-scripts/surround.vim.git http://github.com/vim-scripts/Gist.vim.git http://github.com/scrooloose/nerdtree.git #http://github.com/vim-scripts/pylint.vim.git http://github.com/vim-scripts/nginx.vim.git git://github.com/mitechie/pyflakes-pathogen.git #http://github.com/kevinw/pyflakes-vim.git http://github.com/tomtom/tlib_vim.git https://github.com/tpope/vim-fugitive.git -https://[email protected]/mitechie/vim-makegreen.git +https://github.com/mitechie/vim-makegreen.git https://github.com/olethanh/Vim-nosecompiler.git
mitechie/pyvim
88434929498f4928714b516e494d80bb21dcaaea
pyflakes, color, html folding
diff --git a/.vim/colors/chance-of-storm.vim b/.vim/colors/chance-of-storm.vim new file mode 100644 index 0000000..f71d871 --- /dev/null +++ b/.vim/colors/chance-of-storm.vim @@ -0,0 +1,141 @@ +""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" +" Author: Mario Gutierrez ([email protected]) +" Last Change: Dececember 1, 2010 +" Version: 0.1 +""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +set background=dark + +hi clear + +if exists("syntax_on") + syntax reset +endif + +let colors_name = "chance-of-storm" + + +"""""""""" General + +hi Normal guifg=#c8d7e6 guibg=#14181c gui=none + +" Vim >= 7.0 +if version >= 700 + hi CursorLine guibg=#23292f + hi MatchParen guifg=magenta guibg=bg gui=bold + hi Pmenu guifg=#dfeff6 guibg=#1E415E + hi PmenuSel guifg=#dfeff6 guibg=#2D7889 + + hi IncSearch guifg=bg guibg=#AF81F4 gui=bold + hi Search guifg=bg guibg=#AF81F4 gui=none +endif + +hi Cursor guifg=NONE guibg=#607080 gui=none +hi Folded guifg=#68CEE8 guibg=#1A3951 gui=none +hi FoldColumn guifg=#1E415E guibg=#1A3951 gui=none +hi LineNr guifg=#2e363d guibg=bg gui=none +" e.g. 
tildes at the end of file +hi NonText guifg=#2e363d guibg=bg gui=none +hi StatusLine guifg=#f98fcc guibg=#000000 gui=none +hi StatusLineNC guifg=#69839a guibg=#0d0f11 gui=none +hi TabLine guifg=#555555 guibg=#dddddd gui=none +hi TabLineFill guifg=fg guibg=#dddddd gui=none +hi TabLineSel guifg=#101010 guibg=#b0b0b0 gui=none +hi Title guifg=#ef7760 guibg=bg gui=none +hi VertSplit guifg=#0d0f11 guibg=#0d0f11 gui=none + " Selected text color +hi Visual guifg=#dfeff6 guibg=#24557A gui=none + + +"""""""""" Syntax highlighting + +hi Comment guifg=#69839a guibg=bg gui=italic +hi Constant guifg=#96defa gui=none +hi Function guifg=#9e7cd7 gui=none +hi Identifier guifg=#8ac6f2 gui=none +hi Ignore guifg=bg guibg=bg gui=none +hi Keyword guifg=#8ac6f2 gui=none +hi Number guifg=#dfe1b7 gui=none +hi PreProc guifg=#f99d71 gui=none +hi Special guifg=lightmagenta gui=none +hi Statement guifg=#7cd5d7 gui=none +hi String guifg=#89e14b gui=italic +hi Todo guifg=#ADED80 guibg=bg gui=bold +hi Type guifg=#489fd7 gui=none +hi Underlined guifg=#8ac6f2 gui=underline + + +""""""""""" ERB + +hi link erubyDelimiter PreProc + + +""""""""""" HAML + +hi link hamlAttributes htmlArg +hi link hamlTag htmlTag +hi link hamlTagName htmlTagName +hi link hamlIdChar hamlId +hi link hamlClassChar hamlClass + + +""""""""""" HELP + +hi link helpSectionDelim NonText +hi link helpExample Statement + + +""""""""""" HTML + +hi link htmlTag Statement +hi link htmlEndTag Statement +hi link htmlTagName Statement + + +"""""""""" JavaScript + +hi link javaScriptFunction Statement +hi link javaScriptFuncName Function +hi link javaScriptLabel PreProc + + +"""""""""" MARKDOWN (tpope's vim-markdown) + +hi link markdownCodeBlock Statement +hi link markdownCode Statement +hi link markdownCodeDelimiter Statement +hi link markdownHeadingDelimiter Title +hi markdownLinkText guifg=#489fd7 gui=underline +hi markdownUrl guifg=#69839a guibg=bg gui=none +hi link markdownLinkTextDelimiter markdownUrl +hi link markdownLinkDelimiter 
markdownUrl + + +""""""""""" NERDTree + +hi link treePart NonText +hi link treePartFile treePart +hi link treeDirSlash treePart +hi link treeDir Statement +hi link treeClosable PreProc +hi link treeOpenable treeClosable +hi link treeUp treeClosable +hi treeFlag guifg=#3e71a1 guibg=bg gui=none +hi link treeHelp Comment +hi link treeLink Type +hi link treeExecFile Type + + +"""""""""" Ruby + +hi rubyAccess guifg=#ef7760 guibg=bg gui=italic +hi rubyInterpolation guifg=#c8e1b7 guibg=bg +hi link rubyInterpolationDelimiter rubyInterpolation +hi link rubyStringDelimiter String + + +""""""""""" XML + +hi link xmlTag htmlTag +hi link xmlEndTag htmlEndTag +hi link xmlTagName htmlTagName diff --git a/.vim/colors/void.vim b/.vim/colors/void.vim new file mode 100644 index 0000000..f2a8ebc --- /dev/null +++ b/.vim/colors/void.vim @@ -0,0 +1,107 @@ +" Vim color file +" Maintainer: Andrew Lyon <[email protected]> +" Last Change: $Date: 2010-11-20 12:27:30PST $ +" Version: 1.1 + +" Note that this color scheme is loosely based off of desert.vim (Hans Fugal +" <[email protected]>) mixed with some of slate.vim (Ralph Amissah +" <[email protected]>) but with much of my own modification. 
+ +set background=dark +if version > 580 + " no guarantees for version 5.8 and below, but this makes it stop + " complaining + hi clear + if exists("syntax_on") + syntax reset + endif +endif +let g:colors_name="void" + +hi Normal guifg=#f5f5f5 guibg=grey15 + +" highlight groups +hi Cursor guibg=khaki guifg=slategrey +"hi CursorIM +"hi Directory +"hi DiffAdd +"hi DiffChange +"hi DiffDelete +"hi DiffText +"hi ErrorMsg +hi VertSplit guibg=black guifg=black gui=none +hi Folded guibg=grey30 guifg=gold +hi FoldColumn guibg=grey30 guifg=tan +hi IncSearch guifg=slategrey guibg=khaki +"hi LineNr +hi ModeMsg guifg=goldenrod +hi MoreMsg guifg=SeaGreen +hi NonText guifg=LightBlue guibg=grey30 +hi Question guifg=springgreen +hi Search guibg=peru guifg=wheat +hi SpecialKey guifg=yellowgreen +hi StatusLine guibg=black guifg=#cccccc gui=none +hi StatusLineNC guibg=black guifg=grey40 gui=none +hi Title guifg=indianred +hi Visual gui=none guifg=khaki guibg=olivedrab +"hi VisualNOS +hi WarningMsg guifg=salmon +"hi WildMenu +"hi Menu +"hi Scrollbar +"hi Tooltip + +" syntax highlighting groups +hi Comment guifg=grey50 +hi Constant guifg=#e09085 +hi Identifier guifg=#f5f5f5 +hi Statement guifg=darkkhaki " #bbccee +hi PreProc guifg=#c8e0b0 +hi Type guifg=darkkhaki +hi Special guifg=#bbccee cterm=bold term=bold +hi Operator guifg=navajowhite cterm=NONE +hi Function guifg=#c8e0b0 cterm=NONE +"hi Underlined +hi Ignore guifg=grey40 +"hi Error +hi Todo guifg=orangered guibg=yellow2 +hi Todo guifg=orange guibg=gray40 + +" color terminal definitions +hi SpecialKey ctermfg=darkgreen +hi NonText guibg=grey15 cterm=bold ctermfg=darkblue +hi Directory ctermfg=brown guifg=#ddbb66 +hi ErrorMsg cterm=bold ctermfg=7 ctermbg=1 +hi IncSearch cterm=NONE ctermfg=yellow ctermbg=green +hi Search cterm=NONE ctermfg=grey ctermbg=blue +hi MoreMsg ctermfg=darkgreen +hi ModeMsg cterm=NONE ctermfg=brown +hi LineNr guifg=grey50 ctermfg=3 +hi Question ctermfg=green +hi StatusLine cterm=bold,reverse +hi StatusLineNC 
cterm=reverse +hi VertSplit cterm=reverse +hi Title ctermfg=5 +hi Visual cterm=reverse +hi VisualNOS cterm=bold,underline +hi WarningMsg ctermfg=1 +hi WildMenu ctermfg=0 ctermbg=3 +hi Folded ctermfg=darkgrey ctermbg=NONE +hi FoldColumn ctermfg=darkgrey ctermbg=NONE +hi DiffAdd ctermbg=4 +hi DiffChange ctermbg=5 +hi DiffDelete cterm=bold ctermfg=4 ctermbg=6 +hi DiffText cterm=bold ctermbg=1 +hi Comment ctermfg=darkcyan +hi Constant ctermfg=brown +hi Special ctermfg=5 +hi Identifier ctermfg=6 +hi Statement ctermfg=3 +hi PreProc ctermfg=5 +hi Type ctermfg=2 +hi Underlined cterm=underline ctermfg=5 +hi Ignore cterm=bold ctermfg=7 +hi Ignore ctermfg=darkgrey +hi Error cterm=bold ctermfg=7 ctermbg=1 + + diff --git a/.vim/dict.add b/.vim/dict.add index 2d51cc9..41851b1 100644 --- a/.vim/dict.add +++ b/.vim/dict.add @@ -1,20 +1,22 @@ sqlalchemy SQLAlchemy sessionmaker Metadata metadata metadata config hostname app Ubuntu blog unicode plugin plugins ajax json hotalert hotalerts HotAlert SqlAlchemy +admin +popup diff --git a/.vimrc b/.vimrc index ddab5a8..3754d81 100644 --- a/.vimrc +++ b/.vimrc @@ -1,547 +1,548 @@ " http://github.com/mitechie/pyvim " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,q - reformat text paragraph " ,s - toggle spellcheck " ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " ,t - collapse/fold html tag " " Y - yank to the 
end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ " bootstrap the pathogen part of the config right away filetype off call pathogen#runtime_append_all_bundles() call pathogen#helptags() " Highlight end of line whitespace. 
" match WhitespaceEOL /\s\+$/ au InsertEnter * match WhitespaceEOL /\s\+$/ au InsertLeave * match WhitespaceEOL /\s\+$/ " make sure our whitespace matching is setup before we do colorscheme tricks autocmd ColorScheme * highlight WhitespaceEOL ctermbg=red guibg=red " now proceed as usual syntax on " syntax highlighing filetype on " try to detect filetypes filetype plugin indent on " enable loading indent file for filetype " In GVIM if has("gui_running") set guifont=Cousine\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme underwater-mod + colorscheme void " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme lucius set t_Co=256 endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=79 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set 
backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... set statusline=%F%m%r%h%w\ [FORMAT=%{&ff}]\ [TYPE=%Y]\ [ASCII=\%03.3b]\ [HEX=\%02.2B]\ [POS=%04l,%04v][%p%%]\ [LEN=%L] " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Config Specific Settings " ================================================== " If we're running in vimdiff then tweak out settings a bit if &diff set nospell endif " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap 
<leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " setup a custom dict for spelling " zg = add word to dict " zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " for when we forget to use sudo to open/edit a file cmap w!! w !sudo tee % >/dev/null nnoremap <leader>q gqap " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " Hints for other movements " <c-w><c-r> rotate window to next spot " <c-w><c-x> swap window with current one " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. 
set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching set smartcase " if searching and search contains upper case, make case sensitive search nmap <silent> <C-N> :silent noh<CR> " Clean all end of line extra whitespace with ,S " Credit: voyeg3r https://github.com/mitechie/pyvim/issues/#issue/1 " deletes excess space but maintains the list of jumps unchanged " for more details see: h keepjumps fun! CleanExtraSpaces() let save_cursor = getpos(".") let old_query = getreg('/') :%s/\s\+$//e call setpos('.', save_cursor) call setreg('/', old_query) endfun map <silent><leader>S <esc>:keepjumps call CleanExtraSpaces()<cr> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t set completeopt+=menuone,longest let g:SuperTabDefaultCompletionType = "context" let g:SuperTabContextDefaultCompletionType = "<c-n>" let g:SuperTabLongestHighlight = 1 let g:SuperTabMidWordCompletion = 1 " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! 
BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" | if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufReadPost quickfix map <buffer> <silent> <CR> :.cc <CR> :ccl au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> -" enable html tag folding with ,t +" enable html tag folding with ,f nnoremap <leader>f Vatzf " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " mako.vim " http://www.vim.org/scripts/script.php?script_id=2663 " syntax support for mako code " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - 
comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 autocmd FileType python map <buffer> <leader>M :call Pep8()<CR> " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " bundle/snipmate/after/plugin/snipmate ino <silent> <leader>, <c-r>=TriggerSnippet()<cr> snor <silent> <leader>, <esc>i<right><c-r>=TriggerSnippet()<cr> ino <silent> <leader>\< <c-r>=BackwardsSnippet()<cr> snor <silent> <leader>\< <esc>i<right><c-r>=BackwardsSnippet()<cr> ino <silent> <leader>n <c-r>=ShowAvailableSnips()<cr> " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pyflakes " http://www.vim.org/scripts/script.php?script_id=3161 " default config for underlines of syntax errors in gvim " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter "source ~/.vim/twitvim.vim " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 " function! 
CustomCodeAssistInsertMode() " call RopeCodeAssistInsertMode() " if pumvisible() " return "\<C-L>\<Down>" " else " return '' " endif " endfunction " " function! TabWrapperComplete() " let cursyn = synID(line('.'), col('.') - 1, 1) " if pumvisible() " return "\<C-Y>" " endif " if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 " return "\<Tab>" " else " return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" " endif " endfunction " " inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>) " javascript folding function! JavaScriptFold() setl foldmethod=syntax setl foldlevelstart=1 syn region foldBraces start=/{/ end=/}/ transparent fold keepend extend function! 
FoldText() return substitute(getline(v:foldstart), '{.*', '{...}', '') endfunction setl foldtext=FoldText() endfunction diff --git a/bundle_list b/bundle_list index 99ffdd2..aa2c0f4 100644 --- a/bundle_list +++ b/bundle_list @@ -1,14 +1,15 @@ http://github.com/sukima/xmledit.git http://github.com/vim-scripts/mako.vim.git http://github.com/vim-scripts/LustyJuggler.git http://github.com/vim-scripts/tComment.git http://github.com/vim-scripts/pep8--Driessen.git http://github.com/vim-scripts/jpythonfold.vim.git http://github.com/vim-scripts/pydoc.vim.git http://github.com/tsaleh/vim-supertab.git http://github.com/msanders/snipmate.vim.git http://github.com/vim-scripts/surround.vim.git http://github.com/vim-scripts/Gist.vim.git http://github.com/scrooloose/nerdtree.git -http://github.com/mitechie/pyflakes-pathogen.git #http://github.com/vim-scripts/pylint.vim.git +https://github.com/vim-scripts/nginx.vim +https://github.com/kevinw/pyflakes-vim
mitechie/pyvim
45e96b805b5a01ec7f23c5a4e928cde641ed437a
add new EOL WS fix per voyeg3r fixes #1
diff --git a/.vimrc b/.vimrc index 5382807..ddab5a8 100644 --- a/.vimrc +++ b/.vimrc @@ -1,538 +1,547 @@ " http://github.com/mitechie/pyvim " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,q - reformat text paragraph " ,s - toggle spellcheck " ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " ,t - collapse/fold html tag " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... 
- post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ " bootstrap the pathogen part of the config right away filetype off call pathogen#runtime_append_all_bundles() call pathogen#helptags() " Highlight end of line whitespace. " match WhitespaceEOL /\s\+$/ au InsertEnter * match WhitespaceEOL /\s\+$/ au InsertLeave * match WhitespaceEOL /\s\+$/ " make sure our whitespace matching is setup before we do colorscheme tricks autocmd ColorScheme * highlight WhitespaceEOL ctermbg=red guibg=red " now proceed as usual syntax on " syntax highlighing filetype on " try to detect filetypes filetype plugin indent on " enable loading indent file for filetype " In GVIM if has("gui_running") - set guifont=Liberation\ Mono\ 8" use this font + set guifont=Cousine\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme underwater-mod " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme lucius set t_Co=256 endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=79 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the 
cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... set statusline=%F%m%r%h%w\ [FORMAT=%{&ff}]\ [TYPE=%Y]\ [ASCII=\%03.3b]\ [HEX=\%02.2B]\ [POS=%04l,%04v][%p%%]\ [LEN=%L] " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Config Specific Settings " ================================================== " If we're running in vimdiff then tweak out settings a bit if &diff set nospell endif " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y 
yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " setup a custom dict for spelling " zg = add word to dict " zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " for when we forget to use sudo to open/edit a file cmap w!! w !sudo tee % >/dev/null nnoremap <leader>q gqap " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " Hints for other movements " <c-w><c-r> rotate window to next spot " <c-w><c-x> swap window with current one " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . 
a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching set smartcase " if searching and search contains upper case, make case sensitive search nmap <silent> <C-N> :silent noh<CR> " Clean all end of line extra whitespace with ,S -:nnoremap <silent><leader>S :let _s=@/<Bar>:%s/\s\+$//e<Bar>:let @/=_s<Bar>:nohl<CR> - +" Credit: voyeg3r https://github.com/mitechie/pyvim/issues/#issue/1 +" deletes excess space but maintains the list of jumps unchanged +" for more details see: h keepjumps +fun! CleanExtraSpaces() + let save_cursor = getpos(".") + let old_query = getreg('/') + :%s/\s\+$//e + call setpos('.', save_cursor) + call setreg('/', old_query) +endfun +map <silent><leader>S <esc>:keepjumps call CleanExtraSpaces()<cr> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t set completeopt+=menuone,longest let g:SuperTabDefaultCompletionType = "context" let g:SuperTabContextDefaultCompletionType = "<c-n>" let g:SuperTabLongestHighlight = 1 let g:SuperTabMidWordCompletion = 1 " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufReadPost quickfix map <buffer> <silent> <CR> :.cc <CR> :ccl au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " enable html tag folding with ,t nnoremap <leader>f Vatzf " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " mako.vim " http://www.vim.org/scripts/script.php?script_id=2663 " syntax support for mako code " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 autocmd FileType python map <buffer> <leader>M :call Pep8()<CR> " python folding 
jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " bundle/snipmate/after/plugin/snipmate ino <silent> <leader>, <c-r>=TriggerSnippet()<cr> snor <silent> <leader>, <esc>i<right><c-r>=TriggerSnippet()<cr> ino <silent> <leader>\< <c-r>=BackwardsSnippet()<cr> snor <silent> <leader>\< <esc>i<right><c-r>=BackwardsSnippet()<cr> ino <silent> <leader>n <c-r>=ShowAvailableSnips()<cr> " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pyflakes " http://www.vim.org/scripts/script.php?script_id=3161 " default config for underlines of syntax errors in gvim " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter "source ~/.vim/twitvim.vim " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 " function! CustomCodeAssistInsertMode() " call RopeCodeAssistInsertMode() " if pumvisible() " return "\<C-L>\<Down>" " else " return '' " endif " endfunction " " function! 
TabWrapperComplete() " let cursyn = synID(line('.'), col('.') - 1, 1) " if pumvisible() " return "\<C-Y>" " endif " if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 " return "\<Tab>" " else " return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" " endif " endfunction " " inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>) " javascript folding function! JavaScriptFold() setl foldmethod=syntax setl foldlevelstart=1 syn region foldBraces start=/{/ end=/}/ transparent fold keepend extend function! FoldText() return substitute(getline(v:foldstart), '{.*', '{...}', '') endfunction setl foldtext=FoldText() endfunction
mitechie/pyvim
8989055276127e91274ce647234c9e844d6f05ae
Removed pylint, conflicts pyflakes, added css.vim for css coloring
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index 86f8c50..c71ddc6 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,4 +1,5 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =2 +let g:netrw_dirhist_cnt =3 let g:netrw_dirhist_1='/home/rharding/configs/pyvim/bundle' let g:netrw_dirhist_2='/home/rharding/configs/pyvim/.vim.bak' +let g:netrw_dirhist_3='/home/rharding/src/qmail/qmail/qmail_app/public/css' diff --git a/.vim/after/syntax/css.vim b/.vim/after/syntax/css.vim new file mode 100644 index 0000000..09954a3 --- /dev/null +++ b/.vim/after/syntax/css.vim @@ -0,0 +1,318 @@ +" Language: Colored CSS Color Preview +" Maintainer: Niklas Hofer <[email protected]> +" URL: svn://lanpartei.de/vimrc/after/syntax/css.vim +" Last Change: 2008 Feb 12 +" Licence: No Warranties. Do whatever you want with this. But please tell me! +" Version: 0.6 + +function! s:FGforBG(bg) + " takes a 6hex color code and returns a matching color that is visible + let pure = substitute(a:bg,'^#','','') + let r = eval('0x'.pure[0].pure[1]) + let g = eval('0x'.pure[2].pure[3]) + let b = eval('0x'.pure[4].pure[5]) + if r*30 + g*59 + b*11 > 12000 + return '#000000' + else + return '#ffffff' + end +endfunction + +function! s:SetMatcher(clr,pat) + let group = 'cssColor'.substitute(a:clr,'^#','','') + redir => s:currentmatch + silent! exe 'syn list '.group + redir END + if s:currentmatch !~ a:pat.'\/' + exe 'syn match '.group.' /'.a:pat.'\>/ contained' + exe 'syn cluster cssColors add='.group + if has('gui_running') + exe 'hi '.group.' guifg='.s:FGforBG(a:clr) + exe 'hi '.group.' guibg='.a:clr + elseif &t_Co == 256 + exe 'hi '.group.' ctermfg='.s:Rgb2xterm(s:FGforBG(a:clr)) + exe 'hi '.group.' 
ctermbg='.s:Rgb2xterm(a:clr) + endif + return 1 + else + return 0 + endif +endfunction + +"" the 6 value iterations in the xterm color cube +let s:valuerange = [ 0x00, 0x5F, 0x87, 0xAF, 0xD7, 0xFF ] +" +"" 16 basic colors +let s:basic16 = [ [ 0x00, 0x00, 0x00 ], [ 0xCD, 0x00, 0x00 ], [ 0x00, 0xCD, 0x00 ], [ 0xCD, 0xCD, 0x00 ], [ 0x00, 0x00, 0xEE ], [ 0xCD, 0x00, 0xCD ], [ 0x00, 0xCD, 0xCD ], [ 0xE5, 0xE5, 0xE5 ], [ 0x7F, 0x7F, 0x7F ], [ 0xFF, 0x00, 0x00 ], [ 0x00, 0xFF, 0x00 ], [ 0xFF, 0xFF, 0x00 ], [ 0x5C, 0x5C, 0xFF ], [ 0xFF, 0x00, 0xFF ], [ 0x00, 0xFF, 0xFF ], [ 0xFF, 0xFF, 0xFF ] ] +: +function! s:Xterm2rgb(color) + " 16 basic colors + let r=0 + let g=0 + let b=0 + if a:color<16 + let r = s:basic16[a:color][0] + let g = s:basic16[a:color][1] + let b = s:basic16[a:color][2] + endif + + " color cube color + if a:color>=16 && a:color<=232 + let color=a:color-16 + let r = s:valuerange[(color/36)%6] + let g = s:valuerange[(color/6)%6] + let b = s:valuerange[color%6] + endif + + " gray tone + if a:color>=233 && a:color<=253 + let r=8+(a:color-232)*0x0a + let g=r + let b=r + endif + let rgb=[r,g,b] + return rgb +endfunction + +function! s:pow(x, n) + let x = a:x + for i in range(a:n-1) + let x = x*a:x + return x +endfunction + +let s:colortable=[] +for c in range(0, 254) + let color = s:Xterm2rgb(c) + call add(s:colortable, color) +endfor + +" selects the nearest xterm color for a rgb value like #FF0000 +function! s:Rgb2xterm(color) + let best_match=0 + let smallest_distance = 10000000000 + let r = eval('0x'.a:color[1].a:color[2]) + let g = eval('0x'.a:color[3].a:color[4]) + let b = eval('0x'.a:color[5].a:color[6]) + for c in range(0,254) + let d = s:pow(s:colortable[c][0]-r,2) + s:pow(s:colortable[c][1]-g,2) + s:pow(s:colortable[c][2]-b,2) + if d<smallest_distance + let smallest_distance = d + let best_match = c + endif + endfor + return best_match +endfunction + +function! 
s:SetNamedColor(clr,name) + let group = 'cssColor'.substitute(a:clr,'^#','','') + exe 'syn keyword '.group.' '.a:name.' contained' + exe 'syn cluster cssColors add='.group + if has('gui_running') + exe 'hi '.group.' guifg='.s:FGforBG(a:clr) + exe 'hi '.group.' guibg='.a:clr + elseif &t_Co == 256 + exe 'hi '.group.' ctermfg='.s:Rgb2xterm(s:FGforBG(a:clr)) + exe 'hi '.group.' ctermbg='.s:Rgb2xterm(a:clr) + endif + return 23 +endfunction + +function! s:PreviewCSSColorInLine(where) + " TODO use cssColor matchdata + let foundcolor = matchstr( getline(a:where), '#[0-9A-Fa-f]\{3,6\}\>' ) + let color = '' + if foundcolor != '' + if foundcolor =~ '#\x\{6}$' + let color = foundcolor + elseif foundcolor =~ '#\x\{3}$' + let color = substitute(foundcolor, '\(\x\)\(\x\)\(\x\)', '\1\1\2\2\3\3', '') + else + let color = '' + endif + if color != '' + return s:SetMatcher(color,foundcolor) + else + return 0 + endif + else + return 0 + endif +endfunction + +if has("gui_running") || &t_Co==256 + " HACK modify cssDefinition to add @cssColors to its contains + redir => s:olddef + silent! 
syn list cssDefinition + redir END + if s:olddef != '' + let s:b = strridx(s:olddef,'matchgroup') + if s:b != -1 + exe 'syn region cssDefinition '.strpart(s:olddef,s:b).',@cssColors' + endif + endif + + " w3c Colors + let i = s:SetNamedColor('#800000', 'maroon') + let i = s:SetNamedColor('#ff0000', 'red') + let i = s:SetNamedColor('#ffA500', 'orange') + let i = s:SetNamedColor('#ffff00', 'yellow') + let i = s:SetNamedColor('#808000', 'olive') + let i = s:SetNamedColor('#800080', 'purple') + let i = s:SetNamedColor('#ff00ff', 'fuchsia') + let i = s:SetNamedColor('#ffffff', 'white') + let i = s:SetNamedColor('#00ff00', 'lime') + let i = s:SetNamedColor('#008000', 'green') + let i = s:SetNamedColor('#000080', 'navy') + let i = s:SetNamedColor('#0000ff', 'blue') + let i = s:SetNamedColor('#00ffff', 'aqua') + let i = s:SetNamedColor('#008080', 'teal') + let i = s:SetNamedColor('#000000', 'black') + let i = s:SetNamedColor('#c0c0c0', 'silver') + let i = s:SetNamedColor('#808080', 'gray') + + " extra colors + let i = s:SetNamedColor('#F0F8FF','AliceBlue') + let i = s:SetNamedColor('#FAEBD7','AntiqueWhite') + let i = s:SetNamedColor('#7FFFD4','Aquamarine') + let i = s:SetNamedColor('#F0FFFF','Azure') + let i = s:SetNamedColor('#F5F5DC','Beige') + let i = s:SetNamedColor('#FFE4C4','Bisque') + let i = s:SetNamedColor('#FFEBCD','BlanchedAlmond') + let i = s:SetNamedColor('#8A2BE2','BlueViolet') + let i = s:SetNamedColor('#A52A2A','Brown') + let i = s:SetNamedColor('#DEB887','BurlyWood') + let i = s:SetNamedColor('#5F9EA0','CadetBlue') + let i = s:SetNamedColor('#7FFF00','Chartreuse') + let i = s:SetNamedColor('#D2691E','Chocolate') + let i = s:SetNamedColor('#FF7F50','Coral') + let i = s:SetNamedColor('#6495ED','CornflowerBlue') + let i = s:SetNamedColor('#FFF8DC','Cornsilk') + let i = s:SetNamedColor('#DC143C','Crimson') + let i = s:SetNamedColor('#00FFFF','Cyan') + let i = s:SetNamedColor('#00008B','DarkBlue') + let i = s:SetNamedColor('#008B8B','DarkCyan') + let i = 
s:SetNamedColor('#B8860B','DarkGoldenRod') + let i = s:SetNamedColor('#A9A9A9','DarkGray') + let i = s:SetNamedColor('#A9A9A9','DarkGrey') + let i = s:SetNamedColor('#006400','DarkGreen') + let i = s:SetNamedColor('#BDB76B','DarkKhaki') + let i = s:SetNamedColor('#8B008B','DarkMagenta') + let i = s:SetNamedColor('#556B2F','DarkOliveGreen') + let i = s:SetNamedColor('#FF8C00','Darkorange') + let i = s:SetNamedColor('#9932CC','DarkOrchid') + let i = s:SetNamedColor('#8B0000','DarkRed') + let i = s:SetNamedColor('#E9967A','DarkSalmon') + let i = s:SetNamedColor('#8FBC8F','DarkSeaGreen') + let i = s:SetNamedColor('#483D8B','DarkSlateBlue') + let i = s:SetNamedColor('#2F4F4F','DarkSlateGray') + let i = s:SetNamedColor('#2F4F4F','DarkSlateGrey') + let i = s:SetNamedColor('#00CED1','DarkTurquoise') + let i = s:SetNamedColor('#9400D3','DarkViolet') + let i = s:SetNamedColor('#FF1493','DeepPink') + let i = s:SetNamedColor('#00BFFF','DeepSkyBlue') + let i = s:SetNamedColor('#696969','DimGray') + let i = s:SetNamedColor('#696969','DimGrey') + let i = s:SetNamedColor('#1E90FF','DodgerBlue') + let i = s:SetNamedColor('#B22222','FireBrick') + let i = s:SetNamedColor('#FFFAF0','FloralWhite') + let i = s:SetNamedColor('#228B22','ForestGreen') + let i = s:SetNamedColor('#DCDCDC','Gainsboro') + let i = s:SetNamedColor('#F8F8FF','GhostWhite') + let i = s:SetNamedColor('#FFD700','Gold') + let i = s:SetNamedColor('#DAA520','GoldenRod') + let i = s:SetNamedColor('#808080','Grey') + let i = s:SetNamedColor('#ADFF2F','GreenYellow') + let i = s:SetNamedColor('#F0FFF0','HoneyDew') + let i = s:SetNamedColor('#FF69B4','HotPink') + let i = s:SetNamedColor('#CD5C5C','IndianRed') + let i = s:SetNamedColor('#4B0082','Indigo') + let i = s:SetNamedColor('#FFFFF0','Ivory') + let i = s:SetNamedColor('#F0E68C','Khaki') + let i = s:SetNamedColor('#E6E6FA','Lavender') + let i = s:SetNamedColor('#FFF0F5','LavenderBlush') + let i = s:SetNamedColor('#7CFC00','LawnGreen') + let i = 
s:SetNamedColor('#FFFACD','LemonChiffon') + let i = s:SetNamedColor('#ADD8E6','LightBlue') + let i = s:SetNamedColor('#F08080','LightCoral') + let i = s:SetNamedColor('#E0FFFF','LightCyan') + let i = s:SetNamedColor('#FAFAD2','LightGoldenRodYellow') + let i = s:SetNamedColor('#D3D3D3','LightGray') + let i = s:SetNamedColor('#D3D3D3','LightGrey') + let i = s:SetNamedColor('#90EE90','LightGreen') + let i = s:SetNamedColor('#FFB6C1','LightPink') + let i = s:SetNamedColor('#FFA07A','LightSalmon') + let i = s:SetNamedColor('#20B2AA','LightSeaGreen') + let i = s:SetNamedColor('#87CEFA','LightSkyBlue') + let i = s:SetNamedColor('#778899','LightSlateGray') + let i = s:SetNamedColor('#778899','LightSlateGrey') + let i = s:SetNamedColor('#B0C4DE','LightSteelBlue') + let i = s:SetNamedColor('#FFFFE0','LightYellow') + let i = s:SetNamedColor('#32CD32','LimeGreen') + let i = s:SetNamedColor('#FAF0E6','Linen') + let i = s:SetNamedColor('#FF00FF','Magenta') + let i = s:SetNamedColor('#66CDAA','MediumAquaMarine') + let i = s:SetNamedColor('#0000CD','MediumBlue') + let i = s:SetNamedColor('#BA55D3','MediumOrchid') + let i = s:SetNamedColor('#9370D8','MediumPurple') + let i = s:SetNamedColor('#3CB371','MediumSeaGreen') + let i = s:SetNamedColor('#7B68EE','MediumSlateBlue') + let i = s:SetNamedColor('#00FA9A','MediumSpringGreen') + let i = s:SetNamedColor('#48D1CC','MediumTurquoise') + let i = s:SetNamedColor('#C71585','MediumVioletRed') + let i = s:SetNamedColor('#191970','MidnightBlue') + let i = s:SetNamedColor('#F5FFFA','MintCream') + let i = s:SetNamedColor('#FFE4E1','MistyRose') + let i = s:SetNamedColor('#FFE4B5','Moccasin') + let i = s:SetNamedColor('#FFDEAD','NavajoWhite') + let i = s:SetNamedColor('#FDF5E6','OldLace') + let i = s:SetNamedColor('#6B8E23','OliveDrab') + let i = s:SetNamedColor('#FF4500','OrangeRed') + let i = s:SetNamedColor('#DA70D6','Orchid') + let i = s:SetNamedColor('#EEE8AA','PaleGoldenRod') + let i = s:SetNamedColor('#98FB98','PaleGreen') + let i = 
s:SetNamedColor('#AFEEEE','PaleTurquoise') + let i = s:SetNamedColor('#D87093','PaleVioletRed') + let i = s:SetNamedColor('#FFEFD5','PapayaWhip') + let i = s:SetNamedColor('#FFDAB9','PeachPuff') + let i = s:SetNamedColor('#CD853F','Peru') + let i = s:SetNamedColor('#FFC0CB','Pink') + let i = s:SetNamedColor('#DDA0DD','Plum') + let i = s:SetNamedColor('#B0E0E6','PowderBlue') + let i = s:SetNamedColor('#BC8F8F','RosyBrown') + let i = s:SetNamedColor('#4169E1','RoyalBlue') + let i = s:SetNamedColor('#8B4513','SaddleBrown') + let i = s:SetNamedColor('#FA8072','Salmon') + let i = s:SetNamedColor('#F4A460','SandyBrown') + let i = s:SetNamedColor('#2E8B57','SeaGreen') + let i = s:SetNamedColor('#FFF5EE','SeaShell') + let i = s:SetNamedColor('#A0522D','Sienna') + let i = s:SetNamedColor('#87CEEB','SkyBlue') + let i = s:SetNamedColor('#6A5ACD','SlateBlue') + let i = s:SetNamedColor('#708090','SlateGray') + let i = s:SetNamedColor('#708090','SlateGrey') + let i = s:SetNamedColor('#FFFAFA','Snow') + let i = s:SetNamedColor('#00FF7F','SpringGreen') + let i = s:SetNamedColor('#4682B4','SteelBlue') + let i = s:SetNamedColor('#D2B48C','Tan') + let i = s:SetNamedColor('#D8BFD8','Thistle') + let i = s:SetNamedColor('#FF6347','Tomato') + let i = s:SetNamedColor('#40E0D0','Turquoise') + let i = s:SetNamedColor('#EE82EE','Violet') + let i = s:SetNamedColor('#F5DEB3','Wheat') + let i = s:SetNamedColor('#F5F5F5','WhiteSmoke') + let i = s:SetNamedColor('#9ACD32','YellowGreen') + + + + let i = 1 + while i <= line("$") + call s:PreviewCSSColorInLine(i) + let i = i+1 + endwhile + unlet i + + autocmd CursorHold * silent call s:PreviewCSSColorInLine('.') + autocmd CursorHoldI * silent call s:PreviewCSSColorInLine('.') + set ut=100 +endif " has("gui_running") diff --git a/.vim/dict.add b/.vim/dict.add index e2840b6..3d4e9d8 100644 --- a/.vim/dict.add +++ b/.vim/dict.add @@ -1,12 +1,13 @@ sqlalchemy SQLAlchemy sessionmaker Metadata metadata metadata config hostname app Ubuntu blog unicode 
+plugin diff --git a/.vimrc b/.vimrc index 969fc15..e077375 100644 --- a/.vimrc +++ b/.vimrc @@ -1,534 +1,536 @@ " http://github.com/mitechie/pyvim " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,q - reformat text paragraph " ,s - toggle spellcheck " ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " ,t - collapse/fold html tag " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... 
- post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ " bootstrap the pathogen part of the config right away filetype off call pathogen#runtime_append_all_bundles() call pathogen#helptags() " now proceed as usual syntax on " syntax highlighing filetype on " try to detect filetypes filetype plugin indent on " enable loading indent file for filetype " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme underwater-mod " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme lucius set t_Co=256 endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number 
lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... set statusline=%F%m%r%h%w\ [FORMAT=%{&ff}]\ [TYPE=%Y]\ [ASCII=\%03.3b]\ [HEX=\%02.2B]\ [POS=%04l,%04v][%p%%]\ [LEN=%L] " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Config Specific Settings " ================================================== " If we're running in vimdiff then tweak out settings a bit if &diff set nospell endif " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap 
<leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " setup a custom dict for spelling " zg = add word to dict " zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " for when we forget to use sudo to open/edit a file cmap w!! w !sudo tee % >/dev/null nnoremap <leader>q gqap " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " Hints for other movements " <c-w><c-r> rotate window to next spot " <c-w><c-x> swap window with current one " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . 
a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching set smartcase " if searching and search contains upper case, make case sensitive search nmap <silent> <C-N> :silent noh<CR> " Highlight end of line whitespace. highlight WhitespaceEOL ctermbg=red guibg=red match WhitespaceEOL /\s\+$/ " Clean all end of line extra whitespace with ,S :nnoremap <silent><leader>S :let _s=@/<Bar>:%s/\s\+$//e<Bar>:let @/=_s<Bar>:nohl<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t set completeopt+=menuone,longest let g:SuperTabDefaultCompletionType = "context" let g:SuperTabContextDefaultCompletionType = "<c-n>" let g:SuperTabLongestHighlight = 1 let g:SuperTabMidWordCompletion = 1 " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== -au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" -au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m +au FileType python compiler pylint +au BufReadPost quickfix map <buffer> <silent> <CR> :.cc <CR> :ccl +" au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" +" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " enable html tag folding with ,t nnoremap <leader>f Vatzf " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " mako.vim " http://www.vim.org/scripts/script.php?script_id=2663 " syntax support for mako code " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " 
http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 autocmd FileType python map <buffer> <leader>M :call Pep8()<CR> " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " bundle/snipmate/after/plugin/snipmate ino <silent> <leader>, <c-r>=TriggerSnippet()<cr> snor <silent> <leader>, <esc>i<right><c-r>=TriggerSnippet()<cr> ino <silent> <leader>\< <c-r>=BackwardsSnippet()<cr> snor <silent> <leader>\< <esc>i<right><c-r>=BackwardsSnippet()<cr> ino <silent> <leader>n <c-r>=ShowAvailableSnips()<cr> " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pyflakes " http://www.vim.org/scripts/script.php?script_id=3161 " default config for underlines of syntax errors in gvim " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter "source ~/.vim/twitvim.vim " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let 
ropevim_enable_autoimport=1 let ropevim_extended_complete=1 " function! CustomCodeAssistInsertMode() " call RopeCodeAssistInsertMode() " if pumvisible() " return "\<C-L>\<Down>" " else " return '' " endif " endfunction " " function! TabWrapperComplete() " let cursyn = synID(line('.'), col('.') - 1, 1) " if pumvisible() " return "\<C-Y>" " endif " if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 " return "\<Tab>" " else " return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" " endif " endfunction " " inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>) " javascript folding function! JavaScriptFold() setl foldmethod=syntax setl foldlevelstart=1 syn region foldBraces start=/{/ end=/}/ transparent fold keepend extend function! 
FoldText() return substitute(getline(v:foldstart), '{.*', '{...}', '') endfunction setl foldtext=FoldText() endfunction diff --git a/bundle_list b/bundle_list index 561ae01..99ffdd2 100644 --- a/bundle_list +++ b/bundle_list @@ -1,14 +1,14 @@ http://github.com/sukima/xmledit.git http://github.com/vim-scripts/mako.vim.git http://github.com/vim-scripts/LustyJuggler.git http://github.com/vim-scripts/tComment.git http://github.com/vim-scripts/pep8--Driessen.git http://github.com/vim-scripts/jpythonfold.vim.git http://github.com/vim-scripts/pydoc.vim.git http://github.com/tsaleh/vim-supertab.git http://github.com/msanders/snipmate.vim.git http://github.com/vim-scripts/surround.vim.git -http://github.com/kevinw/pyflakes-vim.git http://github.com/vim-scripts/Gist.vim.git http://github.com/scrooloose/nerdtree.git http://github.com/mitechie/pyflakes-pathogen.git +#http://github.com/vim-scripts/pylint.vim.git diff --git a/install.py b/install.py index 94d228b..1460234 100755 --- a/install.py +++ b/install.py @@ -1,95 +1,96 @@ #!/usr/bin/env python """ Install the files needed for this config to operate on the user's system correctly""" import os import subprocess # command to run: # ln -s /home/rharding/configs/pyvim/vimrc ~/.vimrc # ln -s /home/rharding/configs/pyvim/vim ~/.vim CONFIG_FILES = ['.vimrc', '.vim'] HOME_PATH = os.path.expanduser('~') def removefile(filename): removing_link = "%s/%s" % (HOME_PATH, filename) ret = subprocess.call(["rm", "-rf", removing_link]) print "%s is the status code for removing %s" % (ret, removing_link) return def linkfile(filename): link_location = "%s/%s" % (HOME_PATH, filename) is_located = "%s" % (os.path.abspath(filename)) ret = subprocess.call(["ln", "-s", is_located, link_location]) print "%s is the status code for linking %s to %s" % (ret, is_located, link_location) return def empty_bundles(): """Need to clear out the bundles to make this reusable""" subprocess.call('rm -rf bundle/*', shell=True) def remove_bundles(): """Remove the 
bundle dir from the .vim dir""" subprocess.call('rm -rf $HOME/.vim/bundle', shell=True) def install_bundles(): """Read bundles file and git clone each repo into .vim/bundle""" bundle_list = open('bundle_list') git_cmd = '/usr/bin/git clone {0} $HOME/configs/pyvim/bundle/{1}' for b in bundle_list: - dirname_idx = b.rfind('/') + 1 - dirname = b[dirname_idx:].strip() - subprocess.call(git_cmd.format(b.strip(), dirname), shell=True) + if not b.startswith('#'): + dirname_idx = b.rfind('/') + 1 + dirname = b[dirname_idx:].strip() + subprocess.call(git_cmd.format(b.strip(), dirname), shell=True) # and finally link to the .vim/bundle dir link_location = "%s/%s" % (HOME_PATH, '.vim/bundle') is_located = "%s" % (os.path.abspath('bundle')) ret = subprocess.call(["cp", "-r", is_located, link_location]) print "{0} is the status code for linking {1} to {2}".format(ret, is_located, link_location) return def fix_xmledit(): """In order xmledit in html you need to link the file to html.vim""" xmledit_path = "$HOME/.vim/bundle/xmledit.git/ftplugin" xml = os.path.join(xmledit_path, 'xml.vim') html = os.path.join(xmledit_path, 'html.vim') mako = os.path.join(xmledit_path, 'mako.vim') print xml print html print mako subprocess.call('ln -s {0} {1}'.format(xml, html), shell=True) subprocess.call('ln -s {0} {1}'.format(xml, mako), shell=True) def copy_custom_snippets(): """We need to add our custom snippets after the plugin is downloaded/setup """ import glob, shutil, os copy_to = os.path.expanduser('~/.vim/bundle/snipmate.vim.git/snippets') for file in glob.glob("custom_snippets/*.snippets"): shutil.copy(file, copy_to) for conf_file in CONFIG_FILES: removefile(conf_file) linkfile(conf_file) empty_bundles() remove_bundles() install_bundles() fix_xmledit() copy_custom_snippets()
mitechie/pyvim
de35ced413b83e94924eab02cfc69d0a41e3668b
just tweak the bunlde's to all bit http vs git://
diff --git a/bundle_list b/bundle_list index dbcb1bc..561ae01 100644 --- a/bundle_list +++ b/bundle_list @@ -1,14 +1,14 @@ http://github.com/sukima/xmledit.git http://github.com/vim-scripts/mako.vim.git http://github.com/vim-scripts/LustyJuggler.git http://github.com/vim-scripts/tComment.git http://github.com/vim-scripts/pep8--Driessen.git http://github.com/vim-scripts/jpythonfold.vim.git http://github.com/vim-scripts/pydoc.vim.git http://github.com/tsaleh/vim-supertab.git http://github.com/msanders/snipmate.vim.git http://github.com/vim-scripts/surround.vim.git http://github.com/kevinw/pyflakes-vim.git http://github.com/vim-scripts/Gist.vim.git http://github.com/scrooloose/nerdtree.git -git://github.com/mitechie/pyflakes-pathogen.git +http://github.com/mitechie/pyflakes-pathogen.git
mitechie/pyvim
3463c6559bcfbec056029a7f2eee8d239fe9334a
Add the custom snippets dir
diff --git a/custom_snippets/javascript-custom.snippets b/custom_snippets/javascript-custom.snippets new file mode 100644 index 0000000..1fd3231 --- /dev/null +++ b/custom_snippets/javascript-custom.snippets @@ -0,0 +1,9 @@ +# dump out to console +snippet dmp + console.log(${1|somevar}); + +snippet doc + /** + * ${1|some_doc} + * + */ diff --git a/custom_snippets/python-custom.snippets b/custom_snippets/python-custom.snippets new file mode 100644 index 0000000..0dea971 --- /dev/null +++ b/custom_snippets/python-custom.snippets @@ -0,0 +1,5 @@ +# dump out via log.debug +snippet dmp + log.debug(${1|somevar}) + +
mitechie/pyvim
83a49095df9b4f2d4e6c23d4d6b07e2b4f06e1af
Adding custom snippets/pyflakes
diff --git a/.vimrc b/.vimrc index 6c77536..969fc15 100644 --- a/.vimrc +++ b/.vimrc @@ -1,535 +1,534 @@ " http://github.com/mitechie/pyvim " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,q - reformat text paragraph " ,s - toggle spellcheck " ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " ,t - collapse/fold html tag " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... 
- post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ " bootstrap the pathogen part of the config right away filetype off call pathogen#runtime_append_all_bundles() call pathogen#helptags() " now proceed as usual syntax on " syntax highlighing filetype on " try to detect filetypes filetype plugin indent on " enable loading indent file for filetype " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme underwater-mod " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme lucius set t_Co=256 endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number 
lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... set statusline=%F%m%r%h%w\ [FORMAT=%{&ff}]\ [TYPE=%Y]\ [ASCII=\%03.3b]\ [HEX=\%02.2B]\ [POS=%04l,%04v][%p%%]\ [LEN=%L] " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Config Specific Settings " ================================================== " If we're running in vimdiff then tweak out settings a bit if &diff set nospell endif " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap 
<leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " setup a custom dict for spelling " zg = add word to dict " zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " for when we forget to use sudo to open/edit a file cmap w!! w !sudo tee % >/dev/null nnoremap <leader>q gqap " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " Hints for other movements " <c-w><c-r> rotate window to next spot " <c-w><c-x> swap window with current one " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . 
a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching set smartcase " if searching and search contains upper case, make case sensitive search nmap <silent> <C-N> :silent noh<CR> " Highlight end of line whitespace. highlight WhitespaceEOL ctermbg=red guibg=red match WhitespaceEOL /\s\+$/ " Clean all end of line extra whitespace with ,S :nnoremap <silent><leader>S :let _s=@/<Bar>:%s/\s\+$//e<Bar>:let @/=_s<Bar>:nohl<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t set completeopt+=menuone,longest let g:SuperTabDefaultCompletionType = "context" let g:SuperTabContextDefaultCompletionType = "<c-n>" let g:SuperTabLongestHighlight = 1 let g:SuperTabMidWordCompletion = 1 " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % -au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery - " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " enable html tag folding with ,t nnoremap <leader>f Vatzf " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " mako.vim " http://www.vim.org/scripts/script.php?script_id=2663 " syntax support for mako code " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 autocmd FileType python map <buffer> <leader>M :call Pep8()<CR> " python folding 
jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype + +" bundle/snipmate/after/plugin/snipmate ino <silent> <leader>, <c-r>=TriggerSnippet()<cr> snor <silent> <leader>, <esc>i<right><c-r>=TriggerSnippet()<cr> ino <silent> <leader>\< <c-r>=BackwardsSnippet()<cr> snor <silent> <leader>\< <esc>i<right><c-r>=BackwardsSnippet()<cr> ino <silent> <leader>n <c-r>=ShowAvailableSnips()<cr> - " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pyflakes " http://www.vim.org/scripts/script.php?script_id=3161 " default config for underlines of syntax errors in gvim " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter "source ~/.vim/twitvim.vim " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 " function! CustomCodeAssistInsertMode() " call RopeCodeAssistInsertMode() " if pumvisible() " return "\<C-L>\<Down>" " else " return '' " endif " endfunction " " function! 
TabWrapperComplete() " let cursyn = synID(line('.'), col('.') - 1, 1) " if pumvisible() " return "\<C-Y>" " endif " if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 " return "\<Tab>" " else " return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" " endif " endfunction " " inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>) " javascript folding function! JavaScriptFold() setl foldmethod=syntax setl foldlevelstart=1 syn region foldBraces start=/{/ end=/}/ transparent fold keepend extend function! 
FoldText() return substitute(getline(v:foldstart), '{.*', '{...}', '') endfunction setl foldtext=FoldText() endfunction diff --git a/bundle_list b/bundle_list index a1fca5a..dbcb1bc 100644 --- a/bundle_list +++ b/bundle_list @@ -1,13 +1,14 @@ http://github.com/sukima/xmledit.git http://github.com/vim-scripts/mako.vim.git http://github.com/vim-scripts/LustyJuggler.git http://github.com/vim-scripts/tComment.git http://github.com/vim-scripts/pep8--Driessen.git http://github.com/vim-scripts/jpythonfold.vim.git http://github.com/vim-scripts/pydoc.vim.git http://github.com/tsaleh/vim-supertab.git http://github.com/msanders/snipmate.vim.git http://github.com/vim-scripts/surround.vim.git http://github.com/kevinw/pyflakes-vim.git http://github.com/vim-scripts/Gist.vim.git http://github.com/scrooloose/nerdtree.git +git://github.com/mitechie/pyflakes-pathogen.git diff --git a/install.py b/install.py index 71da564..94d228b 100755 --- a/install.py +++ b/install.py @@ -1,85 +1,95 @@ #!/usr/bin/env python """ Install the files needed for this config to operate on the user's system correctly""" import os import subprocess # command to run: # ln -s /home/rharding/configs/pyvim/vimrc ~/.vimrc # ln -s /home/rharding/configs/pyvim/vim ~/.vim CONFIG_FILES = ['.vimrc', '.vim'] HOME_PATH = os.path.expanduser('~') def removefile(filename): removing_link = "%s/%s" % (HOME_PATH, filename) ret = subprocess.call(["rm", "-rf", removing_link]) print "%s is the status code for removing %s" % (ret, removing_link) return def linkfile(filename): link_location = "%s/%s" % (HOME_PATH, filename) is_located = "%s" % (os.path.abspath(filename)) ret = subprocess.call(["ln", "-s", is_located, link_location]) print "%s is the status code for linking %s to %s" % (ret, is_located, link_location) return def empty_bundles(): """Need to clear out the bundles to make this reusable""" subprocess.call('rm -rf bundle/*', shell=True) def remove_bundles(): """Remove the bundle dir from the .vim dir""" 
subprocess.call('rm -rf $HOME/.vim/bundle', shell=True) def install_bundles(): """Read bundles file and git clone each repo into .vim/bundle""" bundle_list = open('bundle_list') git_cmd = '/usr/bin/git clone {0} $HOME/configs/pyvim/bundle/{1}' for b in bundle_list: dirname_idx = b.rfind('/') + 1 dirname = b[dirname_idx:].strip() subprocess.call(git_cmd.format(b.strip(), dirname), shell=True) # and finally link to the .vim/bundle dir link_location = "%s/%s" % (HOME_PATH, '.vim/bundle') is_located = "%s" % (os.path.abspath('bundle')) ret = subprocess.call(["cp", "-r", is_located, link_location]) print "{0} is the status code for linking {1} to {2}".format(ret, is_located, link_location) return def fix_xmledit(): """In order xmledit in html you need to link the file to html.vim""" xmledit_path = "$HOME/.vim/bundle/xmledit.git/ftplugin" xml = os.path.join(xmledit_path, 'xml.vim') html = os.path.join(xmledit_path, 'html.vim') mako = os.path.join(xmledit_path, 'mako.vim') print xml print html print mako subprocess.call('ln -s {0} {1}'.format(xml, html), shell=True) subprocess.call('ln -s {0} {1}'.format(xml, mako), shell=True) +def copy_custom_snippets(): + """We need to add our custom snippets after the plugin is downloaded/setup + + """ + import glob, shutil, os + copy_to = os.path.expanduser('~/.vim/bundle/snipmate.vim.git/snippets') + for file in glob.glob("custom_snippets/*.snippets"): + shutil.copy(file, copy_to) + for conf_file in CONFIG_FILES: removefile(conf_file) linkfile(conf_file) empty_bundles() remove_bundles() install_bundles() fix_xmledit() +copy_custom_snippets()
mitechie/pyvim
6534507b426bfccef84242f7e2a9f852ac6bb9d7
Added readme for github view
diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..6bd10c6 --- /dev/null +++ b/README.rst @@ -0,0 +1,19 @@ +Rick's Python Config +===================== + +I'm a Python web dev so lots of tweaks for that. + +I've recently done some heavy lifting to get it pathogen compatible so give me a +bit to settle the dust while I finish cleaning/updating docs/testing things +out. + +Install +-------- +Setup is handled by `install.py` which will do the symlinks, install the +plugins, and copy them over to the `.vim/bundle` directory. + +ToDo +---- +- need pylint or pyflakes preferably with highlighting in the page via spell +- need an upgrade script command to walk through the plugins and pull updates + diff --git a/install.py b/install.py index bbedda1..71da564 100755 --- a/install.py +++ b/install.py @@ -1,82 +1,85 @@ #!/usr/bin/env python """ Install the files needed for this config to operate on the user's system correctly""" import os import subprocess # command to run: # ln -s /home/rharding/configs/pyvim/vimrc ~/.vimrc # ln -s /home/rharding/configs/pyvim/vim ~/.vim CONFIG_FILES = ['.vimrc', '.vim'] HOME_PATH = os.path.expanduser('~') + def removefile(filename): removing_link = "%s/%s" % (HOME_PATH, filename) ret = subprocess.call(["rm", "-rf", removing_link]) print "%s is the status code for removing %s" % (ret, removing_link) return def linkfile(filename): link_location = "%s/%s" % (HOME_PATH, filename) is_located = "%s" % (os.path.abspath(filename)) ret = subprocess.call(["ln", "-s", is_located, link_location]) print "%s is the status code for linking %s to %s" % (ret, is_located, link_location) return + def empty_bundles(): """Need to clear out the bundles to make this reusable""" subprocess.call('rm -rf bundle/*', shell=True) + def remove_bundles(): """Remove the bundle dir from the .vim dir""" subprocess.call('rm -rf $HOME/.vim/bundle', shell=True) def install_bundles(): """Read bundles file and git clone each repo into .vim/bundle""" 
bundle_list = open('bundle_list') git_cmd = '/usr/bin/git clone {0} $HOME/configs/pyvim/bundle/{1}' for b in bundle_list: dirname_idx = b.rfind('/') + 1 dirname = b[dirname_idx:].strip() subprocess.call(git_cmd.format(b.strip(), dirname), shell=True) # and finally link to the .vim/bundle dir link_location = "%s/%s" % (HOME_PATH, '.vim/bundle') is_located = "%s" % (os.path.abspath('bundle')) ret = subprocess.call(["cp", "-r", is_located, link_location]) print "{0} is the status code for linking {1} to {2}".format(ret, is_located, link_location) return + def fix_xmledit(): - """In order to have xmledit in html you need to link the file to html.vim""" + """In order xmledit in html you need to link the file to html.vim""" xmledit_path = "$HOME/.vim/bundle/xmledit.git/ftplugin" xml = os.path.join(xmledit_path, 'xml.vim') html = os.path.join(xmledit_path, 'html.vim') mako = os.path.join(xmledit_path, 'mako.vim') print xml print html print mako subprocess.call('ln -s {0} {1}'.format(xml, html), shell=True) subprocess.call('ln -s {0} {1}'.format(xml, mako), shell=True) for conf_file in CONFIG_FILES: removefile(conf_file) linkfile(conf_file) empty_bundles() remove_bundles() install_bundles() fix_xmledit() -
mitechie/pyvim
0a0d114f1cb2d8293dd0d524d3ff41c50d2d9ec5
remove space
diff --git a/.vim/doc/tags b/.vim/doc/tags index 02519d8..a161ddb 100644 --- a/.vim/doc/tags +++ b/.vim/doc/tags @@ -1,348 +1,211 @@ 'NERDChristmasTree' NERD_tree.txt /*'NERDChristmasTree'* 'NERDTreeAutoCenter' NERD_tree.txt /*'NERDTreeAutoCenter'* 'NERDTreeAutoCenterThreshold' NERD_tree.txt /*'NERDTreeAutoCenterThreshold'* 'NERDTreeBookmarksFile' NERD_tree.txt /*'NERDTreeBookmarksFile'* 'NERDTreeCaseSensitiveSort' NERD_tree.txt /*'NERDTreeCaseSensitiveSort'* 'NERDTreeChDirMode' NERD_tree.txt /*'NERDTreeChDirMode'* 'NERDTreeHighlightCursorline' NERD_tree.txt /*'NERDTreeHighlightCursorline'* 'NERDTreeHijackNetrw' NERD_tree.txt /*'NERDTreeHijackNetrw'* 'NERDTreeIgnore' NERD_tree.txt /*'NERDTreeIgnore'* 'NERDTreeMouseMode' NERD_tree.txt /*'NERDTreeMouseMode'* 'NERDTreeQuitOnOpen' NERD_tree.txt /*'NERDTreeQuitOnOpen'* 'NERDTreeShowBookmarks' NERD_tree.txt /*'NERDTreeShowBookmarks'* 'NERDTreeShowFiles' NERD_tree.txt /*'NERDTreeShowFiles'* 'NERDTreeShowHidden' NERD_tree.txt /*'NERDTreeShowHidden'* 'NERDTreeShowLineNumbers' NERD_tree.txt /*'NERDTreeShowLineNumbers'* 'NERDTreeSortOrder' NERD_tree.txt /*'NERDTreeSortOrder'* 'NERDTreeStatusline' NERD_tree.txt /*'NERDTreeStatusline'* 'NERDTreeWinPos' NERD_tree.txt /*'NERDTreeWinPos'* 'NERDTreeWinSize' NERD_tree.txt /*'NERDTreeWinSize'* 'loaded_nerd_tree' NERD_tree.txt /*'loaded_nerd_tree'* 'snippets' snipMate.txt /*'snippets'* .snippet snipMate.txt /*.snippet* .snippets snipMate.txt /*.snippets* -:ABitLy twitvim.txt /*:ABitLy* -:ACligs twitvim.txt /*:ACligs* -:AIsGd twitvim.txt /*:AIsGd* -:AMetamark twitvim.txt /*:AMetamark* -:ASnipurl twitvim.txt /*:ASnipurl* -:ATinyURL twitvim.txt /*:ATinyURL* -:ATrim twitvim.txt /*:ATrim* -:ATweetburner twitvim.txt /*:ATweetburner* -:AUrlBorg twitvim.txt /*:AUrlBorg* -:AZima twitvim.txt /*:AZima* -:BPosttoTwitter twitvim.txt /*:BPosttoTwitter* -:BackTwitter twitvim.txt /*:BackTwitter* -:BitLy twitvim.txt /*:BitLy* -:CPosttoTwitter twitvim.txt /*:CPosttoTwitter* -:Cligs twitvim.txt 
/*:Cligs* -:DMSentTwitter twitvim.txt /*:DMSentTwitter* -:DMTwitter twitvim.txt /*:DMTwitter* -:ForwardTwitter twitvim.txt /*:ForwardTwitter* -:FriendsTwitter twitvim.txt /*:FriendsTwitter* -:IsGd twitvim.txt /*:IsGd* -:ListTwitter twitvim.txt /*:ListTwitter* -:LocationTwitter twitvim.txt /*:LocationTwitter* -:Metamark twitvim.txt /*:Metamark* :NERDTree NERD_tree.txt /*:NERDTree* :NERDTreeClose NERD_tree.txt /*:NERDTreeClose* :NERDTreeFind NERD_tree.txt /*:NERDTreeFind* :NERDTreeFromBookmark NERD_tree.txt /*:NERDTreeFromBookmark* :NERDTreeMirror NERD_tree.txt /*:NERDTreeMirror* :NERDTreeToggle NERD_tree.txt /*:NERDTreeToggle* -:NextTwitter twitvim.txt /*:NextTwitter* -:PBitLy twitvim.txt /*:PBitLy* -:PCligs twitvim.txt /*:PCligs* -:PIsGd twitvim.txt /*:PIsGd* -:PMetamark twitvim.txt /*:PMetamark* -:PSnipurl twitvim.txt /*:PSnipurl* -:PTinyURL twitvim.txt /*:PTinyURL* -:PTrim twitvim.txt /*:PTrim* -:PTweetburner twitvim.txt /*:PTweetburner* -:PUrlBorg twitvim.txt /*:PUrlBorg* -:PZima twitvim.txt /*:PZima* -:PosttoTwitter twitvim.txt /*:PosttoTwitter* -:PreviousTwitter twitvim.txt /*:PreviousTwitter* -:ProfileTwitter twitvim.txt /*:ProfileTwitter* -:PublicTwitter twitvim.txt /*:PublicTwitter* -:RateLimitTwitter twitvim.txt /*:RateLimitTwitter* -:RefreshTwitter twitvim.txt /*:RefreshTwitter* -:RepliesTwitter twitvim.txt /*:RepliesTwitter* -:ResetLoginTwitter twitvim.txt /*:ResetLoginTwitter* -:RetweetedByMeTwitter twitvim.txt /*:RetweetedByMeTwitter* -:RetweetedToMeTwitter twitvim.txt /*:RetweetedToMeTwitter* -:SearchTwitter twitvim.txt /*:SearchTwitter* -:SendDMTwitter twitvim.txt /*:SendDMTwitter* -:SetLoginTwitter twitvim.txt /*:SetLoginTwitter* -:Snipurl twitvim.txt /*:Snipurl* :TComment tComment.txt /*:TComment* :TCommentAs tComment.txt /*:TCommentAs* :TCommentBlock tComment.txt /*:TCommentBlock* :TCommentInline tComment.txt /*:TCommentInline* :TCommentRight tComment.txt /*:TCommentRight* -:TinyURL twitvim.txt /*:TinyURL* -:Trim twitvim.txt /*:Trim* -:Tweetburner 
twitvim.txt /*:Tweetburner* -:UrlBorg twitvim.txt /*:UrlBorg* -:UserTwitter twitvim.txt /*:UserTwitter* -:Zima twitvim.txt /*:Zima* ExtractSnips() snipMate.txt /*ExtractSnips()* ExtractSnipsFile() snipMate.txt /*ExtractSnipsFile()* Filename() snipMate.txt /*Filename()* NERDTree NERD_tree.txt /*NERDTree* NERDTree-? NERD_tree.txt /*NERDTree-?* NERDTree-A NERD_tree.txt /*NERDTree-A* NERDTree-B NERD_tree.txt /*NERDTree-B* NERDTree-C NERD_tree.txt /*NERDTree-C* NERDTree-C-J NERD_tree.txt /*NERDTree-C-J* NERDTree-C-K NERD_tree.txt /*NERDTree-C-K* NERDTree-D NERD_tree.txt /*NERDTree-D* NERDTree-F NERD_tree.txt /*NERDTree-F* NERDTree-I NERD_tree.txt /*NERDTree-I* NERDTree-J NERD_tree.txt /*NERDTree-J* NERDTree-K NERD_tree.txt /*NERDTree-K* NERDTree-O NERD_tree.txt /*NERDTree-O* NERDTree-P NERD_tree.txt /*NERDTree-P* NERDTree-R NERD_tree.txt /*NERDTree-R* NERDTree-T NERD_tree.txt /*NERDTree-T* NERDTree-U NERD_tree.txt /*NERDTree-U* NERDTree-X NERD_tree.txt /*NERDTree-X* NERDTree-cd NERD_tree.txt /*NERDTree-cd* NERDTree-contents NERD_tree.txt /*NERDTree-contents* NERDTree-e NERD_tree.txt /*NERDTree-e* NERDTree-f NERD_tree.txt /*NERDTree-f* NERDTree-gi NERD_tree.txt /*NERDTree-gi* NERDTree-go NERD_tree.txt /*NERDTree-go* NERDTree-gs NERD_tree.txt /*NERDTree-gs* NERDTree-i NERD_tree.txt /*NERDTree-i* NERDTree-m NERD_tree.txt /*NERDTree-m* NERDTree-o NERD_tree.txt /*NERDTree-o* NERDTree-p NERD_tree.txt /*NERDTree-p* NERDTree-q NERD_tree.txt /*NERDTree-q* NERDTree-r NERD_tree.txt /*NERDTree-r* NERDTree-s NERD_tree.txt /*NERDTree-s* NERDTree-t NERD_tree.txt /*NERDTree-t* NERDTree-u NERD_tree.txt /*NERDTree-u* NERDTree-x NERD_tree.txt /*NERDTree-x* NERDTreeAPI NERD_tree.txt /*NERDTreeAPI* NERDTreeAbout NERD_tree.txt /*NERDTreeAbout* NERDTreeAddKeyMap() NERD_tree.txt /*NERDTreeAddKeyMap()* NERDTreeAddMenuItem() NERD_tree.txt /*NERDTreeAddMenuItem()* NERDTreeAddMenuSeparator() NERD_tree.txt /*NERDTreeAddMenuSeparator()* NERDTreeAddSubmenu() NERD_tree.txt /*NERDTreeAddSubmenu()* 
NERDTreeBookmarkCommands NERD_tree.txt /*NERDTreeBookmarkCommands* NERDTreeBookmarkTable NERD_tree.txt /*NERDTreeBookmarkTable* NERDTreeBookmarks NERD_tree.txt /*NERDTreeBookmarks* NERDTreeChangelog NERD_tree.txt /*NERDTreeChangelog* NERDTreeCredits NERD_tree.txt /*NERDTreeCredits* NERDTreeFunctionality NERD_tree.txt /*NERDTreeFunctionality* NERDTreeGlobalCommands NERD_tree.txt /*NERDTreeGlobalCommands* NERDTreeInvalidBookmarks NERD_tree.txt /*NERDTreeInvalidBookmarks* NERDTreeKeymapAPI NERD_tree.txt /*NERDTreeKeymapAPI* NERDTreeLicense NERD_tree.txt /*NERDTreeLicense* NERDTreeMappings NERD_tree.txt /*NERDTreeMappings* NERDTreeMenu NERD_tree.txt /*NERDTreeMenu* NERDTreeMenuAPI NERD_tree.txt /*NERDTreeMenuAPI* NERDTreeOptionDetails NERD_tree.txt /*NERDTreeOptionDetails* NERDTreeOptionSummary NERD_tree.txt /*NERDTreeOptionSummary* NERDTreeOptions NERD_tree.txt /*NERDTreeOptions* NERDTreeRender() NERD_tree.txt /*NERDTreeRender()* NERD_tree.txt NERD_tree.txt /*NERD_tree.txt* ResetSnippets() snipMate.txt /*ResetSnippets()* TCommentDefineType() tComment.txt /*TCommentDefineType()* -TwitVim twitvim.txt /*TwitVim* -TwitVim-A-d twitvim.txt /*TwitVim-A-d* -TwitVim-A-g twitvim.txt /*TwitVim-A-g* -TwitVim-A-r twitvim.txt /*TwitVim-A-r* -TwitVim-A-t twitvim.txt /*TwitVim-A-t* -TwitVim-C-PageDown twitvim.txt /*TwitVim-C-PageDown* -TwitVim-C-PageUp twitvim.txt /*TwitVim-C-PageUp* -TwitVim-C-i twitvim.txt /*TwitVim-C-i* -TwitVim-C-o twitvim.txt /*TwitVim-C-o* -TwitVim-C-t twitvim.txt /*TwitVim-C-t* -TwitVim-Leader-@ twitvim.txt /*TwitVim-Leader-@* -TwitVim-Leader-C-r twitvim.txt /*TwitVim-Leader-C-r* -TwitVim-Leader-Leader twitvim.txt /*TwitVim-Leader-Leader* -TwitVim-Leader-S-r twitvim.txt /*TwitVim-Leader-S-r* -TwitVim-Leader-X twitvim.txt /*TwitVim-Leader-X* -TwitVim-Leader-d twitvim.txt /*TwitVim-Leader-d* -TwitVim-Leader-e twitvim.txt /*TwitVim-Leader-e* -TwitVim-Leader-g twitvim.txt /*TwitVim-Leader-g* -TwitVim-Leader-p twitvim.txt /*TwitVim-Leader-p* -TwitVim-Leader-r 
twitvim.txt /*TwitVim-Leader-r* -TwitVim-LongURL twitvim.txt /*TwitVim-LongURL* -TwitVim-add twitvim.txt /*TwitVim-add* -TwitVim-cURL twitvim.txt /*TwitVim-cURL* -TwitVim-contents twitvim.txt /*TwitVim-contents* -TwitVim-credits twitvim.txt /*TwitVim-credits* -TwitVim-delete twitvim.txt /*TwitVim-delete* -TwitVim-direct-message twitvim.txt /*TwitVim-direct-message* -TwitVim-goto twitvim.txt /*TwitVim-goto* -TwitVim-highlight twitvim.txt /*TwitVim-highlight* -TwitVim-history twitvim.txt /*TwitVim-history* -TwitVim-hotkeys twitvim.txt /*TwitVim-hotkeys* -TwitVim-inreplyto twitvim.txt /*TwitVim-inreplyto* -TwitVim-install twitvim.txt /*TwitVim-install* -TwitVim-intro twitvim.txt /*TwitVim-intro* -TwitVim-line-length twitvim.txt /*TwitVim-line-length* -TwitVim-login-base64 twitvim.txt /*TwitVim-login-base64* -TwitVim-manual twitvim.txt /*TwitVim-manual* -TwitVim-mappings twitvim.txt /*TwitVim-mappings* -TwitVim-next twitvim.txt /*TwitVim-next* -TwitVim-non-cURL twitvim.txt /*TwitVim-non-cURL* -TwitVim-previous twitvim.txt /*TwitVim-previous* -TwitVim-profile twitvim.txt /*TwitVim-profile* -TwitVim-refresh twitvim.txt /*TwitVim-refresh* -TwitVim-reply twitvim.txt /*TwitVim-reply* -TwitVim-reply-all twitvim.txt /*TwitVim-reply-all* -TwitVim-retweet twitvim.txt /*TwitVim-retweet* -TwitVim-ssl twitvim.txt /*TwitVim-ssl* -TwitVim-ssl-curl twitvim.txt /*TwitVim-ssl-curl* -TwitVim-ssl-perl twitvim.txt /*TwitVim-ssl-perl* -TwitVim-ssl-python twitvim.txt /*TwitVim-ssl-python* -TwitVim-ssl-ruby twitvim.txt /*TwitVim-ssl-ruby* -TwitVim-switch twitvim.txt /*TwitVim-switch* -TwitVim-timeline-commands twitvim.txt /*TwitVim-timeline-commands* -TwitVim-tips twitvim.txt /*TwitVim-tips* -TwitVim-update-commands twitvim.txt /*TwitVim-update-commands* -TwitVim-utility twitvim.txt /*TwitVim-utility* cs surround.txt /*cs* drawit DrawIt.txt /*drawit* drawit-a DrawIt.txt /*drawit-a* drawit-b DrawIt.txt /*drawit-b* drawit-brush DrawIt.txt /*drawit-brush* drawit-c DrawIt.txt /*drawit-c* 
drawit-contents DrawIt.txt /*drawit-contents* drawit-drawing DrawIt.txt /*drawit-drawing* drawit-e DrawIt.txt /*drawit-e* drawit-erase DrawIt.txt /*drawit-erase* drawit-example DrawIt.txt /*drawit-example* drawit-f DrawIt.txt /*drawit-f* drawit-history DrawIt.txt /*drawit-history* drawit-l DrawIt.txt /*drawit-l* drawit-manual DrawIt.txt /*drawit-manual* drawit-modes DrawIt.txt /*drawit-modes* drawit-move DrawIt.txt /*drawit-move* drawit-moving DrawIt.txt /*drawit-moving* drawit-options DrawIt.txt /*drawit-options* drawit-protect DrawIt.txt /*drawit-protect* drawit-s DrawIt.txt /*drawit-s* drawit-setbrush DrawIt.txt /*drawit-setbrush* drawit-setdrawit DrawIt.txt /*drawit-setdrawit* drawit-start DrawIt.txt /*drawit-start* drawit-stop DrawIt.txt /*drawit-stop* drawit-usage DrawIt.txt /*drawit-usage* drawit-visblock DrawIt.txt /*drawit-visblock* drawit.txt DrawIt.txt /*drawit.txt* ds surround.txt /*ds* g:SuperTabCompletionContexts supertab.txt /*g:SuperTabCompletionContexts* g:SuperTabContextDefaultCompletionType supertab.txt /*g:SuperTabContextDefaultCompletionType* g:SuperTabDefaultCompletionType supertab.txt /*g:SuperTabDefaultCompletionType* g:SuperTabLongestHighlight supertab.txt /*g:SuperTabLongestHighlight* g:SuperTabMappingBackward supertab.txt /*g:SuperTabMappingBackward* g:SuperTabMappingForward supertab.txt /*g:SuperTabMappingForward* g:SuperTabMappingTabLiteral supertab.txt /*g:SuperTabMappingTabLiteral* g:SuperTabMidWordCompletion supertab.txt /*g:SuperTabMidWordCompletion* g:SuperTabRetainCompletionDuration supertab.txt /*g:SuperTabRetainCompletionDuration* g:drawit_insertmode DrawIt.txt /*g:drawit_insertmode* g:snippets_dir snipMate.txt /*g:snippets_dir* g:snips_author snipMate.txt /*g:snips_author* g:tcommentMapLeader1 tComment.txt /*g:tcommentMapLeader1* g:tcommentMapLeader2 tComment.txt /*g:tcommentMapLeader2* g:tcommentMapLeaderOp1 tComment.txt /*g:tcommentMapLeaderOp1* g:tcommentMapLeaderOp2 tComment.txt /*g:tcommentMapLeaderOp2* 
g:tcommentOpModeExtra tComment.txt /*g:tcommentOpModeExtra* -hl-twitterLink twitvim.txt /*hl-twitterLink* -hl-twitterReply twitvim.txt /*hl-twitterReply* -hl-twitterTime twitvim.txt /*hl-twitterTime* -hl-twitterTitle twitvim.txt /*hl-twitterTitle* -hl-twitterUser twitvim.txt /*hl-twitterUser* i_CTRL-G_S surround.txt /*i_CTRL-G_S* i_CTRL-G_s surround.txt /*i_CTRL-G_s* i_CTRL-R_<Tab> snipMate.txt /*i_CTRL-R_<Tab>* list-snippets snipMate.txt /*list-snippets* multi_snip snipMate.txt /*multi_snip* snipMate snipMate.txt /*snipMate* snipMate-$# snipMate.txt /*snipMate-$#* snipMate-${#:} snipMate.txt /*snipMate-${#:}* snipMate-${#} snipMate.txt /*snipMate-${#}* snipMate-author snipMate.txt /*snipMate-author* snipMate-commands snipMate.txt /*snipMate-commands* snipMate-contact snipMate.txt /*snipMate-contact* snipMate-description snipMate.txt /*snipMate-description* snipMate-disadvantages snipMate.txt /*snipMate-disadvantages* snipMate-expandtab snipMate.txt /*snipMate-expandtab* snipMate-features snipMate.txt /*snipMate-features* snipMate-filename snipMate.txt /*snipMate-filename* snipMate-indenting snipMate.txt /*snipMate-indenting* snipMate-placeholders snipMate.txt /*snipMate-placeholders* snipMate-remap snipMate.txt /*snipMate-remap* snipMate-settings snipMate.txt /*snipMate-settings* snipMate-usage snipMate.txt /*snipMate-usage* snipMate.txt snipMate.txt /*snipMate.txt* snippet snipMate.txt /*snippet* snippet-syntax snipMate.txt /*snippet-syntax* snippets snipMate.txt /*snippets* supertab supertab.txt /*supertab* supertab-completioncontexts supertab.txt /*supertab-completioncontexts* supertab-contextdefault supertab.txt /*supertab-contextdefault* supertab-contextdiscover supertab.txt /*supertab-contextdiscover* supertab-contextexample supertab.txt /*supertab-contextexample* supertab-contexttext supertab.txt /*supertab-contexttext* supertab-defaultcompletion supertab.txt /*supertab-defaultcompletion* supertab-duration supertab.txt /*supertab-duration* 
supertab-forwardbackward supertab.txt /*supertab-forwardbackward* supertab-intro supertab.txt /*supertab-intro* supertab-longesthighlight supertab.txt /*supertab-longesthighlight* supertab-mappingtabliteral supertab.txt /*supertab-mappingtabliteral* supertab-midword supertab.txt /*supertab-midword* supertab-options supertab.txt /*supertab-options* supertab-usage supertab.txt /*supertab-usage* supertab.txt supertab.txt /*supertab.txt* surround surround.txt /*surround* surround-author surround.txt /*surround-author* surround-customizing surround.txt /*surround-customizing* surround-issues surround.txt /*surround-issues* surround-mappings surround.txt /*surround-mappings* surround-replacements surround.txt /*surround-replacements* surround-targets surround.txt /*surround-targets* surround.txt surround.txt /*surround.txt* tComment-Installation tComment.txt /*tComment-Installation* tComment-Key-Bindings tComment.txt /*tComment-Key-Bindings* tComment-Usage tComment.txt /*tComment-Usage* tComment-commands tComment.txt /*tComment-commands* tComment.txt tComment.txt /*tComment.txt* -twitvim-identi.ca twitvim.txt /*twitvim-identi.ca* -twitvim.txt twitvim.txt /*twitvim.txt* -twitvim_api_root twitvim.txt /*twitvim_api_root* -twitvim_bitly_key twitvim.txt /*twitvim_bitly_key* -twitvim_bitly_user twitvim.txt /*twitvim_bitly_user* -twitvim_browser_cmd twitvim.txt /*twitvim_browser_cmd* -twitvim_cert_insecure twitvim.txt /*twitvim_cert_insecure* -twitvim_cligs_key twitvim.txt /*twitvim_cligs_key* -twitvim_count twitvim.txt /*twitvim_count* -twitvim_enable_perl twitvim.txt /*twitvim_enable_perl* -twitvim_enable_python twitvim.txt /*twitvim_enable_python* -twitvim_enable_ruby twitvim.txt /*twitvim_enable_ruby* -twitvim_enable_tcl twitvim.txt /*twitvim_enable_tcl* -twitvim_login twitvim.txt /*twitvim_login* -twitvim_login_b64 twitvim.txt /*twitvim_login_b64* -twitvim_old_retweet twitvim.txt /*twitvim_old_retweet* -twitvim_proxy twitvim.txt /*twitvim_proxy* -twitvim_proxy_login 
twitvim.txt /*twitvim_proxy_login* -twitvim_proxy_login_b64 twitvim.txt /*twitvim_proxy_login_b64* -twitvim_retweet_format twitvim.txt /*twitvim_retweet_format* -twitvim_trim_login twitvim.txt /*twitvim_trim_login* -twitvim_urlborg_key twitvim.txt /*twitvim_urlborg_key* vS surround.txt /*vS* vgS surround.txt /*vgS* vs surround.txt /*vs* xml-plugin-callbacks xml-plugin.txt /*xml-plugin-callbacks* xml-plugin-html xml-plugin.txt /*xml-plugin-html* xml-plugin-mappings xml-plugin.txt /*xml-plugin-mappings* xml-plugin-settings xml-plugin.txt /*xml-plugin-settings* xml-plugin.txt xml-plugin.txt /*xml-plugin.txt* yS surround.txt /*yS* ySS surround.txt /*ySS* ys surround.txt /*ys* yss surround.txt /*yss* diff --git a/.vimrc b/.vimrc index e84cdfc..b7255a7 100644 --- a/.vimrc +++ b/.vimrc @@ -1,491 +1,490 @@ " http://github.com/mitechie/pyvim " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove 
wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme underwater-mod " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme lucius set t_Co=256 endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display 
incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard -nmap <leader>y "*y +nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers 
to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " setup a custom dict for spelling " zg = add word to dict " zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " Hints for other movements " <c-w><c-r> rotate window to next spot " <c-w><c-x> swap window with current one " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . 
a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching set smartcase " if searching and search contains upper case, make case sensitive search nmap <silent> <C-N> :silent noh<CR> " Highlight end of line whitespace. highlight WhitespaceEOL ctermbg=red guibg=red match WhitespaceEOL /\s\+$/ " Clean all end of line extra whitespace with ,S :nnoremap <silent><leader>S :let _s=@/<Bar>:%s/\s\+$//e<Bar>:let @/=_s<Bar>:nohl<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " mako.vim " http://www.vim.org/scripts/script.php?script_id=2663 " syntax support for mako code " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " 
pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pylint " http://www.vim.org/scripts/script.php?script_id=891 " default config for underlines of syntax errors in gvim " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter source ~/.vim/twitvim.vim " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 function! CustomCodeAssistInsertMode() call RopeCodeAssistInsertMode() if pumvisible() return "\<C-L>\<Down>" else return '' endif endfunction function! 
TabWrapperComplete() let cursyn = synID(line('.'), col('.') - 1, 1) if pumvisible() return "\<C-Y>" endif if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 return "\<Tab>" else return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" endif endfunction inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() - " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
186dbf708369aa37fde091f66f95a5071187981f
Small cleanup of vimrc, dict words
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index 0a15e1f..7c73f48 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,3 +1,4 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =1 +let g:netrw_dirhist_cnt =2 let g:netrw_dirhist_1='/home/rharding/src/quipp/quipp/test' +let g:netrw_dirhist_2='/home/rharding/configs/pyvim' diff --git a/.vim/dict.add b/.vim/dict.add index 59a5b82..85a6848 100644 --- a/.vim/dict.add +++ b/.vim/dict.add @@ -1,7 +1,11 @@ sqlalchemy SQLAlchemy sessionmaker Metadata metadata metadata config +hostname +app +Ubuntu +blog diff --git a/.vimrc b/.vimrc index 3a0f934..e84cdfc 100644 --- a/.vimrc +++ b/.vimrc @@ -1,487 +1,491 @@ " http://github.com/mitechie/pyvim " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - 
search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme underwater-mod " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme lucius set t_Co=256 endif " ================================================== " Basic Settings " ================================================== -let mapleader="," " change the leader to be a comma vs slash +let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set 
visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard -nmap <leader>y "*y +nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap 
<leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " setup a custom dict for spelling " zg = add word to dict " zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l -imap <c-h> <c-w>h +map <c-h> <c-w>h " Hints for other movements " <c-w><c-r> rotate window to next spot " <c-w><c-x> swap window with current one " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . 
a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching set smartcase " if searching and search contains upper case, make case sensitive search nmap <silent> <C-N> :silent noh<CR> " Highlight end of line whitespace. highlight WhitespaceEOL ctermbg=red guibg=red match WhitespaceEOL /\s\+$/ " Clean all end of line extra whitespace with ,S :nnoremap <silent><leader>S :let _s=@/<Bar>:%s/\s\+$//e<Bar>:let @/=_s<Bar>:nohl<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 +" mako.vim +" http://www.vim.org/scripts/script.php?script_id=2663 +" syntax support for mako code + " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> 
" pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pylint " http://www.vim.org/scripts/script.php?script_id=891 " default config for underlines of syntax errors in gvim " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter source ~/.vim/twitvim.vim " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 function! CustomCodeAssistInsertMode() call RopeCodeAssistInsertMode() if pumvisible() return "\<C-L>\<Down>" else return '' endif endfunction function! 
TabWrapperComplete() let cursyn = synID(line('.'), col('.') - 1, 1) if pumvisible() return "\<C-Y>" endif if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 return "\<Tab>" else return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" endif endfunction inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
5d5f23512700e138bbaaec5058db476f1941b4b8
removed spaces
diff --git a/vimsync.sh b/vimsync.sh index 53e95b5..7402ac1 100755 --- a/vimsync.sh +++ b/vimsync.sh @@ -1,43 +1,43 @@ #! /bin/zsh # add to ~/bin: ln -s ~/configs/pyvim/vimsync.sh ~/bin/vimsync.sh -# Sync my vim config to a remote host specified. +# Sync my vim config to a remote host specified. # Steps: # 1. cd ~/configs/pyvim # 2. git co portable # 3. rsync -avz --delete -e ssh ~/configs/pyvim dc:~ # 4. ln -s vim/vim .vim # 5. ln -s vim/.vimrc .vimrc # Notes: # Using a git branch since some stuff I run locally won't be on remote hosts # This setups the files in a directory on the host called vim and then it # symlinks the .vimrc and vim directory to the user's home dir # This currently syncs the git stuff as well, at some point should probably do # some fancy export to a tmp dir and rsync those files over instead VIMCONF="/home/rharding/configs/pyvim" VIMBRANCH="portable" cd $VIMCONF git checkout $VIMBRANCH # get the hostname if [ $# -ne 1 ] -then +then echo "Usage: vimsync HOSTNAME" return 65 fi HOSTNAME=$1 rsync -avz --delete -e ssh ~/configs/pyvim $HOSTNAME:~/ ssh $HOSTNAME 'rm -r ~/.vimrc ~/.vim ; ln -s pyvim/vim .vim && ln -s pyvim/vimrc .vimrc' # make sure we restore our local vim config to master git checkout master # @todo move the above into a shell function, setup a list of hosts, and loop # through them to sync all hosts at once
mitechie/pyvim
b15880c616d088a5f52ce558ef586c19b8d21d4f
Add mako/xml support, spelling fun, and smartcase ftw
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index 7771a52..0a15e1f 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,2 +1,3 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =0 +let g:netrw_dirhist_cnt =1 +let g:netrw_dirhist_1='/home/rharding/src/quipp/quipp/test' diff --git a/.vim/colors/lucius.vim b/.vim/colors/lucius.vim index d73586e..10a53fd 100644 --- a/.vim/colors/lucius.vim +++ b/.vim/colors/lucius.vim @@ -1,350 +1,353 @@ " Vim color file " Maintainer: Jonathan Filip <[email protected]> " Last Modified: Mon Apr 19, 2010 10:24AM " Version: 3.5 " " GUI / 256 color terminal " " I started out trying to combine my favorite parts of other schemes and ended " up with this (oceandeep, moria, peaksea, wombat, zenburn). " " This file also tries to have descriptive comments for each higlighting group " so it is easy to understand what each part does. set background=dark hi clear if exists("syntax_on") syntax reset endif let colors_name="lucius" " Some other colors to save " blue: 3eb8e5 " green: 92d400 " c green: d5f876, cae682 " new blue: 002D62 " new gray: CCCCCC " Base color " ---------- hi Normal guifg=#e0e0e0 guibg=#202020 hi Normal ctermfg=253 ctermbg=235 " Comment Group " ------------- " any comment hi Comment guifg=#606060 gui=none hi Comment ctermfg=240 cterm=none " Constant Group " -------------- " any constant hi Constant guifg=#8cd0d3 gui=none hi Constant ctermfg=116 cterm=none " strings hi String guifg=#80c0d9 gui=none hi String ctermfg=110 cterm=none " character constant hi Character guifg=#80c0d9 gui=none hi Character ctermfg=110 cterm=none " numbers decimal/hex hi Number guifg=#8cd0d3 gui=none hi Number ctermfg=116 cterm=none " true, false hi Boolean guifg=#8cd0d3 gui=none hi Boolean ctermfg=116 cterm=none " float hi Float guifg=#8cd0d3 gui=none hi Float ctermfg=116 cterm=none " Identifier Group " ---------------- " any variable name hi Identifier guifg=#e6c080 gui=none hi Identifier ctermfg=180 cterm=none " function, method, class hi 
Function guifg=#e6c080 gui=none hi Function ctermfg=180 cterm=none " Statement Group " --------------- " any statement hi Statement guifg=#b3d38c gui=none hi Statement ctermfg=150 cterm=none " if, then, else hi Conditional guifg=#b3d38c gui=none hi Conditional ctermfg=150 cterm=none " try, catch, throw, raise hi Exception guifg=#b3d38c gui=none hi Exception ctermfg=150 cterm=none " for, while, do hi Repeat guifg=#b3d38c gui=none hi Repeat ctermfg=150 cterm=none " case, default hi Label guifg=#b3d38c gui=none hi Label ctermfg=150 cterm=none " sizeof, +, * hi Operator guifg=#b3d38c gui=none hi Operator ctermfg=150 cterm=none " any other keyword hi Keyword guifg=#b3d38c gui=none hi Keyword ctermfg=150 cterm=none " Preprocessor Group " ------------------ " generic preprocessor hi PreProc guifg=#e9dfaf gui=none hi PreProc ctermfg=187 cterm=none " #include hi Include guifg=#e9dfaf gui=none hi Include ctermfg=187 cterm=none " #define hi Define guifg=#e9dfaf gui=none hi Define ctermfg=187 cterm=none " same as define hi Macro guifg=#e9dfaf gui=none hi Macro ctermfg=187 cterm=none " #if, #else, #endif hi PreCondit guifg=#e9dfaf gui=none hi PreCondit ctermfg=187 cterm=none " Type Group " ---------- " int, long, char hi Type guifg=#93d6a9 gui=none hi Type ctermfg=115 cterm=none " static, register, volative hi StorageClass guifg=#93d6a9 gui=none hi StorageClass ctermfg=115 cterm=none " struct, union, enum hi Structure guifg=#93d6a9 gui=none hi Structure ctermfg=115 cterm=none " typedef hi Typedef guifg=#93d6a9 gui=none hi Typedef ctermfg=115 cterm=none " Special Group " ------------- " any special symbol hi Special guifg=#cca3b3 gui=none hi Special ctermfg=181 cterm=none " special character in a constant hi SpecialChar guifg=#cca3b3 gui=none hi SpecialChar ctermfg=181 cterm=none " things you can CTRL-] hi Tag guifg=#cca3b3 gui=none hi Tag ctermfg=181 cterm=none " character that needs attention hi Delimiter guifg=#cca3b3 gui=none hi Delimiter ctermfg=181 cterm=none " special 
things inside a comment hi SpecialComment guifg=#cca3b3 gui=none hi SpecialComment ctermfg=181 cterm=none " debugging statements hi Debug guifg=#cca3b3 guibg=NONE gui=none hi Debug ctermfg=181 ctermbg=NONE cterm=none " Underlined Group " ---------------- " text that stands out, html links hi Underlined guifg=fg gui=underline hi Underlined ctermfg=fg cterm=underline " Ignore Group " ------------ " left blank, hidden hi Ignore guifg=bg hi Ignore ctermfg=bg " Error Group " ----------- " any erroneous construct hi Error guifg=#e37170 guibg=#432323 gui=none hi Error ctermfg=167 ctermbg=52 cterm=none " Todo Group " ---------- " todo, fixme, note, xxx hi Todo guifg=#efef8f guibg=NONE gui=underline hi Todo ctermfg=228 ctermbg=NONE cterm=underline " Spelling " -------- " word not recognized hi SpellBad guisp=#ee0000 gui=undercurl hi SpellBad ctermbg=196 cterm=undercurl -hi SpellBad ctermfg=225 ctermbg=196 cterm=undercurl " word not capitalized hi SpellCap guisp=#eeee00 gui=undercurl hi SpellCap ctermbg=226 cterm=undercurl " rare word hi SpellRare guisp=#ffa500 gui=undercurl hi SpellRare ctermbg=214 cterm=undercurl " wrong spelling for selected region hi SpellLocal guisp=#ffa500 gui=undercurl hi SpellLocal ctermbg=214 cterm=undercurl " Cursor " ------ " character under the cursor hi Cursor guifg=bg guibg=#a3e3ed hi Cursor ctermfg=bg ctermbg=153 " like cursor, but used when in IME mode hi CursorIM guifg=bg guibg=#96cdcd hi CursorIM ctermfg=bg ctermbg=116 " cursor column hi CursorColumn guifg=NONE guibg=#404448 gui=none hi CursorColumn ctermfg=NONE ctermbg=236 cterm=none " cursor line/row hi CursorLine gui=NONE guibg=#404448 gui=none hi CursorLine cterm=NONE ctermbg=236 cterm=none " Misc " ---- " directory names and other special names in listings hi Directory guifg=#c0e0b0 gui=none hi Directory ctermfg=151 cterm=none " error messages on the command line hi ErrorMsg guifg=#ee0000 guibg=NONE gui=none hi ErrorMsg ctermfg=196 ctermbg=NONE cterm=none " column separating vertically 
split windows hi VertSplit guifg=#777777 guibg=#363946 gui=none hi VertSplit ctermfg=242 ctermbg=237 cterm=none " columns where signs are displayed (used in IDEs) hi SignColumn guifg=#9fafaf guibg=#181818 gui=none hi SignColumn ctermfg=145 ctermbg=233 cterm=none " line numbers hi LineNr guifg=#818698 guibg=#363946 hi LineNr ctermfg=245 ctermbg=237 " match parenthesis, brackets hi MatchParen guifg=#00ff00 guibg=NONE gui=bold hi MatchParen ctermfg=46 ctermbg=NONE cterm=bold " the 'more' prompt when output takes more than one line hi MoreMsg guifg=#2e8b57 gui=none hi MoreMsg ctermfg=29 cterm=none " text showing what mode you are in hi ModeMsg guifg=#76d5f8 guibg=NONE gui=none hi ModeMsg ctermfg=117 ctermbg=NONE cterm=none " the '~' and '@' and showbreak, '>' double wide char doesn't fit on line hi NonText guifg=#404040 gui=none hi NonText ctermfg=238 cterm=none " the hit-enter prompt (show more output) and yes/no questions hi Question guifg=fg gui=none hi Question ctermfg=fg cterm=none " meta and special keys used with map, unprintable characters hi SpecialKey guifg=#405060 hi SpecialKey ctermfg=239 " titles for output from :set all, :autocmd, etc hi Title guifg=#62bdde gui=none hi Title ctermfg=74 cterm=none "hi Title guifg=#5ec8e5 gui=none " warning messages hi WarningMsg guifg=#e5786d gui=none hi WarningMsg ctermfg=173 cterm=none " current match in the wildmenu completion hi WildMenu guifg=#cae682 guibg=#363946 gui=bold,underline hi WildMenu ctermfg=16 ctermbg=186 cterm=bold +" color column highlighting +hi ColorColumn guifg=NONE guibg=#403630 gui=none +hi ColorColumn ctermfg=NONE ctermbg=94 cterm=none " Diff " ---- " added line hi DiffAdd guifg=#80a090 guibg=#313c36 gui=none hi DiffAdd ctermfg=fg ctermbg=22 cterm=none " changed line hi DiffChange guifg=NONE guibg=#4a343a gui=none hi DiffChange ctermfg=fg ctermbg=52 cterm=none " deleted line hi DiffDelete guifg=#6c6661 guibg=#3c3631 gui=none hi DiffDelete ctermfg=fg ctermbg=58 cterm=none " changed text within line 
hi DiffText guifg=#f05060 guibg=#4a343a gui=bold -hi DiffText ctermfg=203 ctermbg=52 cterm=bold +hi DiffText ctermfg=203 ctermbg=52 cterm=bold " Folds " ----- " line used for closed folds "hi Folded guifg=#91d6f8 guibg=#363946 gui=none "hi Folded ctermfg=117 ctermbg=238 cterm=none hi Folded guifg=#d0e0f0 guibg=#202020 gui=none hi Folded ctermfg=117 ctermbg=235 cterm=none " column on side used to indicated open and closed folds hi FoldColumn guifg=#91d6f8 guibg=#363946 gui=none hi FoldColumn guifg=#c0c0d0 guibg=#363946 gui=none hi FoldColumn ctermfg=117 ctermbg=238 cterm=none " Search " ------ " highlight incremental search text; also highlight text replaced with :s///c hi IncSearch guifg=#66ffff gui=reverse hi IncSearch ctermfg=87 cterm=reverse " hlsearch (last search pattern), also used for quickfix hi Search guibg=#ffaa33 gui=none hi Search ctermbg=214 cterm=none " Popup Menu " ---------- " normal item in popup hi Pmenu guifg=#e0e0e0 guibg=#303840 gui=none hi Pmenu ctermfg=253 ctermbg=233 cterm=none " selected item in popup hi PmenuSel guifg=#cae682 guibg=#505860 gui=none hi PmenuSel ctermfg=186 ctermbg=237 cterm=none " scrollbar in popup hi PMenuSbar guibg=#505860 gui=none hi PMenuSbar ctermbg=59 cterm=none " thumb of the scrollbar in the popup hi PMenuThumb guibg=#808890 gui=none hi PMenuThumb ctermbg=102 cterm=none " Status Line " ----------- " status line for current window hi StatusLine guifg=#e0e0e0 guibg=#363946 gui=bold hi StatusLine ctermfg=254 ctermbg=237 cterm=bold " status line for non-current windows hi StatusLineNC guifg=#767986 guibg=#363946 gui=none hi StatusLineNC ctermfg=244 ctermbg=237 cterm=none " Tab Lines " --------- " tab pages line, not active tab page label hi TabLine guifg=#b6bf98 guibg=#363946 gui=none hi TabLine ctermfg=244 ctermbg=236 cterm=none " tab pages line, where there are no labels hi TabLineFill guifg=#cfcfaf guibg=#363946 gui=none hi TabLineFill ctermfg=187 ctermbg=236 cterm=none " tab pages line, active tab page label hi 
TabLineSel guifg=#efefef guibg=#414658 gui=bold hi TabLineSel ctermfg=254 ctermbg=236 cterm=bold " Visual " ------ " visual mode selection hi Visual guifg=NONE guibg=#364458 hi Visual ctermfg=NONE ctermbg=24 " visual mode selection when vim is not owning the selection (x11 only) hi VisualNOS guifg=fg gui=underline hi VisualNOS ctermfg=fg cterm=underline + diff --git a/.vim/dict.add b/.vim/dict.add index 6fc3d48..59a5b82 100644 --- a/.vim/dict.add +++ b/.vim/dict.add @@ -1,5 +1,7 @@ sqlalchemy SQLAlchemy sessionmaker Metadata metadata +metadata +config diff --git a/.vim/doc/tags b/.vim/doc/tags index fac70cc..02519d8 100644 --- a/.vim/doc/tags +++ b/.vim/doc/tags @@ -1,329 +1,348 @@ 'NERDChristmasTree' NERD_tree.txt /*'NERDChristmasTree'* 'NERDTreeAutoCenter' NERD_tree.txt /*'NERDTreeAutoCenter'* 'NERDTreeAutoCenterThreshold' NERD_tree.txt /*'NERDTreeAutoCenterThreshold'* 'NERDTreeBookmarksFile' NERD_tree.txt /*'NERDTreeBookmarksFile'* 'NERDTreeCaseSensitiveSort' NERD_tree.txt /*'NERDTreeCaseSensitiveSort'* 'NERDTreeChDirMode' NERD_tree.txt /*'NERDTreeChDirMode'* 'NERDTreeHighlightCursorline' NERD_tree.txt /*'NERDTreeHighlightCursorline'* 'NERDTreeHijackNetrw' NERD_tree.txt /*'NERDTreeHijackNetrw'* 'NERDTreeIgnore' NERD_tree.txt /*'NERDTreeIgnore'* 'NERDTreeMouseMode' NERD_tree.txt /*'NERDTreeMouseMode'* 'NERDTreeQuitOnOpen' NERD_tree.txt /*'NERDTreeQuitOnOpen'* 'NERDTreeShowBookmarks' NERD_tree.txt /*'NERDTreeShowBookmarks'* 'NERDTreeShowFiles' NERD_tree.txt /*'NERDTreeShowFiles'* 'NERDTreeShowHidden' NERD_tree.txt /*'NERDTreeShowHidden'* 'NERDTreeShowLineNumbers' NERD_tree.txt /*'NERDTreeShowLineNumbers'* 'NERDTreeSortOrder' NERD_tree.txt /*'NERDTreeSortOrder'* 'NERDTreeStatusline' NERD_tree.txt /*'NERDTreeStatusline'* 'NERDTreeWinPos' NERD_tree.txt /*'NERDTreeWinPos'* 'NERDTreeWinSize' NERD_tree.txt /*'NERDTreeWinSize'* 'loaded_nerd_tree' NERD_tree.txt /*'loaded_nerd_tree'* 'snippets' snipMate.txt /*'snippets'* .snippet snipMate.txt /*.snippet* .snippets 
snipMate.txt /*.snippets* :ABitLy twitvim.txt /*:ABitLy* :ACligs twitvim.txt /*:ACligs* :AIsGd twitvim.txt /*:AIsGd* :AMetamark twitvim.txt /*:AMetamark* :ASnipurl twitvim.txt /*:ASnipurl* :ATinyURL twitvim.txt /*:ATinyURL* :ATrim twitvim.txt /*:ATrim* :ATweetburner twitvim.txt /*:ATweetburner* :AUrlBorg twitvim.txt /*:AUrlBorg* :AZima twitvim.txt /*:AZima* :BPosttoTwitter twitvim.txt /*:BPosttoTwitter* :BackTwitter twitvim.txt /*:BackTwitter* :BitLy twitvim.txt /*:BitLy* :CPosttoTwitter twitvim.txt /*:CPosttoTwitter* :Cligs twitvim.txt /*:Cligs* :DMSentTwitter twitvim.txt /*:DMSentTwitter* :DMTwitter twitvim.txt /*:DMTwitter* :ForwardTwitter twitvim.txt /*:ForwardTwitter* :FriendsTwitter twitvim.txt /*:FriendsTwitter* :IsGd twitvim.txt /*:IsGd* :ListTwitter twitvim.txt /*:ListTwitter* :LocationTwitter twitvim.txt /*:LocationTwitter* :Metamark twitvim.txt /*:Metamark* :NERDTree NERD_tree.txt /*:NERDTree* :NERDTreeClose NERD_tree.txt /*:NERDTreeClose* :NERDTreeFind NERD_tree.txt /*:NERDTreeFind* :NERDTreeFromBookmark NERD_tree.txt /*:NERDTreeFromBookmark* :NERDTreeMirror NERD_tree.txt /*:NERDTreeMirror* :NERDTreeToggle NERD_tree.txt /*:NERDTreeToggle* :NextTwitter twitvim.txt /*:NextTwitter* :PBitLy twitvim.txt /*:PBitLy* :PCligs twitvim.txt /*:PCligs* :PIsGd twitvim.txt /*:PIsGd* :PMetamark twitvim.txt /*:PMetamark* :PSnipurl twitvim.txt /*:PSnipurl* :PTinyURL twitvim.txt /*:PTinyURL* :PTrim twitvim.txt /*:PTrim* :PTweetburner twitvim.txt /*:PTweetburner* :PUrlBorg twitvim.txt /*:PUrlBorg* :PZima twitvim.txt /*:PZima* :PosttoTwitter twitvim.txt /*:PosttoTwitter* :PreviousTwitter twitvim.txt /*:PreviousTwitter* :ProfileTwitter twitvim.txt /*:ProfileTwitter* :PublicTwitter twitvim.txt /*:PublicTwitter* :RateLimitTwitter twitvim.txt /*:RateLimitTwitter* :RefreshTwitter twitvim.txt /*:RefreshTwitter* :RepliesTwitter twitvim.txt /*:RepliesTwitter* :ResetLoginTwitter twitvim.txt /*:ResetLoginTwitter* :RetweetedByMeTwitter twitvim.txt /*:RetweetedByMeTwitter* 
:RetweetedToMeTwitter twitvim.txt /*:RetweetedToMeTwitter* :SearchTwitter twitvim.txt /*:SearchTwitter* :SendDMTwitter twitvim.txt /*:SendDMTwitter* :SetLoginTwitter twitvim.txt /*:SetLoginTwitter* :Snipurl twitvim.txt /*:Snipurl* :TComment tComment.txt /*:TComment* :TCommentAs tComment.txt /*:TCommentAs* :TCommentBlock tComment.txt /*:TCommentBlock* :TCommentInline tComment.txt /*:TCommentInline* :TCommentRight tComment.txt /*:TCommentRight* :TinyURL twitvim.txt /*:TinyURL* :Trim twitvim.txt /*:Trim* :Tweetburner twitvim.txt /*:Tweetburner* :UrlBorg twitvim.txt /*:UrlBorg* :UserTwitter twitvim.txt /*:UserTwitter* :Zima twitvim.txt /*:Zima* ExtractSnips() snipMate.txt /*ExtractSnips()* ExtractSnipsFile() snipMate.txt /*ExtractSnipsFile()* Filename() snipMate.txt /*Filename()* NERDTree NERD_tree.txt /*NERDTree* NERDTree-? NERD_tree.txt /*NERDTree-?* NERDTree-A NERD_tree.txt /*NERDTree-A* NERDTree-B NERD_tree.txt /*NERDTree-B* NERDTree-C NERD_tree.txt /*NERDTree-C* NERDTree-C-J NERD_tree.txt /*NERDTree-C-J* NERDTree-C-K NERD_tree.txt /*NERDTree-C-K* NERDTree-D NERD_tree.txt /*NERDTree-D* NERDTree-F NERD_tree.txt /*NERDTree-F* NERDTree-I NERD_tree.txt /*NERDTree-I* NERDTree-J NERD_tree.txt /*NERDTree-J* NERDTree-K NERD_tree.txt /*NERDTree-K* NERDTree-O NERD_tree.txt /*NERDTree-O* NERDTree-P NERD_tree.txt /*NERDTree-P* NERDTree-R NERD_tree.txt /*NERDTree-R* NERDTree-T NERD_tree.txt /*NERDTree-T* NERDTree-U NERD_tree.txt /*NERDTree-U* NERDTree-X NERD_tree.txt /*NERDTree-X* NERDTree-cd NERD_tree.txt /*NERDTree-cd* NERDTree-contents NERD_tree.txt /*NERDTree-contents* NERDTree-e NERD_tree.txt /*NERDTree-e* NERDTree-f NERD_tree.txt /*NERDTree-f* NERDTree-gi NERD_tree.txt /*NERDTree-gi* NERDTree-go NERD_tree.txt /*NERDTree-go* NERDTree-gs NERD_tree.txt /*NERDTree-gs* NERDTree-i NERD_tree.txt /*NERDTree-i* NERDTree-m NERD_tree.txt /*NERDTree-m* NERDTree-o NERD_tree.txt /*NERDTree-o* NERDTree-p NERD_tree.txt /*NERDTree-p* NERDTree-q NERD_tree.txt /*NERDTree-q* NERDTree-r 
NERD_tree.txt /*NERDTree-r* NERDTree-s NERD_tree.txt /*NERDTree-s* NERDTree-t NERD_tree.txt /*NERDTree-t* NERDTree-u NERD_tree.txt /*NERDTree-u* NERDTree-x NERD_tree.txt /*NERDTree-x* NERDTreeAPI NERD_tree.txt /*NERDTreeAPI* NERDTreeAbout NERD_tree.txt /*NERDTreeAbout* NERDTreeAddKeyMap() NERD_tree.txt /*NERDTreeAddKeyMap()* NERDTreeAddMenuItem() NERD_tree.txt /*NERDTreeAddMenuItem()* NERDTreeAddMenuSeparator() NERD_tree.txt /*NERDTreeAddMenuSeparator()* NERDTreeAddSubmenu() NERD_tree.txt /*NERDTreeAddSubmenu()* NERDTreeBookmarkCommands NERD_tree.txt /*NERDTreeBookmarkCommands* NERDTreeBookmarkTable NERD_tree.txt /*NERDTreeBookmarkTable* NERDTreeBookmarks NERD_tree.txt /*NERDTreeBookmarks* NERDTreeChangelog NERD_tree.txt /*NERDTreeChangelog* NERDTreeCredits NERD_tree.txt /*NERDTreeCredits* NERDTreeFunctionality NERD_tree.txt /*NERDTreeFunctionality* NERDTreeGlobalCommands NERD_tree.txt /*NERDTreeGlobalCommands* NERDTreeInvalidBookmarks NERD_tree.txt /*NERDTreeInvalidBookmarks* NERDTreeKeymapAPI NERD_tree.txt /*NERDTreeKeymapAPI* NERDTreeLicense NERD_tree.txt /*NERDTreeLicense* NERDTreeMappings NERD_tree.txt /*NERDTreeMappings* NERDTreeMenu NERD_tree.txt /*NERDTreeMenu* NERDTreeMenuAPI NERD_tree.txt /*NERDTreeMenuAPI* NERDTreeOptionDetails NERD_tree.txt /*NERDTreeOptionDetails* NERDTreeOptionSummary NERD_tree.txt /*NERDTreeOptionSummary* NERDTreeOptions NERD_tree.txt /*NERDTreeOptions* NERDTreeRender() NERD_tree.txt /*NERDTreeRender()* NERD_tree.txt NERD_tree.txt /*NERD_tree.txt* ResetSnippets() snipMate.txt /*ResetSnippets()* TCommentDefineType() tComment.txt /*TCommentDefineType()* TwitVim twitvim.txt /*TwitVim* TwitVim-A-d twitvim.txt /*TwitVim-A-d* TwitVim-A-g twitvim.txt /*TwitVim-A-g* TwitVim-A-r twitvim.txt /*TwitVim-A-r* TwitVim-A-t twitvim.txt /*TwitVim-A-t* TwitVim-C-PageDown twitvim.txt /*TwitVim-C-PageDown* TwitVim-C-PageUp twitvim.txt /*TwitVim-C-PageUp* TwitVim-C-i twitvim.txt /*TwitVim-C-i* TwitVim-C-o twitvim.txt /*TwitVim-C-o* TwitVim-C-t 
twitvim.txt /*TwitVim-C-t* TwitVim-Leader-@ twitvim.txt /*TwitVim-Leader-@* TwitVim-Leader-C-r twitvim.txt /*TwitVim-Leader-C-r* TwitVim-Leader-Leader twitvim.txt /*TwitVim-Leader-Leader* TwitVim-Leader-S-r twitvim.txt /*TwitVim-Leader-S-r* TwitVim-Leader-X twitvim.txt /*TwitVim-Leader-X* TwitVim-Leader-d twitvim.txt /*TwitVim-Leader-d* TwitVim-Leader-e twitvim.txt /*TwitVim-Leader-e* TwitVim-Leader-g twitvim.txt /*TwitVim-Leader-g* TwitVim-Leader-p twitvim.txt /*TwitVim-Leader-p* TwitVim-Leader-r twitvim.txt /*TwitVim-Leader-r* TwitVim-LongURL twitvim.txt /*TwitVim-LongURL* TwitVim-add twitvim.txt /*TwitVim-add* TwitVim-cURL twitvim.txt /*TwitVim-cURL* TwitVim-contents twitvim.txt /*TwitVim-contents* TwitVim-credits twitvim.txt /*TwitVim-credits* TwitVim-delete twitvim.txt /*TwitVim-delete* TwitVim-direct-message twitvim.txt /*TwitVim-direct-message* TwitVim-goto twitvim.txt /*TwitVim-goto* TwitVim-highlight twitvim.txt /*TwitVim-highlight* TwitVim-history twitvim.txt /*TwitVim-history* TwitVim-hotkeys twitvim.txt /*TwitVim-hotkeys* TwitVim-inreplyto twitvim.txt /*TwitVim-inreplyto* TwitVim-install twitvim.txt /*TwitVim-install* TwitVim-intro twitvim.txt /*TwitVim-intro* TwitVim-line-length twitvim.txt /*TwitVim-line-length* TwitVim-login-base64 twitvim.txt /*TwitVim-login-base64* TwitVim-manual twitvim.txt /*TwitVim-manual* TwitVim-mappings twitvim.txt /*TwitVim-mappings* TwitVim-next twitvim.txt /*TwitVim-next* TwitVim-non-cURL twitvim.txt /*TwitVim-non-cURL* TwitVim-previous twitvim.txt /*TwitVim-previous* TwitVim-profile twitvim.txt /*TwitVim-profile* TwitVim-refresh twitvim.txt /*TwitVim-refresh* TwitVim-reply twitvim.txt /*TwitVim-reply* TwitVim-reply-all twitvim.txt /*TwitVim-reply-all* TwitVim-retweet twitvim.txt /*TwitVim-retweet* TwitVim-ssl twitvim.txt /*TwitVim-ssl* TwitVim-ssl-curl twitvim.txt /*TwitVim-ssl-curl* TwitVim-ssl-perl twitvim.txt /*TwitVim-ssl-perl* TwitVim-ssl-python twitvim.txt /*TwitVim-ssl-python* TwitVim-ssl-ruby twitvim.txt 
/*TwitVim-ssl-ruby* TwitVim-switch twitvim.txt /*TwitVim-switch* TwitVim-timeline-commands twitvim.txt /*TwitVim-timeline-commands* TwitVim-tips twitvim.txt /*TwitVim-tips* TwitVim-update-commands twitvim.txt /*TwitVim-update-commands* TwitVim-utility twitvim.txt /*TwitVim-utility* +cs surround.txt /*cs* drawit DrawIt.txt /*drawit* drawit-a DrawIt.txt /*drawit-a* drawit-b DrawIt.txt /*drawit-b* drawit-brush DrawIt.txt /*drawit-brush* drawit-c DrawIt.txt /*drawit-c* drawit-contents DrawIt.txt /*drawit-contents* drawit-drawing DrawIt.txt /*drawit-drawing* drawit-e DrawIt.txt /*drawit-e* drawit-erase DrawIt.txt /*drawit-erase* drawit-example DrawIt.txt /*drawit-example* drawit-f DrawIt.txt /*drawit-f* drawit-history DrawIt.txt /*drawit-history* drawit-l DrawIt.txt /*drawit-l* drawit-manual DrawIt.txt /*drawit-manual* drawit-modes DrawIt.txt /*drawit-modes* drawit-move DrawIt.txt /*drawit-move* drawit-moving DrawIt.txt /*drawit-moving* drawit-options DrawIt.txt /*drawit-options* drawit-protect DrawIt.txt /*drawit-protect* drawit-s DrawIt.txt /*drawit-s* drawit-setbrush DrawIt.txt /*drawit-setbrush* drawit-setdrawit DrawIt.txt /*drawit-setdrawit* drawit-start DrawIt.txt /*drawit-start* drawit-stop DrawIt.txt /*drawit-stop* drawit-usage DrawIt.txt /*drawit-usage* drawit-visblock DrawIt.txt /*drawit-visblock* drawit.txt DrawIt.txt /*drawit.txt* +ds surround.txt /*ds* g:SuperTabCompletionContexts supertab.txt /*g:SuperTabCompletionContexts* g:SuperTabContextDefaultCompletionType supertab.txt /*g:SuperTabContextDefaultCompletionType* g:SuperTabDefaultCompletionType supertab.txt /*g:SuperTabDefaultCompletionType* g:SuperTabLongestHighlight supertab.txt /*g:SuperTabLongestHighlight* g:SuperTabMappingBackward supertab.txt /*g:SuperTabMappingBackward* g:SuperTabMappingForward supertab.txt /*g:SuperTabMappingForward* g:SuperTabMappingTabLiteral supertab.txt /*g:SuperTabMappingTabLiteral* g:SuperTabMidWordCompletion supertab.txt /*g:SuperTabMidWordCompletion* 
g:SuperTabRetainCompletionDuration supertab.txt /*g:SuperTabRetainCompletionDuration* g:drawit_insertmode DrawIt.txt /*g:drawit_insertmode* g:snippets_dir snipMate.txt /*g:snippets_dir* g:snips_author snipMate.txt /*g:snips_author* g:tcommentMapLeader1 tComment.txt /*g:tcommentMapLeader1* g:tcommentMapLeader2 tComment.txt /*g:tcommentMapLeader2* g:tcommentMapLeaderOp1 tComment.txt /*g:tcommentMapLeaderOp1* g:tcommentMapLeaderOp2 tComment.txt /*g:tcommentMapLeaderOp2* g:tcommentOpModeExtra tComment.txt /*g:tcommentOpModeExtra* hl-twitterLink twitvim.txt /*hl-twitterLink* hl-twitterReply twitvim.txt /*hl-twitterReply* hl-twitterTime twitvim.txt /*hl-twitterTime* hl-twitterTitle twitvim.txt /*hl-twitterTitle* hl-twitterUser twitvim.txt /*hl-twitterUser* +i_CTRL-G_S surround.txt /*i_CTRL-G_S* +i_CTRL-G_s surround.txt /*i_CTRL-G_s* i_CTRL-R_<Tab> snipMate.txt /*i_CTRL-R_<Tab>* list-snippets snipMate.txt /*list-snippets* multi_snip snipMate.txt /*multi_snip* snipMate snipMate.txt /*snipMate* snipMate-$# snipMate.txt /*snipMate-$#* snipMate-${#:} snipMate.txt /*snipMate-${#:}* snipMate-${#} snipMate.txt /*snipMate-${#}* snipMate-author snipMate.txt /*snipMate-author* snipMate-commands snipMate.txt /*snipMate-commands* snipMate-contact snipMate.txt /*snipMate-contact* snipMate-description snipMate.txt /*snipMate-description* snipMate-disadvantages snipMate.txt /*snipMate-disadvantages* snipMate-expandtab snipMate.txt /*snipMate-expandtab* snipMate-features snipMate.txt /*snipMate-features* snipMate-filename snipMate.txt /*snipMate-filename* snipMate-indenting snipMate.txt /*snipMate-indenting* snipMate-placeholders snipMate.txt /*snipMate-placeholders* snipMate-remap snipMate.txt /*snipMate-remap* snipMate-settings snipMate.txt /*snipMate-settings* snipMate-usage snipMate.txt /*snipMate-usage* snipMate.txt snipMate.txt /*snipMate.txt* snippet snipMate.txt /*snippet* snippet-syntax snipMate.txt /*snippet-syntax* snippets snipMate.txt /*snippets* supertab supertab.txt 
/*supertab* supertab-completioncontexts supertab.txt /*supertab-completioncontexts* supertab-contextdefault supertab.txt /*supertab-contextdefault* supertab-contextdiscover supertab.txt /*supertab-contextdiscover* supertab-contextexample supertab.txt /*supertab-contextexample* supertab-contexttext supertab.txt /*supertab-contexttext* supertab-defaultcompletion supertab.txt /*supertab-defaultcompletion* supertab-duration supertab.txt /*supertab-duration* supertab-forwardbackward supertab.txt /*supertab-forwardbackward* supertab-intro supertab.txt /*supertab-intro* supertab-longesthighlight supertab.txt /*supertab-longesthighlight* supertab-mappingtabliteral supertab.txt /*supertab-mappingtabliteral* supertab-midword supertab.txt /*supertab-midword* supertab-options supertab.txt /*supertab-options* supertab-usage supertab.txt /*supertab-usage* supertab.txt supertab.txt /*supertab.txt* +surround surround.txt /*surround* +surround-author surround.txt /*surround-author* +surround-customizing surround.txt /*surround-customizing* +surround-issues surround.txt /*surround-issues* +surround-mappings surround.txt /*surround-mappings* +surround-replacements surround.txt /*surround-replacements* +surround-targets surround.txt /*surround-targets* +surround.txt surround.txt /*surround.txt* tComment-Installation tComment.txt /*tComment-Installation* tComment-Key-Bindings tComment.txt /*tComment-Key-Bindings* tComment-Usage tComment.txt /*tComment-Usage* tComment-commands tComment.txt /*tComment-commands* tComment.txt tComment.txt /*tComment.txt* twitvim-identi.ca twitvim.txt /*twitvim-identi.ca* twitvim.txt twitvim.txt /*twitvim.txt* twitvim_api_root twitvim.txt /*twitvim_api_root* twitvim_bitly_key twitvim.txt /*twitvim_bitly_key* twitvim_bitly_user twitvim.txt /*twitvim_bitly_user* twitvim_browser_cmd twitvim.txt /*twitvim_browser_cmd* twitvim_cert_insecure twitvim.txt /*twitvim_cert_insecure* twitvim_cligs_key twitvim.txt /*twitvim_cligs_key* twitvim_count twitvim.txt 
/*twitvim_count* twitvim_enable_perl twitvim.txt /*twitvim_enable_perl* twitvim_enable_python twitvim.txt /*twitvim_enable_python* twitvim_enable_ruby twitvim.txt /*twitvim_enable_ruby* twitvim_enable_tcl twitvim.txt /*twitvim_enable_tcl* twitvim_login twitvim.txt /*twitvim_login* twitvim_login_b64 twitvim.txt /*twitvim_login_b64* twitvim_old_retweet twitvim.txt /*twitvim_old_retweet* twitvim_proxy twitvim.txt /*twitvim_proxy* twitvim_proxy_login twitvim.txt /*twitvim_proxy_login* twitvim_proxy_login_b64 twitvim.txt /*twitvim_proxy_login_b64* twitvim_retweet_format twitvim.txt /*twitvim_retweet_format* twitvim_trim_login twitvim.txt /*twitvim_trim_login* twitvim_urlborg_key twitvim.txt /*twitvim_urlborg_key* +vS surround.txt /*vS* +vgS surround.txt /*vgS* +vs surround.txt /*vs* xml-plugin-callbacks xml-plugin.txt /*xml-plugin-callbacks* xml-plugin-html xml-plugin.txt /*xml-plugin-html* xml-plugin-mappings xml-plugin.txt /*xml-plugin-mappings* xml-plugin-settings xml-plugin.txt /*xml-plugin-settings* xml-plugin.txt xml-plugin.txt /*xml-plugin.txt* +yS surround.txt /*yS* +ySS surround.txt /*ySS* +ys surround.txt /*ys* +yss surround.txt /*yss* diff --git a/.vim/ftplugin/mako.vim b/.vim/ftplugin/mako.vim new file mode 100755 index 0000000..74e98e8 --- /dev/null +++ b/.vim/ftplugin/mako.vim @@ -0,0 +1,953 @@ +" Vim script file vim600:fdm=marker: +" FileType: XML +" Author: Devin Weaver <suki (at) tritarget.com> +" Maintainer: Devin Weaver <suki (at) tritarget.com> +" Last Change: Tue Apr 07 11:12:08 EDT 2009 +" Version: 1.84 +" Location: http://www.vim.org/scripts/script.php?script_id=301 +" Licence: This program is free software; you can redistribute it +" and/or modify it under the terms of the GNU General Public +" License. See http://www.gnu.org/copyleft/gpl.txt +" Credits: Brad Phelan <bphelan (at) mathworks.co.uk> for completing +" tag matching and visual tag completion. 
+" Ma, Xiangjiang <Xiangjiang.Ma (at) broadvision.com> for +" pointing out VIM 6.0 map <buffer> feature. +" Luc Hermitte <hermitte (at) free.fr> for testing the self +" install documentation code and providing good bug fixes. +" Guo-Peng Wen for the self install documentation code. +" Shawn Boles <ickybots (at) gmail.com> for fixing the +" <Leader>x cancelation bug. +" Martijn van der Kwast <[email protected]> for patching +" problems with multi-languages (XML and PHP). + +" This script provides some convenience when editing XML (and some SGML) +" formated documents. + +" Section: Documentation +" ---------------------- +" +" Documentation should be available by ":help xml-plugin" command, once the +" script has been copied in you .vim/plugin directory. +" +" You still can read the documentation at the end of this file. Locate it by +" searching the "xml-plugin" string (and set ft=help to have +" appropriate syntaxic coloration). + +" Note: If you used the 5.x version of this file (xmledit.vim) you'll need to +" comment out the section where you called it since it is no longer used in +" version 6.x. + +" TODO: Revamp ParseTag to pull appart a tag a rebuild it properly. +" a tag like: < test nowrap testatt=foo > +" should be fixed to: <test nowrap="nowrap" testatt="foo"></test> + +"============================================================================== + +" Only do this when not done yet for this buffer +if exists("b:did_ftplugin") + finish +endif +" sboles, init these variables so vim doesn't complain on wrap cancel +let b:last_wrap_tag_used = "" +let b:last_wrap_atts_used = "" + +" WrapTag -> Places an XML tag around a visual selection. {{{1 +" Brad Phelan: Wrap the argument in an XML tag +" Added nice GUI support to the dialogs. +" Rewrote function to implement new algorythem that addresses several bugs. 
+if !exists("*s:WrapTag") +function s:WrapTag(text) + if (line(".") < line("'<")) + let insert_cmd = "o" + elseif (col(".") < col("'<")) + let insert_cmd = "a" + else + let insert_cmd = "i" + endif + if strlen(a:text) > 10 + let input_text = strpart(a:text, 0, 10) . '...' + else + let input_text = a:text + endif + let wraptag = inputdialog('Tag to wrap "' . input_text . '" : ') + if strlen(wraptag)==0 + if strlen(b:last_wrap_tag_used)==0 + undo + return + endif + let wraptag = b:last_wrap_tag_used + let atts = b:last_wrap_atts_used + else + let atts = inputdialog('Attributes in <' . wraptag . '> : ') + endif + if (visualmode() ==# 'V') + let text = strpart(a:text,0,strlen(a:text)-1) + if (insert_cmd ==# "o") + let eol_cmd = "" + else + let eol_cmd = "\<Cr>" + endif + else + let text = a:text + let eol_cmd = "" + endif + if strlen(atts)==0 + let text = "<".wraptag.">".text."</".wraptag.">" + let b:last_wrap_tag_used = wraptag + let b:last_wrap_atts_used = "" + else + let text = "<".wraptag." ".atts.">".text."</".wraptag.">" + let b:last_wrap_tag_used = wraptag + let b:last_wrap_atts_used = atts + endif + execute "normal! ".insert_cmd.text.eol_cmd +endfunction +endif + +" NewFileXML -> Inserts <?xml?> at top of new file. {{{1 +if !exists("*s:NewFileXML") +function s:NewFileXML( ) + " Where is g:did_xhtmlcf_inits defined? + if &filetype == 'docbk' || &filetype == 'xml' || (!exists ("g:did_xhtmlcf_inits") && exists ("g:xml_use_xhtml") && (&filetype == 'html' || &filetype == 'xhtml')) + if append (0, '<?xml version="1.0"?>') + normal! G + endif + endif +endfunction +endif + + +" Callback -> Checks for tag callbacks and executes them. {{{1 +if !exists("*s:Callback") +function s:Callback( xml_tag, isHtml ) + let text = 0 + if a:isHtml == 1 && exists ("*HtmlAttribCallback") + let text = HtmlAttribCallback (a:xml_tag) + elseif exists ("*XmlAttribCallback") + let text = XmlAttribCallback (a:xml_tag) + endif + if text != '0' + execute "normal! i " . 
text ."\<Esc>l" + endif +endfunction +endif + + +" IsParsableTag -> Check to see if the tag is a real tag. {{{1 +if !exists("*s:IsParsableTag") +function s:IsParsableTag( tag ) + " The "Should I parse?" flag. + let parse = 1 + + " make sure a:tag has a proper tag in it and is not a instruction or end tag. + if a:tag !~ '^<[[:alnum:]_:\-].*>$' + let parse = 0 + endif + + " make sure this tag isn't already closed. + if strpart (a:tag, strlen (a:tag) - 2, 1) == '/' + let parse = 0 + endif + + return parse +endfunction +endif + + +" ParseTag -> The major work hourse for tag completion. {{{1 +if !exists("*s:ParseTag") +function s:ParseTag( ) + " Save registers + let old_reg_save = @" + let old_save_x = @x + + if (!exists("g:xml_no_auto_nesting") && strpart (getline ("."), col (".") - 2, 2) == '>>') + let multi_line = 1 + execute "normal! \"xX" + else + let multi_line = 0 + endif + + let @" = "" + execute "normal! \"xy%%" + let ltag = @" + if (&filetype == 'html' || &filetype == 'xhtml' || &filetype == 'mako') && (!exists ("g:xml_no_html")) + let html_mode = 1 + let ltag = substitute (ltag, '[^[:graph:]]\+', ' ', 'g') + let ltag = substitute (ltag, '<\s*\([^[:alnum:]_:\-[:blank:]]\=\)\s*\([[:alnum:]_:\-]\+\)\>', '<\1\2', '') + else + let html_mode = 0 + endif + + if <SID>IsParsableTag (ltag) + " find the break between tag name and atributes (or closing of tag) + let index = matchend (ltag, '[[:alnum:]_:\-]\+') + + let tag_name = strpart (ltag, 1, index - 1) + if strpart (ltag, index) =~ '[^/>[:blank:]]' + let has_attrib = 1 + else + let has_attrib = 0 + endif + + " That's (index - 1) + 2, 2 for the '</' and 1 for the extra character the + " while includes (the '>' is ignored because <Esc> puts the curser on top + " of the '>' + let index = index + 2 + + " print out the end tag and place the cursor back were it left off + if html_mode && tag_name =~? 
'^\(img\|input\|param\|frame\|br\|hr\|meta\|link\|base\|area\)$' + if has_attrib == 0 + call <SID>Callback (tag_name, html_mode) + endif + if exists ("g:xml_use_xhtml") + execute "normal! i /\<Esc>l" + endif + else + if multi_line + " Can't use \<Tab> because that indents 'tabstop' not 'shiftwidth' + " Also >> doesn't shift on an empty line hence the temporary char 'x' + let com_save = &comments + set comments-=n:> + execute "normal! a\<Cr>\<Cr>\<Esc>kAx\<Esc>>>$\"xx" + execute "set comments=" . substitute(com_save, " ", "\\\\ ", "g") + else + if has_attrib == 0 + call <SID>Callback (tag_name, html_mode) + endif + if exists("g:xml_jump_string") + let index = index + strlen(g:xml_jump_string) + let jump_char = g:xml_jump_string + call <SID>InitEditFromJump() + else + let jump_char = "" + endif + execute "normal! a</" . tag_name . ">" . jump_char . "\<Esc>" . index . "h" + endif + endif + endif + + " restore registers + let @" = old_reg_save + let @x = old_save_x + + if multi_line + startinsert! + else + execute "normal! l" + startinsert + endif +endfunction +endif + + +" ParseTag2 -> Experimental function to replace ParseTag {{{1 +"if !exists("*s:ParseTag2") +"function s:ParseTag2( ) + " My thought is to pull the tag out and reformat it to a normalized tag + " and put it back. +"endfunction +"endif + + +" BuildTagName -> Grabs the tag's name for tag matching. {{{1 +if !exists("*s:BuildTagName") +function s:BuildTagName( ) + "First check to see if we Are allready on the end of the tag. The / search + "forwards command will jump to the next tag otherwise + + " Store contents of register x in a variable + let b:xreg = @x + + exec "normal! v\"xy" + if @x=='>' + " Don't do anything + else + exec "normal! />/\<Cr>" + endif + + " Now we head back to the < to reach the beginning. + exec "normal! ?<?\<Cr>" + + " Capture the tag (a > will be catured by the /$/ match) + exec "normal! v/\\s\\|$/\<Cr>\"xy" + + " We need to strip off any junk at the end. 
+ let @x=strpart(@x, 0, match(@x, "[[:blank:]>\<C-J>]")) + + "remove <, > + let @x=substitute(@x,'^<\|>$','','') + + " remove spaces. + let @x=substitute(@x,'/\s*','/', '') + let @x=substitute(@x,'^\s*','', '') + + " Swap @x and b:xreg + let temp = @x + let @x = b:xreg + let b:xreg = temp +endfunction +endif + +" TagMatch1 -> First step in tag matching. {{{1 +" Brad Phelan: First step in tag matching. +if !exists("*s:TagMatch1") +function s:TagMatch1() + " Save registers + let old_reg_save = @" + + "Drop a marker here just in case we have a mismatched tag and + "wish to return (:mark looses column position) + normal! mz + + call <SID>BuildTagName() + + "Check to see if it is an end tag. If it is place a 1 in endtag + if match(b:xreg, '^/')==-1 + let endtag = 0 + else + let endtag = 1 + endif + + " Extract the tag from the whole tag block + " eg if the block = + " tag attrib1=blah attrib2=blah + " we will end up with + " tag + " with no trailing or leading spaces + let b:xreg=substitute(b:xreg,'^/','','g') + + " Make sure the tag is valid. + " Malformed tags could be <?xml ?>, <![CDATA[]]>, etc. + if match(b:xreg,'^[[:alnum:]_:\-]') != -1 + " Pass the tag to the matching + " routine + call <SID>TagMatch2(b:xreg, endtag) + endif + " Restore registers + let @" = old_reg_save +endfunction +endif + + +" TagMatch2 -> Second step in tag matching. {{{1 +" Brad Phelan: Second step in tag matching. +if !exists("*s:TagMatch2") +function s:TagMatch2(tag,endtag) + let match_type='' + + " Build the pattern for searching for XML tags based + " on the 'tag' type passed into the function. + " Note we search forwards for end tags and + " backwards for start tags + if a:endtag==0 + "let nextMatch='normal /\(<\s*' . a:tag . '\(\s\+.\{-}\)*>\)\|\(<\/' . a:tag . '\s*>\)' + let match_type = '/' + else + "let nextMatch='normal ?\(<\s*' . a:tag . '\(\s\+.\{-}\)*>\)\|\(<\/' . a:tag . '\s*>\)' + let match_type = '?' 
+ endif + + if a:endtag==0 + let stk = 1 + else + let stk = 1 + end + + " wrapscan must be turned on. We'll recored the value and reset it afterward. + " We have it on because if we don't we'll get a nasty error if the search hits + " BOF or EOF. + let wrapval = &wrapscan + let &wrapscan = 1 + + "Get the current location of the cursor so we can + "detect if we wrap on ourselves + let lpos = line(".") + let cpos = col(".") + + if a:endtag==0 + " If we are trying to find a start tag + " then decrement when we find a start tag + let iter = 1 + else + " If we are trying to find an end tag + " then increment when we find a start tag + let iter = -1 + endif + + "Loop until stk == 0. + while 1 + " exec search. + " Make sure to avoid />$/ as well as /\s$/ and /$/. + exec "normal! " . match_type . '<\s*\/*\s*' . a:tag . '\([[:blank:]>]\|$\)' . "\<Cr>" + + " Check to see if our match makes sence. + if a:endtag == 0 + if line(".") < lpos + call <SID>MisMatchedTag (0, a:tag) + break + elseif line(".") == lpos && col(".") <= cpos + call <SID>MisMatchedTag (1, a:tag) + break + endif + else + if line(".") > lpos + call <SID>MisMatchedTag (2, '/'.a:tag) + break + elseif line(".") == lpos && col(".") >= cpos + call <SID>MisMatchedTag (3, '/'.a:tag) + break + endif + endif + + call <SID>BuildTagName() + + if match(b:xreg,'^/')==-1 + " Found start tag + let stk = stk + iter + else + " Found end tag + let stk = stk - iter + endif + + if stk == 0 + break + endif + endwhile + + let &wrapscan = wrapval +endfunction +endif + +" MisMatchedTag -> What to do if a tag is mismatched. {{{1 +if !exists("*s:MisMatchedTag") +function s:MisMatchedTag( id, tag ) + "Jump back to our formor spot + normal! `z + normal zz + echohl WarningMsg + " For debugging + "echo "Mismatched tag " . a:id . ": <" . a:tag . ">" + " For release + echo "Mismatched tag <" . a:tag . ">" + echohl None +endfunction +endif + +" DeleteTag -> Deletes surrounding tags from cursor. 
{{{1 +" Modifies mark z +if !exists("*s:DeleteTag") +function s:DeleteTag( ) + if strpart (getline ("."), col (".") - 1, 1) == "<" + normal! l + endif + if search ("<[^\/]", "bW") == 0 + return + endif + normal! mz + normal \5 + normal! d%`zd% +endfunction +endif + +" VisualTag -> Selects Tag body in a visual selection. {{{1 +" Modifies mark z +if !exists("*s:VisualTag") +function s:VisualTag( ) + if strpart (getline ("."), col (".") - 1, 1) == "<" + normal! l + endif + if search ("<[^\/]", "bW") == 0 + return + endif + normal! mz + normal \5 + normal! % + exe "normal! " . visualmode() + normal! `z +endfunction +endif + +" InsertGt -> close tags only if the cursor is in a HTML or XML context {{{1 +" Else continue editing +if !exists("*s:InsertGt") +function s:InsertGt( ) + let save_matchpairs = &matchpairs + set matchpairs-=<:> + execute "normal! a>" + execute "set matchpairs=" . save_matchpairs + " When the current char is text within a tag it will not proccess as a + " syntax'ed element and return nothing below. Since the multi line wrap + " feture relies on using the '>' char as text within a tag we must use the + " char prior to establish if it is valid html/xml + if (getline('.')[col('.') - 1] == '>') + let char_syn=synIDattr(synID(line("."), col(".") - 1, 1), "name") + endif + if -1 == match(char_syn, "xmlProcessing") && (0 == match(char_syn, 'html') || 0 == match(char_syn, 'xml') || 0 == match(char_syn, 'docbk')) + call <SID>ParseTag() + else + if col(".") == col("$") - 1 + startinsert! + else + execute "normal! l" + startinsert + endif + endif +endfunction +endif + +" InitEditFromJump -> Set some needed autocommands and syntax highlights for EditFromJump. {{{1 +if !exists("*s:InitEditFromJump") +function s:InitEditFromJump( ) + " Add a syntax highlight for the xml_jump_string. + execute "syntax match Error /\\V" . g:xml_jump_string . "/" +endfunction +endif + +" ClearJumpMarks -> Clean out extranious left over xml_jump_string garbage. 
{{{1 +if !exists("*s:ClearJumpMarks") +function s:ClearJumpMarks( ) + if exists("g:xml_jump_string") + if g:xml_jump_string != "" + execute ":%s/" . g:xml_jump_string . "//ge" + endif + endif +endfunction +endif + +" EditFromJump -> Jump to the end of the tag and continue editing. {{{1 +" g:xml_jump_string must be set. +if !exists("*s:EditFromJump") +function s:EditFromJump( ) + if exists("g:xml_jump_string") + if g:xml_jump_string != "" + let foo = search(g:xml_jump_string, 'csW') " Moves cursor by default + execute "normal! " . strlen(g:xml_jump_string) . "x" + if col(".") == col("$") - 1 + startinsert! + else + startinsert + endif + endif + else + echohl WarningMsg + echo "Function disabled. xml_jump_string not defined." + echohl None + endif +endfunction +endif + +" Section: Doc installation {{{1 +" Function: s:XmlInstallDocumentation(full_name, revision) {{{2 +" Install help documentation. +" Arguments: +" full_name: Full name of this vim plugin script, including path name. +" revision: Revision of the vim script. #version# mark in the document file +" will be replaced with this string with 'v' prefix. +" Return: +" 1 if new document installed, 0 otherwise. +" Note: Cleaned and generalized by guo-peng Wen +"''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +function! s:XmlInstallDocumentation(full_name, revision) + " Name of the document path based on the system we use: + if (has("unix")) + " On UNIX like system, using forward slash: + let l:slash_char = '/' + let l:mkdir_cmd = ':silent !mkdir -p ' + else + " On M$ system, use backslash. Also mkdir syntax is different. + " This should only work on W2K and up. + let l:slash_char = '\' + let l:mkdir_cmd = ':silent !mkdir ' + endif + + let l:doc_path = l:slash_char . 'doc' + "let l:doc_home = l:slash_char . '.vim' . l:slash_char . 
'doc' + + " Figure out document path based on full name of this script: + let l:vim_plugin_path = fnamemodify(a:full_name, ':h') + "let l:vim_doc_path = fnamemodify(a:full_name, ':h:h') . l:doc_path + let l:vim_doc_path = matchstr(l:vim_plugin_path, + \ '.\{-}\ze\%(\%(ft\)\=plugin\|macros\)') . l:doc_path + if (!(filewritable(l:vim_doc_path) == 2)) + echomsg "Doc path: " . l:vim_doc_path + execute l:mkdir_cmd . l:vim_doc_path + if (!(filewritable(l:vim_doc_path) == 2)) + " Try a default configuration in user home: + "let l:vim_doc_path = expand("~") . l:doc_home + let l:vim_doc_path = matchstr(&rtp, + \ escape($HOME, '\') .'[/\\]\%(\.vim\|vimfiles\)') + if (!(filewritable(l:vim_doc_path) == 2)) + execute l:mkdir_cmd . l:vim_doc_path + if (!(filewritable(l:vim_doc_path) == 2)) + " Put a warning: + echomsg "Unable to open documentation directory" + echomsg " type :help add-local-help for more informations." + return 0 + endif + endif + endif + endif + + " Exit if we have problem to access the document directory: + if (!isdirectory(l:vim_plugin_path) + \ || !isdirectory(l:vim_doc_path) + \ || filewritable(l:vim_doc_path) != 2) + return 0 + endif + + " Full name of script and documentation file: + let l:script_name = 'xml.vim' + let l:doc_name = 'xml-plugin.txt' + let l:plugin_file = l:vim_plugin_path . l:slash_char . l:script_name + let l:doc_file = l:vim_doc_path . l:slash_char . l:doc_name + + " Bail out if document file is still up to date: + if (filereadable(l:doc_file) && + \ getftime(l:plugin_file) < getftime(l:doc_file)) + return 0 + endif + + " Prepare window position restoring command: + if (strlen(@%)) + let l:go_back = 'b ' . bufnr("%") + else + let l:go_back = 'enew!' + endif + + " Create a new buffer & read in the plugin file (me): + setl nomodeline + exe 'enew!' + exe 'r ' . 
l:plugin_file + + setl modeline + let l:buf = bufnr("%") + setl noswapfile modifiable + + norm zR + norm gg + + " Delete from first line to a line starts with + " === START_DOC + 1,/^=\{3,}\s\+START_DOC\C/ d + + " Delete from a line starts with + " === END_DOC + " to the end of the documents: + /^=\{3,}\s\+END_DOC\C/,$ d + + " Remove fold marks: + % s/{\{3}[1-9]/ / + + " Add modeline for help doc: the modeline string is mangled intentionally + " to avoid it be recognized by VIM: + call append(line('$'), '') + call append(line('$'), ' v' . 'im:tw=78:ts=8:ft=help:norl:') + + " Replace revision: + exe "normal :1,5s/#version#/ v" . a:revision . "/\<CR>" + + " Save the help document: + exe 'w! ' . l:doc_file + exe l:go_back + exe 'bw ' . l:buf + + " Build help tags: + exe 'helptags ' . l:vim_doc_path + + return 1 +endfunction +" }}}2 + +let s:script_lines = readfile(expand("<sfile>"), "", 6) +let s:revision= + \ substitute(s:script_lines[5], '^" Version:\s*\|\s*$', '', '') +" \ substitute("$Revision: 83 $",'\$\S*: \([.0-9]\+\) \$','\1','') +silent! let s:install_status = + \ s:XmlInstallDocumentation(expand('<sfile>:p'), s:revision) +if (s:install_status == 1) + echom expand("<sfile>:t:r") . '-plugin v' . s:revision . + \ ': Help-documentation installed.' +endif + + +" Mappings and Settings. {{{1 +" This makes the '%' jump between the start and end of a single tag. +setlocal matchpairs+=<:> +setlocal commentstring=<!--%s--> + +" Have this as an escape incase you want a literal '>' not to run the +" ParseTag function. +if !exists("g:xml_tag_completion_map") + inoremap <buffer> <LocalLeader>. > + inoremap <buffer> <LocalLeader>> > +endif + +" Jump between the beggining and end tags. 
+nnoremap <buffer> <LocalLeader>5 :call <SID>TagMatch1()<Cr> +nnoremap <buffer> <LocalLeader>% :call <SID>TagMatch1()<Cr> +vnoremap <buffer> <LocalLeader>5 <Esc>:call <SID>VisualTag()<Cr> +vnoremap <buffer> <LocalLeader>% <Esc>:call <SID>VisualTag()<Cr> + +" Wrap selection in XML tag +vnoremap <buffer> <LocalLeader>x "xx:call <SID>WrapTag(@x)<Cr> +nnoremap <buffer> <LocalLeader>d :call <SID>DeleteTag()<Cr> + +" Parse the tag after pressing the close '>'. +if !exists("g:xml_tag_completion_map") + " inoremap <buffer> > ><Esc>:call <SID>ParseTag()<Cr> + inoremap <buffer> > <Esc>:call <SID>InsertGt()<Cr> +else + execute "inoremap <buffer> " . g:xml_tag_completion_map . " <Esc>:call <SID>InsertGt()<Cr>" +endif + +nnoremap <buffer> <LocalLeader><Space> :call <SID>EditFromJump()<Cr> +inoremap <buffer> <LocalLeader><Space> <Esc>:call <SID>EditFromJump()<Cr> +" Clear out all left over xml_jump_string garbage +nnoremap <buffer> <LocalLeader>w :call <SID>ClearJumpMarks()<Cr> +" The syntax files clear out any predefined syntax definitions. Recreate +" this when ever a xml_jump_string is created. (in ParseTag) + +augroup xml + au! + au BufNewFile * call <SID>NewFileXML() + " Remove left over garbage from xml_jump_string on file save. + au BufWritePre <buffer> call <SID>ClearJumpMarks() +augroup END +"}}}1 +finish + +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" +" Section: Documentation content {{{1 +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" +=== START_DOC +*xml-plugin.txt* Help edit XML and SGML documents. #version# + + XML Edit {{{2 ~ + +A filetype plugin to help edit XML and SGML documents. + +This script provides some convenience when editing XML (and some SGML +including HTML) formated documents. It allows you to jump to the beginning +or end of the tag block your cursor is in. '%' will jump between '<' and '>' +within the tag your cursor is in. 
When in insert mode and you finish a tag +(pressing '>') the tag will be completed. If you press '>' twice it will +complete the tag and place the cursor in the middle of the tags on it's own +line (helps with nested tags). + +Usage: Place this file into your ftplugin directory. To add html support +Sym-link or copy this file to html.vim in your ftplugin directory. To activte +the script place 'filetype plugin on' in your |.vimrc| file. See |ftplugins| +for more information on this topic. + +If the file edited is of type "html" and "xml_use_html" is defined then the +following tags will not auto complete: +<img>, <input>, <param>, <frame>, <br>, <hr>, <meta>, <link>, <base>, <area> + +If the file edited is of type 'html' and 'xml_use_xhtml' is defined the above +tags will autocomplete the xml closing staying xhtml compatable. +ex. <hr> becomes <hr /> (see |xml-plugin-settings|) + +NOTE: If you used the VIM 5.x version of this file (xmledit.vim) you'll need +to comment out the section where you called it. It is no longer used in the +VIM 6.x version. + +Known Bugs {{{2 ~ + +- This script will modify registers ". and "x; register "" will be restored. +- < & > marks inside of a CDATA section are interpreted as actual XML tags + even if unmatched. +- Although the script can handle leading spaces such as < tag></ tag> it is + illegal XML syntax and considered very bad form. +- Placing a literal `>' in an attribute value will auto complete dispite that + the start tag isn't finished. This is poor XML anyway you should use + &gt; instead. +- The matching algorithm can handle illegal tag characters where as the tag + completion algorithm can not. + +------------------------------------------------------------------------------ + *xml-plugin-mappings* +Mappings {{{2 ~ + +<LocalLeader> is a setting in VIM that depicts a prefix for scripts and +plugins to use. By default this is the backslash key `\'. See |mapleader| +for details. 
+ +<LocalLeader><Space> + Normal or Insert - Continue editing after the ending tag. This + option requires xml_jump_string to be set to function. When a tag + is completed it will append the xml_jump_string. Once this mapping + is ran it will delete the next xml_jump_string pattern to the right + of the curser and delete it leaving you in insert mode to continue + editing. + +<LocalLeader>w + Normal - Will clear the entire file of left over xml_jump_string garbage. + * This will also happen automatically when you save the file. * + +<LocalLeader>x + Visual - Place a custom XML tag to suround the selected text. You + need to have selected text in visual mode before you can use this + mapping. See |visual-mode| for details. + +<LocalLeader>. or <LocalLeader>> + Insert - Place a literal '>' without parsing tag. + +<LocalLeader>5 or <LocalLeader>% + Normal or Visual - Jump to the begining or end tag. + +<LocalLeader>d + Normal - Deletes the surrounding tags from the cursor. > + <tag1>outter <tag2>inner text</tag2> text</tag1> + ^ +< Turns to: > + outter <tag2>inner text</tag2> text + ^ +< + +------------------------------------------------------------------------------ + *xml-plugin-settings* +Options {{{2 ~ + +(All options must be placed in your |.vimrc| prior to the |ftplugin| +command.) + +xml_tag_completion_map + Use this setting to change the default mapping to auto complete a + tag. By default typing a literal `>' will cause the tag your editing + to auto complete; pressing twice will auto nest the tag. By using + this setting the `>' will be a literal `>' and you must use the new + mapping to perform auto completion and auto nesting. For example if + you wanted Control-L to perform auto completion inmstead of typing a + `>' place the following into your .vimrc: > + let xml_tag_completion_map = "<C-l>" +< +xml_no_auto_nesting + This turns off the auto nesting feature. 
After a completion is made + and another `>' is typed xml-edit automatically will break the tag + accross multiple lines and indent the curser to make creating nested + tqags easier. This feature turns it off. Enter the following in your + .vimrc: > + let xml_no_auto_nesting = 1 +< +xml_use_xhtml + When editing HTML this will auto close the short tags to make valid + XML like <hr /> and <br />. Enter the following in your vimrc to + turn this option on: > + let xml_use_xhtml = 1 +< +xml_no_html + This turns off the support for HTML specific tags. Place this in your + .vimrc: > + let xml_no_html = 1 +< +xml_jump_string + This turns off the support for continuing edits after an ending tag. + xml_jump_string can be any string how ever a simple character will + suffice. Pick a character or small string that is unique and will + not interfer with your normal editing. See the <LocalLeader>Space + mapping for more. + .vimrc: > + let xml_jump_string = "`" +< +------------------------------------------------------------------------------ + *xml-plugin-callbacks* +Callback Functions {{{2 ~ + +A callback function is a function used to customize features on a per tag +basis. For example say you wish to have a default set of attributs when you +type an empty tag like this: + You type: <tag> + You get: <tag default="attributes"></tag> + +This is for any script programmers who wish to add xml-plugin support to +there own filetype plugins. + +Callback functions recive one attribute variable which is the tag name. The +all must return either a string or the number zero. If it returns a string +the plugin will place the string in the proper location. If it is a zero the +plugin will ignore and continue as if no callback existed. + +The following are implemented callback functions: + +HtmlAttribCallback + This is used to add default attributes to html tag. It is intended + for HTML files only. + +XmlAttribCallback + This is a generic callback for xml tags intended to add attributes. 
+ + *xml-plugin-html* +Callback Example {{{2 ~ + +The following is an example of using XmlAttribCallback in your .vimrc +> + function XmlAttribCallback (xml_tag) + if a:xml_tag ==? "my-xml-tag" + return "attributes=\"my xml attributes\"" + else + return 0 + endif + endfunction +< +The following is a sample html.vim file type plugin you could use: +> + " Vim script file vim600:fdm=marker: + " FileType: HTML + " Maintainer: Devin Weaver <vim (at) tritarget.com> + " Location: http://www.vim.org/scripts/script.php?script_id=301 + + " This is a wrapper script to add extra html support to xml documents. + " Original script can be seen in xml-plugin documentation. + + " Only do this when not done yet for this buffer + if exists("b:did_ftplugin") + finish + endif + " Don't set 'b:did_ftplugin = 1' because that is xml.vim's responsability. + + let b:html_mode = 1 + + if !exists("*HtmlAttribCallback") + function HtmlAttribCallback( xml_tag ) + if a:xml_tag ==? "table" + return "cellpadding=\"0\" cellspacing=\"0\" border=\"0\"" + elseif a:xml_tag ==? "link" + return "href=\"/site.css\" rel=\"StyleSheet\" type=\"text/css\"" + elseif a:xml_tag ==? "body" + return "bgcolor=\"white\"" + elseif a:xml_tag ==? "frame" + return "name=\"NAME\" src=\"/\" scrolling=\"auto\" noresize" + elseif a:xml_tag ==? "frameset" + return "rows=\"0,*\" cols=\"*,0\" border=\"0\"" + elseif a:xml_tag ==? "img" + return "src=\"\" width=\"0\" height=\"0\" border=\"0\" alt=\"\"" + elseif a:xml_tag ==? "a" + if has("browse") + " Look up a file to fill the href. Used in local relative file + " links. typeing your own href before closing the tag with `>' + " will override this. + let cwd = getcwd() + let cwd = substitute (cwd, "\\", "/", "g") + let href = browse (0, "Link to href...", getcwd(), "") + let href = substitute (href, cwd . "/", "", "") + let href = substitute (href, " ", "%20", "g") + else + let href = "" + endif + return "href=\"" . href . 
"\"" + else + return 0 + endif + endfunction + endif + + " On to loading xml.vim + runtime ftplugin/xml.vim +< +=== END_DOC +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" +" vim: set tabstop=8 shiftwidth=4 softtabstop=4 smartindent +" vim600: set foldmethod=marker smarttab fileencoding=iso-8859-15 + diff --git a/.vim/ftplugin/xml.vim b/.vim/ftplugin/xml.vim index 7117c9f..74e98e8 100755 --- a/.vim/ftplugin/xml.vim +++ b/.vim/ftplugin/xml.vim @@ -1,686 +1,686 @@ " Vim script file vim600:fdm=marker: " FileType: XML " Author: Devin Weaver <suki (at) tritarget.com> " Maintainer: Devin Weaver <suki (at) tritarget.com> " Last Change: Tue Apr 07 11:12:08 EDT 2009 " Version: 1.84 " Location: http://www.vim.org/scripts/script.php?script_id=301 " Licence: This program is free software; you can redistribute it " and/or modify it under the terms of the GNU General Public " License. See http://www.gnu.org/copyleft/gpl.txt " Credits: Brad Phelan <bphelan (at) mathworks.co.uk> for completing " tag matching and visual tag completion. " Ma, Xiangjiang <Xiangjiang.Ma (at) broadvision.com> for " pointing out VIM 6.0 map <buffer> feature. " Luc Hermitte <hermitte (at) free.fr> for testing the self " install documentation code and providing good bug fixes. " Guo-Peng Wen for the self install documentation code. " Shawn Boles <ickybots (at) gmail.com> for fixing the " <Leader>x cancelation bug. " Martijn van der Kwast <[email protected]> for patching " problems with multi-languages (XML and PHP). " This script provides some convenience when editing XML (and some SGML) " formated documents. " Section: Documentation " ---------------------- " " Documentation should be available by ":help xml-plugin" command, once the " script has been copied in you .vim/plugin directory. " " You still can read the documentation at the end of this file. Locate it by " searching the "xml-plugin" string (and set ft=help to have " appropriate syntaxic coloration). 
" Note: If you used the 5.x version of this file (xmledit.vim) you'll need to " comment out the section where you called it since it is no longer used in " version 6.x. " TODO: Revamp ParseTag to pull appart a tag a rebuild it properly. " a tag like: < test nowrap testatt=foo > " should be fixed to: <test nowrap="nowrap" testatt="foo"></test> "============================================================================== " Only do this when not done yet for this buffer if exists("b:did_ftplugin") finish endif " sboles, init these variables so vim doesn't complain on wrap cancel let b:last_wrap_tag_used = "" let b:last_wrap_atts_used = "" " WrapTag -> Places an XML tag around a visual selection. {{{1 " Brad Phelan: Wrap the argument in an XML tag " Added nice GUI support to the dialogs. " Rewrote function to implement new algorythem that addresses several bugs. if !exists("*s:WrapTag") function s:WrapTag(text) if (line(".") < line("'<")) let insert_cmd = "o" elseif (col(".") < col("'<")) let insert_cmd = "a" else let insert_cmd = "i" endif if strlen(a:text) > 10 let input_text = strpart(a:text, 0, 10) . '...' else let input_text = a:text endif let wraptag = inputdialog('Tag to wrap "' . input_text . '" : ') if strlen(wraptag)==0 if strlen(b:last_wrap_tag_used)==0 undo return endif let wraptag = b:last_wrap_tag_used let atts = b:last_wrap_atts_used else let atts = inputdialog('Attributes in <' . wraptag . '> : ') endif if (visualmode() ==# 'V') let text = strpart(a:text,0,strlen(a:text)-1) if (insert_cmd ==# "o") let eol_cmd = "" else let eol_cmd = "\<Cr>" endif else let text = a:text let eol_cmd = "" endif if strlen(atts)==0 let text = "<".wraptag.">".text."</".wraptag.">" let b:last_wrap_tag_used = wraptag let b:last_wrap_atts_used = "" else let text = "<".wraptag." ".atts.">".text."</".wraptag.">" let b:last_wrap_tag_used = wraptag let b:last_wrap_atts_used = atts endif execute "normal! 
".insert_cmd.text.eol_cmd endfunction endif " NewFileXML -> Inserts <?xml?> at top of new file. {{{1 if !exists("*s:NewFileXML") function s:NewFileXML( ) " Where is g:did_xhtmlcf_inits defined? if &filetype == 'docbk' || &filetype == 'xml' || (!exists ("g:did_xhtmlcf_inits") && exists ("g:xml_use_xhtml") && (&filetype == 'html' || &filetype == 'xhtml')) if append (0, '<?xml version="1.0"?>') normal! G endif endif endfunction endif " Callback -> Checks for tag callbacks and executes them. {{{1 if !exists("*s:Callback") function s:Callback( xml_tag, isHtml ) let text = 0 if a:isHtml == 1 && exists ("*HtmlAttribCallback") let text = HtmlAttribCallback (a:xml_tag) elseif exists ("*XmlAttribCallback") let text = XmlAttribCallback (a:xml_tag) endif if text != '0' execute "normal! i " . text ."\<Esc>l" endif endfunction endif " IsParsableTag -> Check to see if the tag is a real tag. {{{1 if !exists("*s:IsParsableTag") function s:IsParsableTag( tag ) " The "Should I parse?" flag. let parse = 1 " make sure a:tag has a proper tag in it and is not a instruction or end tag. if a:tag !~ '^<[[:alnum:]_:\-].*>$' let parse = 0 endif " make sure this tag isn't already closed. if strpart (a:tag, strlen (a:tag) - 2, 1) == '/' let parse = 0 endif return parse endfunction endif " ParseTag -> The major work hourse for tag completion. {{{1 if !exists("*s:ParseTag") function s:ParseTag( ) " Save registers let old_reg_save = @" let old_save_x = @x if (!exists("g:xml_no_auto_nesting") && strpart (getline ("."), col (".") - 2, 2) == '>>') let multi_line = 1 execute "normal! \"xX" else let multi_line = 0 endif let @" = "" execute "normal! 
\"xy%%" let ltag = @" - if (&filetype == 'html' || &filetype == 'xhtml') && (!exists ("g:xml_no_html")) + if (&filetype == 'html' || &filetype == 'xhtml' || &filetype == 'mako') && (!exists ("g:xml_no_html")) let html_mode = 1 let ltag = substitute (ltag, '[^[:graph:]]\+', ' ', 'g') let ltag = substitute (ltag, '<\s*\([^[:alnum:]_:\-[:blank:]]\=\)\s*\([[:alnum:]_:\-]\+\)\>', '<\1\2', '') else let html_mode = 0 endif if <SID>IsParsableTag (ltag) " find the break between tag name and atributes (or closing of tag) let index = matchend (ltag, '[[:alnum:]_:\-]\+') let tag_name = strpart (ltag, 1, index - 1) if strpart (ltag, index) =~ '[^/>[:blank:]]' let has_attrib = 1 else let has_attrib = 0 endif " That's (index - 1) + 2, 2 for the '</' and 1 for the extra character the " while includes (the '>' is ignored because <Esc> puts the curser on top " of the '>' let index = index + 2 " print out the end tag and place the cursor back were it left off if html_mode && tag_name =~? '^\(img\|input\|param\|frame\|br\|hr\|meta\|link\|base\|area\)$' if has_attrib == 0 call <SID>Callback (tag_name, html_mode) endif if exists ("g:xml_use_xhtml") execute "normal! i /\<Esc>l" endif else if multi_line " Can't use \<Tab> because that indents 'tabstop' not 'shiftwidth' " Also >> doesn't shift on an empty line hence the temporary char 'x' let com_save = &comments set comments-=n:> execute "normal! a\<Cr>\<Cr>\<Esc>kAx\<Esc>>>$\"xx" execute "set comments=" . substitute(com_save, " ", "\\\\ ", "g") else if has_attrib == 0 call <SID>Callback (tag_name, html_mode) endif if exists("g:xml_jump_string") let index = index + strlen(g:xml_jump_string) let jump_char = g:xml_jump_string call <SID>InitEditFromJump() else let jump_char = "" endif execute "normal! a</" . tag_name . ">" . jump_char . "\<Esc>" . index . "h" endif endif endif " restore registers let @" = old_reg_save let @x = old_save_x if multi_line startinsert! else execute "normal! 
l" startinsert endif endfunction endif " ParseTag2 -> Experimental function to replace ParseTag {{{1 "if !exists("*s:ParseTag2") "function s:ParseTag2( ) " My thought is to pull the tag out and reformat it to a normalized tag " and put it back. "endfunction "endif " BuildTagName -> Grabs the tag's name for tag matching. {{{1 if !exists("*s:BuildTagName") function s:BuildTagName( ) "First check to see if we Are allready on the end of the tag. The / search "forwards command will jump to the next tag otherwise " Store contents of register x in a variable let b:xreg = @x exec "normal! v\"xy" if @x=='>' " Don't do anything else exec "normal! />/\<Cr>" endif " Now we head back to the < to reach the beginning. exec "normal! ?<?\<Cr>" " Capture the tag (a > will be catured by the /$/ match) exec "normal! v/\\s\\|$/\<Cr>\"xy" " We need to strip off any junk at the end. let @x=strpart(@x, 0, match(@x, "[[:blank:]>\<C-J>]")) "remove <, > let @x=substitute(@x,'^<\|>$','','') " remove spaces. let @x=substitute(@x,'/\s*','/', '') let @x=substitute(@x,'^\s*','', '') " Swap @x and b:xreg let temp = @x let @x = b:xreg let b:xreg = temp endfunction endif " TagMatch1 -> First step in tag matching. {{{1 " Brad Phelan: First step in tag matching. if !exists("*s:TagMatch1") function s:TagMatch1() " Save registers let old_reg_save = @" "Drop a marker here just in case we have a mismatched tag and "wish to return (:mark looses column position) normal! mz call <SID>BuildTagName() "Check to see if it is an end tag. If it is place a 1 in endtag if match(b:xreg, '^/')==-1 let endtag = 0 else let endtag = 1 endif " Extract the tag from the whole tag block " eg if the block = " tag attrib1=blah attrib2=blah " we will end up with " tag " with no trailing or leading spaces let b:xreg=substitute(b:xreg,'^/','','g') " Make sure the tag is valid. " Malformed tags could be <?xml ?>, <![CDATA[]]>, etc. 
if match(b:xreg,'^[[:alnum:]_:\-]') != -1 " Pass the tag to the matching " routine call <SID>TagMatch2(b:xreg, endtag) endif " Restore registers let @" = old_reg_save endfunction endif " TagMatch2 -> Second step in tag matching. {{{1 " Brad Phelan: Second step in tag matching. if !exists("*s:TagMatch2") function s:TagMatch2(tag,endtag) let match_type='' " Build the pattern for searching for XML tags based " on the 'tag' type passed into the function. " Note we search forwards for end tags and " backwards for start tags if a:endtag==0 "let nextMatch='normal /\(<\s*' . a:tag . '\(\s\+.\{-}\)*>\)\|\(<\/' . a:tag . '\s*>\)' let match_type = '/' else "let nextMatch='normal ?\(<\s*' . a:tag . '\(\s\+.\{-}\)*>\)\|\(<\/' . a:tag . '\s*>\)' let match_type = '?' endif if a:endtag==0 let stk = 1 else let stk = 1 end " wrapscan must be turned on. We'll recored the value and reset it afterward. " We have it on because if we don't we'll get a nasty error if the search hits " BOF or EOF. let wrapval = &wrapscan let &wrapscan = 1 "Get the current location of the cursor so we can "detect if we wrap on ourselves let lpos = line(".") let cpos = col(".") if a:endtag==0 " If we are trying to find a start tag " then decrement when we find a start tag let iter = 1 else " If we are trying to find an end tag " then increment when we find a start tag let iter = -1 endif "Loop until stk == 0. while 1 " exec search. " Make sure to avoid />$/ as well as /\s$/ and /$/. exec "normal! " . match_type . '<\s*\/*\s*' . a:tag . '\([[:blank:]>]\|$\)' . "\<Cr>" " Check to see if our match makes sence. 
if a:endtag == 0 if line(".") < lpos call <SID>MisMatchedTag (0, a:tag) break elseif line(".") == lpos && col(".") <= cpos call <SID>MisMatchedTag (1, a:tag) break endif else if line(".") > lpos call <SID>MisMatchedTag (2, '/'.a:tag) break elseif line(".") == lpos && col(".") >= cpos call <SID>MisMatchedTag (3, '/'.a:tag) break endif endif call <SID>BuildTagName() if match(b:xreg,'^/')==-1 " Found start tag let stk = stk + iter else " Found end tag let stk = stk - iter endif if stk == 0 break endif endwhile let &wrapscan = wrapval endfunction endif " MisMatchedTag -> What to do if a tag is mismatched. {{{1 if !exists("*s:MisMatchedTag") function s:MisMatchedTag( id, tag ) "Jump back to our formor spot normal! `z normal zz echohl WarningMsg " For debugging "echo "Mismatched tag " . a:id . ": <" . a:tag . ">" " For release echo "Mismatched tag <" . a:tag . ">" echohl None endfunction endif " DeleteTag -> Deletes surrounding tags from cursor. {{{1 " Modifies mark z if !exists("*s:DeleteTag") function s:DeleteTag( ) if strpart (getline ("."), col (".") - 1, 1) == "<" normal! l endif if search ("<[^\/]", "bW") == 0 return endif normal! mz normal \5 normal! d%`zd% endfunction endif " VisualTag -> Selects Tag body in a visual selection. {{{1 " Modifies mark z if !exists("*s:VisualTag") function s:VisualTag( ) if strpart (getline ("."), col (".") - 1, 1) == "<" normal! l endif if search ("<[^\/]", "bW") == 0 return endif normal! mz normal \5 normal! % exe "normal! " . visualmode() normal! `z endfunction endif " InsertGt -> close tags only if the cursor is in a HTML or XML context {{{1 " Else continue editing if !exists("*s:InsertGt") function s:InsertGt( ) let save_matchpairs = &matchpairs set matchpairs-=<:> execute "normal! a>" execute "set matchpairs=" . save_matchpairs " When the current char is text within a tag it will not proccess as a " syntax'ed element and return nothing below. 
Since the multi line wrap " feture relies on using the '>' char as text within a tag we must use the " char prior to establish if it is valid html/xml if (getline('.')[col('.') - 1] == '>') let char_syn=synIDattr(synID(line("."), col(".") - 1, 1), "name") endif if -1 == match(char_syn, "xmlProcessing") && (0 == match(char_syn, 'html') || 0 == match(char_syn, 'xml') || 0 == match(char_syn, 'docbk')) call <SID>ParseTag() else if col(".") == col("$") - 1 startinsert! else execute "normal! l" startinsert endif endif endfunction endif " InitEditFromJump -> Set some needed autocommands and syntax highlights for EditFromJump. {{{1 if !exists("*s:InitEditFromJump") function s:InitEditFromJump( ) " Add a syntax highlight for the xml_jump_string. execute "syntax match Error /\\V" . g:xml_jump_string . "/" endfunction endif " ClearJumpMarks -> Clean out extranious left over xml_jump_string garbage. {{{1 if !exists("*s:ClearJumpMarks") function s:ClearJumpMarks( ) if exists("g:xml_jump_string") if g:xml_jump_string != "" execute ":%s/" . g:xml_jump_string . "//ge" endif endif endfunction endif " EditFromJump -> Jump to the end of the tag and continue editing. {{{1 " g:xml_jump_string must be set. if !exists("*s:EditFromJump") function s:EditFromJump( ) if exists("g:xml_jump_string") if g:xml_jump_string != "" let foo = search(g:xml_jump_string, 'csW') " Moves cursor by default execute "normal! " . strlen(g:xml_jump_string) . "x" if col(".") == col("$") - 1 startinsert! else startinsert endif endif else echohl WarningMsg echo "Function disabled. xml_jump_string not defined." echohl None endif endfunction endif " Section: Doc installation {{{1 " Function: s:XmlInstallDocumentation(full_name, revision) {{{2 " Install help documentation. " Arguments: " full_name: Full name of this vim plugin script, including path name. " revision: Revision of the vim script. #version# mark in the document file " will be replaced with this string with 'v' prefix. 
" Return: " 1 if new document installed, 0 otherwise. " Note: Cleaned and generalized by guo-peng Wen "''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' function! s:XmlInstallDocumentation(full_name, revision) " Name of the document path based on the system we use: if (has("unix")) " On UNIX like system, using forward slash: let l:slash_char = '/' let l:mkdir_cmd = ':silent !mkdir -p ' else " On M$ system, use backslash. Also mkdir syntax is different. " This should only work on W2K and up. let l:slash_char = '\' let l:mkdir_cmd = ':silent !mkdir ' endif let l:doc_path = l:slash_char . 'doc' "let l:doc_home = l:slash_char . '.vim' . l:slash_char . 'doc' " Figure out document path based on full name of this script: let l:vim_plugin_path = fnamemodify(a:full_name, ':h') "let l:vim_doc_path = fnamemodify(a:full_name, ':h:h') . l:doc_path let l:vim_doc_path = matchstr(l:vim_plugin_path, \ '.\{-}\ze\%(\%(ft\)\=plugin\|macros\)') . l:doc_path if (!(filewritable(l:vim_doc_path) == 2)) echomsg "Doc path: " . l:vim_doc_path execute l:mkdir_cmd . l:vim_doc_path if (!(filewritable(l:vim_doc_path) == 2)) " Try a default configuration in user home: "let l:vim_doc_path = expand("~") . l:doc_home let l:vim_doc_path = matchstr(&rtp, \ escape($HOME, '\') .'[/\\]\%(\.vim\|vimfiles\)') if (!(filewritable(l:vim_doc_path) == 2)) execute l:mkdir_cmd . l:vim_doc_path if (!(filewritable(l:vim_doc_path) == 2)) " Put a warning: echomsg "Unable to open documentation directory" echomsg " type :help add-local-help for more informations." return 0 endif endif endif endif " Exit if we have problem to access the document directory: if (!isdirectory(l:vim_plugin_path) \ || !isdirectory(l:vim_doc_path) \ || filewritable(l:vim_doc_path) != 2) return 0 endif " Full name of script and documentation file: let l:script_name = 'xml.vim' let l:doc_name = 'xml-plugin.txt' let l:plugin_file = l:vim_plugin_path . l:slash_char . l:script_name let l:doc_file = l:vim_doc_path . 
l:slash_char . l:doc_name " Bail out if document file is still up to date: if (filereadable(l:doc_file) && \ getftime(l:plugin_file) < getftime(l:doc_file)) return 0 endif " Prepare window position restoring command: if (strlen(@%)) let l:go_back = 'b ' . bufnr("%") else let l:go_back = 'enew!' endif " Create a new buffer & read in the plugin file (me): setl nomodeline exe 'enew!' exe 'r ' . l:plugin_file setl modeline let l:buf = bufnr("%") setl noswapfile modifiable norm zR norm gg " Delete from first line to a line starts with " === START_DOC 1,/^=\{3,}\s\+START_DOC\C/ d " Delete from a line starts with " === END_DOC " to the end of the documents: /^=\{3,}\s\+END_DOC\C/,$ d " Remove fold marks: % s/{\{3}[1-9]/ / " Add modeline for help doc: the modeline string is mangled intentionally " to avoid it be recognized by VIM: call append(line('$'), '') call append(line('$'), ' v' . 'im:tw=78:ts=8:ft=help:norl:') " Replace revision: exe "normal :1,5s/#version#/ v" . a:revision . "/\<CR>" " Save the help document: exe 'w! ' . l:doc_file exe l:go_back exe 'bw ' . l:buf " Build help tags: exe 'helptags ' . l:vim_doc_path return 1 endfunction " }}}2 let s:script_lines = readfile(expand("<sfile>"), "", 6) let s:revision= \ substitute(s:script_lines[5], '^" Version:\s*\|\s*$', '', '') " \ substitute("$Revision: 83 $",'\$\S*: \([.0-9]\+\) \$','\1','') silent! let s:install_status = \ s:XmlInstallDocumentation(expand('<sfile>:p'), s:revision) if (s:install_status == 1) echom expand("<sfile>:t:r") . '-plugin v' . s:revision . \ ': Help-documentation installed.' endif " Mappings and Settings. {{{1 " This makes the '%' jump between the start and end of a single tag. setlocal matchpairs+=<:> setlocal commentstring=<!--%s--> " Have this as an escape incase you want a literal '>' not to run the " ParseTag function. if !exists("g:xml_tag_completion_map") inoremap <buffer> <LocalLeader>. 
> inoremap <buffer> <LocalLeader>> > endif diff --git a/.vimrc b/.vimrc index 85a4268..3a0f934 100644 --- a/.vimrc +++ b/.vimrc @@ -1,478 +1,487 @@ +" http://github.com/mitechie/pyvim " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck +" ,S - remove end of line spaces " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... 
- post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide - colorscheme lucius - colorscheme vilight colorscheme underwater-mod " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background - colorscheme hornet colorscheme lucius set t_Co=256 endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set 
matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! 
spelllang=en_us<CR> + +" setup a custom dict for spelling +" zg = add word to dict +" zw = mark word as not spelled correctly (remove) set spellfile=~/.vim/dict.add + " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l -map <c-h> <c-w>h +imap <c-h> <c-w>h + +" Hints for other movements +" <c-w><c-r> rotate window to next spot +" <c-w><c-x> swap window with current one " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. 
set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching +set smartcase " if searching and search contains upper case, make case sensitive search nmap <silent> <C-N> :silent noh<CR> " Highlight end of line whitespace. highlight WhitespaceEOL ctermbg=red guibg=red match WhitespaceEOL /\s\+$/ " Clean all end of line extra whitespace with ,S :nnoremap <silent><leader>S :let _s=@/<Bar>:%s/\s\+$//e<Bar>:let @/=_s<Bar>:nohl<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config -map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> +map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " 
http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pylint " http://www.vim.org/scripts/script.php?script_id=891 -" default config for underlines of syntax errors in gvim +" default config for underlines of syntax errors in gvim " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter source ~/.vim/twitvim.vim -" opeVim +" RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 function! CustomCodeAssistInsertMode() call RopeCodeAssistInsertMode() if pumvisible() return "\<C-L>\<Down>" else return '' endif endfunction function! 
TabWrapperComplete() let cursyn = synID(line('.'), col('.') - 1, 1) if pumvisible() return "\<C-Y>" endif if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 return "\<Tab>" else return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" endif endfunction inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
0e238df0d1b0679af98877ca6571bdb57033be3d
Add a custom dict for spelling issues
diff --git a/.vim/colors/lucius.vim b/.vim/colors/lucius.vim index 410cc55..d73586e 100644 --- a/.vim/colors/lucius.vim +++ b/.vim/colors/lucius.vim @@ -1,349 +1,350 @@ " Vim color file " Maintainer: Jonathan Filip <[email protected]> " Last Modified: Mon Apr 19, 2010 10:24AM " Version: 3.5 " " GUI / 256 color terminal " " I started out trying to combine my favorite parts of other schemes and ended " up with this (oceandeep, moria, peaksea, wombat, zenburn). " " This file also tries to have descriptive comments for each higlighting group " so it is easy to understand what each part does. set background=dark hi clear if exists("syntax_on") syntax reset endif let colors_name="lucius" " Some other colors to save " blue: 3eb8e5 " green: 92d400 " c green: d5f876, cae682 " new blue: 002D62 " new gray: CCCCCC " Base color " ---------- hi Normal guifg=#e0e0e0 guibg=#202020 hi Normal ctermfg=253 ctermbg=235 " Comment Group " ------------- " any comment hi Comment guifg=#606060 gui=none hi Comment ctermfg=240 cterm=none " Constant Group " -------------- " any constant hi Constant guifg=#8cd0d3 gui=none hi Constant ctermfg=116 cterm=none " strings hi String guifg=#80c0d9 gui=none hi String ctermfg=110 cterm=none " character constant hi Character guifg=#80c0d9 gui=none hi Character ctermfg=110 cterm=none " numbers decimal/hex hi Number guifg=#8cd0d3 gui=none hi Number ctermfg=116 cterm=none " true, false hi Boolean guifg=#8cd0d3 gui=none hi Boolean ctermfg=116 cterm=none " float hi Float guifg=#8cd0d3 gui=none hi Float ctermfg=116 cterm=none " Identifier Group " ---------------- " any variable name hi Identifier guifg=#e6c080 gui=none hi Identifier ctermfg=180 cterm=none " function, method, class hi Function guifg=#e6c080 gui=none hi Function ctermfg=180 cterm=none " Statement Group " --------------- " any statement hi Statement guifg=#b3d38c gui=none hi Statement ctermfg=150 cterm=none " if, then, else hi Conditional guifg=#b3d38c gui=none hi Conditional ctermfg=150 
cterm=none " try, catch, throw, raise hi Exception guifg=#b3d38c gui=none hi Exception ctermfg=150 cterm=none " for, while, do hi Repeat guifg=#b3d38c gui=none hi Repeat ctermfg=150 cterm=none " case, default hi Label guifg=#b3d38c gui=none hi Label ctermfg=150 cterm=none " sizeof, +, * hi Operator guifg=#b3d38c gui=none hi Operator ctermfg=150 cterm=none " any other keyword hi Keyword guifg=#b3d38c gui=none hi Keyword ctermfg=150 cterm=none " Preprocessor Group " ------------------ " generic preprocessor hi PreProc guifg=#e9dfaf gui=none hi PreProc ctermfg=187 cterm=none " #include hi Include guifg=#e9dfaf gui=none hi Include ctermfg=187 cterm=none " #define hi Define guifg=#e9dfaf gui=none hi Define ctermfg=187 cterm=none " same as define hi Macro guifg=#e9dfaf gui=none hi Macro ctermfg=187 cterm=none " #if, #else, #endif hi PreCondit guifg=#e9dfaf gui=none hi PreCondit ctermfg=187 cterm=none " Type Group " ---------- " int, long, char hi Type guifg=#93d6a9 gui=none hi Type ctermfg=115 cterm=none " static, register, volative hi StorageClass guifg=#93d6a9 gui=none hi StorageClass ctermfg=115 cterm=none " struct, union, enum hi Structure guifg=#93d6a9 gui=none hi Structure ctermfg=115 cterm=none " typedef hi Typedef guifg=#93d6a9 gui=none hi Typedef ctermfg=115 cterm=none " Special Group " ------------- " any special symbol hi Special guifg=#cca3b3 gui=none hi Special ctermfg=181 cterm=none " special character in a constant hi SpecialChar guifg=#cca3b3 gui=none hi SpecialChar ctermfg=181 cterm=none " things you can CTRL-] hi Tag guifg=#cca3b3 gui=none hi Tag ctermfg=181 cterm=none " character that needs attention hi Delimiter guifg=#cca3b3 gui=none hi Delimiter ctermfg=181 cterm=none " special things inside a comment hi SpecialComment guifg=#cca3b3 gui=none hi SpecialComment ctermfg=181 cterm=none " debugging statements hi Debug guifg=#cca3b3 guibg=NONE gui=none hi Debug ctermfg=181 ctermbg=NONE cterm=none " Underlined Group " ---------------- " text that stands 
out, html links hi Underlined guifg=fg gui=underline hi Underlined ctermfg=fg cterm=underline " Ignore Group " ------------ " left blank, hidden hi Ignore guifg=bg hi Ignore ctermfg=bg " Error Group " ----------- " any erroneous construct hi Error guifg=#e37170 guibg=#432323 gui=none hi Error ctermfg=167 ctermbg=52 cterm=none " Todo Group " ---------- " todo, fixme, note, xxx hi Todo guifg=#efef8f guibg=NONE gui=underline hi Todo ctermfg=228 ctermbg=NONE cterm=underline " Spelling " -------- " word not recognized hi SpellBad guisp=#ee0000 gui=undercurl hi SpellBad ctermbg=196 cterm=undercurl +hi SpellBad ctermfg=225 ctermbg=196 cterm=undercurl " word not capitalized hi SpellCap guisp=#eeee00 gui=undercurl hi SpellCap ctermbg=226 cterm=undercurl " rare word hi SpellRare guisp=#ffa500 gui=undercurl hi SpellRare ctermbg=214 cterm=undercurl " wrong spelling for selected region hi SpellLocal guisp=#ffa500 gui=undercurl hi SpellLocal ctermbg=214 cterm=undercurl " Cursor " ------ " character under the cursor hi Cursor guifg=bg guibg=#a3e3ed hi Cursor ctermfg=bg ctermbg=153 " like cursor, but used when in IME mode hi CursorIM guifg=bg guibg=#96cdcd hi CursorIM ctermfg=bg ctermbg=116 " cursor column hi CursorColumn guifg=NONE guibg=#404448 gui=none hi CursorColumn ctermfg=NONE ctermbg=236 cterm=none " cursor line/row hi CursorLine gui=NONE guibg=#404448 gui=none hi CursorLine cterm=NONE ctermbg=236 cterm=none " Misc " ---- " directory names and other special names in listings hi Directory guifg=#c0e0b0 gui=none hi Directory ctermfg=151 cterm=none " error messages on the command line hi ErrorMsg guifg=#ee0000 guibg=NONE gui=none hi ErrorMsg ctermfg=196 ctermbg=NONE cterm=none " column separating vertically split windows hi VertSplit guifg=#777777 guibg=#363946 gui=none hi VertSplit ctermfg=242 ctermbg=237 cterm=none " columns where signs are displayed (used in IDEs) hi SignColumn guifg=#9fafaf guibg=#181818 gui=none hi SignColumn ctermfg=145 ctermbg=233 cterm=none " line 
numbers hi LineNr guifg=#818698 guibg=#363946 hi LineNr ctermfg=245 ctermbg=237 " match parenthesis, brackets hi MatchParen guifg=#00ff00 guibg=NONE gui=bold hi MatchParen ctermfg=46 ctermbg=NONE cterm=bold " the 'more' prompt when output takes more than one line hi MoreMsg guifg=#2e8b57 gui=none hi MoreMsg ctermfg=29 cterm=none " text showing what mode you are in hi ModeMsg guifg=#76d5f8 guibg=NONE gui=none hi ModeMsg ctermfg=117 ctermbg=NONE cterm=none " the '~' and '@' and showbreak, '>' double wide char doesn't fit on line hi NonText guifg=#404040 gui=none hi NonText ctermfg=238 cterm=none " the hit-enter prompt (show more output) and yes/no questions hi Question guifg=fg gui=none hi Question ctermfg=fg cterm=none " meta and special keys used with map, unprintable characters hi SpecialKey guifg=#405060 hi SpecialKey ctermfg=239 " titles for output from :set all, :autocmd, etc hi Title guifg=#62bdde gui=none hi Title ctermfg=74 cterm=none "hi Title guifg=#5ec8e5 gui=none " warning messages hi WarningMsg guifg=#e5786d gui=none hi WarningMsg ctermfg=173 cterm=none " current match in the wildmenu completion hi WildMenu guifg=#cae682 guibg=#363946 gui=bold,underline hi WildMenu ctermfg=16 ctermbg=186 cterm=bold " Diff " ---- " added line hi DiffAdd guifg=#80a090 guibg=#313c36 gui=none hi DiffAdd ctermfg=fg ctermbg=22 cterm=none " changed line hi DiffChange guifg=NONE guibg=#4a343a gui=none hi DiffChange ctermfg=fg ctermbg=52 cterm=none " deleted line hi DiffDelete guifg=#6c6661 guibg=#3c3631 gui=none hi DiffDelete ctermfg=fg ctermbg=58 cterm=none " changed text within line hi DiffText guifg=#f05060 guibg=#4a343a gui=bold hi DiffText ctermfg=203 ctermbg=52 cterm=bold " Folds " ----- " line used for closed folds "hi Folded guifg=#91d6f8 guibg=#363946 gui=none "hi Folded ctermfg=117 ctermbg=238 cterm=none hi Folded guifg=#d0e0f0 guibg=#202020 gui=none hi Folded ctermfg=117 ctermbg=235 cterm=none " column on side used to indicated open and closed folds hi FoldColumn 
guifg=#91d6f8 guibg=#363946 gui=none hi FoldColumn guifg=#c0c0d0 guibg=#363946 gui=none hi FoldColumn ctermfg=117 ctermbg=238 cterm=none " Search " ------ " highlight incremental search text; also highlight text replaced with :s///c hi IncSearch guifg=#66ffff gui=reverse hi IncSearch ctermfg=87 cterm=reverse " hlsearch (last search pattern), also used for quickfix hi Search guibg=#ffaa33 gui=none hi Search ctermbg=214 cterm=none " Popup Menu " ---------- " normal item in popup hi Pmenu guifg=#e0e0e0 guibg=#303840 gui=none hi Pmenu ctermfg=253 ctermbg=233 cterm=none " selected item in popup hi PmenuSel guifg=#cae682 guibg=#505860 gui=none hi PmenuSel ctermfg=186 ctermbg=237 cterm=none " scrollbar in popup hi PMenuSbar guibg=#505860 gui=none hi PMenuSbar ctermbg=59 cterm=none " thumb of the scrollbar in the popup hi PMenuThumb guibg=#808890 gui=none hi PMenuThumb ctermbg=102 cterm=none " Status Line " ----------- " status line for current window hi StatusLine guifg=#e0e0e0 guibg=#363946 gui=bold hi StatusLine ctermfg=254 ctermbg=237 cterm=bold " status line for non-current windows hi StatusLineNC guifg=#767986 guibg=#363946 gui=none hi StatusLineNC ctermfg=244 ctermbg=237 cterm=none " Tab Lines " --------- " tab pages line, not active tab page label hi TabLine guifg=#b6bf98 guibg=#363946 gui=none hi TabLine ctermfg=244 ctermbg=236 cterm=none " tab pages line, where there are no labels hi TabLineFill guifg=#cfcfaf guibg=#363946 gui=none hi TabLineFill ctermfg=187 ctermbg=236 cterm=none " tab pages line, active tab page label hi TabLineSel guifg=#efefef guibg=#414658 gui=bold hi TabLineSel ctermfg=254 ctermbg=236 cterm=bold " Visual " ------ " visual mode selection hi Visual guifg=NONE guibg=#364458 hi Visual ctermfg=NONE ctermbg=24 " visual mode selection when vim is not owning the selection (x11 only) hi VisualNOS guifg=fg gui=underline hi VisualNOS ctermfg=fg cterm=underline diff --git a/.vim/dict.add b/.vim/dict.add new file mode 100644 index 0000000..6fc3d48 --- 
/dev/null +++ b/.vim/dict.add @@ -0,0 +1,5 @@ +sqlalchemy +SQLAlchemy +sessionmaker +Metadata +metadata diff --git a/.vimrc b/.vimrc index 3b408e5..85a4268 100644 --- a/.vimrc +++ b/.vimrc @@ -1,477 +1,478 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " cs"( - replace the " with ( " ysiw" - wrap current text object with " " yss" - wrap current line with " " S - in visual mode surroud with something " ds( - remove wrapping ( from text " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... 
- post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme lucius colorscheme vilight colorscheme underwater-mod " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet colorscheme lucius set t_Co=256 endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " 
show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! 
spelllang=en_us<CR> +set spellfile=~/.vim/dict.add " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " Highlight end of line whitespace. 
highlight WhitespaceEOL ctermbg=red guibg=red match WhitespaceEOL /\s\+$/ " Clean all end of line extra whitespace with ,S :nnoremap <silent><leader>S :let _s=@/<Bar>:%s/\s\+$//e<Bar>:let @/=_s<Bar>:nohl<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin 
" python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Surround " http://www.vim.org/scripts/script.php?script_id=1697 " default shortcuts " Pylint " http://www.vim.org/scripts/script.php?script_id=891 " default config for underlines of syntax errors in gvim " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter source ~/.vim/twitvim.vim " opeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 function! CustomCodeAssistInsertMode() call RopeCodeAssistInsertMode() if pumvisible() return "\<C-L>\<Down>" else return '' endif endfunction function! 
TabWrapperComplete() let cursyn = synID(line('.'), col('.') - 1, 1) if pumvisible() return "\<C-Y>" endif if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 return "\<Tab>" else return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" endif endfunction inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
6cb81fc4886715b0170a1f014e4313bd67e40682
Added surround plugin and updated notes with pylint/surround
diff --git a/.vim/doc/surround.txt b/.vim/doc/surround.txt new file mode 100644 index 0000000..4387fa2 --- /dev/null +++ b/.vim/doc/surround.txt @@ -0,0 +1,222 @@ +*surround.txt* Plugin for deleting, changing, and adding "surroundings" + +Author: Tim Pope <[email protected]> *surround-author* +License: Same terms as Vim itself (see |license|) + +This plugin is only available if 'compatible' is not set. + +INTRODUCTION *surround* + +This plugin is a tool for dealing with pairs of "surroundings." Examples +of surroundings include parentheses, quotes, and HTML tags. They are +closely related to what Vim refers to as |text-objects|. Provided +are mappings to allow for removing, changing, and adding surroundings. + +Details follow on the exact semantics, but first, consider the following +examples. An asterisk (*) is used to denote the cursor position. + + Old text Command New text ~ + "Hello *world!" ds" Hello world! + [123+4*56]/2 cs]) (123+456)/2 + "Look ma, I'm *HTML!" cs"<q> <q>Look ma, I'm HTML!</q> + if *x>3 { ysW( if ( x>3 ) { + my $str = *whee!; vlllls' my $str = 'whee!'; + +While a few features of this plugin will work in older versions of Vim, +Vim 7 is recommended for full functionality. + +MAPPINGS *surround-mappings* + +Delete surroundings is *ds* . The next character given determines the target +to delete. The exact nature of the target is explained in |surround-targets| +but essentially it is the last character of a |text-object|. This mapping +deletes the difference between the "i"nner object and "a"n object. This is +easiest to understand with some examples: + + Old text Command New text ~ + "Hello *world!" ds" Hello world! + (123+4*56)/2 ds) 123+456/2 + <div>Yo!*</div> dst Yo! + +Change surroundings is *cs* . It takes two arguments, a target like with +|ds|, and a replacement. Details about the second argument can be found +below in |surround-replacements|. Once again, examples are in order. + + Old text Command New text ~ + "Hello *world!" 
cs"' 'Hello world!' + "Hello *world!" cs"<q> <q>Hello world!</q> + (123+4*56)/2 cs)] [123+456]/2 + (123+4*56)/2 cs)[ [ 123+456 ]/2 + <div>Yo!*</div> cst<p> <p>Yo!</p> + +*ys* takes a valid Vim motion or text object as the first object, and wraps +it using the second argument as with |cs|. (Unfortunately there's no good +mnemonic for "ys".) + + Old text Command New text ~ + Hello w*orld! ysiw) Hello (world)! + +As a special case, *yss* operates on the current line, ignoring leading +whitespace. + + Old text Command New text ~ + Hello w*orld! yssB {Hello world!} + +There is also *yS* and *ySS* which indent the surrounded text and place it +on a line of its own. + +In visual mode, a simple "s" with an argument wraps the selection. This is +referred to as the *vS* mapping, although ordinarily there will be +additional keystrokes between the v and s. In linewise visual mode, the +surroundings are placed on separate lines and indented. In blockwise visual +mode, each line is surrounded. + +A "gS" in visual mode, known as *vgS* , behaves similarly. In linewise visual +mode, the automatic indenting is surpressed. In blockwise visual mode, this +enables surrounding past the end of the like with 'virtualedit' set (there +seems to be no way in Vim Script to differentiate between a jagged end of line +selection and a virtual block selected past the end of the line, so two maps +were needed). + +Additionally, there is a legacy "s" or *vs* mapping which is basically the +same as |vS|. Due to popular demand of wanting to use "s" as Vim does to mean +replacing the selection (also available as "c"), this mapping is going away. +If you were one of these people and would like to disable "s" with the current +release, indicate this to surround.vim by assigning the "s" mapping to +something else. +> + xmap <Leader>s <Plug>Vsurround +< + *i_CTRL-G_s* *i_CTRL-G_S* +Finally, there is an experimental insert mode mapping on <C-G>s and <C-S>. 
+Beware that the latter won't work on terminals with flow control (if you +accidentally freeze your terminal, use <C-Q> to unfreeze it). The mapping +inserts the specified surroundings and puts the cursor between them. If, +immediately after the mapping and before the replacement, a second <C-S> or +carriage return is pressed, the prefix, cursor, and suffix will be placed on +three separate lines. <C-G>S (not <C-G>s) also exhibits this behavior. + +TARGETS *surround-targets* + +The |ds| and |cs| commands both take a target as their first argument. The +possible targets are based closely on the |text-objects| provided by Vim. +In order for a target to work, the corresponding text object must be +supported in the version of Vim used (Vim 7 adds several text objects, and +thus is highly recommended). All targets are currently just one character. + +Eight punctuation marks, (, ), {, }, [, ], <, and >, represent themselves +and their counterparts. If the opening mark is used, contained whitespace is +also trimmed. The targets b, B, r, and a are aliases for ), }, ], and > +(the first two mirror Vim; the second two are completely arbitrary and +subject to change). + +Three quote marks, ', ", `, represent themselves, in pairs. They are only +searched for on the current line. + +A t is a pair of HTML or XML tags. See |tag-blocks| for details. Remember +that you can specify a numerical argument if you want to get to a tag other +than the innermost one. + +The letters w, W, and s correspond to a |word|, a |WORD|, and a |sentence|, +respectively. These are special in that they have nothing to delete, and +used with |ds| they are a no-op. With |cs|, one could consider them a +slight shortcut for ysi (cswb == ysiwb, more or less). + +A p represents a |paragraph|. This behaves similarly to w, W, and s above; +however, newlines are sometimes added and/or removed. 
+ +REPLACEMENTS *surround-replacements* + +A replacement argument is a single character, and is required by |cs|, |ys|, +and |vs|. Undefined replacement characters (with the exception of alphabetic +characters) default to placing themselves at the beginning and end of the +destination, which can be useful for characters like / and |. + +If either ), }, ], or > is used, the text is wrapped in the appropriate pair +of characters. Similar behavior can be found with (, {, and [ (but not <), +which append an additional space to the inside. Like with the targets above, +b, B, r, and a are aliases for ), }, ], and >. To fulfill the common need for +code blocks in C-style languages, <C-}> (which is really <C-]>) adds braces on +lines separate from the content. + +If t or < is used, Vim prompts for an HTML/XML tag to insert. You may specify +attributes here and they will be stripped from the closing tag. End your +input by pressing <CR> or >. If <C-T> is used, the tags will appear on lines +by themselves. + +A deprecated replacement of a LaTeX environment is provided on \ and l. The +name of the environment and any arguments will be input from a prompt. This +will be removed once a more fully functional customization system is +implemented. The following shows the resulting environment from +csp\tabular}{lc<CR> +> + \begin{tabular}{lc} + \end{tabular} +< +CUSTOMIZING *surround-customizing* + +The following adds a potential replacement on "-" (ASCII 45) in PHP files. +(To determine the ASCII code to use, :echo char2nr("-")). The carriage +return will be replaced by the original text. +> + autocmd FileType php let b:surround_45 = "<?php \r ?>" +< +This can be used in a PHP file as in the following example. + + Old text Command New text ~ + print "Hello *world!" yss- <?php print "Hello world!" ?> + +Additionally, one can use a global variable for globally available +replacements. 
+> + let g:surround_45 = "<% \r %>" + let g:surround_61 = "<%= \r %>" +< +Advanced, experimental, and subject to change: One can also prompt for +replacement text. The syntax for this is to surround the replacement in pairs +of low numbered control characters. If this sounds confusing, that's because +it is (but it makes the parsing easy). Consider the following example for a +LaTeX environment on the "l" replacement. +> + let g:surround_108 = "\\begin{\1environment: \1}\r\\end{\1\1}" +< +When this replacement is used, the user is prompted with an "environment: " +prompt for input. This input is inserted between each set of \1's. +Additional inputs up to \7 can be used. + +Furthermore, one can specify a regular expression substitution to apply. +> + let g:surround_108 = "\\begin{\1environment: \1}\r\\end{\1\r}.*\r\1}" +< +This will remove anything after the first } in the input when the text is +placed within the \end{} slot. The first \r marks where the pattern begins, +and the second where the replacement text begins. + +Here's a second example for creating an HTML <div>. The substitution cleverly +prompts for an id, but only adds id="" if it is non-blank. You may have to +read this one a few times slowly before you understand it. +> + let g:surround_{char2nr("d")} = "<div\1id: \r..*\r id=\"&\"\1>\r</div>" +< +Inputting text replacements is a proof of concept at this point. The ugly, +unintuitive interface and the brevity of the documentation reflect this. + +Finally, It is possible to always append a string to surroundings in insert +mode (and only insert mode). This is useful with certain plugins and mappings +that allow you to jump to such markings. +> + let g:surround_insert_tail = "<++>" +< +ISSUES *surround-issues* + +Vim could potentially get confused when deleting/changing occurs at the very +end of the line. Please report any repeatable instances of this. + +Do we need to use |inputsave()|/|inputrestore()| with the tag replacement? 
+ +Indenting is handled haphazardly. Need to decide the most appropriate +behavior and implement it. Right now one can do :let b:surround_indent = 1 +(or the global equivalent) to enable automatic re-indenting by Vim via |=|; +should this be the default? + + vim:tw=78:ts=8:ft=help:norl: diff --git a/.vim/plugin/surround.vim b/.vim/plugin/surround.vim new file mode 100644 index 0000000..ea28c02 --- /dev/null +++ b/.vim/plugin/surround.vim @@ -0,0 +1,625 @@ +" surround.vim - Surroundings +" Author: Tim Pope <[email protected]> +" Version: 1.90 +" GetLatestVimScripts: 1697 1 :AutoInstall: surround.vim +" +" See surround.txt for help. This can be accessed by doing +" +" :helptags ~/.vim/doc +" :help surround +" +" Licensed under the same terms as Vim itself. + +" ============================================================================ + +" Exit quickly when: +" - this plugin was already loaded or disabled +" - when 'compatible' is set +if (exists("g:loaded_surround") && g:loaded_surround) || &cp + finish +endif +let g:loaded_surround = 1 + +let s:cpo_save = &cpo +set cpo&vim + +" Input functions {{{1 + +function! s:getchar() + let c = getchar() + if c =~ '^\d\+$' + let c = nr2char(c) + endif + return c +endfunction + +function! s:inputtarget() + let c = s:getchar() + while c =~ '^\d\+$' + let c = c . s:getchar() + endwhile + if c == " " + let c = c . s:getchar() + endif + if c =~ "\<Esc>\|\<C-C>\|\0" + return "" + else + return c + endif +endfunction + +function! s:inputreplacement() + "echo '-- SURROUND --' + let c = s:getchar() + if c == " " + let c = c . s:getchar() + endif + if c =~ "\<Esc>" || c =~ "\<C-C>" + return "" + else + return c + endif +endfunction + +function! s:beep() + exe "norm! \<Esc>" + return "" +endfunction + +function! s:redraw() + redraw + return "" +endfunction + +" }}}1 + +" Wrapping functions {{{1 + +function! 
s:extractbefore(str) + if a:str =~ '\r' + return matchstr(a:str,'.*\ze\r') + else + return matchstr(a:str,'.*\ze\n') + endif +endfunction + +function! s:extractafter(str) + if a:str =~ '\r' + return matchstr(a:str,'\r\zs.*') + else + return matchstr(a:str,'\n\zs.*') + endif +endfunction + +function! s:repeat(str,count) + let cnt = a:count + let str = "" + while cnt > 0 + let str = str . a:str + let cnt = cnt - 1 + endwhile + return str +endfunction + +function! s:fixindent(str,spc) + let str = substitute(a:str,'\t',s:repeat(' ',&sw),'g') + let spc = substitute(a:spc,'\t',s:repeat(' ',&sw),'g') + let str = substitute(str,'\(\n\|\%^\).\@=','\1'.spc,'g') + if ! &et + let str = substitute(str,'\s\{'.&ts.'\}',"\t",'g') + endif + return str +endfunction + +function! s:process(string) + let i = 0 + while i < 7 + let i = i + 1 + let repl_{i} = '' + let m = matchstr(a:string,nr2char(i).'.\{-\}\ze'.nr2char(i)) + if m != '' + let m = substitute(strpart(m,1),'\r.*','','') + let repl_{i} = input(substitute(m,':\s*$','','').': ') + endif + endwhile + let s = "" + let i = 0 + while i < strlen(a:string) + let char = strpart(a:string,i,1) + if char2nr(char) < 8 + let next = stridx(a:string,char,i+1) + if next == -1 + let s = s . char + else + let insertion = repl_{char2nr(char)} + let subs = strpart(a:string,i+1,next-i-1) + let subs = matchstr(subs,'\r.*') + while subs =~ '^\r.*\r' + let sub = matchstr(subs,"^\r\\zs[^\r]*\r[^\r]*") + let subs = strpart(subs,strlen(sub)+1) + let r = stridx(sub,"\r") + let insertion = substitute(insertion,strpart(sub,0,r),strpart(sub,r+1),'') + endwhile + let s = s . insertion + let i = next + endif + else + let s = s . char + endif + let i = i + 1 + endwhile + return s +endfunction + +function! s:wrap(string,char,type,...) + let keeper = a:string + let newchar = a:char + let type = a:type + let linemode = type ==# 'V' ? 1 : 0 + let special = a:0 ? 
a:1 : 0 + let before = "" + let after = "" + if type ==# "V" + let initspaces = matchstr(keeper,'\%^\s*') + else + let initspaces = matchstr(getline('.'),'\%^\s*') + endif + " Duplicate b's are just placeholders (removed) + let pairs = "b()B{}r[]a<>" + let extraspace = "" + if newchar =~ '^ ' + let newchar = strpart(newchar,1) + let extraspace = ' ' + endif + let idx = stridx(pairs,newchar) + if newchar == ' ' + let before = '' + let after = '' + elseif exists("b:surround_".char2nr(newchar)) + let all = s:process(b:surround_{char2nr(newchar)}) + let before = s:extractbefore(all) + let after = s:extractafter(all) + elseif exists("g:surround_".char2nr(newchar)) + let all = s:process(g:surround_{char2nr(newchar)}) + let before = s:extractbefore(all) + let after = s:extractafter(all) + elseif newchar ==# "p" + let before = "\n" + let after = "\n\n" + elseif newchar =~# "[tT\<C-T><,]" + let dounmapp = 0 + let dounmapb = 0 + if !maparg(">","c") + let dounmapb= 1 + " Hide from AsNeeded + exe "cn"."oremap > <CR>" + endif + let default = "" + if newchar ==# "T" + if !exists("s:lastdel") + let s:lastdel = "" + endif + let default = matchstr(s:lastdel,'<\zs.\{-\}\ze>') + endif + let tag = input("<",default) + echo "<".substitute(tag,'>*$','>','') + if dounmapb + silent! cunmap > + endif + if tag != "" + let tag = substitute(tag,'>*$','','') + let before = '<'.tag.'>' + if tag =~ '/$' + let after = '' + else + let after = '</'.substitute(tag,' .*','','').'>' + endif + if newchar == "\<C-T>" || newchar == "," + if type ==# "v" || type ==# "V" + let before = before . "\n\t" + endif + if type ==# "v" + let after = "\n". after + endif + endif + endif + elseif newchar ==# 'l' || newchar == '\' + " LaTeX + let env = input('\begin{') + let env = '{' . env + let env = env . 
s:closematch(env) + echo '\begin'.env + if env != "" + let before = '\begin'.env + let after = '\end'.matchstr(env,'[^}]*').'}' + endif + "if type ==# 'v' || type ==# 'V' + "let before = before ."\n\t" + "endif + "if type ==# 'v' + "let after = "\n".initspaces.after + "endif + elseif newchar ==# 'f' || newchar ==# 'F' + let fnc = input('function: ') + if fnc != "" + let before = substitute(fnc,'($','','').'(' + let after = ')' + if newchar ==# 'F' + let before = before . ' ' + let after = ' ' . after + endif + endif + elseif idx >= 0 + let spc = (idx % 3) == 1 ? " " : "" + let idx = idx / 3 * 3 + let before = strpart(pairs,idx+1,1) . spc + let after = spc . strpart(pairs,idx+2,1) + elseif newchar == "\<C-[>" || newchar == "\<C-]>" + let before = "{\n\t" + let after = "\n}" + elseif newchar !~ '\a' + let before = newchar + let after = newchar + else + let before = '' + let after = '' + endif + "let before = substitute(before,'\n','\n'.initspaces,'g') + let after = substitute(after ,'\n','\n'.initspaces,'g') + "let after = substitute(after,"\n\\s*\<C-U>\\s*",'\n','g') + if type ==# 'V' || (special && type ==# "v") + let before = substitute(before,' \+$','','') + let after = substitute(after ,'^ \+','','') + if after !~ '^\n' + let after = initspaces.after + endif + if keeper !~ '\n$' && after !~ '^\n' + let keeper = keeper . "\n" + elseif keeper =~ '\n$' && after =~ '^\n' + let after = strpart(after,1) + endif + if before !~ '\n\s*$' + let before = before . "\n" + if special + let before = before . 
"\t" + endif + endif + endif + if type ==# 'V' + let before = initspaces.before + endif + if before =~ '\n\s*\%$' + if type ==# 'v' + let keeper = initspaces.keeper + endif + let padding = matchstr(before,'\n\zs\s\+\%$') + let before = substitute(before,'\n\s\+\%$','\n','') + let keeper = s:fixindent(keeper,padding) + endif + if type ==# 'V' + let keeper = before.keeper.after + elseif type =~ "^\<C-V>" + " Really we should be iterating over the buffer + let repl = substitute(before,'[\\~]','\\&','g').'\1'.substitute(after,'[\\~]','\\&','g') + let repl = substitute(repl,'\n',' ','g') + let keeper = substitute(keeper."\n",'\(.\{-\}\)\(\n\)',repl.'\n','g') + let keeper = substitute(keeper,'\n\%$','','') + else + let keeper = before.extraspace.keeper.extraspace.after + endif + return keeper +endfunction + +function! s:wrapreg(reg,char,...) + let orig = getreg(a:reg) + let type = substitute(getregtype(a:reg),'\d\+$','','') + let special = a:0 ? a:1 : 0 + let new = s:wrap(orig,a:char,type,special) + call setreg(a:reg,new,type) +endfunction +" }}}1 + +function! s:insert(...) " {{{1 + " Optional argument causes the result to appear on 3 lines, not 1 + "call inputsave() + let linemode = a:0 ? a:1 : 0 + let char = s:inputreplacement() + while char == "\<CR>" || char == "\<C-S>" + " TODO: use total count for additional blank lines + let linemode = linemode + 1 + let char = s:inputreplacement() + endwhile + "call inputrestore() + if char == "" + return "" + endif + "call inputsave() + let cb_save = &clipboard + set clipboard-=unnamed + let reg_save = @@ + call setreg('"',"\r",'v') + call s:wrapreg('"',char,linemode) + " If line mode is used and the surrounding consists solely of a suffix, + " remove the initial newline. This fits a use case of mine but is a + " little inconsistent. Is there anyone that would prefer the simpler + " behavior of just inserting the newline? 
+ if linemode && match(getreg('"'),'^\n\s*\zs.*') == 0 + call setreg('"',matchstr(getreg('"'),'^\n\s*\zs.*'),getregtype('"')) + endif + " This can be used to append a placeholder to the end + if exists("g:surround_insert_tail") + call setreg('"',g:surround_insert_tail,"a".getregtype('"')) + endif + "if linemode + "call setreg('"',substitute(getreg('"'),'^\s\+','',''),'c') + "endif + if col('.') >= col('$') + norm! ""p + else + norm! ""P + endif + if linemode + call s:reindent() + endif + norm! `] + call search('\r','bW') + let @@ = reg_save + let &clipboard = cb_save + return "\<Del>" +endfunction " }}}1 + +function! s:reindent() " {{{1 + if exists("b:surround_indent") ? b:surround_indent : (exists("g:surround_indent") && g:surround_indent) + silent norm! '[='] + endif +endfunction " }}}1 + +function! s:dosurround(...) " {{{1 + let scount = v:count1 + let char = (a:0 ? a:1 : s:inputtarget()) + let spc = "" + if char =~ '^\d\+' + let scount = scount * matchstr(char,'^\d\+') + let char = substitute(char,'^\d\+','','') + endif + if char =~ '^ ' + let char = strpart(char,1) + let spc = 1 + endif + if char == 'a' + let char = '>' + endif + if char == 'r' + let char = ']' + endif + let newchar = "" + if a:0 > 1 + let newchar = a:2 + if newchar == "\<Esc>" || newchar == "\<C-C>" || newchar == "" + return s:beep() + endif + endif + let cb_save = &clipboard + set clipboard-=unnamed + let append = "" + let original = getreg('"') + let otype = getregtype('"') + call setreg('"',"") + let strcount = (scount == 1 ? "" : scount) + if char == '/' + exe 'norm! '.strcount.'[/d'.strcount.']/' + else + exe 'norm! 
d'.strcount.'i'.char + endif + let keeper = getreg('"') + let okeeper = keeper " for reindent below + if keeper == "" + call setreg('"',original,otype) + let &clipboard = cb_save + return "" + endif + let oldline = getline('.') + let oldlnum = line('.') + if char ==# "p" + call setreg('"','','V') + elseif char ==# "s" || char ==# "w" || char ==# "W" + " Do nothing + call setreg('"','') + elseif char =~ "[\"'`]" + exe "norm! i \<Esc>d2i".char + call setreg('"',substitute(getreg('"'),' ','','')) + elseif char == '/' + norm! "_x + call setreg('"','/**/',"c") + let keeper = substitute(substitute(keeper,'^/\*\s\=','',''),'\s\=\*$','','') + else + " One character backwards + call search('.','bW') + exe "norm! da".char + endif + let removed = getreg('"') + let rem2 = substitute(removed,'\n.*','','') + let oldhead = strpart(oldline,0,strlen(oldline)-strlen(rem2)) + let oldtail = strpart(oldline, strlen(oldline)-strlen(rem2)) + let regtype = getregtype('"') + if char =~# '[\[({<T]' || spc + let keeper = substitute(keeper,'^\s\+','','') + let keeper = substitute(keeper,'\s\+$','','') + endif + if col("']") == col("$") && col('.') + 1 == col('$') + if oldhead =~# '^\s*$' && a:0 < 2 + let keeper = substitute(keeper,'\%^\n'.oldhead.'\(\s*.\{-\}\)\n\s*\%$','\1','') + endif + let pcmd = "p" + else + let pcmd = "P" + endif + if line('.') < oldlnum && regtype ==# "V" + let pcmd = "p" + endif + call setreg('"',keeper,regtype) + if newchar != "" + call s:wrapreg('"',newchar) + endif + silent exe 'norm! ""'.pcmd.'`[' + if removed =~ '\n' || okeeper =~ '\n' || getreg('"') =~ '\n' + call s:reindent() + endif + if getline('.') =~ '^\s\+$' && keeper =~ '^\s*\n' + silent norm! cc + endif + call setreg('"',removed,regtype) + let s:lastdel = removed + let &clipboard = cb_save + if newchar == "" + silent! call repeat#set("\<Plug>Dsurround".char,scount) + else + silent! call repeat#set("\<Plug>Csurround".char.newchar,scount) + endif +endfunction " }}}1 + +function! 
s:changesurround() " {{{1 + let a = s:inputtarget() + if a == "" + return s:beep() + endif + let b = s:inputreplacement() + if b == "" + return s:beep() + endif + call s:dosurround(a,b) +endfunction " }}}1 + +function! s:opfunc(type,...) " {{{1 + let char = s:inputreplacement() + if char == "" + return s:beep() + endif + let reg = '"' + let sel_save = &selection + let &selection = "inclusive" + let cb_save = &clipboard + set clipboard-=unnamed + let reg_save = getreg(reg) + let reg_type = getregtype(reg) + "call setreg(reg,"\n","c") + let type = a:type + if a:type == "char" + silent exe 'norm! v`[o`]"'.reg.'y' + let type = 'v' + elseif a:type == "line" + silent exe 'norm! `[V`]"'.reg.'y' + let type = 'V' + elseif a:type ==# "v" || a:type ==# "V" || a:type ==# "\<C-V>" + let ve = &virtualedit + if !(a:0 && a:1) + set virtualedit= + endif + silent exe 'norm! gv"'.reg.'y' + let &virtualedit = ve + elseif a:type =~ '^\d\+$' + let type = 'v' + silent exe 'norm! ^v'.a:type.'$h"'.reg.'y' + if mode() ==# 'v' + norm! v + return s:beep() + endif + else + let &selection = sel_save + let &clipboard = cb_save + return s:beep() + endif + let keeper = getreg(reg) + if type ==# "v" && a:type !=# "v" + let append = matchstr(keeper,'\_s\@<!\s*$') + let keeper = substitute(keeper,'\_s\@<!\s*$','','') + endif + call setreg(reg,keeper,type) + call s:wrapreg(reg,char,a:0 && a:1) + if type ==# "v" && a:type !=# "v" && append != "" + call setreg(reg,append,"ac") + endif + silent exe 'norm! gv'.(reg == '"' ? '' : '"' . reg).'p`[' + if type ==# 'V' || (getreg(reg) =~ '\n' && type ==# 'v') + call s:reindent() + endif + call setreg(reg,reg_save,reg_type) + let &selection = sel_save + let &clipboard = cb_save + if a:type =~ '^\d\+$' + silent! call repeat#set("\<Plug>Y".(a:0 && a:1 ? "S" : "s")."surround".char,a:type) + endif +endfunction + +function! s:opfunc2(arg) + call s:opfunc(a:arg,1) +endfunction " }}}1 + +function! 
s:closematch(str) " {{{1 + " Close an open (, {, [, or < on the command line. + let tail = matchstr(a:str,'.[^\[\](){}<>]*$') + if tail =~ '^\[.\+' + return "]" + elseif tail =~ '^(.\+' + return ")" + elseif tail =~ '^{.\+' + return "}" + elseif tail =~ '^<.+' + return ">" + else + return "" + endif +endfunction " }}}1 + +nnoremap <silent> <Plug>Dsurround :<C-U>call <SID>dosurround(<SID>inputtarget())<CR> +nnoremap <silent> <Plug>Csurround :<C-U>call <SID>changesurround()<CR> +nnoremap <silent> <Plug>Yssurround :<C-U>call <SID>opfunc(v:count1)<CR> +nnoremap <silent> <Plug>YSsurround :<C-U>call <SID>opfunc2(v:count1)<CR> +" <C-U> discards the numerical argument but there's not much we can do with it +nnoremap <silent> <Plug>Ysurround :<C-U>set opfunc=<SID>opfunc<CR>g@ +nnoremap <silent> <Plug>YSurround :<C-U>set opfunc=<SID>opfunc2<CR>g@ +vnoremap <silent> <Plug>Vsurround :<C-U>call <SID>opfunc(visualmode())<CR> +vnoremap <silent> <Plug>VSurround :<C-U>call <SID>opfunc(visualmode(),visualmode() ==# 'V' ? 1 : 0)<CR> +vnoremap <silent> <Plug>VgSurround :<C-U>call <SID>opfunc(visualmode(),visualmode() ==# 'V' ? 0 : 1)<CR> +inoremap <silent> <Plug>Isurround <C-R>=<SID>insert()<CR> +inoremap <silent> <Plug>ISurround <C-R>=<SID>insert(1)<CR> + +if !exists("g:surround_no_mappings") || ! 
g:surround_no_mappings + nmap ds <Plug>Dsurround + nmap cs <Plug>Csurround + nmap ys <Plug>Ysurround + nmap yS <Plug>YSurround + nmap yss <Plug>Yssurround + nmap ySs <Plug>YSsurround + nmap ySS <Plug>YSsurround + if !hasmapto("<Plug>Vsurround","v") && !hasmapto("<Plug>VSurround","v") + if exists(":xmap") + xmap s <Plug>Vsurround + else + vmap s <Plug>Vsurround + endif + endif + if !hasmapto("<Plug>VSurround","v") + if exists(":xmap") + xmap S <Plug>VSurround + else + vmap S <Plug>VSurround + endif + endif + if exists(":xmap") + xmap gS <Plug>VgSurround + else + vmap gS <Plug>VgSurround + endif + if !hasmapto("<Plug>Isurround","i") && "" == mapcheck("<C-S>","i") + imap <C-S> <Plug>Isurround + endif + imap <C-G>s <Plug>Isurround + imap <C-G>S <Plug>ISurround + "Implemented internally instead + "imap <C-S><C-S> <Plug>ISurround +endif + +let &cpo = s:cpo_save + +" vim:set ft=vim sw=2 sts=2 et: diff --git a/.vimrc b/.vimrc index 7d1c73f..3b408e5 100644 --- a/.vimrc +++ b/.vimrc @@ -1,463 +1,477 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " +" cs"( - replace the " with ( +" ysiw" 
- wrap current text object with " +" yss" - wrap current line with " +" S - in visual mode surroud with something +" ds( - remove wrapping ( from text +" " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme lucius colorscheme vilight colorscheme underwater-mod " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet colorscheme lucius set t_Co=256 endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set 
scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the 
current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. 
set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " Highlight end of line whitespace. highlight WhitespaceEOL ctermbg=red guibg=red match WhitespaceEOL /\s\+$/ " Clean all end of line extra whitespace with ,S :nnoremap <silent><leader>S :let _s=@/<Bar>:%s/\s\+$//e<Bar>:let @/=_s<Bar>:nohl<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " HTML " ================================================== " enable a shortcut for tidy using ~/.tidyrc config map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin 
" python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype +" Surround +" http://www.vim.org/scripts/script.php?script_id=1697 +" default shortcuts + +" Pylint +" http://www.vim.org/scripts/script.php?script_id=891 +" default config for underlines of syntax errors in gvim + " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter source ~/.vim/twitvim.vim " opeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 function! CustomCodeAssistInsertMode() call RopeCodeAssistInsertMode() if pumvisible() return "\<C-L>\<Down>" else return '' endif endfunction function! 
TabWrapperComplete() let cursyn = synID(line('.'), col('.') - 1, 1) if pumvisible() return "\<C-Y>" endif if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 return "\<Tab>" else return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" endif endfunction inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
2a0d90528865806fbba1434826b09437e71a7b34
- add tidy support if installed on the system - added pyflakes for on the fly code checking with spelling underline
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index c975a61..7771a52 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,11 +1,2 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =9 -let g:netrw_dirhist_1='/home/rharding/configs/pyvim' -let g:netrw_dirhist_2='/home/rharding/configs/dotfiles/awesome/autostart' -let g:netrw_dirhist_3='/home/rharding/.offlineimap' -let g:netrw_dirhist_4='/home/rharding/configs/dotfiles/awesome/autostart' -let g:netrw_dirhist_5='/home/rharding/src/gitosis_private/gitosis-admin' -let g:netrw_dirhist_6='/etc/apache2/sites-enabled' -let g:netrw_dirhist_7='/home/rharding/src/hotalert/qsat_hotalert_app/docs' -let g:netrw_dirhist_8='/home/rharding/src/qmail/qmail/docs/source' -let g:netrw_dirhist_9='/home/rharding/.config/awesome' +let g:netrw_dirhist_cnt =0 diff --git a/.vim/ftplugin/python/pyflakes-vim.zip b/.vim/ftplugin/python/pyflakes-vim.zip new file mode 100644 index 0000000..fc3f08c Binary files /dev/null and b/.vim/ftplugin/python/pyflakes-vim.zip differ diff --git a/.vim/ftplugin/python/pyflakes.vim b/.vim/ftplugin/python/pyflakes.vim new file mode 100644 index 0000000..d6699bc --- /dev/null +++ b/.vim/ftplugin/python/pyflakes.vim @@ -0,0 +1,300 @@ +" pyflakes.vim - A script to highlight Python code on the fly with warnings +" from Pyflakes, a Python lint tool. +" +" Place this script and the accompanying pyflakes directory in +" .vim/ftplugin/python. +" +" See README for additional installation and information. +" +" Thanks to matlib.vim for ideas/code on interactive linting. 
+" +" Maintainer: Kevin Watters <[email protected]> +" Version: 0.1 + +if exists("b:did_pyflakes_plugin") + finish " only load once +else + let b:did_pyflakes_plugin = 1 +endif + +if !exists('g:pyflakes_builtins') + let g:pyflakes_builtins = [] +endif + +if !exists("b:did_python_init") + let b:did_python_init = 0 + + if !has('python') + echoerr "Error: the pyflakes.vim plugin requires Vim to be compiled with +python" + finish + endif + + python << EOF +import vim +import os.path +import sys + +if sys.version_info[:2] < (2, 5): + raise AssertionError('Vim must be compiled with Python 2.5 or higher; you have ' + sys.version) + +# get the directory this script is in: the pyflakes python module should be installed there. +scriptdir = os.path.join(os.path.dirname(vim.eval('expand("<sfile>")')), 'pyflakes') +sys.path.insert(0, scriptdir) + +from pyflakes import checker, ast, messages +from operator import attrgetter +import re + +class SyntaxError(messages.Message): + message = 'could not compile: %s' + def __init__(self, filename, lineno, col, message): + messages.Message.__init__(self, filename, lineno, col) + self.message_args = (message,) + +class blackhole(object): + write = flush = lambda *a, **k: None + +def check(buffer): + filename = buffer.name + contents = buffer[:] + + # shebang usually found at the top of the file, followed by source code encoding marker. + # assume everything else that follows is encoded in the encoding. + encoding_found = False + for n, line in enumerate(contents): + if not encoding_found: + if re.match(r'^# -\*- coding: .+? 
-*-', line): + encoding_found = True + else: + # skip all preceeding lines + contents = [''] * n + contents[n:] + break + contents = '\n'.join(contents) + '\n' + + vimenc = vim.eval('&encoding') + if vimenc: + contents = contents.decode(vimenc) + + builtins = [] + try: + builtins = eval(vim.eval('string(g:pyflakes_builtins)')) + except Exception: + pass + + try: + # TODO: use warnings filters instead of ignoring stderr + old_stderr, sys.stderr = sys.stderr, blackhole() + try: + tree = ast.parse(contents, filename) + finally: + sys.stderr = old_stderr + except: + try: + value = sys.exc_info()[1] + lineno, offset, line = value[1][1:] + except IndexError: + lineno, offset, line = 1, 0, '' + if line and line.endswith("\n"): + line = line[:-1] + + return [SyntaxError(filename, lineno, offset, str(value))] + else: + w = checker.Checker(tree, filename, builtins = builtins) + w.messages.sort(key = attrgetter('lineno')) + return w.messages + + +def vim_quote(s): + return s.replace("'", "''") +EOF + let b:did_python_init = 1 +endif + +if !b:did_python_init + finish +endif + +au BufLeave <buffer> call s:ClearPyflakes() + +au BufEnter <buffer> call s:RunPyflakes() +au InsertLeave <buffer> call s:RunPyflakes() +au InsertEnter <buffer> call s:RunPyflakes() +au BufWritePost <buffer> call s:RunPyflakes() + +au CursorHold <buffer> call s:RunPyflakes() +au CursorHoldI <buffer> call s:RunPyflakes() + +au CursorHold <buffer> call s:GetPyflakesMessage() +au CursorMoved <buffer> call s:GetPyflakesMessage() + +if !exists("*s:PyflakesUpdate") + function s:PyflakesUpdate() + silent call s:RunPyflakes() + call s:GetPyflakesMessage() + endfunction +endif + +" Call this function in your .vimrc to update PyFlakes +if !exists(":PyflakesUpdate") + command PyflakesUpdate :call s:PyflakesUpdate() +endif + +" Hook common text manipulation commands to update PyFlakes +" TODO: is there a more general "text op" autocommand we could register +" for here? 
+noremap <buffer><silent> dd dd:PyflakesUpdate<CR> +noremap <buffer><silent> dw dw:PyflakesUpdate<CR> +noremap <buffer><silent> u u:PyflakesUpdate<CR> +noremap <buffer><silent> <C-R> <C-R>:PyflakesUpdate<CR> + +" WideMsg() prints [long] message up to (&columns-1) length +" guaranteed without "Press Enter" prompt. +if !exists("*s:WideMsg") + function s:WideMsg(msg) + let x=&ruler | let y=&showcmd + set noruler noshowcmd + redraw + echo a:msg + let &ruler=x | let &showcmd=y + endfun +endif + +if !exists("*s:GetQuickFixStackCount") + function s:GetQuickFixStackCount() + let l:stack_count = 0 + try + silent colder 9 + catch /E380:/ + endtry + + try + for i in range(9) + silent cnewer + let l:stack_count = l:stack_count + 1 + endfor + catch /E381:/ + return l:stack_count + endtry + endfunction +endif + +if !exists("*s:ActivatePyflakesQuickFixWindow") + function s:ActivatePyflakesQuickFixWindow() + try + silent colder 9 " go to the bottom of quickfix stack + catch /E380:/ + endtry + + if s:pyflakes_qf > 0 + try + exe "silent cnewer " . s:pyflakes_qf + catch /E381:/ + echoerr "Could not activate Pyflakes Quickfix Window." 
+ endtry + endif + endfunction +endif + +if !exists("*s:RunPyflakes") + function s:RunPyflakes() + highlight link PyFlakes SpellBad + + if exists("b:cleared") + if b:cleared == 0 + silent call s:ClearPyflakes() + let b:cleared = 1 + endif + else + let b:cleared = 1 + endif + + let b:matched = [] + let b:matchedlines = {} + + let b:qf_list = [] + let b:qf_window_count = -1 + + python << EOF +for w in check(vim.current.buffer): + vim.command('let s:matchDict = {}') + vim.command("let s:matchDict['lineNum'] = " + str(w.lineno)) + vim.command("let s:matchDict['message'] = '%s'" % vim_quote(w.message % w.message_args)) + vim.command("let b:matchedlines[" + str(w.lineno) + "] = s:matchDict") + + vim.command("let l:qf_item = {}") + vim.command("let l:qf_item.bufnr = bufnr('%')") + vim.command("let l:qf_item.filename = expand('%')") + vim.command("let l:qf_item.lnum = %s" % str(w.lineno)) + vim.command("let l:qf_item.text = '%s'" % vim_quote(w.message % w.message_args)) + vim.command("let l:qf_item.type = 'E'") + + if w.col is None or isinstance(w, SyntaxError): + # without column information, just highlight the whole line + # (minus the newline) + vim.command(r"let s:mID = matchadd('PyFlakes', '\%" + str(w.lineno) + r"l\n\@!')") + else: + # with a column number, highlight the first keyword there + vim.command(r"let s:mID = matchadd('PyFlakes', '^\%" + str(w.lineno) + r"l\_.\{-}\zs\k\+\k\@!\%>" + str(w.col) + r"c')") + + vim.command("let l:qf_item.vcol = 1") + vim.command("let l:qf_item.col = %s" % str(w.col + 1)) + + vim.command("call add(b:matched, s:matchDict)") + vim.command("call add(b:qf_list, l:qf_item)") +EOF + if exists("s:pyflakes_qf") + " if pyflakes quickfix window is already created, reuse it + call s:ActivatePyflakesQuickFixWindow() + call setqflist(b:qf_list, 'r') + else + " one pyflakes quickfix window for all buffer + call setqflist(b:qf_list, '') + let s:pyflakes_qf = s:GetQuickFixStackCount() + endif + let b:cleared = 0 + endfunction +end + +" keep track 
of whether or not we are showing a message +let b:showing_message = 0 + +if !exists("*s:GetPyflakesMessage") + function s:GetPyflakesMessage() + let s:cursorPos = getpos(".") + + " Bail if RunPyflakes hasn't been called yet. + if !exists('b:matchedlines') + return + endif + + " if there's a message for the line the cursor is currently on, echo + " it to the console + if has_key(b:matchedlines, s:cursorPos[1]) + let s:pyflakesMatch = get(b:matchedlines, s:cursorPos[1]) + call s:WideMsg(s:pyflakesMatch['message']) + let b:showing_message = 1 + return + endif + + " otherwise, if we're showing a message, clear it + if b:showing_message == 1 + echo + let b:showing_message = 0 + endif + endfunction +endif + +if !exists('*s:ClearPyflakes') + function s:ClearPyflakes() + let s:matches = getmatches() + for s:matchId in s:matches + if s:matchId['group'] == 'PyFlakes' + call matchdelete(s:matchId['id']) + endif + endfor + let b:matched = [] + let b:matchedlines = {} + let b:cleared = 1 + endfunction +endif + diff --git a/.vim/ftplugin/python/pyflakes/LICENSE b/.vim/ftplugin/python/pyflakes/LICENSE new file mode 100644 index 0000000..42b8cf3 --- /dev/null +++ b/.vim/ftplugin/python/pyflakes/LICENSE @@ -0,0 +1,21 @@ + +Copyright (c) 2005 Divmod, Inc., http://www.divmod.com/ + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/.vim/ftplugin/python/pyflakes/README.rst b/.vim/ftplugin/python/pyflakes/README.rst new file mode 100644 index 0000000..9ac34fc --- /dev/null +++ b/.vim/ftplugin/python/pyflakes/README.rst @@ -0,0 +1,36 @@ +pyflakes +======== + +This version of PyFlakes_ has been improved to use Python's newer ``ast`` +module, instead of ``compiler``. So code checking happens faster, and will stay +up to date with new language changes. + +.. _PyFlakes: http://http://www.divmod.org/trac/wiki/DivmodPyflakes + +TODO +---- + +Importing several modules from the same package results in unnecessary warnings: + +:: + + import a.b + import a.c # Redefinition of unused "a" from line 1 + +The following construct for defining a function differently depending on some +condition results in a redefinition warning: + +:: + + if some_condition: + def foo(): do_foo() + else: + def foo(): do_bar() # redefinition of function 'foo' from line 2 + +IDE Integration +--------------- + +* vim: pyflakes-vim_ + +.. _pyflakes-vim: http://github.com/kevinw/pyflakes-vim + diff --git a/.vim/ftplugin/python/pyflakes/TODO b/.vim/ftplugin/python/pyflakes/TODO new file mode 100644 index 0000000..69f3f12 --- /dev/null +++ b/.vim/ftplugin/python/pyflakes/TODO @@ -0,0 +1,11 @@ + - Check for methods that override other methods except that they vary by case. 
+ - assign/increment + unbound local error not caught + def foo(): + bar = 5 + def meep(): + bar += 2 + meep() + print bar + + print foo() + diff --git a/.vim/ftplugin/python/pyflakes/pyflakes/__init__.py b/.vim/ftplugin/python/pyflakes/pyflakes/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.vim/ftplugin/python/pyflakes/pyflakes/ast.py b/.vim/ftplugin/python/pyflakes/pyflakes/ast.py new file mode 100644 index 0000000..d52025f --- /dev/null +++ b/.vim/ftplugin/python/pyflakes/pyflakes/ast.py @@ -0,0 +1,311 @@ +# -*- coding: utf-8 -*- +""" + ast + ~~~ + + The `ast` module helps Python applications to process trees of the Python + abstract syntax grammar. The abstract syntax itself might change with + each Python release; this module helps to find out programmatically what + the current grammar looks like and allows modifications of it. + + An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as + a flag to the `compile()` builtin function or by using the `parse()` + function from this module. The result will be a tree of objects whose + classes all inherit from `ast.AST`. + + A modified abstract syntax tree can be compiled into a Python code object + using the built-in `compile()` function. + + Additionally various helper functions are provided that make working with + the trees simpler. The main intention of the helper functions and this + module in general is to provide an easy to use interface for libraries + that work tightly with the python syntax (template engines for example). + + + :copyright: Copyright 2008 by Armin Ronacher. + :license: Python License. +""" +from _ast import * +from _ast import __version__ + + +def parse(expr, filename='<unknown>', mode='exec'): + """ + Parse an expression into an AST node. + Equivalent to compile(expr, filename, mode, PyCF_ONLY_AST). 
+ """ + return compile(expr, filename, mode, PyCF_ONLY_AST) + + +def literal_eval(node_or_string): + """ + Safely evaluate an expression node or a string containing a Python + expression. The string or node provided may only consist of the following + Python literal structures: strings, numbers, tuples, lists, dicts, booleans, + and None. + """ + _safe_names = {'None': None, 'True': True, 'False': False} + if isinstance(node_or_string, basestring): + node_or_string = parse(node_or_string, mode='eval') + if isinstance(node_or_string, Expression): + node_or_string = node_or_string.body + def _convert(node): + if isinstance(node, Str): + return node.s + elif isinstance(node, Num): + return node.n + elif isinstance(node, Tuple): + return tuple(map(_convert, node.elts)) + elif isinstance(node, List): + return list(map(_convert, node.elts)) + elif isinstance(node, Dict): + return dict((_convert(k), _convert(v)) for k, v + in zip(node.keys, node.values)) + elif isinstance(node, Name): + if node.id in _safe_names: + return _safe_names[node.id] + raise ValueError('malformed string') + return _convert(node_or_string) + + +def dump(node, annotate_fields=True, include_attributes=False): + """ + Return a formatted dump of the tree in *node*. This is mainly useful for + debugging purposes. The returned string will show the names and the values + for fields. This makes the code impossible to evaluate, so if evaluation is + wanted *annotate_fields* must be set to False. Attributes such as line + numbers and column offsets are not dumped by default. If this is wanted, + *include_attributes* can be set to True. 
+ """ + def _format(node): + if isinstance(node, AST): + fields = [(a, _format(b)) for a, b in iter_fields(node)] + rv = '%s(%s' % (node.__class__.__name__, ', '.join( + ('%s=%s' % field for field in fields) + if annotate_fields else + (b for a, b in fields) + )) + if include_attributes and node._attributes: + rv += fields and ', ' or ' ' + rv += ', '.join('%s=%s' % (a, _format(getattr(node, a))) + for a in node._attributes) + return rv + ')' + elif isinstance(node, list): + return '[%s]' % ', '.join(_format(x) for x in node) + return repr(node) + if not isinstance(node, AST): + raise TypeError('expected AST, got %r' % node.__class__.__name__) + return _format(node) + + +def copy_location(new_node, old_node): + """ + Copy source location (`lineno` and `col_offset` attributes) from + *old_node* to *new_node* if possible, and return *new_node*. + """ + for attr in 'lineno', 'col_offset': + if attr in old_node._attributes and attr in new_node._attributes \ + and hasattr(old_node, attr): + setattr(new_node, attr, getattr(old_node, attr)) + return new_node + + +def fix_missing_locations(node): + """ + When you compile a node tree with compile(), the compiler expects lineno and + col_offset attributes for every node that supports them. This is rather + tedious to fill in for generated nodes, so this helper adds these attributes + recursively where not already set, by setting them to the values of the + parent node. It works recursively starting at *node*. 
+ """ + def _fix(node, lineno, col_offset): + if 'lineno' in node._attributes: + if not hasattr(node, 'lineno'): + node.lineno = lineno + else: + lineno = node.lineno + if 'col_offset' in node._attributes: + if not hasattr(node, 'col_offset'): + node.col_offset = col_offset + else: + col_offset = node.col_offset + for child in iter_child_nodes(node): + _fix(child, lineno, col_offset) + _fix(node, 1, 0) + return node + +def add_col_end(node): + def _fix(node, next): + children = list(iter_child_nodes(node)) + for i, child in enumerate(children): + next_offset = children[i+1].col_offset if i < len(children) else next.col_offset + child.col_end = next_offset + + +def increment_lineno(node, n=1): + """ + Increment the line number of each node in the tree starting at *node* by *n*. + This is useful to "move code" to a different location in a file. + """ + if 'lineno' in node._attributes: + node.lineno = getattr(node, 'lineno', 0) + n + for child in walk(node): + if 'lineno' in child._attributes: + child.lineno = getattr(child, 'lineno', 0) + n + return node + + +def iter_fields(node): + """ + Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields`` + that is present on *node*. + """ + if node._fields is None: + return + + for field in node._fields: + try: + yield field, getattr(node, field) + except AttributeError: + pass + + +def iter_child_nodes(node): + """ + Yield all direct child nodes of *node*, that is, all fields that are nodes + and all items of fields that are lists of nodes. + """ + for name, field in iter_fields(node): + if isinstance(field, AST): + yield field + elif isinstance(field, list): + for item in field: + if isinstance(item, AST): + yield item + + +def get_docstring(node, clean=True): + """ + Return the docstring for the given node or None if no docstring can + be found. If the node provided does not have docstrings a TypeError + will be raised. 
+ """ + if not isinstance(node, (FunctionDef, ClassDef, Module)): + raise TypeError("%r can't have docstrings" % node.__class__.__name__) + if node.body and isinstance(node.body[0], Expr) and \ + isinstance(node.body[0].value, Str): + if clean: + import inspect + return inspect.cleandoc(node.body[0].value.s) + return node.body[0].value.s + + +def walk(node): + """ + Recursively yield all child nodes of *node*, in no specified order. This is + useful if you only want to modify nodes in place and don't care about the + context. + """ + from collections import deque + todo = deque([node]) + while todo: + node = todo.popleft() + todo.extend(iter_child_nodes(node)) + yield node + + +class NodeVisitor(object): + """ + A node visitor base class that walks the abstract syntax tree and calls a + visitor function for every node found. This function may return a value + which is forwarded by the `visit` method. + + This class is meant to be subclassed, with the subclass adding visitor + methods. + + Per default the visitor functions for the nodes are ``'visit_'`` + + class name of the node. So a `TryFinally` node visit function would + be `visit_TryFinally`. This behavior can be changed by overriding + the `visit` method. If no visitor function exists for a node + (return value `None`) the `generic_visit` visitor is used instead. + + Don't use the `NodeVisitor` if you want to apply changes to nodes during + traversing. For this a special visitor exists (`NodeTransformer`) that + allows modifications. 
+ """ + + def visit(self, node): + """Visit a node.""" + method = 'visit_' + node.__class__.__name__ + visitor = getattr(self, method, self.generic_visit) + return visitor(node) + + def generic_visit(self, node): + """Called if no explicit visitor function exists for a node.""" + for field, value in iter_fields(node): + if isinstance(value, list): + for item in value: + if isinstance(item, AST): + self.visit(item) + elif isinstance(value, AST): + self.visit(value) + + +class NodeTransformer(NodeVisitor): + """ + A :class:`NodeVisitor` subclass that walks the abstract syntax tree and + allows modification of nodes. + + The `NodeTransformer` will walk the AST and use the return value of the + visitor methods to replace or remove the old node. If the return value of + the visitor method is ``None``, the node will be removed from its location, + otherwise it is replaced with the return value. The return value may be the + original node in which case no replacement takes place. + + Here is an example transformer that rewrites all occurrences of name lookups + (``foo``) to ``data['foo']``:: + + class RewriteName(NodeTransformer): + + def visit_Name(self, node): + return copy_location(Subscript( + value=Name(id='data', ctx=Load()), + slice=Index(value=Str(s=node.id)), + ctx=node.ctx + ), node) + + Keep in mind that if the node you're operating on has child nodes you must + either transform the child nodes yourself or call the :meth:`generic_visit` + method for the node first. + + For nodes that were part of a collection of statements (that applies to all + statement nodes), the visitor may also return a list of nodes rather than + just a single node. 
+ + Usually you use the transformer like this:: + + node = YourTransformer().visit(node) + """ + + def generic_visit(self, node): + for field, old_value in iter_fields(node): + old_value = getattr(node, field, None) + if isinstance(old_value, list): + new_values = [] + for value in old_value: + if isinstance(value, AST): + value = self.visit(value) + if value is None: + continue + elif not isinstance(value, AST): + new_values.extend(value) + continue + new_values.append(value) + old_value[:] = new_values + elif isinstance(old_value, AST): + new_node = self.visit(old_value) + if new_node is None: + delattr(node, field) + else: + setattr(node, field, new_node) + return node diff --git a/.vim/ftplugin/python/pyflakes/pyflakes/checker.py b/.vim/ftplugin/python/pyflakes/pyflakes/checker.py new file mode 100644 index 0000000..b3d6960 --- /dev/null +++ b/.vim/ftplugin/python/pyflakes/pyflakes/checker.py @@ -0,0 +1,408 @@ +import ast +from pyflakes import messages +import __builtin__ + + +allowed_before_future = (ast.Module, ast.ImportFrom, ast.Expr, ast.Str) +defined_names = set(('__file__', '__builtins__')) + +class Binding(object): + """ + @ivar used: pair of (L{Scope}, line-number) indicating the scope and + line number that this binding was last used + """ + def __init__(self, name, source): + self.name = name + self.source = source + self.used = False + + def __str__(self): + return self.name + + def __repr__(self): + return '<%s object %r from line %r at 0x%x>' % (self.__class__.__name__, + self.name, + self.source.lineno, + id(self)) + +class UnBinding(Binding): + '''Created by the 'del' operator.''' + +class Importation(Binding): + def __init__(self, name, source): + name = name.split('.')[0] + super(Importation, self).__init__(name, source) + +class Assignment(Binding): + pass + +class FunctionDefinition(Binding): + _property_decorator = False + + +class Scope(dict): + import_starred = False # set to True when import * is found + + def __repr__(self): + return 
'<%s at 0x%x %s>' % (self.__class__.__name__, id(self), dict.__repr__(self)) + + def __init__(self): + super(Scope, self).__init__() + +class ClassScope(Scope): + pass + + + +class FunctionScope(Scope): + """ + I represent a name scope for a function. + + @ivar globals: Names declared 'global' in this function. + """ + def __init__(self): + super(FunctionScope, self).__init__() + self.globals = {} + + + +class ModuleScope(Scope): + pass + +class Checker(ast.NodeVisitor): + def __init__(self, tree, filename='(none)', builtins = None): + ast.NodeVisitor.__init__(self) + + self.deferred = [] + self.dead_scopes = [] + self.messages = [] + self.filename = filename + self.scope_stack = [ModuleScope()] + self.futures_allowed = True + self.builtins = frozenset(builtins or []) + + self.visit(tree) + for handler, scope in self.deferred: + self.scope_stack = scope + handler() + del self.scope_stack[1:] + self.pop_scope() + self.check_dead_scopes() + + def defer(self, callable): + '''Schedule something to be called after just before completion. + + This is used for handling function bodies, which must be deferred + because code later in the file might modify the global scope. When + `callable` is called, the scope at the time this is called will be + restored, however it will contain any new bindings added to it. 
+ ''' + self.deferred.append( (callable, self.scope_stack[:]) ) + + def check_dead_scopes(self): + # Check for modules that were imported but unused + for scope in self.dead_scopes: + for importation in scope.itervalues(): + if isinstance(importation, Importation) and not importation.used: + self.report(messages.UnusedImport, importation.source.lineno, importation.name) + + def push_function_scope(self): + self.scope_stack.append(FunctionScope()) + + def push_class_scope(self): + self.scope_stack.append(ClassScope()) + + def pop_scope(self): + scope = self.scope_stack.pop() + self.dead_scopes.append(scope) + + @property + def scope(self): + return self.scope_stack[-1] + + def report(self, message_class, *args, **kwargs): + self.messages.append(message_class(self.filename, *args, **kwargs)) + + def visit_Import(self, node): + for name_node in node.names: + # "import bar as foo" -> name=bar, asname=foo + name = name_node.asname or name_node.name + self.add_binding(node, Importation(name, node)) + + def visit_GeneratorExp(self, node): + for generator in node.generators: + self.visit(generator.iter) + self.assign_vars(generator.target) + + for generator in node.generators: + if hasattr(node, 'elt'): + self.visit(node.elt) + + self.visit_nodes(generator.ifs) + + visit_ListComp = visit_GeneratorExp + + def visit_For(self, node): + ''' + Process bindings for loop variables. 
+ ''' + self.visit_nodes(node.iter) + + for var in self.flatten(node.target): + upval = self.scope.get(var.id) + if isinstance(upval, Importation) and upval.used: + self.report(messages.ImportShadowedByLoopVar, + node.lineno, node.col_offset, var.id, upval.source.lineno) + + self.add_binding(var, Assignment(var.id, var)) + + self.visit_nodes(node.body + node.orelse) + + def visit_FunctionDef(self, node): + + try: + decorators = node.decorator_list + except AttributeError: + # Use .decorators for Python 2.5 compatibility + decorators = node.decorators + + self.visit_nodes(decorators) + + # Check for property decorator + func_def = FunctionDefinition(node.name, node) + + for decorator in decorators: + if getattr(decorator, 'attr', None) in ('setter', 'deleter'): + func_def._property_decorator = True + + self.add_binding(node, func_def) + + self.visit_Lambda(node) + + def visit_Lambda(self, node): + self.visit_nodes(node.args.defaults) + + def run_function(): + self.push_function_scope() + + # Check for duplicate arguments + argnames = set() + for arg in self.flatten(node.args.args): + if arg.id in argnames: + self.report(messages.DuplicateArgument, arg.lineno, arg.col_offset, arg.id) + argnames.add(arg.id) + + self.assign_vars(node.args.args, report_redef=False) + if node.args.vararg is not None: + self.add_binding(node, Assignment(node.args.vararg, node), False) + if node.args.kwarg is not None: + self.add_binding(node, Assignment(node.args.kwarg, node), False) + self.visit_nodes(node.body) + self.pop_scope() + + self.defer(run_function) + + def visit_Name(self, node): + ''' + Locate names in locals / function / globals scopes. 
+ ''' + scope, name = self.scope, node.id + + # try local scope + import_starred = scope.import_starred + try: + scope[name].used = (scope, node.lineno, node.col_offset) + except KeyError: + pass + else: + return + + # try enclosing function scopes + for func_scope in self.scope_stack[-2:0:-1]: + import_starred = import_starred or func_scope.import_starred + if not isinstance(func_scope, FunctionScope): + continue + try: + func_scope[name].used = (scope, node.lineno, node.col_offset) + except KeyError: + pass + else: + return + + # try global scope + import_starred = import_starred or self.scope_stack[0].import_starred + try: + self.scope_stack[0][node.id].used = (scope, node.lineno, node.col_offset) + except KeyError: + if not import_starred and not self.is_builtin(name): + self.report(messages.UndefinedName, node.lineno, node.col_offset, name) + + def assign_vars(self, targets, report_redef=True): + scope = self.scope + + for target in self.flatten(targets): + name = target.id + # if the name hasn't already been defined in the current scope + if isinstance(scope, FunctionScope) and name not in scope: + # for each function or module scope above us + for upscope in self.scope_stack[:-1]: + if not isinstance(upscope, (FunctionScope, ModuleScope)): + continue + + upval = upscope.get(name) + # if the name was defined in that scope, and the name has + # been accessed already in the current scope, and hasn't + # been declared global + if upval is not None: + if upval.used and upval.used[0] is scope and name not in scope.globals: + # then it's probably a mistake + self.report(messages.UndefinedLocal, + upval.used[1], upval.used[2], name, upval.source.lineno, upval.source.col_offset) + + self.add_binding(target, Assignment(name, target), report_redef) + + def visit_Assign(self, node): + for target in node.targets: + self.visit_nodes(node.value) + self.assign_vars(node.targets) + + def visit_Delete(self, node): + for target in self.flatten(node.targets): + if 
isinstance(self.scope, FunctionScope) and target.id in self.scope.globals: + del self.scope.globals[target.id] + else: + self.add_binding(target, UnBinding(target.id, target)) + + def visit_With(self, node): + self.visit(node.context_expr) + + # handle new bindings made by optional "as" part + if node.optional_vars is not None: + self.assign_vars(node.optional_vars) + + self.visit_nodes(node.body) + + def visit_ImportFrom(self, node): + if node.module == '__future__': + if not self.futures_allowed: + self.report(messages.LateFutureImport, node.lineno, node.col_offset, [alias.name for alias in node.names]) + else: + self.futures_allowed = False + + for alias in node.names: + if alias.name == '*': + self.scope.import_starred = True + self.report(messages.ImportStarUsed, node.lineno, node.col_offset, node.module) + continue + name = alias.asname or alias.name + importation = Importation(name, node) + if node.module == '__future__': + importation.used = (self.scope, node.lineno, node.col_offset) + self.add_binding(node, importation) + + def visit_Global(self, node): + ''' + Keep track of global declarations. 
+ ''' + scope = self.scope + if isinstance(scope, FunctionScope): + scope.globals.update(dict.fromkeys(node.names)) + + def visit_ClassDef(self, node): + try: + decorators = node.decorator_list + except AttributeError: + # Use .decorators for Python 2.5 compatibility + decorators = getattr(node, 'decorators', []) + + self.visit_nodes(decorators) + + self.add_binding(node, Assignment(node.name, node)) + self.visit_nodes(node.bases) + + self.push_class_scope() + self.visit_nodes(node.body) + self.pop_scope() + + def visit_excepthandler(self, node): + if node.type is not None: + self.visit(node.type) + if node.name is not None: + self.assign_vars(node.name) + self.visit_nodes(node.body) + + visit_ExceptHandler = visit_excepthandler # in 2.6, this was CamelCased + + def flatten(self, nodes): + if isinstance(nodes, ast.Attribute): + self.visit(nodes) + return [] + elif isinstance(nodes, ast.Subscript): + self.visit(nodes.value) + self.visit(nodes.slice) + return [] + elif isinstance(nodes, ast.Name): + return [nodes] + elif isinstance(nodes, (ast.Tuple, ast.List)): + return self.flatten(nodes.elts) + + flattened_nodes = [] + for node in nodes: + if hasattr(node, 'elts'): + flattened_nodes += self.flatten(node.elts) + elif node is not None: + flattened_nodes += self.flatten(node) + + return flattened_nodes + + def add_binding(self, node, value, report_redef=True): + line, col, scope, name = node.lineno, node.col_offset, self.scope, value.name + + # Check for a redefined function + func = scope.get(name) + if (isinstance(func, FunctionDefinition) and isinstance(value, FunctionDefinition)): + # Property-decorated functions (@x.setter) should have duplicate names + if not value._property_decorator: + self.report(messages.RedefinedFunction, line, name, func.source.lineno) + + # Check for redefining an unused import + if report_redef and not isinstance(scope, ClassScope): + for up_scope in self.scope_stack[::-1]: + upval = up_scope.get(name) + if isinstance(upval, 
Importation) and not upval.used: + self.report(messages.RedefinedWhileUnused, line, col, name, upval.source.lineno) + + # Check for "del undefined_name" + if isinstance(value, UnBinding): + try: + del scope[name] + except KeyError: + self.report(messages.UndefinedName, line, col, name) + else: + scope[name] = value + + def visit(self, node): + if not isinstance(node, allowed_before_future): + self.futures_allowed = False + + return super(Checker, self).visit(node) + + def visit_nodes(self, nodes): + try: + nodes = list(getattr(nodes, 'elts', nodes)) + except TypeError: + nodes = [nodes] + + for node in nodes: + self.visit(node) + + def is_builtin(self, name): + if hasattr(__builtin__, name): + return True + if name in defined_names: + return True + if name in self.builtins: + return True + + return False + diff --git a/.vim/ftplugin/python/pyflakes/pyflakes/messages.py b/.vim/ftplugin/python/pyflakes/pyflakes/messages.py new file mode 100644 index 0000000..15a920e --- /dev/null +++ b/.vim/ftplugin/python/pyflakes/pyflakes/messages.py @@ -0,0 +1,77 @@ +# (c) 2005 Divmod, Inc. 
See LICENSE file for details + +class Message(object): + message = '' + message_args = () + def __init__(self, filename, lineno, col = None): + self.filename = filename + self.lineno = lineno + self.col = col + def __str__(self): + if self.col is not None: + return '%s:%s(%d): %s' % (self.filename, self.lineno, self.col, self.message % self.message_args) + else: + return '%s:%s: %s' % (self.filename, self.lineno, self.message % self.message_args) + + +class UnusedImport(Message): + message = '%r imported but unused' + def __init__(self, filename, lineno, name): + Message.__init__(self, filename, lineno) + self.message_args = (name,) + + +class RedefinedWhileUnused(Message): + message = 'redefinition of unused %r from line %r' + def __init__(self, filename, lineno, col, name, orig_lineno): + Message.__init__(self, filename, lineno) + self.message_args = (name, orig_lineno) + + +class ImportShadowedByLoopVar(Message): + message = 'import %r from line %r shadowed by loop variable' + def __init__(self, filename, lineno, col, name, orig_lineno): + Message.__init__(self, filename, lineno, col) + self.message_args = (name, orig_lineno) + + +class ImportStarUsed(Message): + message = "'from %s import *' used; unable to detect undefined names" + def __init__(self, filename, lineno, col, modname): + Message.__init__(self, filename, lineno, col) + self.message_args = (modname,) + + +class UndefinedName(Message): + message = 'undefined name %r' + def __init__(self, filename, lineno, col, name): + Message.__init__(self, filename, lineno, col) + self.message_args = (name,) + + +class UndefinedLocal(Message): + message = "local variable %r (defined in enclosing scope on line %r) referenced before assignment" + def __init__(self, filename, lineno, col, name, orig_lineno, orig_col): + Message.__init__(self, filename, lineno) + self.message_args = (name, orig_lineno) + + +class DuplicateArgument(Message): + message = 'duplicate argument %r in function definition' + def 
__init__(self, filename, lineno, col, name): + Message.__init__(self, filename, lineno, col) + self.message_args = (name,) + + +class RedefinedFunction(Message): + message = 'redefinition of function %r from line %r' + def __init__(self, filename, lineno, name, orig_lineno): + Message.__init__(self, filename, lineno) + self.message_args = (name, orig_lineno) + + +class LateFutureImport(Message): + message = 'future import(s) %r after other statements' + def __init__(self, filename, lineno, col, names): + Message.__init__(self, filename, lineno) + self.message_args = (names,) diff --git a/.vim/ftplugin/python/pyflakes/pyflakes/scripts/__init__.py b/.vim/ftplugin/python/pyflakes/pyflakes/scripts/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.vim/ftplugin/python/pyflakes/pyflakes/scripts/pyflakes.py b/.vim/ftplugin/python/pyflakes/pyflakes/scripts/pyflakes.py new file mode 100644 index 0000000..06937a7 --- /dev/null +++ b/.vim/ftplugin/python/pyflakes/pyflakes/scripts/pyflakes.py @@ -0,0 +1,63 @@ + +""" +Implementation of the command-line I{pyflakes} tool. +""" + +import _ast +import sys +import os + +checker = __import__('pyflakes.checker').checker + +def check(codeString, filename): + try: + tree = compile(codeString, filename, 'exec', _ast.PyCF_ONLY_AST) + except (SyntaxError, IndentationError): + value = sys.exc_info()[1] + try: + (lineno, offset, line) = value[1][1:] + except IndexError: + print >> sys.stderr, 'could not compile %r' % (filename,) + return 1 + if line.endswith("\n"): + line = line[:-1] + print >> sys.stderr, '%s:%d: could not compile' % (filename, lineno) + print >> sys.stderr, line + print >> sys.stderr, " " * (offset-2), "^" + return 1 + else: + w = checker.Checker(tree, filename) + w.messages.sort(lambda a, b: cmp(a.lineno, b.lineno)) + for warning in w.messages: + print warning + return len(w.messages) + + +def checkPath(filename): + """ + Check the given path, printing out any warnings detected. 
+ + @return: the number of warnings printed + """ + if os.path.exists(filename): + return check(file(filename, 'U').read() + '\n', filename) + else: + print >> sys.stderr, '%s: no such file' % (filename,) + return 1 + +def main(): + warnings = 0 + args = sys.argv[1:] + if args: + for arg in args: + if os.path.isdir(arg): + for dirpath, dirnames, filenames in os.walk(arg): + for filename in filenames: + if filename.endswith('.py'): + warnings += checkPath(os.path.join(dirpath, filename)) + else: + warnings += checkPath(arg) + else: + warnings += check(sys.stdin.read(), '<stdin>') + + raise SystemExit(warnings > 0) diff --git a/.vim/ftplugin/python/pyflakes/pyflakes/test/__init__.py b/.vim/ftplugin/python/pyflakes/pyflakes/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.vim/ftplugin/python/pyflakes/pyflakes/test/harness.py b/.vim/ftplugin/python/pyflakes/pyflakes/test/harness.py new file mode 100644 index 0000000..765cda4 --- /dev/null +++ b/.vim/ftplugin/python/pyflakes/pyflakes/test/harness.py @@ -0,0 +1,24 @@ + +import textwrap + +from twisted.trial import unittest + +from pyflakes import checker, ast + + +class Test(unittest.TestCase): + + def flakes(self, input, *expectedOutputs): + w = checker.Checker(ast.parse(textwrap.dedent(input))) + outputs = [type(o) for o in w.messages] + expectedOutputs = list(expectedOutputs) + outputs.sort() + expectedOutputs.sort() + self.assert_(outputs == expectedOutputs, '''\ +for input: +%s +expected outputs: +%s +but got: +%s''' % (input, repr(expectedOutputs), '\n'.join([str(o) for o in w.messages]))) + return w diff --git a/.vim/ftplugin/python/pyflakes/pyflakes/test/test_imports.py b/.vim/ftplugin/python/pyflakes/pyflakes/test/test_imports.py new file mode 100644 index 0000000..4f87f3e --- /dev/null +++ b/.vim/ftplugin/python/pyflakes/pyflakes/test/test_imports.py @@ -0,0 +1,512 @@ + +from sys import version_info + +from pyflakes import messages as m +from pyflakes.test import harness + +class 
Test(harness.Test): + + def test_unusedImport(self): + self.flakes('import fu, bar', m.UnusedImport, m.UnusedImport) + self.flakes('from baz import fu, bar', m.UnusedImport, m.UnusedImport) + + def test_aliasedImport(self): + self.flakes('import fu as FU, bar as FU', m.RedefinedWhileUnused, m.UnusedImport) + self.flakes('from moo import fu as FU, bar as FU', m.RedefinedWhileUnused, m.UnusedImport) + + def test_usedImport(self): + self.flakes('import fu; print fu') + self.flakes('from baz import fu; print fu') + + def test_redefinedWhileUnused(self): + self.flakes('import fu; fu = 3', m.RedefinedWhileUnused) + self.flakes('import fu; del fu', m.RedefinedWhileUnused) + self.flakes('import fu; fu, bar = 3', m.RedefinedWhileUnused) + self.flakes('import fu; [fu, bar] = 3', m.RedefinedWhileUnused) + + def test_redefinedByFunction(self): + self.flakes(''' + import fu + def fu(): + pass + ''', m.RedefinedWhileUnused) + + def test_redefinedInNestedFunction(self): + """ + Test that shadowing a global name with a nested function definition + generates a warning. + """ + self.flakes(''' + import fu + def bar(): + def baz(): + def fu(): + pass + ''', m.RedefinedWhileUnused, m.UnusedImport) + + def test_redefinedByClass(self): + self.flakes(''' + import fu + class fu: + pass + ''', m.RedefinedWhileUnused) + + def test_redefinedInClass(self): + """ + Test that shadowing a global with a class attribute does not produce a + warning. 
+ """ + self.flakes(''' + import fu + class bar: + fu = 1 + print fu + ''') + + def test_usedInFunction(self): + self.flakes(''' + import fu + def fun(): + print fu + ''') + + def test_shadowedByParameter(self): + self.flakes(''' + import fu + def fun(fu): + print fu + ''', m.UnusedImport) + + self.flakes(''' + import fu + def fun(fu): + print fu + print fu + ''') + + def test_newAssignment(self): + self.flakes('fu = None') + + def test_usedInGetattr(self): + self.flakes('import fu; fu.bar.baz') + self.flakes('import fu; "bar".fu.baz', m.UnusedImport) + + def test_usedInSlice(self): + self.flakes('import fu; print fu.bar[1:]') + + def test_usedInIfBody(self): + self.flakes(''' + import fu + if True: print fu + ''') + + def test_usedInIfConditional(self): + self.flakes(''' + import fu + if fu: pass + ''') + + def test_usedInElifConditional(self): + self.flakes(''' + import fu + if False: pass + elif fu: pass + ''') + + def test_usedInElse(self): + self.flakes(''' + import fu + if False: pass + else: print fu + ''') + + def test_usedInCall(self): + self.flakes('import fu; fu.bar()') + + def test_usedInClass(self): + self.flakes(''' + import fu + class bar: + bar = fu + ''') + + def test_usedInClassBase(self): + self.flakes(''' + import fu + class bar(object, fu.baz): + pass + ''') + + def test_notUsedInNestedScope(self): + self.flakes(''' + import fu + def bleh(): + pass + print fu + ''') + + def test_usedInFor(self): + self.flakes(''' + import fu + for bar in range(9): + print fu + ''') + + def test_usedInForElse(self): + self.flakes(''' + import fu + for bar in range(10): + pass + else: + print fu + ''') + + def test_redefinedByFor(self): + self.flakes(''' + import fu + for fu in range(2): + pass + ''', m.RedefinedWhileUnused) + + def test_shadowedByFor(self): + """ + Test that shadowing a global name with a for loop variable generates a + warning. 
+ """ + self.flakes(''' + import fu + fu.bar() + for fu in (): + pass + ''', m.ImportShadowedByLoopVar) + + def test_shadowedByForDeep(self): + """ + Test that shadowing a global name with a for loop variable nested in a + tuple unpack generates a warning. + """ + self.flakes(''' + import fu + fu.bar() + for (x, y, z, (a, b, c, (fu,))) in (): + pass + ''', m.ImportShadowedByLoopVar) + + def test_usedInReturn(self): + self.flakes(''' + import fu + def fun(): + return fu + ''') + + def test_usedInOperators(self): + self.flakes('import fu; 3 + fu.bar') + self.flakes('import fu; 3 % fu.bar') + self.flakes('import fu; 3 - fu.bar') + self.flakes('import fu; 3 * fu.bar') + self.flakes('import fu; 3 ** fu.bar') + self.flakes('import fu; 3 / fu.bar') + self.flakes('import fu; 3 // fu.bar') + self.flakes('import fu; -fu.bar') + self.flakes('import fu; ~fu.bar') + self.flakes('import fu; 1 == fu.bar') + self.flakes('import fu; 1 | fu.bar') + self.flakes('import fu; 1 & fu.bar') + self.flakes('import fu; 1 ^ fu.bar') + self.flakes('import fu; 1 >> fu.bar') + self.flakes('import fu; 1 << fu.bar') + + def test_usedInAssert(self): + self.flakes('import fu; assert fu.bar') + + def test_usedInSubscript(self): + self.flakes('import fu; fu.bar[1]') + + def test_usedInLogic(self): + self.flakes('import fu; fu and False') + self.flakes('import fu; fu or False') + self.flakes('import fu; not fu.bar') + + def test_usedInList(self): + self.flakes('import fu; [fu]') + + def test_usedInTuple(self): + self.flakes('import fu; (fu,)') + + def test_usedInTry(self): + self.flakes(''' + import fu + try: fu + except: pass + ''') + + def test_usedInExcept(self): + self.flakes(''' + import fu + try: fu + except: pass + ''') + + def test_redefinedByExcept(self): + self.flakes(''' + import fu + try: pass + except Exception, fu: pass + ''', m.RedefinedWhileUnused) + + def test_usedInRaise(self): + self.flakes(''' + import fu + raise fu.bar + ''') + + def test_usedInYield(self): + self.flakes(''' + 
import fu + def gen(): + yield fu + ''') + + def test_usedInDict(self): + self.flakes('import fu; {fu:None}') + self.flakes('import fu; {1:fu}') + + def test_usedInParameterDefault(self): + self.flakes(''' + import fu + def f(bar=fu): + pass + ''') + + def test_usedInAttributeAssign(self): + self.flakes('import fu; fu.bar = 1') + + def test_usedInKeywordArg(self): + self.flakes('import fu; fu.bar(stuff=fu)') + + def test_usedInAssignment(self): + self.flakes('import fu; bar=fu') + self.flakes('import fu; n=0; n+=fu') + + def test_usedInListComp(self): + self.flakes('import fu; [fu for _ in range(1)]') + self.flakes('import fu; [1 for _ in range(1) if fu]') + + def test_redefinedByListComp(self): + self.flakes('import fu; [1 for fu in range(1)]', m.RedefinedWhileUnused) + + + def test_usedInTryFinally(self): + self.flakes(''' + import fu + try: pass + finally: fu + ''') + + self.flakes(''' + import fu + try: fu + finally: pass + ''') + + def test_usedInWhile(self): + self.flakes(''' + import fu + while 0: + fu + ''') + + self.flakes(''' + import fu + while fu: pass + ''') + + def test_usedInGlobal(self): + self.flakes(''' + import fu + def f(): global fu + ''', m.UnusedImport) + + def test_usedInBackquote(self): + self.flakes('import fu; `fu`') + + def test_usedInExec(self): + self.flakes('import fu; exec "print 1" in fu.bar') + + def test_usedInLambda(self): + self.flakes('import fu; lambda: fu') + + def test_shadowedByLambda(self): + self.flakes('import fu; lambda fu: fu', m.UnusedImport) + + def test_usedInSliceObj(self): + self.flakes('import fu; "meow"[::fu]') + + def test_unusedInNestedScope(self): + self.flakes(''' + def bar(): + import fu + fu + ''', m.UnusedImport, m.UndefinedName) + + def test_methodsDontUseClassScope(self): + self.flakes(''' + class bar: + import fu + def fun(self): + fu + ''', m.UnusedImport, m.UndefinedName) + + def test_nestedFunctionsNestScope(self): + self.flakes(''' + def a(): + def b(): + fu + import fu + ''') + + def 
test_nestedClassAndFunctionScope(self): + self.flakes(''' + def a(): + import fu + class b: + def c(self): + print fu + ''') + + def test_importStar(self): + self.flakes('from fu import *', m.ImportStarUsed) + + def test_packageImport(self): + self.flakes('import fu.bar; fu.bar') + test_packageImport.todo = "this has been hacked to treat 'import fu.bar' as just 'import fu'" + + def test_assignRHSFirst(self): + self.flakes('import fu; fu = fu') + self.flakes('import fu; fu, bar = fu') + self.flakes('import fu; [fu, bar] = fu') + self.flakes('import fu; fu += fu') + + def test_tryingMultipleImports(self): + self.flakes(''' + try: + import fu + except ImportError: + import bar as fu + ''') + test_tryingMultipleImports.todo = '' + + def test_nonGlobalDoesNotRedefine(self): + self.flakes(''' + import fu + def a(): + fu = 3 + fu + ''') + + def test_functionsRunLater(self): + self.flakes(''' + def a(): + fu + import fu + ''') + + def test_functionNamesAreBoundNow(self): + self.flakes(''' + import fu + def fu(): + fu + fu + ''', m.RedefinedWhileUnused) + + def test_ignoreNonImportRedefinitions(self): + self.flakes('a = 1; a = 2') + + def test_importingForImportError(self): + self.flakes(''' + try: + import fu + except ImportError: + pass + ''') + test_importingForImportError.todo = '' + + def test_explicitlyPublic(self): + '''imports mentioned in __all__ are not unused''' + self.flakes('import fu; __all__ = ["fu"]') + test_explicitlyPublic.todo = "this would require importing the module or doing smarter parsing" + + def test_importedInClass(self): + '''Imports in class scope can be used through self''' + self.flakes(''' + class c: + import i + def __init__(self): + self.i + ''') + test_importedInClass.todo = 'requires evaluating attribute access' + + def test_futureImport(self): + '''__future__ is special''' + self.flakes('from __future__ import division') + + def test_futureImportFirst(self): + """ + __future__ imports must come before anything else. 
+ """ + self.flakes(''' + x = 5 + from __future__ import division + ''', m.LateFutureImport) + + + +class Python24Tests(harness.Test): + """ + Tests for checking of syntax which is valid in Python 2.4 and newer. + """ + if version_info < (2, 4): + skip = "Python 2.4 required for generator expression and decorator tests." + + + def test_usedInGenExp(self): + """ + Using a global in a generator expression results in no warnings. + """ + self.flakes('import fu; (fu for _ in range(1))') + self.flakes('import fu; (1 for _ in range(1) if fu)') + + + def test_redefinedByGenExp(self): + """ + Re-using a global name as the loop variable for a generator + expression results in a redefinition warning. + """ + self.flakes('import fu; (1 for fu in range(1))', m.RedefinedWhileUnused) + + + def test_usedAsDecorator(self): + """ + Using a global name in a decorator statement results in no warnings, + but using an undefined name in a decorator statement results in an + undefined name warning. + """ + self.flakes(''' + from interior import decorate + @decorate + def f(): + return "hello" + ''') + + self.flakes(''' + from interior import decorate + @decorate('value') + def f(): + return "hello" + ''') + + self.flakes(''' + @decorate + def f(): + return "hello" + ''', m.UndefinedName) diff --git a/.vim/ftplugin/python/pyflakes/pyflakes/test/test_other.py b/.vim/ftplugin/python/pyflakes/pyflakes/test/test_other.py new file mode 100644 index 0000000..26a306c --- /dev/null +++ b/.vim/ftplugin/python/pyflakes/pyflakes/test/test_other.py @@ -0,0 +1,234 @@ +# (c) 2005-2008 Divmod, Inc. +# See LICENSE file for details + +""" +Tests for various Pyflakes behavior. 
+""" + +from sys import version_info + +from pyflakes import messages as m +from pyflakes.test import harness + + +class Test(harness.Test): + + def test_duplicateArgs(self): + self.flakes('def fu(bar, bar): pass', m.DuplicateArgument) + + def test_localReferencedBeforeAssignment(self): + self.flakes(''' + a = 1 + def f(): + a; a=1 + f() + ''', m.UndefinedName) + test_localReferencedBeforeAssignment.todo = 'this requires finding all assignments in the function body first' + + def test_redefinedFunction(self): + """ + Test that shadowing a function definition with another one raises a + warning. + """ + self.flakes(''' + def a(): pass + def a(): pass + ''', m.RedefinedFunction) + + def test_redefinedClassFunction(self): + """ + Test that shadowing a function definition in a class suite with another + one raises a warning. + """ + self.flakes(''' + class A: + def a(): pass + def a(): pass + ''', m.RedefinedFunction) + + def test_functionDecorator(self): + """ + Test that shadowing a function definition with a decorated version of + that function does not raise a warning. + """ + self.flakes(''' + from somewhere import somedecorator + + def a(): pass + a = somedecorator(a) + ''') + + def test_classFunctionDecorator(self): + """ + Test that shadowing a function definition in a class suite with a + decorated version of that function does not raise a warning. + """ + self.flakes(''' + class A: + def a(): pass + a = classmethod(a) + ''') + + def test_unaryPlus(self): + '''Don't die on unary +''' + self.flakes('+1') + + + +class Python25Test(harness.Test): + """ + Tests for checking of syntax only available in Python 2.5 and newer. + """ + if version_info < (2, 5): + skip = "Python 2.5 required for if-else and with tests" + + def test_ifexp(self): + """ + Test C{foo if bar else baz} statements. 
+ """ + self.flakes("a = 'moo' if True else 'oink'") + self.flakes("a = foo if True else 'oink'", m.UndefinedName) + self.flakes("a = 'moo' if True else bar", m.UndefinedName) + + + def test_withStatementNoNames(self): + """ + No warnings are emitted for using inside or after a nameless C{with} + statement a name defined beforehand. + """ + self.flakes(''' + from __future__ import with_statement + bar = None + with open("foo"): + bar + bar + ''') + + def test_withStatementSingleName(self): + """ + No warnings are emitted for using a name defined by a C{with} statement + within the suite or afterwards. + """ + self.flakes(''' + from __future__ import with_statement + with open('foo') as bar: + bar + bar + ''') + + + def test_withStatementTupleNames(self): + """ + No warnings are emitted for using any of the tuple of names defined by + a C{with} statement within the suite or afterwards. + """ + self.flakes(''' + from __future__ import with_statement + with open('foo') as (bar, baz): + bar, baz + bar, baz + ''') + + + def test_withStatementSingleNameUndefined(self): + """ + An undefined name warning is emitted if the name first defined by a + C{with} statement is used before the C{with} statement. + """ + self.flakes(''' + from __future__ import with_statement + bar + with open('foo') as bar: + pass + ''', m.UndefinedName) + + + def test_withStatementTupleNamesUndefined(self): + """ + An undefined name warning is emitted if a name first defined by a the + tuple-unpacking form of the C{with} statement is used before the + C{with} statement. + """ + self.flakes(''' + from __future__ import with_statement + baz + with open('foo') as (bar, baz): + pass + ''', m.UndefinedName) + + + def test_withStatementSingleNameRedefined(self): + """ + A redefined name warning is emitted if a name bound by an import is + rebound by the name defined by a C{with} statement. 
+ """ + self.flakes(''' + from __future__ import with_statement + import bar + with open('foo') as bar: + pass + ''', m.RedefinedWhileUnused) + + + def test_withStatementTupleNamesRedefined(self): + """ + A redefined name warning is emitted if a name bound by an import is + rebound by one of the names defined by the tuple-unpacking form of a + C{with} statement. + """ + self.flakes(''' + from __future__ import with_statement + import bar + with open('foo') as (bar, baz): + pass + ''', m.RedefinedWhileUnused) + + + def test_withStatementUndefinedInside(self): + """ + An undefined name warning is emitted if a name is used inside the + body of a C{with} statement without first being bound. + """ + self.flakes(''' + from __future__ import with_statement + with open('foo') as bar: + baz + ''', m.UndefinedName) + + + def test_withStatementNameDefinedInBody(self): + """ + A name defined in the body of a C{with} statement can be used after + the body ends without warning. + """ + self.flakes(''' + from __future__ import with_statement + with open('foo') as bar: + baz = 10 + baz + ''') + + + def test_withStatementUndefinedInExpression(self): + """ + An undefined name warning is emitted if a name in the I{test} + expression of a C{with} statement is undefined. + """ + self.flakes(''' + from __future__ import with_statement + with bar as baz: + pass + ''', m.UndefinedName) + + self.flakes(''' + from __future__ import with_statement + with bar as bar: + pass + ''', m.UndefinedName) + + def test_listNestedListComprehension(self): + self.flakes(''' + root = [['213', '123'], ['4354']] + foo = [int(c) for group in root for c in group] + ''') + diff --git a/.vim/ftplugin/python/pyflakes/pyflakes/test/test_script.py b/.vim/ftplugin/python/pyflakes/pyflakes/test/test_script.py new file mode 100644 index 0000000..b6fb685 --- /dev/null +++ b/.vim/ftplugin/python/pyflakes/pyflakes/test/test_script.py @@ -0,0 +1,48 @@ + +""" +Tests for L{pyflakes.scripts.pyflakes}. 
+""" + +import sys +from StringIO import StringIO + +from twisted.python.filepath import FilePath +from twisted.trial.unittest import TestCase + +from pyflakes.scripts.pyflakes import checkPath + +def withStderrTo(stderr, f): + """ + Call C{f} with C{sys.stderr} redirected to C{stderr}. + """ + (outer, sys.stderr) = (sys.stderr, stderr) + try: + return f() + finally: + sys.stderr = outer + + + +class CheckTests(TestCase): + """ + Tests for L{check} and L{checkPath} which check a file for flakes. + """ + def test_missingTrailingNewline(self): + """ + Source which doesn't end with a newline shouldn't cause any + exception to be raised nor an error indicator to be returned by + L{check}. + """ + fName = self.mktemp() + FilePath(fName).setContent("def foo():\n\tpass\n\t") + self.assertFalse(checkPath(fName)) + + + def test_checkPathNonExisting(self): + """ + L{checkPath} handles non-existing files. + """ + err = StringIO() + count = withStderrTo(err, lambda: checkPath('extremo')) + self.assertEquals(err.getvalue(), 'extremo: no such file\n') + self.assertEquals(count, 1) diff --git a/.vim/ftplugin/python/pyflakes/pyflakes/test/test_undefined_names.py b/.vim/ftplugin/python/pyflakes/pyflakes/test/test_undefined_names.py new file mode 100644 index 0000000..57e4e3c --- /dev/null +++ b/.vim/ftplugin/python/pyflakes/pyflakes/test/test_undefined_names.py @@ -0,0 +1,182 @@ + +from sys import version_info + +from pyflakes import messages as m +from pyflakes.test import harness + + +class Test(harness.Test): + def test_undefined(self): + self.flakes('bar', m.UndefinedName) + + def test_definedInListComp(self): + self.flakes('[a for a in range(10) if a]') + + + def test_functionsNeedGlobalScope(self): + self.flakes(''' + class a: + def b(): + fu + fu = 1 + ''') + + def test_builtins(self): + self.flakes('range(10)') + + def test_magic_globals(self): + self.flakes('__file__') + + def test_globalImportStar(self): + '''Can't find undefined names with import *''' + self.flakes('from 
fu import *; bar', m.ImportStarUsed) + + def test_localImportStar(self): + '''A local import * still allows undefined names to be found in upper scopes''' + self.flakes(''' + def a(): + from fu import * + bar + ''', m.ImportStarUsed, m.UndefinedName) + + def test_unpackedParameter(self): + '''Unpacked function parameters create bindings''' + self.flakes(''' + def a((bar, baz)): + bar; baz + ''') + + def test_definedByGlobal(self): + '''"global" can make an otherwise undefined name in another function defined''' + self.flakes(''' + def a(): global fu; fu = 1 + def b(): fu + ''') + test_definedByGlobal.todo = '' + + def test_del(self): + '''del deletes bindings''' + self.flakes('a = 1; del a; a', m.UndefinedName) + + def test_delGlobal(self): + '''del a global binding from a function''' + self.flakes(''' + a = 1 + def f(): + global a + del a + a + ''') + + def test_delUndefined(self): + '''del an undefined name''' + self.flakes('del a', m.UndefinedName) + + def test_globalFromNestedScope(self): + '''global names are available from nested scopes''' + self.flakes(''' + a = 1 + def b(): + def c(): + a + ''') + + def test_laterRedefinedGlobalFromNestedScope(self): + """ + Test that referencing a local name that shadows a global, before it is + defined, generates a warning. + """ + self.flakes(''' + a = 1 + def fun(): + a + a = 2 + ''', m.UndefinedLocal) + + def test_laterRedefinedGlobalFromNestedScope2(self): + """ + Test that referencing a local name in a nested scope that shadows a + global declared in an enclosing scope, before it is defined, generates + a warning. 
+ """ + self.flakes(''' + a = 1 + def fun(): + global a + def fun2(): + a + a = 2 + ''', m.UndefinedLocal) + + + def test_doubleNestingReportsClosestName(self): + """ + Test that referencing a local name in a nested scope that shadows a + variable declared in two different outer scopes before it is defined + in the innermost scope generates an UnboundLocal warning which + refers to the nearest shadowed name. + """ + exc = self.flakes(''' + def a(): + x = 1 + def b(): + x = 2 # line 5 + def c(): + x + x = 3 + ''', m.UndefinedLocal).messages[0] + self.assertEqual(exc.message_args, ('x', 5)) + + + def test_laterRedefinedGlobalFromNestedScope3(self): + """ + Test that referencing a local name in a nested scope that shadows a + global, before it is defined, generates a warning. + """ + self.flakes(''' + def fun(): + a = 1 + def fun2(): + a + a = 1 + ''', m.UndefinedLocal) + + def test_nestedClass(self): + '''nested classes can access enclosing scope''' + self.flakes(''' + def f(foo): + class C: + bar = foo + def f(self): + return foo + return C() + + f(123).f() + ''') + + def test_badNestedClass(self): + '''free variables in nested classes must bind at class creation''' + self.flakes(''' + def f(): + class C: + bar = foo + foo = 456 + + f() + ''', m.UndefinedName) + + + +class Python24Test(harness.Test): + """ + Tests for checking of syntax which is valid in Python 2.4 and newer. + """ + if version_info < (2, 4): + skip = "Python 2.4 required for generator expression tests." + + def test_definedInGenExp(self): + """ + Using the loop variable of a generator expression results in no + warnings. + """ + self.flakes('(a for a in xrange(10) if a)') diff --git a/.vim/ftplugin/python/pyflakes/setup.py b/.vim/ftplugin/python/pyflakes/setup.py new file mode 100644 index 0000000..d875a9e --- /dev/null +++ b/.vim/ftplugin/python/pyflakes/setup.py @@ -0,0 +1,19 @@ +#!/usr/bin/python +# (c) 2005 Divmod, Inc. 
See LICENSE file for details + +from distutils.core import setup + +setup( + name="pyflakes", + license="MIT", + version="0.2.1", + description="passive checker of Python programs", + author="Phil Frost", + maintainer="Moe Aboulkheir", + maintainer_email="[email protected]", + url="http://www.divmod.org/projects/pyflakes", + packages=["pyflakes", "pyflakes.scripts"], + scripts=["bin/pyflakes"], + long_description="""Pyflakes is program to analyze Python programs and detect various errors. It +works by parsing the source file, not importing it, so it is safe to use on +modules with side effects. It's also much faster.""") diff --git a/.vim/ftplugin/python/quickfix.diff b/.vim/ftplugin/python/quickfix.diff new file mode 100644 index 0000000..ce2471f --- /dev/null +++ b/.vim/ftplugin/python/quickfix.diff @@ -0,0 +1,124 @@ +diff --git a/README.rst b/README.rst +index 5f8467f..acff657 100644 +--- a/README.rst ++++ b/README.rst +@@ -8,11 +8,13 @@ accessing a local before it is bound, and also gives warnings for things like + unused imports. + + pyflakes-vim uses the output from PyFlakes to highlight errors in your code. ++To locate errors quickly, use quickfix_ commands: + + Make sure to check vim.org_ for the latest updates. + + .. _pyflakes.vim: http://www.vim.org/scripts/script.php?script_id=2441 + .. _vim.org: http://www.vim.org/scripts/script.php?script_id=2441 ++.. _quickfix: http://vimdoc.sourceforge.net/htmldoc/quickfix.html#quickfix + + Quick Installation + ------------------ +@@ -57,12 +59,10 @@ Hacking + TODO + ---- + * signs_ support (show warning and error icons to left of the buffer area) +- * quickfix_ support (allow jumping forward and back through the error list) + * configuration variables + * parse or intercept useful output from the warnings module + + .. _signs: http://www.vim.org/htmldoc/sign.html +-.. 
_quickfix: http://vimdoc.sourceforge.net/htmldoc/quickfix.html + + Changelog + --------- +diff --git a/pyflakes.vim b/pyflakes.vim +index 8aa508b..d6699bc 100644 +--- a/pyflakes.vim ++++ b/pyflakes.vim +@@ -159,6 +159,42 @@ if !exists("*s:WideMsg") + endfun + endif + ++if !exists("*s:GetQuickFixStackCount") ++ function s:GetQuickFixStackCount() ++ let l:stack_count = 0 ++ try ++ silent colder 9 ++ catch /E380:/ ++ endtry ++ ++ try ++ for i in range(9) ++ silent cnewer ++ let l:stack_count = l:stack_count + 1 ++ endfor ++ catch /E381:/ ++ return l:stack_count ++ endtry ++ endfunction ++endif ++ ++if !exists("*s:ActivatePyflakesQuickFixWindow") ++ function s:ActivatePyflakesQuickFixWindow() ++ try ++ silent colder 9 " go to the bottom of quickfix stack ++ catch /E380:/ ++ endtry ++ ++ if s:pyflakes_qf > 0 ++ try ++ exe "silent cnewer " . s:pyflakes_qf ++ catch /E381:/ ++ echoerr "Could not activate Pyflakes Quickfix Window." ++ endtry ++ endif ++ endfunction ++endif ++ + if !exists("*s:RunPyflakes") + function s:RunPyflakes() + highlight link PyFlakes SpellBad +@@ -174,12 +210,23 @@ if !exists("*s:RunPyflakes") + + let b:matched = [] + let b:matchedlines = {} ++ ++ let b:qf_list = [] ++ let b:qf_window_count = -1 ++ + python << EOF + for w in check(vim.current.buffer): + vim.command('let s:matchDict = {}') + vim.command("let s:matchDict['lineNum'] = " + str(w.lineno)) + vim.command("let s:matchDict['message'] = '%s'" % vim_quote(w.message % w.message_args)) + vim.command("let b:matchedlines[" + str(w.lineno) + "] = s:matchDict") ++ ++ vim.command("let l:qf_item = {}") ++ vim.command("let l:qf_item.bufnr = bufnr('%')") ++ vim.command("let l:qf_item.filename = expand('%')") ++ vim.command("let l:qf_item.lnum = %s" % str(w.lineno)) ++ vim.command("let l:qf_item.text = '%s'" % vim_quote(w.message % w.message_args)) ++ vim.command("let l:qf_item.type = 'E'") + + if w.col is None or isinstance(w, SyntaxError): + # without column information, just highlight the whole line 
+@@ -189,8 +236,21 @@ for w in check(vim.current.buffer): + # with a column number, highlight the first keyword there + vim.command(r"let s:mID = matchadd('PyFlakes', '^\%" + str(w.lineno) + r"l\_.\{-}\zs\k\+\k\@!\%>" + str(w.col) + r"c')") + ++ vim.command("let l:qf_item.vcol = 1") ++ vim.command("let l:qf_item.col = %s" % str(w.col + 1)) ++ + vim.command("call add(b:matched, s:matchDict)") ++ vim.command("call add(b:qf_list, l:qf_item)") + EOF ++ if exists("s:pyflakes_qf") ++ " if pyflakes quickfix window is already created, reuse it ++ call s:ActivatePyflakesQuickFixWindow() ++ call setqflist(b:qf_list, 'r') ++ else ++ " one pyflakes quickfix window for all buffer ++ call setqflist(b:qf_list, '') ++ let s:pyflakes_qf = s:GetQuickFixStackCount() ++ endif + let b:cleared = 0 + endfunction + end diff --git a/.vimrc b/.vimrc index 603e6c3..7d1c73f 100644 --- a/.vimrc +++ b/.vimrc @@ -1,456 +1,463 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for 
keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme lucius colorscheme vilight colorscheme underwater-mod " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet colorscheme lucius set t_Co=256 endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn 
off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p 
nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " Highlight end of line whitespace. 
highlight WhitespaceEOL ctermbg=red guibg=red match WhitespaceEOL /\s\+$/ " Clean all end of line extra whitespace with ,S :nnoremap <silent><leader>S :let _s=@/<Bar>:%s/\s\+$//e<Bar>:let @/=_s<Bar>:nohl<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery +" ================================================== +" HTML +" ================================================== +" enable a shortcut for tidy using ~/.tidyrc config +map <Leader>T :!tidy -config ~/.tidyrc<cr><cr> + + " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the 
actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter source ~/.vim/twitvim.vim " opeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope -source /usr/local/lib/python2.6/dist-packages/ropevim-0.3_rc-py2.6.egg/ropevim.vim +source /usr/local/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 function! CustomCodeAssistInsertMode() call RopeCodeAssistInsertMode() if pumvisible() return "\<C-L>\<Down>" else return '' endif endfunction function! 
TabWrapperComplete() let cursyn = synID(line('.'), col('.') - 1, 1) if pumvisible() return "\<C-Y>" endif if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 return "\<Tab>" else return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" endif endfunction inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
b32818af141474c2f0d9e764d80b773c44838516
Add underwater-mod colorscheme
diff --git a/.vim/colors/underwater-mod.vim b/.vim/colors/underwater-mod.vim new file mode 100644 index 0000000..b0c58ca --- /dev/null +++ b/.vim/colors/underwater-mod.vim @@ -0,0 +1,98 @@ +" Maintainer: Mario Gutierrez ([email protected]) +" Original Theme: Dmitry Kichenko ([email protected]) +" Last Change: Jun 23, 2010 + +set background=dark + +hi clear + +if exists("syntax_on") + syntax reset +endif + +let colors_name = "underwater-mod" + +" Vim >= 7.0 specific colors +if version >= 700 + " highlights current line + hi CursorLine guibg=#18374F + " cursor's colour + hi CursorColumn guibg=#ffffff + "hi MatchParen guifg=#ffffff guibg=#439ea9 gui=bold + hi MatchParen guifg=magenta guibg=bg gui=bold + hi Pmenu guifg=#dfeff6 guibg=#1E415E + hi PmenuSel guifg=#dfeff6 guibg=#2D7889 + + " Search + hi IncSearch guifg=#E2DAEF guibg=#AF81F4 gui=bold + hi Search guifg=#E2DAEF guibg=#AF81F4 gui=none +endif + +" General colors +hi Cursor guifg=NONE guibg=#55A096 gui=none +"hi Normal guifg=#e3f3fa guibg=#102235 gui=none +hi Normal guifg=#e3f3fa guibg=#0B1724 gui=none +" e.g. tildes at the end of file +hi NonText guifg=#2F577C guibg=bg gui=none +hi LineNr guifg=#233f59 guibg=bg gui=none +hi StatusLine guifg=#ffec99 guibg=#0a1721 gui=none +hi StatusLineNC guifg=#233f59 guibg=#0a1721 gui=none +hi VertSplit guifg=#0a1721 guibg=#0a1721 gui=none +hi Folded guifg=#68CEE8 guibg=#1A3951 gui=none +hi FoldColumn guifg=#1E415E guibg=#1A3951 gui=none +hi Title guifg=cyan guibg=NONE gui=bold + " Selected text color +hi Visual guifg=#dfeff6 guibg=#24557A gui=none +"hi SpecialKey guifg=#3e71a1 guibg=#102235 gui=none + +" Syntax highlighting +" +hi Comment guifg=#3e71a1 guibg=bg gui=italic +hi Todo guifg=#ADED80 guibg=#579929 gui=bold +hi Constant guifg=#96defa gui=none +hi String guifg=#89e14b gui=italic + " names of variables in PHP +hi Identifier guifg=#8ac6f2 gui=none + " Function names as in python. currently purleish +hi Function guifg=#AF81F4 gui=none + " declarations of type, e.g. 
int blah +hi Type guifg=#41B2EA gui=none + " statement, such as 'hi' right here +hi Statement guifg=#68CEE8 gui=none +hi Keyword guifg=#8ac6f2 gui=none + " specified preprocessed words (like bold, italic etc. above) +hi PreProc guifg=#ef7760 gui=none +hi Number guifg=#96defa gui=none +hi Special guifg=#DFEFF6 gui=none + +" Ruby +hi rubyInterpolation guifg=#b9e19d guibg=bg +hi rubyInterpolationDelimiter guifg=#b9e19d guibg=bg +hi link rubyStringDelimiter String +""hi erubyDelimiter guifg=#f8c6bd guibg=bg +hi link erubyDelimiter PreProc + +" HTML +hi link htmlTag Statement +hi link htmlEndTag Statement +hi link htmlTagName Statement + +" XML +hi link xmlTag htmlTag +hi link xmlEndTag htmlEndTag +hi link xmlTagName htmlTagName + +" NERDTree +hi link treePart LineNr +hi link treePartFile treePart +hi link treeDirSlash treePart +hi link treeDir Statement +hi link treeClosable PreProc +hi link treeOpenable treeClosable +hi link treeUp treeClosable +hi treeFlag guifg=#3e71a1 guibg=bg gui=none +hi link treeHelp Comment +hi link treeLink Type + +hi link markdownCode Function +hi link markdownCodeBlock Function diff --git a/.vimrc b/.vimrc index d3be845..603e6c3 100644 --- a/.vimrc +++ b/.vimrc @@ -1,455 +1,456 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " " Y - yank to the end of the line " " <CR> - 
create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme lucius colorscheme vilight + colorscheme underwater-mod " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet colorscheme lucius set t_Co=256 endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces 
to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap 
<leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . 
a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " Highlight end of line whitespace. highlight WhitespaceEOL ctermbg=red guibg=red match WhitespaceEOL /\s\+$/ " Clean all end of line extra whitespace with ,S :nnoremap <silent><leader>S :let _s=@/<Bar>:%s/\s\+$//e<Bar>:let @/=_s<Bar>:nohl<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search 
python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter source ~/.vim/twitvim.vim " opeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/lib/python2.6/dist-packages/ropevim-0.3_rc-py2.6.egg/ropevim.vim let ropevim_codeassist_maxfixes=10 let ropevim_vim_completion=1 let ropevim_guess_project=1 let ropevim_enable_autoimport=1 let ropevim_extended_complete=1 function! CustomCodeAssistInsertMode() call RopeCodeAssistInsertMode() if pumvisible() return "\<C-L>\<Down>" else return '' endif endfunction function! TabWrapperComplete() let cursyn = synID(line('.'), col('.') - 1, 1) if pumvisible() return "\<C-Y>" endif if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 return "\<Tab>" else return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" endif endfunction inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) 
let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
b210304f65697ed2c78808ae5900122cabd86054
Add ropevim support in config
diff --git a/.vimrc b/.vimrc index d6b613c..35f8346 100644 --- a/.vimrc +++ b/.vimrc @@ -1,415 +1,444 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... 
- post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme hornet " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell 
set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! 
spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search 
python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter source ~/.vim/twitvim.vim -" RopeVim +" opeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/lib/python2.6/dist-packages/ropevim-0.3_rc-py2.6.egg/ropevim.vim +let ropevim_codeassist_maxfixes=10 +let ropevim_vim_completion=1 +let ropevim_guess_project=1 +let ropevim_enable_autoimport=1 +let ropevim_extended_complete=1 + +function! CustomCodeAssistInsertMode() + call RopeCodeAssistInsertMode() + if pumvisible() + return "\<C-L>\<Down>" + else + return '' + endif +endfunction + +function! TabWrapperComplete() + let cursyn = synID(line('.'), col('.') - 1, 1) + if pumvisible() + return "\<C-Y>" + endif + if strpart(getline('.'), 0, col('.')-1) =~ '^\s*$' || cursyn != 0 + return "\<Tab>" + else + return "\<C-R>=CustomCodeAssistInsertMode()\<CR>" + endif +endfunction + +inoremap <buffer><silent><expr> <C-l> TabWrapperComplete() + " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) 
let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
5914551443259eed236cd93df484bd8df3b55338
Add drawit plugin for awesome text graphs
diff --git a/.vim/.VimballRecord b/.vim/.VimballRecord index 70818d6..ae26644 100644 --- a/.vim/.VimballRecord +++ b/.vim/.VimballRecord @@ -1,3 +1,4 @@ tComment.vba: call delete('/home/rharding/.vim/doc/tComment.txt')|call delete('/home/rharding/.vim/plugin/tComment.vim')|call delete('/home/rharding/.vim/autoload/tcomment.vim') supertab.vba: call delete('/home/rharding/.vim/doc/supertab.txt')|call delete('/home/rharding/.vim/plugin/supertab.vim') twitvim-0.4.5.vba: call delete('/home/rharding/.vim/plugin/twitvim.vim')|call delete('/home/rharding/.vim/doc/twitvim.txt') +DrawIt.vba: call delete('/home/rharding/.vim/plugin/DrawItPlugin.vim')|call delete('/home/rharding/.vim/plugin/cecutil.vim')|call delete('/home/rharding/.vim/autoload/DrawIt.vim')|call delete('/home/rharding/.vim/doc/DrawIt.txt') diff --git a/.vim/.netrwhist b/.vim/.netrwhist index a1423cc..660d5e2 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,6 +1,9 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =4 +let g:netrw_dirhist_cnt =7 let g:netrw_dirhist_1='/home/rharding/configs/pyvim' let g:netrw_dirhist_2='/home/rharding/configs/dotfiles/awesome/autostart' let g:netrw_dirhist_3='/home/rharding/.offlineimap' let g:netrw_dirhist_4='/home/rharding/configs/dotfiles/awesome/autostart' +let g:netrw_dirhist_5='/home/rharding/src/gitosis_private/gitosis-admin' +let g:netrw_dirhist_6='/etc/apache2/sites-enabled' +let g:netrw_dirhist_7='/home/rharding/src/hotalert/qsat_hotalert_app/docs' diff --git a/.vim/autoload/DrawIt.vim b/.vim/autoload/DrawIt.vim new file mode 100644 index 0000000..cc941f0 --- /dev/null +++ b/.vim/autoload/DrawIt.vim @@ -0,0 +1,1660 @@ +" DrawIt.vim: a simple way to draw things in Vim +" +" Maintainer: Charles E. Campbell, Jr. +" Authors: Charles E. Campbell, Jr. 
<[email protected]> - NOSPAM +" Sylvain Viart ([email protected]) +" Version: 10 +" Date: Jun 12, 2008 +" +" Quick Setup: {{{1 +" tar -oxvf DrawIt.tar +" Should put DrawItPlugin.vim in your .vim/plugin directory, +" put DrawIt.vim in your .vim/autoload directory +" put DrawIt.txt in your .vim/doc directory. +" Then, use \di to start DrawIt, +" \ds to stop Drawit, and +" draw by simply moving about using the cursor keys. +" +" You may also use visual-block mode to select endpoints and +" draw lines, arrows, and ellipses. +" +" Copyright: Copyright (C) 1999-2005 Charles E. Campbell, Jr. {{{1 +" Permission is hereby granted to use and distribute this code, +" with or without modifications, provided that this copyright +" notice is copied with it. Like anything else that's free, +" DrawIt.vim is provided *as is* and comes with no warranty +" of any kind, either expressed or implied. By using this +" plugin, you agree that in no event will the copyright +" holder be liable for any damages resulting from the use +" of this software. +" +" Required: THIS SCRIPT REQUIRES VIM 7.0 (or later) {{{1 +" GetLatestVimScripts: 40 1 :AutoInstall: DrawIt.vim +" GetLatestVimScripts: 1066 1 cecutil.vim +" +" Woe to her who is rebellious and polluted, the oppressing {{{1 +" city! She didn't obey the voice. She didn't receive correction. +" She didn't trust in Yahweh. She didn't draw near to her God. (Zeph 3:1,2 WEB) + +" --------------------------------------------------------------------- +" Load Once: {{{1 +if &cp || exists("g:loaded_DrawIt") + finish +endif +let s:keepcpo= &cpo +set cpo&vim + +" --------------------------------------------------------------------- +" Script Variables: {{{1 +if !exists("s:saveposn_count") + let s:saveposn_count= 0 +endif +let g:loaded_DrawIt= "v10" +"DechoTabOn + +" ===================================================================== +" DrawIt Functions: (by Charles E. Campbell, Jr.) 
{{{1 +" ===================================================================== + +" --------------------------------------------------------------------- +" DrawIt#StartDrawIt: this function maps the cursor keys, sets up default {{{2 +" drawing characters, and makes some settings +fun! DrawIt#StartDrawIt() +" call Dfunc("StartDrawIt()") + + " StartDrawIt: report on [DrawIt] mode {{{3 + if exists("b:dodrawit") && b:dodrawit == 1 + " already in DrawIt mode + echo "[DrawIt] (already on, use ".((exists("mapleader") && mapleader != "")? mapleader : '\')."ds to stop)" +" call Dret("StartDrawIt") + return + endif + let b:dodrawit= 1 + + " indicate in DrawIt mode + echo "[DrawIt]" + + " StartDrawIt: turn on mouse {{{3 + if !exists("b:drawit_keep_mouse") + let b:drawit_keep_mouse= &mouse + endif + setlocal mouse=a + + " StartDrawIt: set up DrawIt commands {{{3 + com! -nargs=1 -range SetBrush <line1>,<line2>call DrawIt#SetBrush(<q-args>) + com! -count Canvas call s:Spacer(line("."),line(".") + <count> - 1,0) + + " StartDrawIt: set up default drawing characters {{{3 + if !exists("b:di_vert") + let b:di_vert= "|" + endif + if !exists("b:di_horiz") + let b:di_horiz= "-" + endif + if !exists("b:di_plus") + let b:di_plus= "+" + endif + if !exists("b:di_upright") " also downleft + let b:di_upright= "/" + endif + if !exists("b:di_upleft") " also downright + let b:di_upleft= "\\" + endif + if !exists("b:di_cross") + let b:di_cross= "X" + endif + if !exists("b:di_ellipse") + let b:di_ellipse= '*' + endif + + " set up initial DrawIt behavior (as opposed to erase behavior) + let b:di_erase = 0 + + " StartDrawIt: option recording {{{3 + let b:di_aikeep = &ai + let b:di_cinkeep = &cin + let b:di_cpokeep = &cpo + let b:di_etkeep = &et + let b:di_fokeep = &fo + let b:di_gdkeep = &gd + let b:di_gokeep = &go + let b:di_magickeep = &magic + let b:di_remapkeep = &remap + let b:di_repkeep = &report + let b:di_sikeep = &si + let b:di_stakeep = &sta + let b:di_vekeep = &ve + set cpo&vim + set 
nocin noai nosi nogd sta et ve=all report=10000 + set go-=aA + set fo-=a + set remap magic + + " StartDrawIt: save and unmap user maps {{{3 + let b:lastdir = 1 + if exists("mapleader") + let usermaplead = mapleader + else + let usermaplead = "\\" + endif + call SaveUserMaps("n","","><^v","DrawIt") + call SaveUserMaps("v",usermaplead,"abeflsy","DrawIt") + call SaveUserMaps("n",usermaplead,"h><v^","DrawIt") + call SaveUserMaps("n","","<left>","DrawIt") + call SaveUserMaps("n","","<right>","DrawIt") + call SaveUserMaps("n","","<up>","DrawIt") + call SaveUserMaps("n","","<down>","DrawIt") + call SaveUserMaps("n","","<left>","DrawIt") + call SaveUserMaps("n","","<s-right>","DrawIt") + call SaveUserMaps("n","","<s-up>","DrawIt") + call SaveUserMaps("n","","<s-down>","DrawIt") + call SaveUserMaps("n","","<space>","DrawIt") + call SaveUserMaps("n","","<home>","DrawIt") + call SaveUserMaps("n","","<end>","DrawIt") + call SaveUserMaps("n","","<pageup>","DrawIt") + call SaveUserMaps("n","","<pagedown>","DrawIt") + call SaveUserMaps("n","","<leftmouse>","DrawIt") + call SaveUserMaps("n","","<middlemouse>","DrawIt") + call SaveUserMaps("n","","<rightmouse>","DrawIt") + call SaveUserMaps("n","","<leftdrag>","DrawIt") + call SaveUserMaps("n","","<s-leftmouse>","DrawIt") + call SaveUserMaps("n","","<s-leftdrag>","DrawIt") + call SaveUserMaps("n","","<s-leftrelease>","DrawIt") + call SaveUserMaps("n","","<c-leftmouse>","DrawIt") + call SaveUserMaps("n","","<c-leftdrag>","DrawIt") + call SaveUserMaps("n","","<c-leftrelease>","DrawIt") + call SaveUserMaps("n",usermaplead,":pa","DrawIt") + call SaveUserMaps("n",usermaplead,":pb","DrawIt") + call SaveUserMaps("n",usermaplead,":pc","DrawIt") + call SaveUserMaps("n",usermaplead,":pd","DrawIt") + call SaveUserMaps("n",usermaplead,":pe","DrawIt") + call SaveUserMaps("n",usermaplead,":pf","DrawIt") + call SaveUserMaps("n",usermaplead,":pg","DrawIt") + call SaveUserMaps("n",usermaplead,":ph","DrawIt") + call 
SaveUserMaps("n",usermaplead,":pi","DrawIt") + call SaveUserMaps("n",usermaplead,":pj","DrawIt") + call SaveUserMaps("n",usermaplead,":pk","DrawIt") + call SaveUserMaps("n",usermaplead,":pl","DrawIt") + call SaveUserMaps("n",usermaplead,":pm","DrawIt") + call SaveUserMaps("n",usermaplead,":pn","DrawIt") + call SaveUserMaps("n",usermaplead,":po","DrawIt") + call SaveUserMaps("n",usermaplead,":pp","DrawIt") + call SaveUserMaps("n",usermaplead,":pq","DrawIt") + call SaveUserMaps("n",usermaplead,":pr","DrawIt") + call SaveUserMaps("n",usermaplead,":ps","DrawIt") + call SaveUserMaps("n",usermaplead,":pt","DrawIt") + call SaveUserMaps("n",usermaplead,":pu","DrawIt") + call SaveUserMaps("n",usermaplead,":pv","DrawIt") + call SaveUserMaps("n",usermaplead,":pw","DrawIt") + call SaveUserMaps("n",usermaplead,":px","DrawIt") + call SaveUserMaps("n",usermaplead,":py","DrawIt") + call SaveUserMaps("n",usermaplead,":pz","DrawIt") + call SaveUserMaps("n",usermaplead,":ra","DrawIt") + call SaveUserMaps("n",usermaplead,":rb","DrawIt") + call SaveUserMaps("n",usermaplead,":rc","DrawIt") + call SaveUserMaps("n",usermaplead,":rd","DrawIt") + call SaveUserMaps("n",usermaplead,":re","DrawIt") + call SaveUserMaps("n",usermaplead,":rf","DrawIt") + call SaveUserMaps("n",usermaplead,":rg","DrawIt") + call SaveUserMaps("n",usermaplead,":rh","DrawIt") + call SaveUserMaps("n",usermaplead,":ri","DrawIt") + call SaveUserMaps("n",usermaplead,":rj","DrawIt") + call SaveUserMaps("n",usermaplead,":rk","DrawIt") + call SaveUserMaps("n",usermaplead,":rl","DrawIt") + call SaveUserMaps("n",usermaplead,":rm","DrawIt") + call SaveUserMaps("n",usermaplead,":rn","DrawIt") + call SaveUserMaps("n",usermaplead,":ro","DrawIt") + call SaveUserMaps("n",usermaplead,":rp","DrawIt") + call SaveUserMaps("n",usermaplead,":rq","DrawIt") + call SaveUserMaps("n",usermaplead,":rr","DrawIt") + call SaveUserMaps("n",usermaplead,":rs","DrawIt") + call SaveUserMaps("n",usermaplead,":rt","DrawIt") + call 
SaveUserMaps("n",usermaplead,":ru","DrawIt") + call SaveUserMaps("n",usermaplead,":rv","DrawIt") + call SaveUserMaps("n",usermaplead,":rw","DrawIt") + call SaveUserMaps("n",usermaplead,":rx","DrawIt") + call SaveUserMaps("n",usermaplead,":ry","DrawIt") + call SaveUserMaps("n",usermaplead,":rz","DrawIt") + if exists("g:drawit_insertmode") && g:drawit_insertmode + call SaveUserMaps("i","","<left>","DrawIt") + call SaveUserMaps("i","","<right>","DrawIt") + call SaveUserMaps("i","","<up>","DrawIt") + call SaveUserMaps("i","","<down>","DrawIt") + call SaveUserMaps("i","","<left>","DrawIt") + call SaveUserMaps("i","","<s-right>","DrawIt") + call SaveUserMaps("i","","<s-up>","DrawIt") + call SaveUserMaps("i","","<s-down>","DrawIt") + call SaveUserMaps("i","","<home>","DrawIt") + call SaveUserMaps("i","","<end>","DrawIt") + call SaveUserMaps("i","","<pageup>","DrawIt") + call SaveUserMaps("i","","<pagedown>","DrawIt") + call SaveUserMaps("i","","<leftmouse>","DrawIt") + endif + call SaveUserMaps("n","",":\<c-v>","DrawIt") + + " StartDrawIt: DrawIt maps (Charles Campbell) {{{3 + nmap <silent> <left> :set lz<CR>:silent! call <SID>DrawLeft()<CR>:set nolz<CR> + nmap <silent> <right> :set lz<CR>:silent! call <SID>DrawRight()<CR>:set nolz<CR> + nmap <silent> <up> :set lz<CR>:silent! call <SID>DrawUp()<CR>:set nolz<CR> + nmap <silent> <down> :set lz<CR>:silent! call <SID>DrawDown()<CR>:set nolz<CR> + nmap <silent> <s-left> :set lz<CR>:silent! call <SID>MoveLeft()<CR>:set nolz<CR> + nmap <silent> <s-right> :set lz<CR>:silent! call <SID>MoveRight()<CR>:set nolz<CR> + nmap <silent> <s-up> :set lz<CR>:silent! call <SID>MoveUp()<CR>:set nolz<CR> + nmap <silent> <s-down> :set lz<CR>:silent! call <SID>MoveDown()<CR>:set nolz<CR> + nmap <silent> <space> :set lz<CR>:silent! call <SID>DrawErase()<CR>:set nolz<CR> + nmap <silent> > :set lz<CR>:silent! call <SID>DrawSpace('>',1)<CR>:set nolz<CR> + nmap <silent> < :set lz<CR>:silent! 
call <SID>DrawSpace('<',2)<CR>:set nolz<CR> + nmap <silent> ^ :set lz<CR>:silent! call <SID>DrawSpace('^',3)<CR>:set nolz<CR> + nmap <silent> v :set lz<CR>:silent! call <SID>DrawSpace('v',4)<CR>:set nolz<CR> + nmap <silent> <home> :set lz<CR>:silent! call <SID>DrawSlantUpLeft()<CR>:set nolz<CR> + nmap <silent> <end> :set lz<CR>:silent! call <SID>DrawSlantDownLeft()<CR>:set nolz<CR> + nmap <silent> <pageup> :set lz<CR>:silent! call <SID>DrawSlantUpRight()<CR>:set nolz<CR> + nmap <silent> <pagedown> :set lz<CR>:silent! call <SID>DrawSlantDownRight()<CR>:set nolz<CR> + nmap <silent> <Leader>> :set lz<CR>:silent! call <SID>DrawFatRArrow()<CR>:set nolz<CR> + nmap <silent> <Leader>< :set lz<CR>:silent! call <SID>DrawFatLArrow()<CR>:set nolz<CR> + nmap <silent> <Leader>^ :set lz<CR>:silent! call <SID>DrawFatUArrow()<CR>:set nolz<CR> + nmap <silent> <Leader>v :set lz<CR>:silent! call <SID>DrawFatDArrow()<CR>:set nolz<CR> + nmap <silent> <Leader>f :call <SID>Flood()<cr> + + " StartDrawIt: Set up insertmode maps {{{3 + if exists("g:drawit_insertmode") && g:drawit_insertmode + imap <buffer> <silent> <left> <Esc><left>a + imap <buffer> <silent> <right> <Esc><right>a + imap <buffer> <silent> <up> <Esc><up>a + imap <buffer> <silent> <down> <Esc><down>a + imap <buffer> <silent> <left> <Esc><left>a + imap <buffer> <silent> <s-right> <Esc><s-right>a + imap <buffer> <silent> <s-up> <Esc><s-up>a + imap <buffer> <silent> <s-down> <Esc><s-down>a + imap <buffer> <silent> <home> <Esc><home>a + imap <buffer> <silent> <end> <Esc><end>a + imap <buffer> <silent> <pageup> <Esc><pageup>a + imap <buffer> <silent> <pagedown> <Esc><pagedown>a + endif + + " StartDrawIt: set up drawing mode mappings (Sylvain Viart) {{{3 + nnoremap <buffer> <silent> <c-v> :call <SID>LeftStart()<CR><c-v> + vmap <buffer> <silent> <Leader>a :<c-u>call <SID>CallBox('Arrow')<CR> + vmap <buffer> <silent> <Leader>b :<c-u>call <SID>CallBox('DrawBox')<cr> + nmap <buffer> <Leader>c :call <SID>Canvas()<cr> + vmap <buffer> 
<silent> <Leader>l :<c-u>call <SID>CallBox('DrawPlainLine')<CR> + vmap <buffer> <silent> <Leader>s :<c-u>call <SID>Spacer(line("'<"), line("'>"),0)<cr> + + " StartDrawIt: set up drawing mode mappings (Charles Campbell) {{{3 + " \pa ... \pz : blanks are transparent + " \ra ... \rz : blanks copy over + vmap <buffer> <silent> <Leader>e :<c-u>call <SID>CallBox('DrawEllipse')<CR> + + let allreg= "abcdefghijklmnopqrstuvwxyz" + while strlen(allreg) > 0 + let ireg= strpart(allreg,0,1) + exe "nmap <buffer> <silent> <Leader>p".ireg.' :<c-u>set lz<cr>:silent! call <SID>PutBlock("'.ireg.'",0)<cr>:set nolz<cr>' + exe "nmap <buffer> <silent> <Leader>r".ireg.' :<c-u>set lz<cr>:silent! call <SID>PutBlock("'.ireg.'",1)<cr>:set nolz<cr>' + let allreg= strpart(allreg,1) + endwhile + + " StartDrawIt: mouse maps (Sylvain Viart) {{{3 + " start visual-block with leftmouse + nnoremap <buffer> <silent> <leftmouse> <leftmouse>:call <SID>LeftStart()<CR><c-v> + vnoremap <buffer> <silent> <rightmouse> <leftmouse>:<c-u>call <SID>RightStart(1)<cr> + vnoremap <buffer> <silent> <middlemouse> <leftmouse>:<c-u>call <SID>RightStart(0)<cr> + vnoremap <buffer> <silent> <c-leftmouse> <leftmouse>:<c-u>call <SID>CLeftStart()<cr> + + " StartDrawIt: mouse maps (Charles Campbell) {{{3 + " Draw with current brush + nnoremap <buffer> <silent> <s-leftmouse> <leftmouse>:call <SID>SLeftStart()<CR><c-v> + nnoremap <buffer> <silent> <c-leftmouse> <leftmouse>:call <SID>CLeftStart()<CR><c-v> + + " StartDrawIt: Menu support {{{3 + if has("gui_running") && has("menu") && &go =~ 'm' + exe 'menu '.g:DrChipTopLvlMenu.'DrawIt.Stop\ \ DrawIt<tab>\\ds <Leader>ds' + exe 'menu '.g:DrChipTopLvlMenu.'DrawIt.Toggle\ Erase\ Mode<tab><space> <space>' + exe 'menu '.g:DrChipTopLvlMenu.'DrawIt.Draw\ Arrow<tab>\\a <Leader>a' + exe 'menu '.g:DrChipTopLvlMenu.'DrawIt.Draw\ Box<tab>\\b <Leader>b' + exe 'menu '.g:DrChipTopLvlMenu.'DrawIt.Make\ Blank\ Zone<tab>\\c <Leader>c' + exe 'menu '.g:DrChipTopLvlMenu.'DrawIt.Draw\ Ellipse<tab>\\e 
<Leader>e' + exe 'menu '.g:DrChipTopLvlMenu.'DrawIt.Draw\ Flood<tab>\\e <Leader>f' + exe 'menu '.g:DrChipTopLvlMenu.'DrawIt.Draw\ Line<tab>\\l <Leader>l' + exe 'menu '.g:DrChipTopLvlMenu.'DrawIt.Append\ Blanks<tab>\\s <Leader>s' + exe 'silent! unmenu '.g:DrChipTopLvlMenu.'DrawIt.Start\ DrawIt' + endif +" call Dret("StartDrawIt") +endfun + +" --------------------------------------------------------------------- +" DrawIt#StopDrawIt: this function unmaps the cursor keys and restores settings {{{2 +fun! DrawIt#StopDrawIt() +" call Dfunc("StopDrawIt()") + + " StopDrawIt: report on [DrawIt off] mode {{{3 + if !exists("b:dodrawit") + echo "[DrawIt off]" +" call Dret("StopDrawIt") + return + endif + + " StopDrawIt: restore mouse {{{3 + if exists("b:drawit_keep_mouse") + let &mouse= b:drawit_keep_mouse + unlet b:drawit_keep_mouse + endif + unlet b:dodrawit + echo "[DrawIt off]" + + if exists("b:drawit_canvas_used") + " StopDrawIt: clean up trailing white space {{{3 + call s:SavePosn() + silent! 
%s/\s\+$//e + unlet b:drawit_canvas_used + call s:RestorePosn() + endif + + " StopDrawIt: remove drawit commands {{{3 + delc SetBrush + + " StopDrawIt: insure that erase mode is off {{{3 + " (thanks go to Gary Johnson for this) + if b:di_erase == 1 + call s:DrawErase() + endif + + " StopDrawIt: restore user map(s), if any {{{3 + call RestoreUserMaps("DrawIt") + + " StopDrawIt: restore user's options {{{3 + let &ai = b:di_aikeep + let &cin = b:di_cinkeep + let &cpo = b:di_cpokeep + let &et = b:di_etkeep + let &fo = b:di_fokeep + let &gd = b:di_gdkeep + let &go = b:di_gokeep + let &magic = b:di_magickeep + let &remap = b:di_remapkeep + let &report = b:di_repkeep + let &si = b:di_sikeep + let &sta = b:di_stakeep + let &ve = b:di_vekeep + unlet b:di_aikeep + unlet b:di_cinkeep + unlet b:di_cpokeep + unlet b:di_etkeep + unlet b:di_fokeep + unlet b:di_gdkeep + unlet b:di_gokeep + unlet b:di_magickeep + unlet b:di_remapkeep + unlet b:di_repkeep + unlet b:di_sikeep + unlet b:di_stakeep + unlet b:di_vekeep + + " StopDrawIt: DrChip menu support: {{{3 + if has("gui_running") && has("menu") && &go =~ 'm' + exe 'menu '.g:DrChipTopLvlMenu.'DrawIt.Start\ DrawIt<tab>\\di <Leader>di' + exe 'unmenu '.g:DrChipTopLvlMenu.'DrawIt.Stop\ \ DrawIt' + exe 'unmenu '.g:DrChipTopLvlMenu.'DrawIt.Toggle\ Erase\ Mode' + exe 'unmenu '.g:DrChipTopLvlMenu.'DrawIt.Draw\ Arrow' + exe 'unmenu '.g:DrChipTopLvlMenu.'DrawIt.Draw\ Box' + exe 'unmenu '.g:DrChipTopLvlMenu.'DrawIt.Draw\ Ellipse' + exe 'unmenu '.g:DrChipTopLvlMenu.'DrawIt.Draw\ Flood' + exe 'unmenu '.g:DrChipTopLvlMenu.'DrawIt.Draw\ Line' + exe 'unmenu '.g:DrChipTopLvlMenu.'DrawIt.Make\ Blank\ Zone' + exe 'unmenu '.g:DrChipTopLvlMenu.'DrawIt.Append\ Blanks' + endif +" call Dret("StopDrawIt") +endfun + +" --------------------------------------------------------------------- +" SetDrawIt: this function allows one to change the drawing characters {{{2 +fun! 
SetDrawIt(di_vert,di_horiz,di_plus,di_upleft,di_upright,di_cross,di_ellipse) +" call Dfunc("SetDrawIt(vert<".a:di_vert."> horiz<".a:di_horiz."> plus<".a:di_plus."> upleft<".a:di_upleft."> upright<".a:di_upright."> cross<".a:di_cross."> ellipse<".a:di_ellipse.">)") + let b:di_vert = a:di_vert + let b:di_horiz = a:di_horiz + let b:di_plus = a:di_plus + let b:di_upleft = a:di_upleft + let b:di_upright = a:di_upright + let b:di_cross = a:di_cross + let b:di_ellipse = a:di_ellipse +" call Dret("SetDrawIt") +endfun + +" ===================================================================== +" s:DrawLeft: {{{2 +fun! s:DrawLeft() +" call Dfunc("s:DrawLeft()") + let curline = getline(".") + let curcol = virtcol(".") + let b:lastdir = 2 + + if curcol > 0 + let curchar= strpart(curline,curcol-1,1) + + " replace + if curchar == b:di_vert || curchar == b:di_plus + exe "norm! r".b:di_plus + else + exe "norm! r".b:di_horiz + endif + + " move and replace + if curcol >= 2 + call s:MoveLeft() + let curchar= strpart(curline,curcol-2,1) + if curchar == b:di_vert || curchar == b:di_plus + exe "norm! r".b:di_plus + else + exe "norm! r".b:di_horiz + endif + endif + endif +" call Dret("s:DrawLeft") +endfun + +" --------------------------------------------------------------------- +" s:DrawRight: {{{2 +fun! s:DrawRight() +" call Dfunc("s:DrawRight()") + let curline = getline(".") + let curcol = virtcol(".") + let b:lastdir = 1 + + " replace + if curcol == virtcol("$") + exe "norm! a".b:di_horiz."\<Esc>" + else + let curchar= strpart(curline,curcol-1,1) + if curchar == b:di_vert || curchar == b:di_plus + exe "norm! r".b:di_plus + else + exe "norm! r".b:di_horiz + endif + endif + + " move and replace + call s:MoveRight() + if curcol == virtcol("$") + exe "norm! i".b:di_horiz."\<Esc>" + else + let curchar= strpart(curline,curcol,1) + if curchar == b:di_vert || curchar == b:di_plus + exe "norm! r".b:di_plus + else + exe "norm! 
r".b:di_horiz + endif + endif +" call Dret("s:DrawRight") +endfun + +" --------------------------------------------------------------------- +" s:DrawUp: {{{2 +fun! s:DrawUp() +" call Dfunc("s:DrawUp()") + let curline = getline(".") + let curcol = virtcol(".") + let b:lastdir = 3 + + " replace + if curcol == 1 && virtcol("$") == 1 + exe "norm! i".b:di_vert."\<Esc>" + else + let curchar= strpart(curline,curcol-1,1) + if curchar == b:di_horiz || curchar == b:di_plus + exe "norm! r".b:di_plus + else + exe "norm! r".b:di_vert + endif + endif + + " move and replace/insert + call s:MoveUp() + let curline= getline(".") + let curchar= strpart(curline,curcol-1,1) + + if curcol == 1 && virtcol("$") == 1 + exe "norm! i".b:di_vert."\<Esc>" + elseif curchar == b:di_horiz || curchar == b:di_plus + exe "norm! r".b:di_plus + else + exe "norm! r".b:di_vert + endif + endif +" call Dret("s:DrawUp") +endfun + +" --------------------------------------------------------------------- +" s:DrawDown: {{{2 +fun! s:DrawDown() +" call Dfunc("s:DrawDown()") + let curline = getline(".") + let curcol = virtcol(".") + let b:lastdir = 4 + + " replace + if curcol == 1 && virtcol("$") == 1 + exe "norm! i".b:di_vert."\<Esc>" + else + let curchar= strpart(curline,curcol-1,1) + if curchar == b:di_horiz || curchar == b:di_plus + exe "norm! r".b:di_plus + else + exe "norm! r".b:di_vert + endif + endif + + " move and replace/insert + call s:MoveDown() + let curline= getline(".") + let curchar= strpart(curline,curcol-1,1) + if curcol == 1 && virtcol("$") == 1 + exe "norm! i".b:di_vert."\<Esc>" + elseif curchar == b:di_horiz || curchar == b:di_plus + exe "norm! r".b:di_plus + else + exe "norm! r".b:di_vert + endif +" call Dret("s:DrawDown") +endfun + +" --------------------------------------------------------------------- +" s:DrawErase: toggle [DrawIt on] and [DrawIt erase] modes {{{2 +fun! 
s:DrawErase() +" call Dfunc("s:DrawErase() b:di_erase=".b:di_erase) + if b:di_erase == 0 + let b:di_erase= 1 + echo "[DrawIt erase]" + let b:di_vert_save = b:di_vert + let b:di_horiz_save = b:di_horiz + let b:di_plus_save = b:di_plus + let b:di_upright_save = b:di_upright + let b:di_upleft_save = b:di_upleft + let b:di_cross_save = b:di_cross + let b:di_ellipse_save = b:di_ellipse + call SetDrawIt(' ',' ',' ',' ',' ',' ',' ') + else + let b:di_erase= 0 + echo "[DrawIt]" + call SetDrawIt(b:di_vert_save,b:di_horiz_save,b:di_plus_save,b:di_upleft_save,b:di_upright_save,b:di_cross_save,b:di_ellipse_save) + endif +" call Dret("s:DrawErase") +endfun + +" --------------------------------------------------------------------- +" s:DrawSpace: clear character and move right {{{2 +fun! s:DrawSpace(chr,dir) +" call Dfunc("s:DrawSpace(chr<".a:chr."> dir<".a:dir.">)") + let curcol= virtcol(".") + + " replace current location with arrowhead/space + if curcol == virtcol("$")-1 + exe "norm! r".a:chr + else + exe "norm! r".a:chr + endif + + if a:dir == 0 + let dir= b:lastdir + else + let dir= a:dir + endif + + " perform specified move + if dir == 1 + call s:MoveRight() + elseif dir == 2 + call s:MoveLeft() + elseif dir == 3 + call s:MoveUp() + else + call s:MoveDown() + endif +" call Dret("s:DrawSpace") +endfun + +" --------------------------------------------------------------------- +" s:DrawSlantDownLeft: / {{{2 +fun! s:DrawSlantDownLeft() +" call Dfunc("s:DrawSlantDownLeft()") + call s:ReplaceDownLeft() " replace + call s:MoveDown() " move + call s:MoveLeft() " move + call s:ReplaceDownLeft() " replace +" call Dret("s:DrawSlantDownLeft") +endfun + +" --------------------------------------------------------------------- +" s:DrawSlantDownRight: \ {{{2 +fun! 
s:DrawSlantDownRight() +" call Dfunc("s:DrawSlantDownRight()") + call s:ReplaceDownRight() " replace + call s:MoveDown() " move + call s:MoveRight() " move + call s:ReplaceDownRight() " replace +" call Dret("s:DrawSlantDownRight") +endfun + +" --------------------------------------------------------------------- +" s:DrawSlantUpLeft: \ {{{2 +fun! s:DrawSlantUpLeft() +" call Dfunc("s:DrawSlantUpLeft()") + call s:ReplaceDownRight() " replace + call s:MoveUp() " move + call s:MoveLeft() " move + call s:ReplaceDownRight() " replace +" call Dret("s:DrawSlantUpLeft") +endfun + +" --------------------------------------------------------------------- +" s:DrawSlantUpRight: / {{{2 +fun! s:DrawSlantUpRight() +" call Dfunc("s:DrawSlantUpRight()") + call s:ReplaceDownLeft() " replace + call s:MoveUp() " move + call s:MoveRight() " replace + call s:ReplaceDownLeft() " replace +" call Dret("s:DrawSlantUpRight") +endfun + +" --------------------------------------------------------------------- +" s:MoveLeft: {{{2 +fun! s:MoveLeft() +" call Dfunc("s:MoveLeft()") + norm! h + let b:lastdir= 2 +" call Dret("s:MoveLeft : b:lastdir=".b:lastdir) +endfun + +" --------------------------------------------------------------------- +" s:MoveRight: {{{2 +fun! s:MoveRight() +" call Dfunc("s:MoveRight()") + if virtcol(".") >= virtcol("$") - 1 + exe "norm! A \<Esc>" + else + norm! l + endif + let b:lastdir= 1 +" call Dret("s:MoveRight : b:lastdir=".b:lastdir) +endfun + +" --------------------------------------------------------------------- +" s:MoveUp: {{{2 +fun! s:MoveUp() +" call Dfunc("s:MoveUp()") + if line(".") == 1 + let curcol= virtcol(".") - 1 + if curcol == 0 && virtcol("$") == 1 + exe "norm! i \<Esc>" + elseif curcol == 0 + exe "norm! YP:s/./ /ge\<CR>0r " + else + exe "norm! YP:s/./ /ge\<CR>0".curcol."lr " + endif + else + let curcol= virtcol(".") + norm! k + while virtcol("$") <= curcol + exe "norm! 
A \<Esc>" + endwhile + endif + let b:lastdir= 3 +" call Dret("s:MoveUp : b:lastdir=".b:lastdir) +endfun + +" --------------------------------------------------------------------- +" s:MoveDown: {{{2 +fun! s:MoveDown() +" call Dfunc("s:MoveDown()") + if line(".") == line("$") + let curcol= virtcol(".") - 1 + if curcol == 0 && virtcol("$") == 1 + exe "norm! i \<Esc>" + elseif curcol == 0 + exe "norm! Yp:s/./ /ge\<CR>0r " + else + exe "norm! Yp:s/./ /ge\<CR>0".curcol."lr " + endif + else + let curcol= virtcol(".") + norm! j + while virtcol("$") <= curcol + exe "norm! A \<Esc>" + endwhile + endif + let b:lastdir= 4 +" call Dret("s:MoveDown : b:lastdir=".b:lastdir) +endfun + +" --------------------------------------------------------------------- +" s:ReplaceDownLeft: / X (upright) {{{2 +fun! s:ReplaceDownLeft() +" call Dfunc("s:ReplaceDownLeft()") + let curcol = virtcol(".") + if curcol != virtcol("$") + let curchar= strpart(getline("."),curcol-1,1) + if curchar == "\\" || curchar == "X" + exe "norm! r".b:di_cross + else + exe "norm! r".b:di_upright + endif + else + exe "norm! i".b:di_upright."\<Esc>" + endif +" call Dret("s:ReplaceDownLeft") +endfun + +" --------------------------------------------------------------------- +" s:ReplaceDownRight: \ X (upleft) {{{2 +fun! s:ReplaceDownRight() +" call Dfunc("s:ReplaceDownRight()") + let curcol = virtcol(".") + if curcol != virtcol("$") + let curchar= strpart(getline("."),curcol-1,1) + if curchar == "/" || curchar == "X" + exe "norm! r".b:di_cross + else + exe "norm! r".b:di_upleft + endif + else + exe "norm! i".b:di_upleft."\<Esc>" + endif +" call Dret("s:ReplaceDownRight") +endfun + +" --------------------------------------------------------------------- +" s:DrawFatRArrow: ----|> {{{2 +fun! s:DrawFatRArrow() +" call Dfunc("s:DrawFatRArrow()") + call s:MoveRight() + norm! r| + call s:MoveRight() + norm! 
r> +" call Dret("s:DrawFatRArrow") +endfun + +" --------------------------------------------------------------------- +" s:DrawFatLArrow: <|---- {{{2 +fun! s:DrawFatLArrow() +" call Dfunc("s:DrawFatLArrow()") + call s:MoveLeft() + norm! r| + call s:MoveLeft() + norm! r< +" call Dret("s:DrawFatLArrow") +endfun + +" --------------------------------------------------------------------- +" . +" s:DrawFatUArrow: /_\ {{{2 +" | +fun! s:DrawFatUArrow() +" call Dfunc("s:DrawFatUArrow()") + call s:MoveUp() + norm! r_ + call s:MoveRight() + norm! r\ + call s:MoveLeft() + call s:MoveLeft() + norm! r/ + call s:MoveRight() + call s:MoveUp() + norm! r. +" call Dret("s:DrawFatUArrow") +endfun + +" --------------------------------------------------------------------- +" s:DrawFatDArrow: _|_ {{{2 +" \ / +" ' +fun! s:DrawFatDArrow() +" call Dfunc("s:DrawFatDArrow()") + call s:MoveRight() + norm! r_ + call s:MoveLeft() + call s:MoveLeft() + norm! r_ + call s:MoveDown() + norm! r\ + call s:MoveRight() + call s:MoveRight() + norm! r/ + call s:MoveDown() + call s:MoveLeft() + norm! r' +" call Dret("s:DrawFatDArrow") +endfun + +" --------------------------------------------------------------------- +" s:DrawEllipse: Bresenham-like ellipse drawing algorithm {{{2 +" 2 2 can +" x y be 2 2 2 2 2 2 +" - + - = 1 rewritten b x + a y = a b +" a b as +" +" Take step which has minimum error +" (x,y-1) (x+1,y) (x+1,y-1) +" +" 2 2 2 2 2 2 +" Ei = | b x + a y - a b | +" +" Algorithm only draws arc from (0,b) to (a,0) and uses +" DrawFour() to reflect points to other three quadrants +fun! s:DrawEllipse(x0,y0,x1,y1) +" call Dfunc("s:DrawEllipse(x0=".a:x0." y0=".a:y0." x1=".a:x1." 
y1=".a:y1.")") + let x0 = a:x0 + let y0 = a:y0 + let x1 = a:x1 + let y1 = a:y1 + let xoff = (x0+x1)/2 + let yoff = (y0+y1)/2 + let a = s:Abs(x1-x0)/2 + let b = s:Abs(y1-y0)/2 + let a2 = a*a + let b2 = b*b + let twoa2= a2 + a2 + let twob2= b2 + b2 + + let xi= 0 + let yi= b + let ei= 0 + call s:DrawFour(xi,yi,xoff,yoff,a,b) + while xi <= a && yi >= 0 + + let dy= a2 - twoa2*yi + let ca= ei + twob2*xi + b2 + let cb= ca + dy + let cc= ei + dy + + let aca= s:Abs(ca) + let acb= s:Abs(cb) + let acc= s:Abs(cc) + + " pick case: (xi+1,yi) (xi,yi-1) (xi+1,yi-1) + if aca <= acb && aca <= acc + let xi= xi + 1 + let ei= ca + elseif acb <= aca && acb <= acc + let ei= cb + let xi= xi + 1 + let yi= yi - 1 + else + let ei= cc + let yi= yi - 1 + endif + if xi > a:x1 + break + endif + call s:DrawFour(xi,yi,xoff,yoff,a,b) + endw +" call Dret("s:DrawEllipse") +endf + +" --------------------------------------------------------------------- +" s:DrawFour: reflect a point to four quadrants {{{2 +fun! s:DrawFour(x,y,xoff,yoff,a,b) +" call Dfunc("s:DrawFour(xy[".a:x.",".a:y."] off[".a:xoff.",".a:yoff."] a=".a:a." b=".a:b.")") + let x = a:xoff + a:x + let y = a:yoff + a:y + let lx = a:xoff - a:x + let by = a:yoff - a:y + call s:SetCharAt(b:di_ellipse, x, y) + call s:SetCharAt(b:di_ellipse, lx, y) + call s:SetCharAt(b:di_ellipse, lx,by) + call s:SetCharAt(b:di_ellipse, x,by) +" call Dret("s:DrawFour") +endf + +" --------------------------------------------------------------------- +" s:SavePosn: saves position of cursor on screen so NetWrite can restore it {{{2 +fun! s:SavePosn() +" call Dfunc("s:SavePosn() saveposn_count=".s:saveposn_count.' ['.line('.').','.virtcol('.').']') + let s:saveposn_count= s:saveposn_count + 1 + + " Save current line and column + let b:drawit_line_{s:saveposn_count} = line(".") + let b:drawit_col_{s:saveposn_count} = virtcol(".") - 1 + + " Save top-of-screen line + norm! H + let b:drawit_hline_{s:saveposn_count}= line(".") + + " restore position + exe "norm! 
".b:drawit_hline_{s:saveposn_count}."G0z\<CR>" + if b:drawit_col_{s:saveposn_count} == 0 + exe "norm! ".b:drawit_line_{s:saveposn_count}."G0" + else + exe "norm! ".b:drawit_line_{s:saveposn_count}."G0".b:drawit_col_{s:saveposn_count}."l" + endif +" call Dret("s:SavePosn : saveposn_count=".s:saveposn_count) +endfun + +" ------------------------------------------------------------------------ +" s:RestorePosn: {{{2 +fun! s:RestorePosn() +" call Dfunc("s:RestorePosn() saveposn_count=".s:saveposn_count) + if s:saveposn_count <= 0 +" call Dret("s:RestorePosn : s:saveposn_count<=0") + return + endif + " restore top-of-screen line + exe "norm! ".b:drawit_hline_{s:saveposn_count}."G0z\<CR>" + + " restore position + if b:drawit_col_{s:saveposn_count} == 0 + exe "norm! ".b:drawit_line_{s:saveposn_count}."G0" + else + exe "norm! ".b:drawit_line_{s:saveposn_count}."G0".b:drawit_col_{s:saveposn_count}."l" + endif + if s:saveposn_count > 0 + unlet b:drawit_hline_{s:saveposn_count} + unlet b:drawit_line_{s:saveposn_count} + unlet b:drawit_col_{s:saveposn_count} + let s:saveposn_count= s:saveposn_count - 1 + endif +" call Dret("s:RestorePosn : saveposn_count=".s:saveposn_count) +endfun + +" ------------------------------------------------------------------------ +" s:Flood: this function begins a flood of a region {{{2 +" based on b:di... characters as boundaries +" and starting at the current cursor location. +fun! 
s:Flood() +" call Dfunc("s:Flood()") + + let s:bndry = b:di_vert.b:di_horiz.b:di_plus.b:di_upright.b:di_upleft.b:di_cross.b:di_ellipse + let row = line(".") + let col = virtcol(".") + let athold = @0 + let s:DIrows = line("$") + call s:SavePosn() + + " get fill character from user + " Put entire fillchar string into the s:bndry (boundary characters), + " although only use the first such character for filling + call inputsave() + let s:fillchar= input("Enter fill character: ") + call inputrestore() + let s:bndry= "[".escape(s:bndry.s:fillchar,'\-]^')."]" + if strlen(s:fillchar) > 1 + let s:fillchar= strpart(s:fillchar,0,1) + endif + + " flood the region + call s:DI_Flood(row,col) + + " restore + call s:RestorePosn() + let @0= athold + unlet s:DIrows s:bndry s:fillchar + +" call Dret("s:Flood") +endfun + +" ------------------------------------------------------------------------ +" s:DI_Flood: fill up to the boundaries all characters to the left and right. {{{2 +" Then, based on the left/right column extents reached, check +" adjacent rows to see if any characters there need filling. +fun! s:DI_Flood(frow,fcol) +" call Dfunc("s:DI_Flood(frow=".a:frow." fcol=".a:fcol.")") + if a:frow <= 0 || a:fcol <= 0 || s:SetPosn(a:frow,a:fcol) || s:IsBoundary(a:frow,a:fcol) +" call Dret("s:DI_Flood") + return + endif + + " fill current line + let colL= s:DI_FillLeft(a:frow,a:fcol) + let colR= s:DI_FillRight(a:frow,a:fcol+1) + + " do a filladjacent on the next line up + if a:frow > 1 + call s:DI_FillAdjacent(a:frow-1,colL,colR) + endif + + " do a filladjacent on the next line down + if a:frow < s:DIrows + call s:DI_FillAdjacent(a:frow+1,colL,colR) + endif + +" call Dret("s:DI_Flood") +endfun + +" ------------------------------------------------------------------------ +" s:DI_FillLeft: Starting at (frow,fcol), non-boundary locations are {{{2 +" filled with the fillchar. The leftmost extent reached +" is returned. +fun! 
s:DI_FillLeft(frow,fcol) +" call Dfunc("s:DI_FillLeft(frow=".a:frow." fcol=".a:fcol.")") + if s:SetPosn(a:frow,a:fcol) +" call Dret("s:DI_FillLeft ".a:fcol) + return a:fcol + endif + + let Lcol= a:fcol + while Lcol >= 1 + if !s:IsBoundary(a:frow,Lcol) + exe "silent! norm! r".s:fillchar."h" + else + break + endif + let Lcol= Lcol - 1 + endwhile + + let Lcol= (Lcol < 1)? 1 : Lcol + 1 + +" call Dret("s:DI_FillLeft ".Lcol) + return Lcol +endfun + +" --------------------------------------------------------------------- +" s:DI_FillRight: Starting at (frow,fcol), non-boundary locations are {{{2 +" filled with the fillchar. The rightmost extent reached +" is returned. +fun! s:DI_FillRight(frow,fcol) +" call Dfunc("s:DI_FillRight(frow=".a:frow." fcol=".a:fcol.")") + if s:SetPosn(a:frow,a:fcol) +" call Dret("s:DI_FillRight ".a:fcol) + return a:fcol + endif + + let Rcol = a:fcol + while Rcol <= virtcol("$") + if !s:IsBoundary(a:frow,Rcol) + exe "silent! norm! r".s:fillchar."l" + else + break + endif + let Rcol= Rcol + 1 + endwhile + + let DIcols = virtcol("$") + let Rcol = (Rcol > DIcols)? DIcols : Rcol - 1 + +" call Dret("s:DI_FillRight ".Rcol) + return Rcol +endfun + +" --------------------------------------------------------------------- +" s:DI_FillAdjacent: {{{2 +" DI_Flood does FillLeft and FillRight, so the run from left to right +" (fcolL to fcolR) is known to have been filled. FillAdjacent is called +" from (fcolL to fcolR) on the lines one row up and down; if any character +" on the run is not a boundary character, then a flood is needed on that +" location. +fun! s:DI_FillAdjacent(frow,fcolL,fcolR) +" call Dfunc("s:DI_FillAdjacent(frow=".a:frow." fcolL=".a:fcolL." 
fcolR=".a:fcolR.")") + + let icol = a:fcolL + while icol <= a:fcolR + if !s:IsBoundary(a:frow,icol) + call s:DI_Flood(a:frow,icol) + endif + let icol= icol + 1 + endwhile + +" call Dret("s:DI_FillAdjacent") +endfun + +" --------------------------------------------------------------------- +" s:SetPosn: set cursor to given position on screen {{{2 +" srow,scol: -s-creen row and column +" Returns 1 : failed sanity check +" 0 : otherwise +fun! s:SetPosn(row,col) +" call Dfunc("s:SetPosn(row=".a:row." col=".a:col.")") + " sanity checks + if a:row < 1 +" call Dret("s:SetPosn 1") + return 1 + endif + if a:col < 1 +" call Dret("s:SetPosn 1") + return 1 + endif + + exe "norm! ".a:row."G".a:col."\<Bar>" + +" call Dret("s:SetPosn 0") + return 0 +endfun + +" --------------------------------------------------------------------- +" s:IsBoundary: returns 0 if not on boundary, 1 if on boundary {{{2 +" The "boundary" also includes the fill character. +fun! s:IsBoundary(row,col) +" call Dfunc("s:IsBoundary(row=".a:row." col=".a:col.")") + + let orow= line(".") + let ocol= virtcol(".") + exe "norm! ".a:row."G".a:col."\<Bar>" + norm! vy + let ret= @0 =~ s:bndry + if a:row != orow || a:col != ocol + exe "norm! ".orow."G".ocol."\<Bar>" + endif + +" call Dret("s:IsBoundary ".ret." : @0<".@0.">") + return ret +endfun + +" --------------------------------------------------------------------- +" s:PutBlock: puts a register's contents into the text at the current {{{2 +" cursor location +" replace= 0: Blanks are transparent +" = 1: Blanks copy over +" = 2: Erase all drawing characters +" +fun! 
s:PutBlock(block,replace) +" call Dfunc("s:PutBlock(block<".a:block."> replace=".a:replace.")") + call s:SavePosn() + exe "let block = @".a:block + let blocklen = strlen(block) + let drawit_line = line('.') + let drawchars = '['.escape(b:di_vert.b:di_horiz.b:di_plus.b:di_upright.b:di_upleft.b:di_cross,'\-').']' + + " insure that putting a block will do so in a region containing spaces out to textwidth + exe "let blockrows= strlen(substitute(@".a:block.",'[^[:cntrl:]]','','g'))" + exe 'let blockcols= strlen(substitute(@'.a:block.",'^\\(.\\{-}\\)\\n\\_.*$','\\1',''))" + let curline= line('.') + let curcol = virtcol('.') +" call Decho("blockrows=".blockrows." blockcols=".blockcols." curline=".curline." curcol=".curcol) + call s:AutoCanvas(curline-1,curline + blockrows+1,curcol + blockcols) + + let iblock = 0 + while iblock < blocklen + let chr= strpart(block,iblock,1) + + if char2nr(chr) == 10 + " handle newline + let drawit_line= drawit_line + 1 + if b:drawit_col_{s:saveposn_count} == 0 + exe "norm! ".drawit_line."G0" + else + exe "norm! ".drawit_line."G0".b:drawit_col_{s:saveposn_count}."l" + endif + + elseif a:replace == 2 + " replace all drawing characters with blanks + if match(chr,drawchars) != -1 + norm! r l + else + norm! l + endif + + elseif chr == ' ' && a:replace == 0 + " allow blanks to be transparent + norm! l + + else + " usual replace character + exe "norm! r".chr."l" + endif + let iblock = iblock + 1 + endwhile + call s:RestorePosn() + +" call Dret("s:PutBlock") +endfun + +" --------------------------------------------------------------------- +" s:AutoCanvas: automatic "Canvas" routine {{{2 +fun! s:AutoCanvas(linestart,linestop,cols) +" call Dfunc("s:AutoCanvas(linestart=".a:linestart." linestop=".a:linestop." cols=".a:cols.") line($)=".line("$")) + + " insure there's enough blank lines at end-of-file + if line("$") < a:linestop +" call Decho("append ".(a:linestop - line("$"))." empty lines") + call s:SavePosn() + exe "norm! 
G".(a:linestop - line("$"))."o\<esc>" + call s:RestorePosn() + endif + + " insure that any tabs contained within the selected region are converted to blanks + let etkeep= &et + set et +" call Decho("exe ".a:linestart.",".a:linestop."retab") + exe a:linestart.",".a:linestop."retab" + let &et= etkeep + + " insure that there's whitespace to textwidth/screenwidth/a:cols + if a:cols <= 0 + let tw= &tw + if tw <= 0 + let tw= &columns + endif + else + let tw= a:cols + endif +" Decho("tw=".tw) + if search('^$\|.\%<'.(tw+1).'v$',"cn",(a:linestop+1)) > 0 +" call Decho("append trailing whitespace") + call s:Spacer(a:linestart,a:linestop,tw) + endif + +" call Dret("s:AutoCanvas : tw=".tw) +endfun + +" ===================================================================== +" DrawIt Functions: (by Sylvain Viart) {{{1 +" ===================================================================== + +" --------------------------------------------------------------------- +" s:Canvas: {{{2 +fun! s:Canvas() +" call Dfunc("s:Canvas()") + + let lines = input("how many lines under the cursor? ") + let curline= line('.') + if curline < line('$') + exe "norm! ".lines."o\<esc>" + endif + call s:Spacer(curline+1,curline+lines,0) + let b:drawit_canvas_used= 1 + +" call Dret("s:Canvas") +endf + +" --------------------------------------------------------------------- +" s:Spacer: fill end of line with space {{{2 +" if a:cols >0: to the virtual column specified by a:cols +" <=0: to textwidth (if nonzero), otherwise +" to display width (&columns) +fun! s:Spacer(debut, fin, cols) range +" call Dfunc("s:Spacer(debut=".a:debut." fin=".a:fin." 
cols=".a:cols.")") + call s:SavePosn() + + if a:cols <= 0 + let width = &textwidth + if width <= 0 + let width= &columns + endif + else + let width= a:cols + endif + + let l= a:debut + while l <= a:fin + call setline(l,printf('%-'.width.'s',getline(l))) + let l = l + 1 + endwhile + + call s:RestorePosn() + +" call Dret("s:Spacer") +endf + +" --------------------------------------------------------------------- +" s:CallBox: call the specified function using the current visual selection box {{{2 +fun! s:CallBox(func_name) +" call Dfunc("s:CallBox(func_name<".a:func_name.">)") + + let xdep = b:xmouse_start + let ydep = b:ymouse_start + let col0 = virtcol("'<") + let row0 = line("'<") + let col1 = virtcol("'>") + let row1 = line("'>") +" call Decho("TL corner[".row0.",".col0."] original") +" call Decho("BR corner[".row1.",".col1."] original") +" call Decho("xydep [".ydep.",".xdep."]") + + if col1 == xdep && row1 == ydep + let col1 = col0 + let row1 = row0 + let col0 = xdep + let row0 = ydep + endif +" call Decho("TL corner[".row0.",".col0."]") +" call Decho("BR corner[".row1.",".col1."]") + + " insure that the selected region has blanks to that specified by col1 + call s:AutoCanvas((row0 < row1)? row0 : row1,(row1 > row0)? row1 : row0,(col1 > col0)? col1 : col0) + +" call Decho("exe call s:".a:func_name."(".col0.','.row0.','.col1.','.row1.")") + exe "call s:".a:func_name."(".col0.','.row0.','.col1.','.row1.")" + let b:xmouse_start= 0 + let b:ymouse_start= 0 + +" call Dret("s:CallBox") +endf + +" --------------------------------------------------------------------- +" s:DrawBox: {{{2 +fun! s:DrawBox(x0, y0, x1, y1) +" call Dfunc("s:DrawBox(xy0[".a:x0.",".a:y0." xy1[".a:x1.",".a:y1."])") + " loop each line + let l = a:y0 + while l <= a:y1 + let c = a:x0 + while c <= a:x1 + if l == a:y0 || l == a:y1 + let remp = '-' + if c == a:x0 || c == a:x1 + let remp = '+' + endif + else + let remp = '|' + if c != a:x0 && c != a:x1 + let remp = '.' + endif + endif + + if remp != '.' 
+ call s:SetCharAt(remp, c, l) + endif + let c = c + 1 + endw + let l = l + 1 + endw + +" call Dret("s:DrawBox") +endf + +" --------------------------------------------------------------------- +" s:SetCharAt: set the character at the specified position (must exist) {{{2 +fun! s:SetCharAt(char, x, y) +" call Dfunc("s:SetCharAt(char<".a:char."> xy[".a:x.",".a:y."])") + + let content = getline(a:y) + let long = strlen(content) + let deb = strpart(content, 0, a:x - 1) + let fin = strpart(content, a:x, long) + call setline(a:y, deb.a:char.fin) + +" call Dret("s:SetCharAt") +endf + +" --------------------------------------------------------------------- +" s:DrawLine: Bresenham line-drawing algorithm {{{2 +" taken from : +" http://www.graphics.lcs.mit.edu/~mcmillan/comp136/Lecture6/Lines.html +fun! s:DrawLine(x0, y0, x1, y1, horiz) +" call Dfunc("s:DrawLine(xy0[".a:x0.",".a:y0."] xy1[".a:x1.",".a:y1."] horiz=".a:horiz.")") + + if ( a:x0 < a:x1 && a:y0 > a:y1 ) || ( a:x0 > a:x1 && a:y0 > a:y1 ) + " swap direction + let x0 = a:x1 + let y0 = a:y1 + let x1 = a:x0 + let y1 = a:y0 + else + let x0 = a:x0 + let y0 = a:y0 + let x1 = a:x1 + let y1 = a:y1 + endif + let dy = y1 - y0 + let dx = x1 - x0 + + if dy < 0 + let dy = -dy + let stepy = -1 + else + let stepy = 1 + endif + + if dx < 0 + let dx = -dx + let stepx = -1 + else + let stepx = 1 + endif + + let dy = 2*dy + let dx = 2*dx + + if dx > dy + " move under x + let char = a:horiz + call s:SetCharAt(char, x0, y0) + let fraction = dy - (dx / 2) " same as 2*dy - dx + while x0 != x1 + let char = a:horiz + if fraction >= 0 + if stepx > 0 + let char = '\' + else + let char = '/' + endif + let y0 = y0 + stepy + let fraction = fraction - dx " same as fraction -= 2*dx + endif + let x0 = x0 + stepx + let fraction = fraction + dy " same as fraction = fraction - 2*dy + call s:SetCharAt(char, x0, y0) + endw + else + " move under y + let char = '|' + call s:SetCharAt(char, x0, y0) + let fraction = dx - (dy / 2) + while y0 != y1 + let 
char = '|' + if fraction >= 0 + if stepy > 0 || stepx < 0 + let char = '\' + else + let char = '/' + endif + let x0 = x0 + stepx + let fraction = fraction - dy + endif + let y0 = y0 + stepy + let fraction = fraction + dx + call s:SetCharAt(char, x0, y0) + endw + endif + +" call Dret("s:DrawLine") +endf + +" --------------------------------------------------------------------- +" s:Arrow: {{{2 +fun! s:Arrow(x0, y0, x1, y1) +" call Dfunc("s:Arrow(xy0[".a:x0.",".a:y0."] xy1[".a:x1.",".a:y1."])") + + call s:DrawLine(a:x0, a:y0, a:x1, a:y1,'-') + let dy = a:y1 - a:y0 + let dx = a:x1 - a:x0 + if s:Abs(dx) > <SID>Abs(dy) + " move x + if dx > 0 + call s:SetCharAt('>', a:x1, a:y1) + else + call s:SetCharAt('<', a:x1, a:y1) + endif + else + " move y + if dy > 0 + call s:SetCharAt('v', a:x1, a:y1) + else + call s:SetCharAt('^', a:x1, a:y1) + endif + endif + +" call Dret("s:Arrow") +endf + +" --------------------------------------------------------------------- +" s:Abs: return absolute value {{{2 +fun! s:Abs(val) + if a:val < 0 + return - a:val + else + return a:val + endif +endf + +" --------------------------------------------------------------------- +" s:DrawPlainLine: {{{2 +fun! s:DrawPlainLine(x0,y0,x1,y1) +" call Dfunc("s:DrawPlainLine(xy0[".a:x0.",".a:y0."] xy1[".a:x1.",".a:y1."])") + +" call Decho("exe call s:DrawLine(".a:x0.','.a:y0.','.a:x1.','.a:y1.',"_")') + exe "call s:DrawLine(".a:x0.','.a:y0.','.a:x1.','.a:y1.',"_")' + +" call Dret("s:DrawPlainLine") +endf + +" ===================================================================== +" Mouse Functions: {{{1 +" ===================================================================== + +" --------------------------------------------------------------------- +" s:LeftStart: Read visual drag mapping {{{2 +" The visual start point is saved in b:xmouse_start and b:ymouse_start +fun! 
s:LeftStart() +" call Dfunc("s:LeftStart()") + let b:xmouse_start = virtcol('.') + let b:ymouse_start = line('.') + vnoremap <silent> <leftrelease> <leftrelease>:<c-u>call <SID>LeftRelease()<cr>gv +" call Dret("s:LeftStart : [".b:ymouse_start.",".b:xmouse_start."]") +endf! + +" --------------------------------------------------------------------- +" s:LeftRelease: {{{2 +fun! s:LeftRelease() +" call Dfunc("s:LeftRelease()") + vunmap <leftrelease> +" call Dret("s:LeftRelease : [".line('.').','.virtcol('.').']') +endf + +" --------------------------------------------------------------------- +" s:SLeftStart: begin drawing with a brush {{{2 +fun! s:SLeftStart() + if !exists("b:drawit_brush") + let b:drawit_brush= "a" + endif +" call Dfunc("s:SLeftStart() brush=".b:drawit_brush.' ['.line('.').','.virtcol('.').']') + noremap <silent> <s-leftdrag> <leftmouse>:<c-u>call <SID>SLeftDrag()<cr> + noremap <silent> <s-leftrelease> <leftmouse>:<c-u>call <SID>SLeftRelease()<cr> +" call Dret("s:SLeftStart") +endfun + +" --------------------------------------------------------------------- +" s:SLeftDrag: {{{2 +fun! s:SLeftDrag() +" call Dfunc("s:SLeftDrag() brush=".b:drawit_brush.' ['.line('.').','.virtcol('.').']') + call s:SavePosn() + call s:PutBlock(b:drawit_brush,0) + call s:RestorePosn() +" call Dret("s:SLeftDrag") +endfun + +" --------------------------------------------------------------------- +" s:SLeftRelease: {{{2 +fun! s:SLeftRelease() +" call Dfunc("s:SLeftRelease() brush=".b:drawit_brush.' ['.line('.').','.virtcol('.').']') + call s:SLeftDrag() + nunmap <s-leftdrag> + nunmap <s-leftrelease> +" call Dret("s:SLeftRelease") +endfun + +" --------------------------------------------------------------------- +" s:CLeftStart: begin moving a block of text {{{2 +fun! s:CLeftStart() + if !exists("b:drawit_brush") + let b:drawit_brush= "a" + endif +" call Dfunc("s:CLeftStart() brush=".b:drawit_brush) + if !line("'<") || !line("'>") + redraw! 
+ echohl Error + echo "must visual-block select a region first" +" call Dret("s:CLeftStart : must visual-block select a region first") + return + endif + '<,'>call DrawIt#SetBrush(b:drawit_brush) + let s:cleft_width= virtcol("'>") - virtcol("'<") + if s:cleft_width < 0 + let s:cleft_width= -s:cleft_width + endif + let s:cleft_height= line("'>") - line("'<") + if s:cleft_height < 0 + let s:cleft_height= -s:cleft_height + endif + if exists("s:cleft_oldblock") + unlet s:cleft_oldblock + endif +" call Decho("blocksize: ".s:cleft_height."x".s:cleft_width) + noremap <silent> <c-leftdrag> :<c-u>call <SID>CLeftDrag()<cr> + noremap <silent> <c-leftrelease> <leftmouse>:<c-u>call <SID>CLeftRelease()<cr> +" call Dret("s:CLeftStart") +endfun + +" --------------------------------------------------------------------- +" s:CLeftDrag: {{{2 +fun! s:CLeftDrag() +" call Dfunc("s:CLeftDrag() cleft_width=".s:cleft_width." cleft_height=".s:cleft_height) + exe 'let keepbrush= @'.b:drawit_brush +" call Decho("keepbrush<".keepbrush.">") + + " restore prior contents of block zone + if exists("s:cleft_oldblock") +" call Decho("draw prior contents: [".line(".").",".virtcol(".")."] line($)=".line("$")) +" call Decho("draw prior contents<".s:cleft_oldblock.">") + exe 'let @'.b:drawit_brush.'=s:cleft_oldblock' + call s:PutBlock(b:drawit_brush,1) + endif + + " move cursor to <leftmouse> position + exe "norm! \<leftmouse>" + + " save new block zone contents +" call Decho("save contents: [".line(".").",".virtcol(".")."] - [".(line(".")+s:cleft_height).",".(virtcol(".")+s:cleft_width)."]") + let curline= line(".") + call s:AutoCanvas(curline,curline + s:cleft_height,virtcol(".")+s:cleft_width) + if s:cleft_width > 0 && s:cleft_height > 0 + exe "silent! norm! \<c-v>".s:cleft_width."l".s:cleft_height.'j"'.b:drawit_brush.'y' + elseif s:cleft_width > 0 + exe "silent! norm! \<c-v>".s:cleft_width.'l"'.b:drawit_brush.'y' + else + exe "silent! norm! 
\<c-v>".s:cleft_height.'j"'.b:drawit_brush.'y' + endif + exe "let s:cleft_oldblock= @".b:drawit_brush +" call Decho("s:cleft_oldblock=@".b:drawit_brush) +" call Decho("cleft_height=".s:cleft_height." cleft_width=".s:cleft_width) +" call Decho("save contents<".s:cleft_oldblock.">") + + " draw the brush +" call Decho("draw brush") +" call Decho("draw brush ".b:drawit_brush.": [".line(".").",".virtcol(".")."] line($)=".line("$")) + exe 'let @'.b:drawit_brush.'=keepbrush' + call s:PutBlock(b:drawit_brush,1) + +" call Dret("s:CLeftDrag") +endfun + +" --------------------------------------------------------------------- +" s:CLeftRelease: {{{2 +fun! s:CLeftRelease() +" call Dfunc("s:CLeftRelease()") + call s:CLeftDrag() + nunmap <c-leftdrag> + nunmap <c-leftrelease> + unlet s:cleft_oldblock s:cleft_height s:cleft_width +" call Dret("s:CLeftRelease") +endfun + +" --------------------------------------------------------------------- +" DrawIt#SetBrush: {{{2 +fun! DrawIt#SetBrush(brush) range +" call Dfunc("DrawIt#SetBrush(brush<".a:brush.">)") + let b:drawit_brush= a:brush +" call Decho("visualmode<".visualmode()."> range[".a:firstline.",".a:lastline."] visrange[".line("'<").",".line("'>")."]") + if visualmode() == "\<c-v>" && ((a:firstline == line("'>") && a:lastline == line("'<")) || (a:firstline == line("'<") && a:lastline == line("'>"))) + " last visual mode was visual block mode, and + " either [firstline,lastline] == ['<,'>] or ['>,'<] + " Assuming that SetBrush called from a visual-block selection! + " Yank visual block into selected register (brush) +" call Decho("yanking visual block into register ".b:drawit_brush) + exe 'norm! 
gv"'.b:drawit_brush.'y' + endif +" call Dret("DrawIt#SetBrush : b:drawit_brush=".b:drawit_brush) +endfun + +" ------------------------------------------------------------------------ +" Modelines: {{{1 +" vim: fdm=marker +let &cpo= s:keepcpo +unlet s:keepcpo diff --git a/.vim/doc/DrawIt.txt b/.vim/doc/DrawIt.txt new file mode 100644 index 0000000..bae86ff --- /dev/null +++ b/.vim/doc/DrawIt.txt @@ -0,0 +1,399 @@ +*drawit.txt* The DrawIt Tool Jun 12, 2008 + +Authors: Charles E. Campbell, Jr. <[email protected]> {{{1 + Sylvain Viart <[email protected]> + (remove NOSPAM from Campbell's email first) +Copyright: Copyright (C) 2004-2007 Charles E. Campbell, Jr. {{{1 + Permission is hereby granted to use and distribute this code, + with or without modifications, provided that this copyright + notice is copied with it. Like anything else that's free, + DrawIt.vim is provided *as is* and comes with no warranty + of any kind, either expressed or implied. By using this + plugin, you agree that in no event will the copyright + holder be liable for any damages resulting from the use + of this software. + + +============================================================================== +1. Contents *drawit-contents* {{{1 + + 1. Contents......................: |drawit-contents| + 2. DrawIt Manual.................: |drawit| + 3. DrawIt Usage..................: |drawit-usage| + Starting....................: |drawit-start| + Stopping....................: |drawit-stop| + User Map Protection.........: |drawit-protect| + Drawing.....................: |drawit-drawing| + Changing Drawing Characters.: |drawit-setdrawit| + Moving......................: |drawit-moving| + Erasing.....................: |drawit-erase| + Example.....................: |drawit-example| + Visual Block Mode...........: |drawit-visblock| + Brushes.....................: |drawit-brush| + DrawIt Modes................: |drawit-modes| + 4. 
DrawIt History................: |drawit-history| + + +============================================================================== +2. DrawIt Manual *drawit* {{{1 + *drawit-manual* + /===============+============================================================\ + || Starting & | || + || Stopping | Explanation || + ++--------------+-----------------------------------------------------------++ + || \di | start DrawIt |drawit-start| || + || \ds | stop DrawIt |drawit-stop| || + || :DIstart | start DrawIt |drawit-start| || + || :DIstop | stop DrawIt |drawit-stop| || + || | || + ++==============+===========================================================++ + || Maps | Explanation || + ++--------------+-----------------------------------------------------------++ + || | The DrawIt routines use a replace, move, and || + || | replace/insert strategy. The package also lets one insert|| + || | spaces, draw arrows by using the following characters or || + || | keypad characters: || + || +-----------------------------------------------------------++ + || <left> | move and draw left |drawit-drawing| || + || <right> | move and draw right, inserting lines/space as needed || + || <up> | move and draw up, inserting lines/space as needed || + || <down> | move and draw down, inserting lines/space as needed || + || <s-left> | move cursor left |drawit-move| || + || <s-right> | move cursor right, inserting lines/space as needed || + || <s-up> | move cursor up, inserting lines/space as needed || + || <s-down> | move cursor down, inserting lines/space as needed || + || <space> | toggle into and out of erase mode || + || > | insert a > and move right (draw -> arrow) || + || < | insert a < and move left (draw <- arrow) || + || ^ | insert a ^ and move up (draw ^ arrow) || + || v | insert a v and move down (draw v arrow) || + || <pgdn> | replace with a \, move down and right, and insert a \ || + || <end> | replace with a /, move down and left, and insert a / || + || <pgup> | replace with a 
/, move up and right, and insert a / || + || <home> | replace with a \, move up and left, and insert a \ || + || \> | insert a fat > and move right (draw -> arrow) || + || \< | insert a fat < and move left (draw <- arrow) || + || \^ | insert a fat ^ and move up (draw ^ arrow) || + || \v | insert a fat v and move down (draw v arrow) || + ||<s-leftmouse> | drag and draw with current brush |drawit-brush| || + ||<c-leftmouse> | drag and move current brush |drawit-brush| || + || | || + ||==============+===========================================================++ + ||Visual Cmds | Explanation || + ||--------------+-----------------------------------------------------------++ + || | The drawing mode routines use visual-block mode to || + || | select endpoints for lines, arrows, and ellipses. Bresen- || + || | ham and Bresenham-like algorithms are used for this. || + || | || + || | These routines need a block of spaces, and so the canvas || + || | routine must first be used to create such a block. The || + || | canvas routine will query the user for the number of || + || | lines to hold |'textwidth'| spaces. 
|| + || +-----------------------------------------------------------++ + || \a | draw arrow from corners of visual-block selected region || + || \b | draw box on visual-block selected region || + || \c | the canvas routine (will query user, see above) || + || \e | draw an ellipse on visual-block selected region || + || \f | flood figure with a character (you will be prompted) || + || \l | draw line from corners of visual-block selected region || + || \s | spacer: appends spaces up to the textwidth (default: 78) || + || | || + ++==============+===========================================================++ + || Function and Explanation || + ++--------------+-----------------------------------------------------------++ + || :call SetDrawIt('vertical','horizontal','crossing','\','/','X','*') || + || set drawing characters for motions for moving || + || and for the ellipse drawing boundary || + || default motion || + || | up/down, || + || - left/right, || + || + -| crossing, || + || \ downright, || + || / downleft, and || + || X \/ crossing || + ++=======================+==================================================++ + || Commands | Explanation || + ++-----------------------+--------------------------------------------------++ + || :SetBrush a-z | sets brush (register) to given register || + || :'<,'>SetBrush a-z | yanks visual block to brush (register) || + \============================================================================/ + + +============================================================================== +3. DrawIt Usage *drawit-usage* {{{1 + +STARTING *drawit-start* {{{2 +\di + +Typically one puts <drawit.vim> into the .vim/plugin directory +(vimfiles\plugin for Windows) where it becomes always available. It uses a +minimal interface (\di: you can think of it as *D*raw*I*t or *D*rawIt +*I*nitialize) to start it and (\ds: *D*rawIt *S*top) to stop it. Instead of +using "\" you may specify your own preference for a map leader (see +|mapleader|). 
+ +A message, "[DrawIt]", will appear on the message line. + + +STOPPING *drawit-stop* {{{2 +\ds + +When you are done with DrawIt, use \ds to stop DrawIt mode. Stopping DrawIt +will restore your usual options and remove the maps DrawIt set up. + +A message, "[DrawIt off]", will appear on the message line. + + +USER MAP PROTECTION *drawit-protect* {{{2 + +Starting DrawIt causes it to set up a number of maps which facilitate drawing. +DrawIt accomodates users with conflicting maps by saving both maps and user +options and before setting them to what DrawIt needs. When you stop DrawIt +(|drawit-stop|), DrawIt will restore the user's maps and options as they were +before DrawIt was started. + + +OPTIONS *drawit-options* {{{2 + + *g:drawit_insertmode* +g:drawit_insertmode : if this variable exists and is 1 then maps are + made which make cursor-control drawing available + while in insert mode, too. Otherwise, DrawIt's + maps only affect normal mode. + +DRAWING *drawit-drawing* {{{2 + +After DrawIt is started, use the number pad or arrow keys to move the cursor +about. As the cursor moves, DrawIt will then leave appropriate "line" +characters behind as you move horizontally, vertically, or diagonally, and +will transparently enlarge your file to accommodate your drawing as needed. +The trail will consist of -, |, \, / characters (depending on which direction +and SetDrawIt() changes), and + and X characters where line crossings occur. +You may use h-j-k-l to move about your display and generally use editing +commands as you wish even while in DrawIt mode. + + +CHANGING DRAWING CHARACTERS *drawit-setdrawit* {{{2 + +The SetDrawIt() function is available for those who wish to change the +characters that DrawIt uses. > + + ex. :call SetDrawIt('*','*','*','*','*','*','*') + ex. :call SetDrawIt('-','|','-','\','/','/','*') +< +The first example shows how to change all the DrawIt drawing characters to +asterisks, and the second shows how to give crossing priority to - and /. 
+The default setting is equivalent to: > + + :call SetDrawIt('|','-','+','\','/','X','*') +< +where SetDrawit()'s arguments refer, in order, to the > + + vertical drawing character + horizontal drawing character + horizontal/vertical crossing drawing character + down right drawing character + down left drawing character + diagonal crossing drawing character + ellipse boundary drawing character +< + +MOVING *drawit-move* *drawit-moving* {{{2 + +DrawIt supports shifting the arrow keys to cause motion of the cursor. The +motion of the cursor will not modify what's below the cursor. The cursor +will move and lines and/or spaces will be inserted to support the move as +required. Your terminal may not support shifted arrow keys, however, or Vim +may not catch them as such. For example, on the machine I use, shift-up +(<s-up>) produced <Esc>[161q, but vim didn't know that sequence was a <s-up>. +I merely made a nmap: + + nmap <Esc>[161q <s-up> + +and vim thereafter recognized the <s-up> command. + + +ERASING *drawit-erase* {{{2 +<space> + +The <space> key will toggle DrawIt's erase mode/DrawIt mode. When in [DrawIt +erase] mode, a message "[DrawIt erase]" will appear and the number pad will +now cause spaces to be drawn instead of the usual drawing characters. The +drawing behavior will be restored when the <space> key toggles DrawIt back +to regular DrawIt mode. 
+ + +EXAMPLES *drawit-example* {{{2 + +Needless to say, the square spirals which follow were done with DrawIt and +a bit of block editing with Vim: > + + +------------ -----------+ +------------ -----------+ +------------ + |+----------+ +---------+| |+----------+ +---------+| |+----------+ + ||+--------+| |+-------+|| ||+--------+| |+-------+|| ||+--------+| + |||-------+|| ||+------||| |||-------+|| ||+------||| |||-------+|| + ||+-------+|| ||+------+|| ||+-------+|| ||+------+|| ||+-------+|| + |+---------+| |+--------+| |+---------+| |+--------+| |+---------+| + +-----------+ +----------+ +-----------+ +----------+ +-----------+ + +VISUAL BLOCK MODE FOR ARROWS LINES BOXES AND ELLIPSES *drawit-visblock* {{{2 + +\a : draw arrow from corners of visual-block selected region *drawit-a* +\b : draw box on visual-block selected region *drawit-b* +\c : the canvas routine (will query user, see above) *drawit-c* +\e : draw an ellipse on visual-block selected region *drawit-e* +\f : flood figure with a character (you will be prompted) *drawit-f* +\l : draw line from corners of visual-block selected region *drawit-l* +\s : spacer: appends spaces up to the textwidth (default: 78) *drawit-s* + +The DrawIt package has been merged with Sylvain Viart's drawing package (by +permission) which provides DrawIt with visual-block selection of +starting/ending point drawing of arrows (\a), lines (\l), and boxes (\b). +Additionally I wrote an ellipse drawing function using visual block +specification (|drawit-e|). + +One may create a block of spaces for these maps to operate in; the "canvas" +routine (\c) will help create such blocks. First, the s:Canvas() routine will +query the user for the number of lines s/he wishes to have, and will then fill +those lines with spaces out to the |'textwidth'| if user has specified it; +otherwise, the display width will be used. + +The Sylvain Viart functions and the ellipse drawing function depend +upon using visual block mode. 
As a typical use: > + + Example: * \h + DrawIt asks: how many lines under the cursor? 10 + DrawIt then appends 10 lines filled with blanks + out to textwidth (if defined) or 78 columns. + * ctrl-v (move) \b + DrawIt then draws a box + * ctrl-v (move) \e + DrawIt then draws an ellipse +< +Select the first endpoint with ctrl-v and then move to the other endpoint. +One may then select \a for arrows, \b for boxes, \e for ellipses, or \l for +lines. The internal s:AutoCanvas() will convert tabs to spaces and will +extend with spaces as needed to support the visual block. Note that when +DrawIt is enabled, virtualedit is also enabled (to "all"). +> + Examples: + + __ _ *************** +-------+ + \_ _/ **** **** | | + \_ _/ ** ---------> ** | | + \_ _/ **** **** | | + \__/ <------- *************** +-------+ + + \l \a \e and \a \b +< + *drawit-setbrush* +BRUSHES *drawit-brush* {{{2 +> + :SetBrush a-z +< + Set the current brush to the given letter (actually, its + a named register). Default brush: a > + ex. :SetBrush b + + :'<,'>SetBrush a-z +< + Set the current brush to the given letter, and yank the visual + block to that named register). Default brush: a +> + <leftmouse> +< + Select a visual-block region. One may use "ay, for example, + to yank selected text to register a. +> + <shift-leftmouse> +< + One may drag and draw with the current brush (default brush: a) + by holding down the shift key and the leftmouse button and moving + the mouse. Blanks in the brush are considered to be transparent. +> + <ctrl-leftmouse> +< + One may drag and move a selection with <ctrl-leftmouse>. First, + select the region using the <leftmouse>. Release the mouse button, + then press ctrl and the <leftmouse> button; while continuing to press + the button, move the mouse. The selected block of text will then + move along with the cursor. +> + \ra ... \rz +< + Replace text with the given register's contents (ie. the brush). +> + \pa ... \pz +< + Like \ra ... 
\rz, except that blanks are considered to be transparent. + + Example: Draw the following > + \ \ + o o + * + --- +< Then use ctrl-v, move, "ay to grab a copy into register a. + By default, the current brush uses register a (change brush + with :SetBrush [reg]). Hold the <shift> and <leftbutton> + keys down and move the mouse; as you move, a copy of the + brush will be left behind. + + +DRAWIT MODES *drawit-modes* {{{2 + + -[DrawIt] regular DrawIt mode (|drawit-start|) + -[DrawIt off] DrawIt is off (|drawit-stop| ) + -[DrawIt erase] DrawIt will erase using the number pad (|drawit-erase|) + + g:DrChipTopLvlMenu: by default its "DrChip"; you may set this to whatever + you like in your <.vimrc>. This variable controls where + DrawIt's menu items are placed. + + +============================================================================== +4. History *drawit-history* {{{1 + + 10 Jun 12, 2008 * Fixed a bug with ctrl-leftmouse (which was leaving + a space in the original selected text) + 9 Sep 14, 2007 * Johann-Guenter Simon fixed a bug with s:DrawErase(); + it called SetDrawIt() and that call hadn't been + updated to account for the new b:di_ellipse + parameter. + 8 Feb 12, 2007 * fixed a bug which prevented multi-character user + maps from being restored properly + May 03, 2007 * Extended SetDrawIt() to handle b:di_ellipse, the + ellipse boundary drawing character + * Changed "Holer" to "Canvas", and wrote AutoCanvas(), + which allow one to use the visual-block drawing + maps without creating a canvas first. + * DrawIt now uses the ctrl-leftmouse to move a visual + block selected region. 
+ * Floods can now be done inside an ellipse + * DrawIt's maps are now all users of <buffer> + 7 Feb 16, 2005 * now checks that "m" is in &go before attempting to + use menus + Aug 17, 2005 * report option workaround + Nov 01, 2005 * converted DrawIt to use autoload feature of vim 7.0 + Dec 28, 2005 * now uses cecutil to save/restore user maps + Jan 18, 2006 * cecutil now updated to use keepjumps + Jan 23, 2006 * :DIstart and :DIstop commands provided; thus users + using "set noremap" can still use DrawIt. + Jan 26, 2006 * DrawIt menu entry now keeps its place + Apr 10, 2006 * Brushes were implemented + 6 Feb 24, 2003 * The latest DrawIt now provides a fill function. + \f will ask for a character to fill the figure + surrounding the current cursor location. Plus + I suggest reading :he drawit-tip for those whose + home/pageup/pagedown/end keys aren't all working + properly with DrawIt. + 08/18/03 : \p[a-z] and \r[a-z] implemented + 08/04/03 : b:..keep variables renamed to b:di_..keep variables + StopDrawIt() now insures that erase mode is off + 03/11/03 : included g:drawit_insertmode handling + 02/21/03 : included flood function + 12/11/02 : deletes trailing whitespace only if holer used + 8/27/02 : fat arrowheads included + : shift-arrow keys move but don't modify + + --------------------------------------------------------------------- +vim:tw=78:ts=8:ft=help:fdm=marker diff --git a/.vim/doc/tags b/.vim/doc/tags index 07e7c9b..fac70cc 100644 --- a/.vim/doc/tags +++ b/.vim/doc/tags @@ -1,301 +1,329 @@ 'NERDChristmasTree' NERD_tree.txt /*'NERDChristmasTree'* 'NERDTreeAutoCenter' NERD_tree.txt /*'NERDTreeAutoCenter'* 'NERDTreeAutoCenterThreshold' NERD_tree.txt /*'NERDTreeAutoCenterThreshold'* 'NERDTreeBookmarksFile' NERD_tree.txt /*'NERDTreeBookmarksFile'* 'NERDTreeCaseSensitiveSort' NERD_tree.txt /*'NERDTreeCaseSensitiveSort'* 'NERDTreeChDirMode' NERD_tree.txt /*'NERDTreeChDirMode'* 'NERDTreeHighlightCursorline' NERD_tree.txt /*'NERDTreeHighlightCursorline'* 
'NERDTreeHijackNetrw' NERD_tree.txt /*'NERDTreeHijackNetrw'* 'NERDTreeIgnore' NERD_tree.txt /*'NERDTreeIgnore'* 'NERDTreeMouseMode' NERD_tree.txt /*'NERDTreeMouseMode'* 'NERDTreeQuitOnOpen' NERD_tree.txt /*'NERDTreeQuitOnOpen'* 'NERDTreeShowBookmarks' NERD_tree.txt /*'NERDTreeShowBookmarks'* 'NERDTreeShowFiles' NERD_tree.txt /*'NERDTreeShowFiles'* 'NERDTreeShowHidden' NERD_tree.txt /*'NERDTreeShowHidden'* 'NERDTreeShowLineNumbers' NERD_tree.txt /*'NERDTreeShowLineNumbers'* 'NERDTreeSortOrder' NERD_tree.txt /*'NERDTreeSortOrder'* 'NERDTreeStatusline' NERD_tree.txt /*'NERDTreeStatusline'* 'NERDTreeWinPos' NERD_tree.txt /*'NERDTreeWinPos'* 'NERDTreeWinSize' NERD_tree.txt /*'NERDTreeWinSize'* 'loaded_nerd_tree' NERD_tree.txt /*'loaded_nerd_tree'* 'snippets' snipMate.txt /*'snippets'* .snippet snipMate.txt /*.snippet* .snippets snipMate.txt /*.snippets* :ABitLy twitvim.txt /*:ABitLy* :ACligs twitvim.txt /*:ACligs* :AIsGd twitvim.txt /*:AIsGd* :AMetamark twitvim.txt /*:AMetamark* :ASnipurl twitvim.txt /*:ASnipurl* :ATinyURL twitvim.txt /*:ATinyURL* :ATrim twitvim.txt /*:ATrim* :ATweetburner twitvim.txt /*:ATweetburner* :AUrlBorg twitvim.txt /*:AUrlBorg* :AZima twitvim.txt /*:AZima* :BPosttoTwitter twitvim.txt /*:BPosttoTwitter* :BackTwitter twitvim.txt /*:BackTwitter* :BitLy twitvim.txt /*:BitLy* :CPosttoTwitter twitvim.txt /*:CPosttoTwitter* :Cligs twitvim.txt /*:Cligs* :DMSentTwitter twitvim.txt /*:DMSentTwitter* :DMTwitter twitvim.txt /*:DMTwitter* :ForwardTwitter twitvim.txt /*:ForwardTwitter* :FriendsTwitter twitvim.txt /*:FriendsTwitter* :IsGd twitvim.txt /*:IsGd* :ListTwitter twitvim.txt /*:ListTwitter* :LocationTwitter twitvim.txt /*:LocationTwitter* :Metamark twitvim.txt /*:Metamark* :NERDTree NERD_tree.txt /*:NERDTree* :NERDTreeClose NERD_tree.txt /*:NERDTreeClose* :NERDTreeFind NERD_tree.txt /*:NERDTreeFind* :NERDTreeFromBookmark NERD_tree.txt /*:NERDTreeFromBookmark* :NERDTreeMirror NERD_tree.txt /*:NERDTreeMirror* :NERDTreeToggle NERD_tree.txt 
/*:NERDTreeToggle* :NextTwitter twitvim.txt /*:NextTwitter* :PBitLy twitvim.txt /*:PBitLy* :PCligs twitvim.txt /*:PCligs* :PIsGd twitvim.txt /*:PIsGd* :PMetamark twitvim.txt /*:PMetamark* :PSnipurl twitvim.txt /*:PSnipurl* :PTinyURL twitvim.txt /*:PTinyURL* :PTrim twitvim.txt /*:PTrim* :PTweetburner twitvim.txt /*:PTweetburner* :PUrlBorg twitvim.txt /*:PUrlBorg* :PZima twitvim.txt /*:PZima* :PosttoTwitter twitvim.txt /*:PosttoTwitter* :PreviousTwitter twitvim.txt /*:PreviousTwitter* :ProfileTwitter twitvim.txt /*:ProfileTwitter* :PublicTwitter twitvim.txt /*:PublicTwitter* :RateLimitTwitter twitvim.txt /*:RateLimitTwitter* :RefreshTwitter twitvim.txt /*:RefreshTwitter* :RepliesTwitter twitvim.txt /*:RepliesTwitter* :ResetLoginTwitter twitvim.txt /*:ResetLoginTwitter* :RetweetedByMeTwitter twitvim.txt /*:RetweetedByMeTwitter* :RetweetedToMeTwitter twitvim.txt /*:RetweetedToMeTwitter* :SearchTwitter twitvim.txt /*:SearchTwitter* :SendDMTwitter twitvim.txt /*:SendDMTwitter* :SetLoginTwitter twitvim.txt /*:SetLoginTwitter* :Snipurl twitvim.txt /*:Snipurl* :TComment tComment.txt /*:TComment* :TCommentAs tComment.txt /*:TCommentAs* :TCommentBlock tComment.txt /*:TCommentBlock* :TCommentInline tComment.txt /*:TCommentInline* :TCommentRight tComment.txt /*:TCommentRight* :TinyURL twitvim.txt /*:TinyURL* :Trim twitvim.txt /*:Trim* :Tweetburner twitvim.txt /*:Tweetburner* :UrlBorg twitvim.txt /*:UrlBorg* :UserTwitter twitvim.txt /*:UserTwitter* :Zima twitvim.txt /*:Zima* ExtractSnips() snipMate.txt /*ExtractSnips()* ExtractSnipsFile() snipMate.txt /*ExtractSnipsFile()* Filename() snipMate.txt /*Filename()* NERDTree NERD_tree.txt /*NERDTree* NERDTree-? 
NERD_tree.txt /*NERDTree-?* NERDTree-A NERD_tree.txt /*NERDTree-A* NERDTree-B NERD_tree.txt /*NERDTree-B* NERDTree-C NERD_tree.txt /*NERDTree-C* NERDTree-C-J NERD_tree.txt /*NERDTree-C-J* NERDTree-C-K NERD_tree.txt /*NERDTree-C-K* NERDTree-D NERD_tree.txt /*NERDTree-D* NERDTree-F NERD_tree.txt /*NERDTree-F* NERDTree-I NERD_tree.txt /*NERDTree-I* NERDTree-J NERD_tree.txt /*NERDTree-J* NERDTree-K NERD_tree.txt /*NERDTree-K* NERDTree-O NERD_tree.txt /*NERDTree-O* NERDTree-P NERD_tree.txt /*NERDTree-P* NERDTree-R NERD_tree.txt /*NERDTree-R* NERDTree-T NERD_tree.txt /*NERDTree-T* NERDTree-U NERD_tree.txt /*NERDTree-U* NERDTree-X NERD_tree.txt /*NERDTree-X* NERDTree-cd NERD_tree.txt /*NERDTree-cd* NERDTree-contents NERD_tree.txt /*NERDTree-contents* NERDTree-e NERD_tree.txt /*NERDTree-e* NERDTree-f NERD_tree.txt /*NERDTree-f* NERDTree-gi NERD_tree.txt /*NERDTree-gi* NERDTree-go NERD_tree.txt /*NERDTree-go* NERDTree-gs NERD_tree.txt /*NERDTree-gs* NERDTree-i NERD_tree.txt /*NERDTree-i* NERDTree-m NERD_tree.txt /*NERDTree-m* NERDTree-o NERD_tree.txt /*NERDTree-o* NERDTree-p NERD_tree.txt /*NERDTree-p* NERDTree-q NERD_tree.txt /*NERDTree-q* NERDTree-r NERD_tree.txt /*NERDTree-r* NERDTree-s NERD_tree.txt /*NERDTree-s* NERDTree-t NERD_tree.txt /*NERDTree-t* NERDTree-u NERD_tree.txt /*NERDTree-u* NERDTree-x NERD_tree.txt /*NERDTree-x* NERDTreeAPI NERD_tree.txt /*NERDTreeAPI* NERDTreeAbout NERD_tree.txt /*NERDTreeAbout* NERDTreeAddKeyMap() NERD_tree.txt /*NERDTreeAddKeyMap()* NERDTreeAddMenuItem() NERD_tree.txt /*NERDTreeAddMenuItem()* NERDTreeAddMenuSeparator() NERD_tree.txt /*NERDTreeAddMenuSeparator()* NERDTreeAddSubmenu() NERD_tree.txt /*NERDTreeAddSubmenu()* NERDTreeBookmarkCommands NERD_tree.txt /*NERDTreeBookmarkCommands* NERDTreeBookmarkTable NERD_tree.txt /*NERDTreeBookmarkTable* NERDTreeBookmarks NERD_tree.txt /*NERDTreeBookmarks* NERDTreeChangelog NERD_tree.txt /*NERDTreeChangelog* NERDTreeCredits NERD_tree.txt /*NERDTreeCredits* NERDTreeFunctionality NERD_tree.txt 
/*NERDTreeFunctionality* NERDTreeGlobalCommands NERD_tree.txt /*NERDTreeGlobalCommands* NERDTreeInvalidBookmarks NERD_tree.txt /*NERDTreeInvalidBookmarks* NERDTreeKeymapAPI NERD_tree.txt /*NERDTreeKeymapAPI* NERDTreeLicense NERD_tree.txt /*NERDTreeLicense* NERDTreeMappings NERD_tree.txt /*NERDTreeMappings* NERDTreeMenu NERD_tree.txt /*NERDTreeMenu* NERDTreeMenuAPI NERD_tree.txt /*NERDTreeMenuAPI* NERDTreeOptionDetails NERD_tree.txt /*NERDTreeOptionDetails* NERDTreeOptionSummary NERD_tree.txt /*NERDTreeOptionSummary* NERDTreeOptions NERD_tree.txt /*NERDTreeOptions* NERDTreeRender() NERD_tree.txt /*NERDTreeRender()* NERD_tree.txt NERD_tree.txt /*NERD_tree.txt* ResetSnippets() snipMate.txt /*ResetSnippets()* TCommentDefineType() tComment.txt /*TCommentDefineType()* TwitVim twitvim.txt /*TwitVim* TwitVim-A-d twitvim.txt /*TwitVim-A-d* TwitVim-A-g twitvim.txt /*TwitVim-A-g* TwitVim-A-r twitvim.txt /*TwitVim-A-r* TwitVim-A-t twitvim.txt /*TwitVim-A-t* TwitVim-C-PageDown twitvim.txt /*TwitVim-C-PageDown* TwitVim-C-PageUp twitvim.txt /*TwitVim-C-PageUp* TwitVim-C-i twitvim.txt /*TwitVim-C-i* TwitVim-C-o twitvim.txt /*TwitVim-C-o* TwitVim-C-t twitvim.txt /*TwitVim-C-t* TwitVim-Leader-@ twitvim.txt /*TwitVim-Leader-@* TwitVim-Leader-C-r twitvim.txt /*TwitVim-Leader-C-r* TwitVim-Leader-Leader twitvim.txt /*TwitVim-Leader-Leader* TwitVim-Leader-S-r twitvim.txt /*TwitVim-Leader-S-r* TwitVim-Leader-X twitvim.txt /*TwitVim-Leader-X* TwitVim-Leader-d twitvim.txt /*TwitVim-Leader-d* TwitVim-Leader-e twitvim.txt /*TwitVim-Leader-e* TwitVim-Leader-g twitvim.txt /*TwitVim-Leader-g* TwitVim-Leader-p twitvim.txt /*TwitVim-Leader-p* TwitVim-Leader-r twitvim.txt /*TwitVim-Leader-r* TwitVim-LongURL twitvim.txt /*TwitVim-LongURL* TwitVim-add twitvim.txt /*TwitVim-add* TwitVim-cURL twitvim.txt /*TwitVim-cURL* TwitVim-contents twitvim.txt /*TwitVim-contents* TwitVim-credits twitvim.txt /*TwitVim-credits* TwitVim-delete twitvim.txt /*TwitVim-delete* TwitVim-direct-message twitvim.txt 
/*TwitVim-direct-message* TwitVim-goto twitvim.txt /*TwitVim-goto* TwitVim-highlight twitvim.txt /*TwitVim-highlight* TwitVim-history twitvim.txt /*TwitVim-history* TwitVim-hotkeys twitvim.txt /*TwitVim-hotkeys* TwitVim-inreplyto twitvim.txt /*TwitVim-inreplyto* TwitVim-install twitvim.txt /*TwitVim-install* TwitVim-intro twitvim.txt /*TwitVim-intro* TwitVim-line-length twitvim.txt /*TwitVim-line-length* TwitVim-login-base64 twitvim.txt /*TwitVim-login-base64* TwitVim-manual twitvim.txt /*TwitVim-manual* TwitVim-mappings twitvim.txt /*TwitVim-mappings* TwitVim-next twitvim.txt /*TwitVim-next* TwitVim-non-cURL twitvim.txt /*TwitVim-non-cURL* TwitVim-previous twitvim.txt /*TwitVim-previous* TwitVim-profile twitvim.txt /*TwitVim-profile* TwitVim-refresh twitvim.txt /*TwitVim-refresh* TwitVim-reply twitvim.txt /*TwitVim-reply* TwitVim-reply-all twitvim.txt /*TwitVim-reply-all* TwitVim-retweet twitvim.txt /*TwitVim-retweet* TwitVim-ssl twitvim.txt /*TwitVim-ssl* TwitVim-ssl-curl twitvim.txt /*TwitVim-ssl-curl* TwitVim-ssl-perl twitvim.txt /*TwitVim-ssl-perl* TwitVim-ssl-python twitvim.txt /*TwitVim-ssl-python* TwitVim-ssl-ruby twitvim.txt /*TwitVim-ssl-ruby* TwitVim-switch twitvim.txt /*TwitVim-switch* TwitVim-timeline-commands twitvim.txt /*TwitVim-timeline-commands* TwitVim-tips twitvim.txt /*TwitVim-tips* TwitVim-update-commands twitvim.txt /*TwitVim-update-commands* TwitVim-utility twitvim.txt /*TwitVim-utility* +drawit DrawIt.txt /*drawit* +drawit-a DrawIt.txt /*drawit-a* +drawit-b DrawIt.txt /*drawit-b* +drawit-brush DrawIt.txt /*drawit-brush* +drawit-c DrawIt.txt /*drawit-c* +drawit-contents DrawIt.txt /*drawit-contents* +drawit-drawing DrawIt.txt /*drawit-drawing* +drawit-e DrawIt.txt /*drawit-e* +drawit-erase DrawIt.txt /*drawit-erase* +drawit-example DrawIt.txt /*drawit-example* +drawit-f DrawIt.txt /*drawit-f* +drawit-history DrawIt.txt /*drawit-history* +drawit-l DrawIt.txt /*drawit-l* +drawit-manual DrawIt.txt /*drawit-manual* +drawit-modes DrawIt.txt 
/*drawit-modes* +drawit-move DrawIt.txt /*drawit-move* +drawit-moving DrawIt.txt /*drawit-moving* +drawit-options DrawIt.txt /*drawit-options* +drawit-protect DrawIt.txt /*drawit-protect* +drawit-s DrawIt.txt /*drawit-s* +drawit-setbrush DrawIt.txt /*drawit-setbrush* +drawit-setdrawit DrawIt.txt /*drawit-setdrawit* +drawit-start DrawIt.txt /*drawit-start* +drawit-stop DrawIt.txt /*drawit-stop* +drawit-usage DrawIt.txt /*drawit-usage* +drawit-visblock DrawIt.txt /*drawit-visblock* +drawit.txt DrawIt.txt /*drawit.txt* g:SuperTabCompletionContexts supertab.txt /*g:SuperTabCompletionContexts* g:SuperTabContextDefaultCompletionType supertab.txt /*g:SuperTabContextDefaultCompletionType* g:SuperTabDefaultCompletionType supertab.txt /*g:SuperTabDefaultCompletionType* g:SuperTabLongestHighlight supertab.txt /*g:SuperTabLongestHighlight* g:SuperTabMappingBackward supertab.txt /*g:SuperTabMappingBackward* g:SuperTabMappingForward supertab.txt /*g:SuperTabMappingForward* g:SuperTabMappingTabLiteral supertab.txt /*g:SuperTabMappingTabLiteral* g:SuperTabMidWordCompletion supertab.txt /*g:SuperTabMidWordCompletion* g:SuperTabRetainCompletionDuration supertab.txt /*g:SuperTabRetainCompletionDuration* +g:drawit_insertmode DrawIt.txt /*g:drawit_insertmode* g:snippets_dir snipMate.txt /*g:snippets_dir* g:snips_author snipMate.txt /*g:snips_author* g:tcommentMapLeader1 tComment.txt /*g:tcommentMapLeader1* g:tcommentMapLeader2 tComment.txt /*g:tcommentMapLeader2* g:tcommentMapLeaderOp1 tComment.txt /*g:tcommentMapLeaderOp1* g:tcommentMapLeaderOp2 tComment.txt /*g:tcommentMapLeaderOp2* g:tcommentOpModeExtra tComment.txt /*g:tcommentOpModeExtra* hl-twitterLink twitvim.txt /*hl-twitterLink* hl-twitterReply twitvim.txt /*hl-twitterReply* hl-twitterTime twitvim.txt /*hl-twitterTime* hl-twitterTitle twitvim.txt /*hl-twitterTitle* hl-twitterUser twitvim.txt /*hl-twitterUser* i_CTRL-R_<Tab> snipMate.txt /*i_CTRL-R_<Tab>* list-snippets snipMate.txt /*list-snippets* multi_snip snipMate.txt 
/*multi_snip* snipMate snipMate.txt /*snipMate* snipMate-$# snipMate.txt /*snipMate-$#* snipMate-${#:} snipMate.txt /*snipMate-${#:}* snipMate-${#} snipMate.txt /*snipMate-${#}* snipMate-author snipMate.txt /*snipMate-author* snipMate-commands snipMate.txt /*snipMate-commands* snipMate-contact snipMate.txt /*snipMate-contact* snipMate-description snipMate.txt /*snipMate-description* snipMate-disadvantages snipMate.txt /*snipMate-disadvantages* snipMate-expandtab snipMate.txt /*snipMate-expandtab* snipMate-features snipMate.txt /*snipMate-features* snipMate-filename snipMate.txt /*snipMate-filename* snipMate-indenting snipMate.txt /*snipMate-indenting* snipMate-placeholders snipMate.txt /*snipMate-placeholders* snipMate-remap snipMate.txt /*snipMate-remap* snipMate-settings snipMate.txt /*snipMate-settings* snipMate-usage snipMate.txt /*snipMate-usage* snipMate.txt snipMate.txt /*snipMate.txt* snippet snipMate.txt /*snippet* snippet-syntax snipMate.txt /*snippet-syntax* snippets snipMate.txt /*snippets* supertab supertab.txt /*supertab* supertab-completioncontexts supertab.txt /*supertab-completioncontexts* supertab-contextdefault supertab.txt /*supertab-contextdefault* supertab-contextdiscover supertab.txt /*supertab-contextdiscover* supertab-contextexample supertab.txt /*supertab-contextexample* supertab-contexttext supertab.txt /*supertab-contexttext* supertab-defaultcompletion supertab.txt /*supertab-defaultcompletion* supertab-duration supertab.txt /*supertab-duration* supertab-forwardbackward supertab.txt /*supertab-forwardbackward* supertab-intro supertab.txt /*supertab-intro* supertab-longesthighlight supertab.txt /*supertab-longesthighlight* supertab-mappingtabliteral supertab.txt /*supertab-mappingtabliteral* supertab-midword supertab.txt /*supertab-midword* supertab-options supertab.txt /*supertab-options* supertab-usage supertab.txt /*supertab-usage* supertab.txt supertab.txt /*supertab.txt* tComment-Installation tComment.txt /*tComment-Installation* 
tComment-Key-Bindings tComment.txt /*tComment-Key-Bindings* tComment-Usage tComment.txt /*tComment-Usage* tComment-commands tComment.txt /*tComment-commands* tComment.txt tComment.txt /*tComment.txt* twitvim-identi.ca twitvim.txt /*twitvim-identi.ca* twitvim.txt twitvim.txt /*twitvim.txt* twitvim_api_root twitvim.txt /*twitvim_api_root* twitvim_bitly_key twitvim.txt /*twitvim_bitly_key* twitvim_bitly_user twitvim.txt /*twitvim_bitly_user* twitvim_browser_cmd twitvim.txt /*twitvim_browser_cmd* twitvim_cert_insecure twitvim.txt /*twitvim_cert_insecure* twitvim_cligs_key twitvim.txt /*twitvim_cligs_key* twitvim_count twitvim.txt /*twitvim_count* twitvim_enable_perl twitvim.txt /*twitvim_enable_perl* twitvim_enable_python twitvim.txt /*twitvim_enable_python* twitvim_enable_ruby twitvim.txt /*twitvim_enable_ruby* twitvim_enable_tcl twitvim.txt /*twitvim_enable_tcl* twitvim_login twitvim.txt /*twitvim_login* twitvim_login_b64 twitvim.txt /*twitvim_login_b64* twitvim_old_retweet twitvim.txt /*twitvim_old_retweet* twitvim_proxy twitvim.txt /*twitvim_proxy* twitvim_proxy_login twitvim.txt /*twitvim_proxy_login* twitvim_proxy_login_b64 twitvim.txt /*twitvim_proxy_login_b64* twitvim_retweet_format twitvim.txt /*twitvim_retweet_format* twitvim_trim_login twitvim.txt /*twitvim_trim_login* twitvim_urlborg_key twitvim.txt /*twitvim_urlborg_key* xml-plugin-callbacks xml-plugin.txt /*xml-plugin-callbacks* xml-plugin-html xml-plugin.txt /*xml-plugin-html* xml-plugin-mappings xml-plugin.txt /*xml-plugin-mappings* xml-plugin-settings xml-plugin.txt /*xml-plugin-settings* xml-plugin.txt xml-plugin.txt /*xml-plugin.txt* diff --git a/.vim/plugin/DrawItPlugin.vim b/.vim/plugin/DrawItPlugin.vim new file mode 100644 index 0000000..1281e81 --- /dev/null +++ b/.vim/plugin/DrawItPlugin.vim @@ -0,0 +1,65 @@ +" DrawItPlugin.vim: a simple way to draw things in Vim -- just put this file in +" your plugin directory, use \di to start (\ds to stop), and +" just move about using the cursor keys. 
+" +" You may also use visual-block mode to select endpoints and +" draw lines, arrows, and ellipses. +" +" Date: May 20, 2008 +" Maintainer: Charles E. Campbell, Jr. <[email protected]> +" Copyright: Copyright (C) 1999-2005 Charles E. Campbell, Jr. {{{1 +" Permission is hereby granted to use and distribute this code, +" with or without modifications, provided that this copyright +" notice is copied with it. Like anything else that's free, +" DrawIt.vim is provided *as is* and comes with no warranty +" of any kind, either expressed or implied. By using this +" plugin, you agree that in no event will the copyright +" holder be liable for any damages resulting from the use +" of this software. +" +" Required: this script requires Vim 7.0 (or later) {{{1 +" To Enable: simply put this plugin into your ~/.vim/plugin directory {{{2 +" +" GetLatestVimScripts: 40 1 :AutoInstall: DrawIt.vim +" +" (Zeph 3:1,2 WEB) Woe to her who is rebellious and polluted, the {{{1 +" oppressing city! She didn't obey the voice. She didn't receive +" correction. She didn't trust in Yahweh. She didn't draw near to her God. + +" --------------------------------------------------------------------- +" Load Once: {{{1 +if &cp || exists("g:loaded_DrawItPlugin") + finish +endif +let g:loaded_DrawItPlugin = "v10" +let s:keepcpo = &cpo +set cpo&vim + +" --------------------------------------------------------------------- +" DrChip Menu Support: {{{1 +if has("gui_running") && has("menu") && &go =~ 'm' + if !exists("g:DrChipTopLvlMenu") + let g:DrChipTopLvlMenu= "DrChip." + endif + exe 'menu '.g:DrChipTopLvlMenu.'DrawIt.Start\ DrawIt<tab>\\di <Leader>di' +endif + +" --------------------------------------------------------------------- +" Public Interface: {{{1 +if !hasmapto('<Plug>StartDrawIt') + map <unique> <Leader>di <Plug>StartDrawIt +endif +map <silent> <Plug>StartDrawIt :set lz<cr>:call DrawIt#StartDrawIt()<cr>:set nolz<cr> +com! 
-nargs=0 DIstart set lz|call DrawIt#StartDrawIt()|set nolz + +if !hasmapto('<Plug>StopDrawIt') + map <unique> <Leader>ds <Plug>StopDrawIt +endif +map <silent> <Plug>StopDrawIt :set lz<cr>:call DrawIt#StopDrawIt()<cr>:set nolz<cr> +com! -nargs=0 DIstop set lz|call DrawIt#StopDrawIt()|set nolz + +" --------------------------------------------------------------------- +" Cleanup And Modelines: +" vim: fdm=marker +let &cpo= s:keepcpo +unlet s:keepcpo diff --git a/.vim/plugin/cecutil.vim b/.vim/plugin/cecutil.vim new file mode 100644 index 0000000..506b7bf --- /dev/null +++ b/.vim/plugin/cecutil.vim @@ -0,0 +1,482 @@ +" cecutil.vim : save/restore window position +" save/restore mark position +" save/restore selected user maps +" Author: Charles E. Campbell, Jr. +" Version: 17 +" Date: Sep 04, 2007 +" +" Saving Restoring Destroying Marks: {{{1 +" call SaveMark(markname) let savemark= SaveMark(markname) +" call RestoreMark(markname) call RestoreMark(savemark) +" call DestroyMark(markname) +" commands: SM RM DM +" +" Saving Restoring Destroying Window Position: {{{1 +" call SaveWinPosn() let winposn= SaveWinPosn() +" call RestoreWinPosn() call RestoreWinPosn(winposn) +" \swp : save current window/buffer's position +" \rwp : restore current window/buffer's previous position +" commands: SWP RWP +" +" Saving And Restoring User Maps: {{{1 +" call SaveUserMaps(mapmode,maplead,mapchx,suffix) +" call RestoreUserMaps(suffix) +" +" GetLatestVimScripts: 1066 1 :AutoInstall: cecutil.vim +" +" You believe that God is one. You do well. The demons also {{{1 +" believe, and shudder. But do you want to know, vain man, that +" faith apart from works is dead? 
(James 2:19,20 WEB) + +" Load Once: {{{1 +if &cp || exists("g:loaded_cecutil") + finish +endif +let g:loaded_cecutil = "v17" +let s:keepcpo = &cpo +set cpo&vim +"DechoVarOn + +" ----------------------- +" Public Interface: {{{1 +" ----------------------- + +" Map Interface: {{{2 +if !hasmapto('<Plug>SaveWinPosn') + map <unique> <Leader>swp <Plug>SaveWinPosn +endif +if !hasmapto('<Plug>RestoreWinPosn') + map <unique> <Leader>rwp <Plug>RestoreWinPosn +endif +nmap <silent> <Plug>SaveWinPosn :call SaveWinPosn()<CR> +nmap <silent> <Plug>RestoreWinPosn :call RestoreWinPosn()<CR> + +" Command Interface: {{{2 +com! -bar -nargs=0 SWP call SaveWinPosn() +com! -bar -nargs=0 RWP call RestoreWinPosn() +com! -bar -nargs=1 SM call SaveMark(<q-args>) +com! -bar -nargs=1 RM call RestoreMark(<q-args>) +com! -bar -nargs=1 DM call DestroyMark(<q-args>) + +if v:version < 630 + let s:modifier= "sil " +else + let s:modifier= "sil keepj " +endif + +" --------------------------------------------------------------------- +" SaveWinPosn: {{{1 +" let winposn= SaveWinPosn() will save window position in winposn variable +" call SaveWinPosn() will save window position in b:cecutil_winposn{b:cecutil_iwinposn} +" let winposn= SaveWinPosn(0) will *only* save window position in winposn variable (no stacking done) +fun! SaveWinPosn(...) +" call Dfunc("SaveWinPosn() a:0=".a:0) + if line(".") == 1 && getline(1) == "" +" call Dfunc("SaveWinPosn : empty buffer") + return "" + endif + let so_keep = &so + let siso_keep = &siso + let ss_keep = &ss + set so=0 siso=0 ss=0 + + let swline = line(".") + let swcol = col(".") + let swwline = winline() - 1 + let swwcol = virtcol(".") - wincol() + let savedposn = "call GoWinbufnr(".winbufnr(0).")|silent ".swline + let savedposn = savedposn."|".s:modifier."norm! 0z\<cr>" + if swwline > 0 + let savedposn= savedposn.":".s:modifier."norm! ".swwline."\<c-y>\<cr>" + endif + if swwcol > 0 + let savedposn= savedposn.":".s:modifier."norm! 
0".swwcol."zl\<cr>" + endif + let savedposn = savedposn.":".s:modifier."call cursor(".swline.",".swcol.")\<cr>" + + " save window position in + " b:cecutil_winposn_{iwinposn} (stack) + " only when SaveWinPosn() is used + if a:0 == 0 + if !exists("b:cecutil_iwinposn") + let b:cecutil_iwinposn= 1 + else + let b:cecutil_iwinposn= b:cecutil_iwinposn + 1 + endif +" call Decho("saving posn to SWP stack") + let b:cecutil_winposn{b:cecutil_iwinposn}= savedposn + endif + + let &so = so_keep + let &siso = siso_keep + let &ss = ss_keep + +" if exists("b:cecutil_iwinposn") " Decho +" call Decho("b:cecutil_winpos{".b:cecutil_iwinposn."}[".b:cecutil_winposn{b:cecutil_iwinposn}."]") +" else " Decho +" call Decho("b:cecutil_iwinposn doesn't exist") +" endif " Decho +" call Dret("SaveWinPosn [".savedposn."]") + return savedposn +endfun + +" --------------------------------------------------------------------- +" RestoreWinPosn: {{{1 +fun! RestoreWinPosn(...) +" call Dfunc("RestoreWinPosn() a:0=".a:0) +" call Decho("getline(1)<".getline(1).">") +" call Decho("line(.)=".line(".")) + if line(".") == 1 && getline(1) == "" +" call Dfunc("RestoreWinPosn : empty buffer") + return "" + endif + let so_keep = &so + let siso_keep = &siso + let ss_keep = &ss + set so=0 siso=0 ss=0 + + if a:0 == 0 || a:1 == "" + " use saved window position in b:cecutil_winposn{b:cecutil_iwinposn} if it exists + if exists("b:cecutil_iwinposn") && exists("b:cecutil_winposn{b:cecutil_iwinposn}") +" call Decho("using stack b:cecutil_winposn{".b:cecutil_iwinposn."}<".b:cecutil_winposn{b:cecutil_iwinposn}.">") + try + exe "silent! 
".b:cecutil_winposn{b:cecutil_iwinposn} + catch /^Vim\%((\a\+)\)\=:E749/ + " ignore empty buffer error messages + endtry + " normally drop top-of-stack by one + " but while new top-of-stack doesn't exist + " drop top-of-stack index by one again + if b:cecutil_iwinposn >= 1 + unlet b:cecutil_winposn{b:cecutil_iwinposn} + let b:cecutil_iwinposn= b:cecutil_iwinposn - 1 + while b:cecutil_iwinposn >= 1 && !exists("b:cecutil_winposn{b:cecutil_iwinposn}") + let b:cecutil_iwinposn= b:cecutil_iwinposn - 1 + endwhile + if b:cecutil_iwinposn < 1 + unlet b:cecutil_iwinposn + endif + endif + else + echohl WarningMsg + echomsg "***warning*** need to SaveWinPosn first!" + echohl None + endif + + else " handle input argument +" call Decho("using input a:1<".a:1.">") + " use window position passed to this function + exe "silent ".a:1 + " remove a:1 pattern from b:cecutil_winposn{b:cecutil_iwinposn} stack + if exists("b:cecutil_iwinposn") + let jwinposn= b:cecutil_iwinposn + while jwinposn >= 1 " search for a:1 in iwinposn..1 + if exists("b:cecutil_winposn{jwinposn}") " if it exists + if a:1 == b:cecutil_winposn{jwinposn} " and the pattern matches + unlet b:cecutil_winposn{jwinposn} " unlet it + if jwinposn == b:cecutil_iwinposn " if at top-of-stack + let b:cecutil_iwinposn= b:cecutil_iwinposn - 1 " drop stacktop by one + endif + endif + endif + let jwinposn= jwinposn - 1 + endwhile + endif + endif + + " seems to be something odd: vertical motions after RWP + " cause jump to first column. Following fixes that + if wincol() > 1 + silent norm! hl + elseif virtcol(".") < virtcol("$") + silent norm! lh + endif + + let &so = so_keep + let &siso = siso_keep + let &ss = ss_keep + +" call Dret("RestoreWinPosn") +endfun + +" --------------------------------------------------------------------- +" GoWinbufnr: go to window holding given buffer (by number) {{{1 +" Prefers current window; if its buffer number doesn't match, +" then will try from topleft to bottom right +fun! 
GoWinbufnr(bufnum) +" call Dfunc("GoWinbufnr(".a:bufnum.")") + if winbufnr(0) == a:bufnum +" call Dret("GoWinbufnr : winbufnr(0)==a:bufnum") + return + endif + winc t + let first=1 + while winbufnr(0) != a:bufnum && (first || winnr() != 1) + winc w + let first= 0 + endwhile +" call Dret("GoWinbufnr") +endfun + +" --------------------------------------------------------------------- +" SaveMark: sets up a string saving a mark position. {{{1 +" For example, SaveMark("a") +" Also sets up a global variable, g:savemark_{markname} +fun! SaveMark(markname) +" call Dfunc("SaveMark(markname<".a:markname.">)") + let markname= a:markname + if strpart(markname,0,1) !~ '\a' + let markname= strpart(markname,1,1) + endif +" call Decho("markname=".markname) + + let lzkeep = &lz + set lz + + if 1 <= line("'".markname) && line("'".markname) <= line("$") + let winposn = SaveWinPosn(0) + exe s:modifier."norm! `".markname + let savemark = SaveWinPosn(0) + let g:savemark_{markname} = savemark + let savemark = markname.savemark + call RestoreWinPosn(winposn) + else + let g:savemark_{markname} = "" + let savemark = "" + endif + + let &lz= lzkeep + +" call Dret("SaveMark : savemark<".savemark.">") + return savemark +endfun + +" --------------------------------------------------------------------- +" RestoreMark: {{{1 +" call RestoreMark("a") -or- call RestoreMark(savemark) +fun! RestoreMark(markname) +" call Dfunc("RestoreMark(markname<".a:markname.">)") + + if strlen(a:markname) <= 0 +" call Dret("RestoreMark : no such mark") + return + endif + let markname= strpart(a:markname,0,1) + if markname !~ '\a' + " handles 'a -> a styles + let markname= strpart(a:markname,1,1) + endif +" call Decho("markname=".markname." 
strlen(a:markname)=".strlen(a:markname)) + + let lzkeep = &lz + set lz + let winposn = SaveWinPosn(0) + + if strlen(a:markname) <= 2 + if exists("g:savemark_{markname}") && strlen(g:savemark_{markname}) != 0 + " use global variable g:savemark_{markname} +" call Decho("use savemark list") + call RestoreWinPosn(g:savemark_{markname}) + exe "norm! m".markname + endif + else + " markname is a savemark command (string) +" call Decho("use savemark command") + let markcmd= strpart(a:markname,1) + call RestoreWinPosn(markcmd) + exe "norm! m".markname + endif + + call RestoreWinPosn(winposn) + let &lz = lzkeep + +" call Dret("RestoreMark") +endfun + +" --------------------------------------------------------------------- +" DestroyMark: {{{1 +" call DestroyMark("a") -- destroys mark +fun! DestroyMark(markname) +" call Dfunc("DestroyMark(markname<".a:markname.">)") + + " save options and set to standard values + let reportkeep= &report + let lzkeep = &lz + set lz report=10000 + + let markname= strpart(a:markname,0,1) + if markname !~ '\a' + " handles 'a -> a styles + let markname= strpart(a:markname,1,1) + endif +" call Decho("markname=".markname) + + let curmod = &mod + let winposn = SaveWinPosn(0) + 1 + let lineone = getline(".") + exe "k".markname + d + put! =lineone + let &mod = curmod + call RestoreWinPosn(winposn) + + " restore options to user settings + let &report = reportkeep + let &lz = lzkeep + +" call Dret("DestroyMark") +endfun + +" --------------------------------------------------------------------- +" QArgSplitter: to avoid \ processing by <f-args>, <q-args> is needed. {{{1 +" However, <q-args> doesn't split at all, so this one returns a list +" with splits at all whitespace (only!), plus a leading length-of-list. +" The resulting list: qarglist[0] corresponds to a:0 +" qarglist[i] corresponds to a:{i} +fun! 
QArgSplitter(qarg) +" call Dfunc("QArgSplitter(qarg<".a:qarg.">)") + let qarglist = split(a:qarg) + let qarglistlen = len(qarglist) + let qarglist = insert(qarglist,qarglistlen) +" call Dret("QArgSplitter ".string(qarglist)) + return qarglist +endfun + +" --------------------------------------------------------------------- +" ListWinPosn: +"fun! ListWinPosn() " Decho +" if !exists("b:cecutil_iwinposn") || b:cecutil_iwinposn == 0 " Decho +" call Decho("nothing on SWP stack") " Decho +" else " Decho +" let jwinposn= b:cecutil_iwinposn " Decho +" while jwinposn >= 1 " Decho +" if exists("b:cecutil_winposn{jwinposn}") " Decho +" call Decho("winposn{".jwinposn."}<".b:cecutil_winposn{jwinposn}.">") " Decho +" else " Decho +" call Decho("winposn{".jwinposn."} -- doesn't exist") " Decho +" endif " Decho +" let jwinposn= jwinposn - 1 " Decho +" endwhile " Decho +" endif " Decho +"endfun " Decho +"com! -nargs=0 LWP call ListWinPosn() " Decho + +" --------------------------------------------------------------------- +" SaveUserMaps: this function sets up a script-variable (s:restoremap) {{{1 +" which can be used to restore user maps later with +" call RestoreUserMaps() +" +" mapmode - see :help maparg for its list +" ex. "n" = Normal +" If the first letter is u, then unmapping will be done +" ex. "un" = Normal + unmapping +" maplead - see mapchx +" mapchx - "<something>" handled as a single map item. +" ex. "<left>" +" - "string" a string of single letters which are actually +" multiple two-letter maps (using the maplead: +" maplead . each_character_in_string) +" ex. maplead="\" and mapchx="abc" saves user mappings for +" \a, \b, and \c +" Of course, if maplead is "", then for mapchx="abc", +" mappings for a, b, and c are saved. +" - :something handled as a single map item, w/o the ":" +" ex. mapchx= ":abc" will save a mapping for "abc" +" suffix - a string unique to your plugin +" ex. suffix= "DrawIt" +fun! 
SaveUserMaps(mapmode,maplead,mapchx,suffix) +" call Dfunc("SaveUserMaps(mapmode<".a:mapmode."> maplead<".a:maplead."> mapchx<".a:mapchx."> suffix<".a:suffix.">)") + + if !exists("s:restoremap_{a:suffix}") + " initialize restoremap_suffix to null string + let s:restoremap_{a:suffix}= "" + endif + + " set up dounmap: if 1, then save and unmap (a:mapmode leads with a "u") + " if 0, save only + if a:mapmode =~ '^u' + let dounmap= 1 + let mapmode= strpart(a:mapmode,1) + else + let dounmap= 0 + let mapmode= a:mapmode + endif + + " save single map :...something... + if strpart(a:mapchx,0,1) == ':' + let amap= strpart(a:mapchx,1) + if amap == "|" || amap == "\<c-v>" + let amap= "\<c-v>".amap + endif + let amap = a:maplead.amap + let s:restoremap_{a:suffix} = s:restoremap_{a:suffix}."|:silent! ".mapmode."unmap ".amap + if maparg(amap,mapmode) != "" + let maprhs = substitute(maparg(amap,mapmode),'|','<bar>','ge') + let s:restoremap_{a:suffix} = s:restoremap_{a:suffix}."|:".mapmode."map ".amap." ".maprhs + endif + if dounmap + exe "silent! ".mapmode."unmap ".amap + endif + + " save single map <something> + elseif strpart(a:mapchx,0,1) == '<' + let amap = a:mapchx + if amap == "|" || amap == "\<c-v>" + let amap= "\<c-v>".amap + endif + let s:restoremap_{a:suffix} = s:restoremap_{a:suffix}."|silent! ".mapmode."unmap ".amap + if maparg(a:mapchx,mapmode) != "" + let maprhs = substitute(maparg(amap,mapmode),'|','<bar>','ge') + let s:restoremap_{a:suffix} = s:restoremap_{a:suffix}."|".mapmode."map ".amap." ".maprhs + endif + if dounmap + exe "silent! ".mapmode."unmap ".amap + endif + + " save multiple maps + else + let i= 1 + while i <= strlen(a:mapchx) + let amap= a:maplead.strpart(a:mapchx,i-1,1) + if amap == "|" || amap == "\<c-v>" + let amap= "\<c-v>".amap + endif + let s:restoremap_{a:suffix} = s:restoremap_{a:suffix}."|silent! 
".mapmode."unmap ".amap + if maparg(amap,mapmode) != "" + let maprhs = substitute(maparg(amap,mapmode),'|','<bar>','ge') + let s:restoremap_{a:suffix} = s:restoremap_{a:suffix}."|".mapmode."map ".amap." ".maprhs + endif + if dounmap + exe "silent! ".mapmode."unmap ".amap + endif + let i= i + 1 + endwhile + endif +" call Dret("SaveUserMaps : restoremap_".a:suffix.": ".s:restoremap_{a:suffix}) +endfun + +" --------------------------------------------------------------------- +" RestoreUserMaps: {{{1 +" Used to restore user maps saved by SaveUserMaps() +fun! RestoreUserMaps(suffix) +" call Dfunc("RestoreUserMaps(suffix<".a:suffix.">)") + if exists("s:restoremap_{a:suffix}") + let s:restoremap_{a:suffix}= substitute(s:restoremap_{a:suffix},'|\s*$','','e') + if s:restoremap_{a:suffix} != "" +" call Decho("exe ".s:restoremap_{a:suffix}) + exe "silent! ".s:restoremap_{a:suffix} + endif + unlet s:restoremap_{a:suffix} + endif +" call Dret("RestoreUserMaps") +endfun + +" --------------------------------------------------------------------- +" Restore: {{{1 +let &cpo= s:keepcpo +unlet s:keepcpo + +" --------------------------------------------------------------------- +" Modelines: {{{1 +" vim: ts=4 fdm=marker
mitechie/pyvim
bca43cfa26e39cc652082f22a044a114180da030
* update the mako filetype hook
diff --git a/.vimrc b/.vimrc index edc6df3..d6b613c 100644 --- a/.vimrc +++ b/.vimrc @@ -1,415 +1,415 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... 
- post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme hornet " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell 
set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> " replace the default grep program with ack set grepprg=ack-grep " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! 
spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html -au BufNewFile,BufRead *.mako set filetype=mako +au BufNewFile,BufRead,BufEnter *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search 
python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter source ~/.vim/twitvim.vim " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/lib/python2.6/dist-packages/ropevim-0.3_rc-py2.6.egg/ropevim.vim " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
c74fcaaa39f247ccbc6a00f5cdaf487a43971175
* add support for ack-grep for :grep cmd
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index 9713688..a1423cc 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,5 +1,6 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =3 +let g:netrw_dirhist_cnt =4 let g:netrw_dirhist_1='/home/rharding/configs/pyvim' let g:netrw_dirhist_2='/home/rharding/configs/dotfiles/awesome/autostart' let g:netrw_dirhist_3='/home/rharding/.offlineimap' +let g:netrw_dirhist_4='/home/rharding/configs/dotfiles/awesome/autostart' diff --git a/.vimrc b/.vimrc index bc69d03..edc6df3 100644 --- a/.vimrc +++ b/.vimrc @@ -1,412 +1,415 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " F11 - toggle :set paste on/off " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. 
- move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url " " ----------------------------------------- " Printing: " set printoptions=paper:A4,syntax:y,wrap:y " http://vim.runpaint.org/basics/printing/ syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme hornet " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines 
checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " set the paste toggle key set pastetoggle=<F11> +" replace the default grep program with ack +set grepprg=ack-grep + " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! 
spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search 
python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter source ~/.vim/twitvim.vim " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/lib/python2.6/dist-packages/ropevim-0.3_rc-py2.6.egg/ropevim.vim " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
35ee67b79c78e5bb1d64e782602f9596f0a7f8a5
* add a pastetoggle key and notes on customizing print output
diff --git a/.vim/.netrwhist b/.vim/.netrwhist index bd9ef16..9713688 100644 --- a/.vim/.netrwhist +++ b/.vim/.netrwhist @@ -1,3 +1,5 @@ let g:netrw_dirhistmax =10 -let g:netrw_dirhist_cnt =1 +let g:netrw_dirhist_cnt =3 let g:netrw_dirhist_1='/home/rharding/configs/pyvim' +let g:netrw_dirhist_2='/home/rharding/configs/dotfiles/awesome/autostart' +let g:netrw_dirhist_3='/home/rharding/.offlineimap' diff --git a/.vimrc b/.vimrc index 7dbc2ea..bc69d03 100644 --- a/.vimrc +++ b/.vimrc @@ -1,402 +1,412 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " ,r - view registers " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " +" F11 - toggle :set paste on/off +" " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. 
- move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url +" +" ----------------------------------------- +" Printing: +" set printoptions=paper:A4,syntax:y,wrap:y +" http://vim.runpaint.org/basics/printing/ syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme hornet " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number 
lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc +" set the paste toggle key +set pastetoggle=<F11> + " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " show the registers from things cut/yanked nmap <leader>r :registers<CR> " map the various registers to a leader shortcut for pasting from them nmap <leader>0 "0p nmap <leader>1 "1p nmap <leader>2 "2p nmap <leader>3 "3p nmap <leader>4 "4p nmap <leader>5 "5p nmap <leader>6 "6p nmap <leader>7 "7p nmap <leader>8 "8p nmap <leader>9 "9p " shortcut to toggle spelling nmap <leader>s :setlocal spell! 
spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search 
python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter source ~/.vim/twitvim.vim " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/lib/python2.6/dist-packages/ropevim-0.3_rc-py2.6.egg/ropevim.vim " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
a2c3a9f7918aebad236ac1bf4e23c29ef88913b2
* add the install.py file for creating the links, move the vimrc and vim to .vim/rc so that the filenames match the final locations
diff --git a/vim/.VimballRecord b/.vim/.VimballRecord similarity index 100% rename from vim/.VimballRecord rename to .vim/.VimballRecord diff --git a/.vim/.netrwhist b/.vim/.netrwhist new file mode 100644 index 0000000..bd9ef16 --- /dev/null +++ b/.vim/.netrwhist @@ -0,0 +1,3 @@ +let g:netrw_dirhistmax =10 +let g:netrw_dirhist_cnt =1 +let g:netrw_dirhist_1='/home/rharding/configs/pyvim' diff --git a/.vim/after/plugin/.snipMate.vim.swp b/.vim/after/plugin/.snipMate.vim.swp new file mode 100644 index 0000000..a915865 Binary files /dev/null and b/.vim/after/plugin/.snipMate.vim.swp differ diff --git a/vim/after/plugin/snipMate.vim b/.vim/after/plugin/snipMate.vim similarity index 100% rename from vim/after/plugin/snipMate.vim rename to .vim/after/plugin/snipMate.vim diff --git a/vim/autoload/snipMate.vim b/.vim/autoload/snipMate.vim similarity index 100% rename from vim/autoload/snipMate.vim rename to .vim/autoload/snipMate.vim diff --git a/vim/autoload/tcomment.vim b/.vim/autoload/tcomment.vim similarity index 100% rename from vim/autoload/tcomment.vim rename to .vim/autoload/tcomment.vim diff --git a/vim/colors/darkburn.vim b/.vim/colors/darkburn.vim similarity index 100% rename from vim/colors/darkburn.vim rename to .vim/colors/darkburn.vim diff --git a/vim/colors/darkdevel.vim b/.vim/colors/darkdevel.vim similarity index 100% rename from vim/colors/darkdevel.vim rename to .vim/colors/darkdevel.vim diff --git a/vim/colors/darkspectrum.vim b/.vim/colors/darkspectrum.vim similarity index 100% rename from vim/colors/darkspectrum.vim rename to .vim/colors/darkspectrum.vim diff --git a/vim/colors/hornet.vim b/.vim/colors/hornet.vim similarity index 100% rename from vim/colors/hornet.vim rename to .vim/colors/hornet.vim diff --git a/vim/colors/kaltex.vim b/.vim/colors/kaltex.vim similarity index 100% rename from vim/colors/kaltex.vim rename to .vim/colors/kaltex.vim diff --git a/vim/colors/lucius.vim b/.vim/colors/lucius.vim similarity index 100% rename from 
vim/colors/lucius.vim rename to .vim/colors/lucius.vim diff --git a/vim/colors/tango2.vim b/.vim/colors/tango2.vim similarity index 100% rename from vim/colors/tango2.vim rename to .vim/colors/tango2.vim diff --git a/vim/doc/NERD_tree.txt b/.vim/doc/NERD_tree.txt similarity index 100% rename from vim/doc/NERD_tree.txt rename to .vim/doc/NERD_tree.txt diff --git a/vim/doc/snipMate.txt b/.vim/doc/snipMate.txt similarity index 100% rename from vim/doc/snipMate.txt rename to .vim/doc/snipMate.txt diff --git a/vim/doc/supertab.txt b/.vim/doc/supertab.txt similarity index 100% rename from vim/doc/supertab.txt rename to .vim/doc/supertab.txt diff --git a/vim/doc/tComment.txt b/.vim/doc/tComment.txt similarity index 100% rename from vim/doc/tComment.txt rename to .vim/doc/tComment.txt diff --git a/vim/doc/tags b/.vim/doc/tags similarity index 100% rename from vim/doc/tags rename to .vim/doc/tags diff --git a/vim/doc/twitvim.txt b/.vim/doc/twitvim.txt similarity index 100% rename from vim/doc/twitvim.txt rename to .vim/doc/twitvim.txt diff --git a/vim/doc/xml-plugin.txt b/.vim/doc/xml-plugin.txt similarity index 100% rename from vim/doc/xml-plugin.txt rename to .vim/doc/xml-plugin.txt diff --git a/vim/ftplugin/html.vim b/.vim/ftplugin/html.vim similarity index 100% rename from vim/ftplugin/html.vim rename to .vim/ftplugin/html.vim diff --git a/vim/ftplugin/html_snip_helper.vim b/.vim/ftplugin/html_snip_helper.vim similarity index 100% rename from vim/ftplugin/html_snip_helper.vim rename to .vim/ftplugin/html_snip_helper.vim diff --git a/vim/ftplugin/javascript.vim b/.vim/ftplugin/javascript.vim similarity index 100% rename from vim/ftplugin/javascript.vim rename to .vim/ftplugin/javascript.vim diff --git a/vim/ftplugin/python.vim b/.vim/ftplugin/python.vim similarity index 100% rename from vim/ftplugin/python.vim rename to .vim/ftplugin/python.vim diff --git a/vim/ftplugin/xml.vim b/.vim/ftplugin/xml.vim similarity index 100% rename from vim/ftplugin/xml.vim rename to 
.vim/ftplugin/xml.vim diff --git a/vim/nerdtree_plugin/exec_menuitem.vim b/.vim/nerdtree_plugin/exec_menuitem.vim similarity index 100% rename from vim/nerdtree_plugin/exec_menuitem.vim rename to .vim/nerdtree_plugin/exec_menuitem.vim diff --git a/vim/nerdtree_plugin/fs_menu.vim b/.vim/nerdtree_plugin/fs_menu.vim similarity index 100% rename from vim/nerdtree_plugin/fs_menu.vim rename to .vim/nerdtree_plugin/fs_menu.vim diff --git a/vim/plugin/NERD_tree.vim b/.vim/plugin/NERD_tree.vim similarity index 100% rename from vim/plugin/NERD_tree.vim rename to .vim/plugin/NERD_tree.vim diff --git a/vim/plugin/gist.vim b/.vim/plugin/gist.vim similarity index 100% rename from vim/plugin/gist.vim rename to .vim/plugin/gist.vim diff --git a/vim/plugin/lusty-juggler.vim b/.vim/plugin/lusty-juggler.vim similarity index 100% rename from vim/plugin/lusty-juggler.vim rename to .vim/plugin/lusty-juggler.vim diff --git a/vim/plugin/pep8.vim b/.vim/plugin/pep8.vim similarity index 100% rename from vim/plugin/pep8.vim rename to .vim/plugin/pep8.vim diff --git a/vim/plugin/pydoc.vim b/.vim/plugin/pydoc.vim similarity index 100% rename from vim/plugin/pydoc.vim rename to .vim/plugin/pydoc.vim diff --git a/vim/plugin/snipMate.vim b/.vim/plugin/snipMate.vim similarity index 100% rename from vim/plugin/snipMate.vim rename to .vim/plugin/snipMate.vim diff --git a/vim/plugin/supertab.vim b/.vim/plugin/supertab.vim similarity index 100% rename from vim/plugin/supertab.vim rename to .vim/plugin/supertab.vim diff --git a/vim/plugin/tComment.vim b/.vim/plugin/tComment.vim similarity index 100% rename from vim/plugin/tComment.vim rename to .vim/plugin/tComment.vim diff --git a/vim/plugin/twitvim.vim b/.vim/plugin/twitvim.vim similarity index 100% rename from vim/plugin/twitvim.vim rename to .vim/plugin/twitvim.vim diff --git a/vim/snippets/_.snippets b/.vim/snippets/_.snippets similarity index 100% rename from vim/snippets/_.snippets rename to .vim/snippets/_.snippets diff --git 
a/vim/snippets/autoit.snippets b/.vim/snippets/autoit.snippets similarity index 100% rename from vim/snippets/autoit.snippets rename to .vim/snippets/autoit.snippets diff --git a/vim/snippets/c.snippets b/.vim/snippets/c.snippets similarity index 100% rename from vim/snippets/c.snippets rename to .vim/snippets/c.snippets diff --git a/vim/snippets/cpp.snippets b/.vim/snippets/cpp.snippets similarity index 100% rename from vim/snippets/cpp.snippets rename to .vim/snippets/cpp.snippets diff --git a/vim/snippets/html.snippets b/.vim/snippets/html.snippets similarity index 100% rename from vim/snippets/html.snippets rename to .vim/snippets/html.snippets diff --git a/vim/snippets/java.snippets b/.vim/snippets/java.snippets similarity index 100% rename from vim/snippets/java.snippets rename to .vim/snippets/java.snippets diff --git a/vim/snippets/javascript.snippets b/.vim/snippets/javascript.snippets similarity index 100% rename from vim/snippets/javascript.snippets rename to .vim/snippets/javascript.snippets diff --git a/vim/snippets/mako.snippets b/.vim/snippets/mako.snippets similarity index 100% rename from vim/snippets/mako.snippets rename to .vim/snippets/mako.snippets diff --git a/vim/snippets/objc.snippets b/.vim/snippets/objc.snippets similarity index 100% rename from vim/snippets/objc.snippets rename to .vim/snippets/objc.snippets diff --git a/vim/snippets/perl.snippets b/.vim/snippets/perl.snippets similarity index 100% rename from vim/snippets/perl.snippets rename to .vim/snippets/perl.snippets diff --git a/vim/snippets/php.snippets b/.vim/snippets/php.snippets similarity index 100% rename from vim/snippets/php.snippets rename to .vim/snippets/php.snippets diff --git a/vim/snippets/python.snippets b/.vim/snippets/python.snippets similarity index 100% rename from vim/snippets/python.snippets rename to .vim/snippets/python.snippets diff --git a/vim/snippets/ruby.snippets b/.vim/snippets/ruby.snippets similarity index 100% rename from vim/snippets/ruby.snippets 
rename to .vim/snippets/ruby.snippets diff --git a/vim/snippets/sh.snippets b/.vim/snippets/sh.snippets similarity index 100% rename from vim/snippets/sh.snippets rename to .vim/snippets/sh.snippets diff --git a/vim/snippets/snippet.snippets b/.vim/snippets/snippet.snippets similarity index 100% rename from vim/snippets/snippet.snippets rename to .vim/snippets/snippet.snippets diff --git a/vim/snippets/tcl.snippets b/.vim/snippets/tcl.snippets similarity index 100% rename from vim/snippets/tcl.snippets rename to .vim/snippets/tcl.snippets diff --git a/vim/snippets/tex.snippets b/.vim/snippets/tex.snippets similarity index 100% rename from vim/snippets/tex.snippets rename to .vim/snippets/tex.snippets diff --git a/vim/snippets/vim.snippets b/.vim/snippets/vim.snippets similarity index 100% rename from vim/snippets/vim.snippets rename to .vim/snippets/vim.snippets diff --git a/vim/snippets/zsh.snippets b/.vim/snippets/zsh.snippets similarity index 100% rename from vim/snippets/zsh.snippets rename to .vim/snippets/zsh.snippets diff --git a/vim/syntax/jinja.vim b/.vim/syntax/jinja.vim similarity index 100% rename from vim/syntax/jinja.vim rename to .vim/syntax/jinja.vim diff --git a/vim/syntax/jquery.vim b/.vim/syntax/jquery.vim similarity index 100% rename from vim/syntax/jquery.vim rename to .vim/syntax/jquery.vim diff --git a/vim/syntax/json.vim b/.vim/syntax/json.vim similarity index 100% rename from vim/syntax/json.vim rename to .vim/syntax/json.vim diff --git a/vim/syntax/mako.vim b/.vim/syntax/mako.vim similarity index 100% rename from vim/syntax/mako.vim rename to .vim/syntax/mako.vim diff --git a/vim/syntax/snippet.vim b/.vim/syntax/snippet.vim similarity index 100% rename from vim/syntax/snippet.vim rename to .vim/syntax/snippet.vim diff --git a/vimrc b/.vimrc similarity index 100% rename from vimrc rename to .vimrc diff --git a/install.py b/install.py new file mode 100755 index 0000000..c00db21 --- /dev/null +++ b/install.py @@ -0,0 +1,38 @@ +#!/usr/bin/env 
python +""" Install the files needed for this config to operate on the user's system +correctly""" + +import os +import subprocess + +# command to run: +# ln -s /home/rharding/configs/pyvim/vimrc ~/.vimrc +# ln -s /home/rharding/configs/pyvim/vim ~/.vim + + +CONFIG_FILES = ['.vimrc', '.vim'] +HOME_PATH = os.path.expanduser('~') + + +def removefile(filename): + removing_link = "%s/%s" % (HOME_PATH, filename) + ret = subprocess.call(["rm", "-rf", removing_link]) + print "%s is the status code for removing %s" % (ret, removing_link) + return + + +def linkfile(filename): + link_location = "%s/%s" % (HOME_PATH, filename) + is_located = "%s" % (os.path.abspath(filename)) + + ret = subprocess.call(["ln", "-s", is_located, link_location]) + print "%s is the status code for linking %s to %s" % (ret, + is_located, + link_location) + return + + + +for conf_file in CONFIG_FILES: + removefile(conf_file) + linkfile(conf_file)
mitechie/pyvim
e7728866ff850878c59c69a8893a9018b18bba43
* add some leader shortcuts for dealing with registers
diff --git a/vimrc b/vimrc index 1c63c6d..7dbc2ea 100644 --- a/vimrc +++ b/vimrc @@ -1,385 +1,402 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree +" ,r - view registers " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... 
- post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme hornet " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... 
" move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard -nmap <leader>y "*y +nmap <leader>y "*y + " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p +" show the registers from things cut/yanked +nmap <leader>r :registers<CR> + +" map the various registers to a leader shortcut for pasting from them +nmap <leader>0 "0p +nmap <leader>1 "1p +nmap <leader>2 "2p +nmap <leader>3 "3p +nmap <leader>4 "4p +nmap <leader>5 "5p +nmap <leader>6 "6p +nmap <leader>7 "7p +nmap <leader>8 "8p +nmap <leader>9 "9p + " shortcut to toggle spelling nmap <leader>s :setlocal spell! 
spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html au BufNewFile,BufRead *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search 
python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter source ~/.vim/twitvim.vim " RopeVim " http://rope.sourceforge.net/ropevim.html " Refactoring engine using python-rope source /usr/local/lib/python2.6/dist-packages/ropevim-0.3_rc-py2.6.egg/ropevim.vim " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
432f3eb8647e3ef7249880b5cc609927179f8d82
+ add support for mako.vim and the filetype detect for it
diff --git a/vim/syntax/mako.vim b/vim/syntax/mako.vim new file mode 100644 index 0000000..3103eb0 --- /dev/null +++ b/vim/syntax/mako.vim @@ -0,0 +1,86 @@ +" Vim syntax file +" Language: Mako +" Maintainer: Armin Ronacher <[email protected]> +" URL: http://lucumr.pocoo.org/ +" Last Change: 2008 September 12 +" Version: 0.6.1 +" +" Thanks to Brine Rue <[email protected]> who noticed a bug in the +" delimiter handling. +" +" Known Limitations +" the <%text> block does not have correct attributes + +" For version 5.x: Clear all syntax items +" For version 6.x: Quit when a syntax file was already loaded +if version < 600 + syntax clear +elseif exists("b:current_syntax") + finish +endif + +if !exists("main_syntax") + let main_syntax = "html" +endif + +"Source the html syntax file +ru! syntax/html.vim +unlet b:current_syntax + +"Put the python syntax file in @pythonTop +syn include @pythonTop syntax/python.vim + +" End keywords +syn keyword makoEnd contained endfor endwhile endif endtry enddef + +" Block rules +syn region makoLine matchgroup=makoDelim start=#^\s*%# end=#$# keepend contains=@pythonTop,makoEnd +syn region makoBlock matchgroup=makoDelim start=#<%!\?# end=#%># keepend contains=@pythonTop,makoEnd + +" Variables +syn region makoNested start="{" end="}" transparent display contained contains=makoNested,@pythonTop +syn region makoVariable matchgroup=makoDelim start=#\${# end=#}# contains=makoNested,@pythonTop + +" Comments +syn region makoComment start="^\s*##" end="$" +syn region makoDocComment matchgroup=makoDelim start="<%doc>" end="</%doc>" keepend + +" Literal Blocks +syn region makoText matchgroup=makoDelim start="<%text[^>]*>" end="</%text>" + +" Attribute Sublexing +syn match makoAttributeKey containedin=makoTag contained "[a-zA-Z_][a-zA-Z0-9_]*=" +syn region makoAttributeValue containedin=makoTag contained start=/"/ skip=/\\"/ end=/"/ +syn region makoAttributeValue containedin=MakoTag contained start=/'/ skip=/\\'/ end=/'/ + +" Tags +syn region makoTag 
matchgroup=makoDelim start="<%\(def\|call\|page\|include\|namespace\|inherit\)\>" end="/\?>" +syn match makoDelim "</%\(def\|call\|namespace\)>" + +" Newline Escapes +syn match makoEscape /\\$/ + +" Default highlighting links +if version >= 508 || !exists("did_mako_syn_inits") + if version < 508 + let did_mako_syn_inits = 1 + com -nargs=+ HiLink hi link <args> + else + com -nargs=+ HiLink hi def link <args> + endif + + HiLink makoDocComment makoComment + HiLink makoDefEnd makoDelim + + HiLink makoAttributeKey Type + HiLink makoAttributeValue String + HiLink makoText Normal + HiLink makoDelim Preproc + HiLink makoEnd Keyword + HiLink makoComment Comment + HiLink makoEscape Special + + delc HiLink +endif + +let b:current_syntax = "eruby" diff --git a/vimrc b/vimrc index 2cfd8c0..1c63c6d 100644 --- a/vimrc +++ b/vimrc @@ -1,378 +1,385 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin +" python-rope - for ropevim plugin " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " Windows " ctrl-jklm - swap 
to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... - post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme hornet " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate 
messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " shortcut to toggle spelling nmap <leader>s :setlocal spell! 
spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html +au BufNewFile,BufRead *.mako set filetype=mako " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search 
python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter source ~/.vim/twitvim.vim +" RopeVim +" http://rope.sourceforge.net/ropevim.html +" Refactoring engine using python-rope +source /usr/local/lib/python2.6/dist-packages/ropevim-0.3_rc-py2.6.egg/ropevim.vim + " ================================================== " Custom Functions " ================================================== " PGrep function to basically do vimgrep within the predefined $PROJ_DIR from " workit scripts. " :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
7d17c240b862993a4cddb1a9187a7cdd5f71e9ad
* add some comments on the PGrep function
diff --git a/vimrc b/vimrc index f8c33f1..2cfd8c0 100644 --- a/vimrc +++ b/vimrc @@ -1,376 +1,378 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin -" " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit " " TwitVim " <F7>/<F8> - load timelines " :Bpost... 
- post " ,g - load user's timeline " ,d - direct message " ,@ - load the parent to this post " :IsGd {url} - shorten the url syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme hornet " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... 
" move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . 
a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! 
and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" | if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading 
" PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " TwitVim " http://vim.sourceforge.net/scripts/script.php?script_id=2204 " Twitter/Identica client for vim " F7/F8 for loading identica/twitter source ~/.vim/twitvim.vim " ================================================== " Custom Functions " ================================================== +" PGrep function to basically do vimgrep within the predefined $PROJ_DIR from +" workit scripts. +" :PG support php -- search the project for /support/j **/*.php function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
1be466a729a11f8e8654ab573403cd92eecb9c33
* update the vimsync command
diff --git a/vimsync.sh b/vimsync.sh index 6fe1ba7..53e95b5 100755 --- a/vimsync.sh +++ b/vimsync.sh @@ -1,44 +1,43 @@ #! /bin/zsh # add to ~/bin: ln -s ~/configs/pyvim/vimsync.sh ~/bin/vimsync.sh # Sync my vim config to a remote host specified. # Steps: # 1. cd ~/configs/pyvim # 2. git co portable # 3. rsync -avz --delete -e ssh ~/configs/pyvim dc:~ # 4. ln -s vim/vim .vim # 5. ln -s vim/.vimrc .vimrc # Notes: # Using a git branch since some stuff I run locally won't be on remote hosts # This setups the files in a directory on the host called vim and then it # symlinks the .vimrc and vim directory to the user's home dir # This currently syncs the git stuff as well, at some point should probably do # some fancy export to a tmp dir and rsync those files over instead VIMCONF="/home/rharding/configs/pyvim" VIMBRANCH="portable" cd $VIMCONF git checkout $VIMBRANCH # get the hostname if [ $# -ne 1 ] then echo "Usage: vimsync HOSTNAME" return 65 fi HOSTNAME=$1 -rsync -avz --delete -e ssh ~/configs/pyvim $HOSTNAME:~ +rsync -avz --delete -e ssh ~/configs/pyvim $HOSTNAME:~/ -ssh $HOSTNAME 'ln -s vim/vim .vim && ln -s vim/vimrc .vimrc' +ssh $HOSTNAME 'rm -r ~/.vimrc ~/.vim ; ln -s pyvim/vim .vim && ln -s pyvim/vimrc .vimrc' # make sure we restore our local vim config to master git checkout master - # @todo move the above into a shell function, setup a list of hosts, and loop # through them to sync all hosts at once
mitechie/pyvim
a3533c32b96e9151f2fea9347de01cbd9cb34c1c
* add support for twitvim
diff --git a/vim/.VimballRecord b/vim/.VimballRecord index 2ca4d01..70818d6 100644 --- a/vim/.VimballRecord +++ b/vim/.VimballRecord @@ -1,2 +1,3 @@ tComment.vba: call delete('/home/rharding/.vim/doc/tComment.txt')|call delete('/home/rharding/.vim/plugin/tComment.vim')|call delete('/home/rharding/.vim/autoload/tcomment.vim') supertab.vba: call delete('/home/rharding/.vim/doc/supertab.txt')|call delete('/home/rharding/.vim/plugin/supertab.vim') +twitvim-0.4.5.vba: call delete('/home/rharding/.vim/plugin/twitvim.vim')|call delete('/home/rharding/.vim/doc/twitvim.txt') diff --git a/vim/doc/tags b/vim/doc/tags index 1150162..07e7c9b 100644 --- a/vim/doc/tags +++ b/vim/doc/tags @@ -1,164 +1,301 @@ 'NERDChristmasTree' NERD_tree.txt /*'NERDChristmasTree'* 'NERDTreeAutoCenter' NERD_tree.txt /*'NERDTreeAutoCenter'* 'NERDTreeAutoCenterThreshold' NERD_tree.txt /*'NERDTreeAutoCenterThreshold'* 'NERDTreeBookmarksFile' NERD_tree.txt /*'NERDTreeBookmarksFile'* 'NERDTreeCaseSensitiveSort' NERD_tree.txt /*'NERDTreeCaseSensitiveSort'* 'NERDTreeChDirMode' NERD_tree.txt /*'NERDTreeChDirMode'* 'NERDTreeHighlightCursorline' NERD_tree.txt /*'NERDTreeHighlightCursorline'* 'NERDTreeHijackNetrw' NERD_tree.txt /*'NERDTreeHijackNetrw'* 'NERDTreeIgnore' NERD_tree.txt /*'NERDTreeIgnore'* 'NERDTreeMouseMode' NERD_tree.txt /*'NERDTreeMouseMode'* 'NERDTreeQuitOnOpen' NERD_tree.txt /*'NERDTreeQuitOnOpen'* 'NERDTreeShowBookmarks' NERD_tree.txt /*'NERDTreeShowBookmarks'* 'NERDTreeShowFiles' NERD_tree.txt /*'NERDTreeShowFiles'* 'NERDTreeShowHidden' NERD_tree.txt /*'NERDTreeShowHidden'* 'NERDTreeShowLineNumbers' NERD_tree.txt /*'NERDTreeShowLineNumbers'* 'NERDTreeSortOrder' NERD_tree.txt /*'NERDTreeSortOrder'* 'NERDTreeStatusline' NERD_tree.txt /*'NERDTreeStatusline'* 'NERDTreeWinPos' NERD_tree.txt /*'NERDTreeWinPos'* 'NERDTreeWinSize' NERD_tree.txt /*'NERDTreeWinSize'* 'loaded_nerd_tree' NERD_tree.txt /*'loaded_nerd_tree'* 'snippets' snipMate.txt /*'snippets'* .snippet snipMate.txt /*.snippet* 
.snippets snipMate.txt /*.snippets* +:ABitLy twitvim.txt /*:ABitLy* +:ACligs twitvim.txt /*:ACligs* +:AIsGd twitvim.txt /*:AIsGd* +:AMetamark twitvim.txt /*:AMetamark* +:ASnipurl twitvim.txt /*:ASnipurl* +:ATinyURL twitvim.txt /*:ATinyURL* +:ATrim twitvim.txt /*:ATrim* +:ATweetburner twitvim.txt /*:ATweetburner* +:AUrlBorg twitvim.txt /*:AUrlBorg* +:AZima twitvim.txt /*:AZima* +:BPosttoTwitter twitvim.txt /*:BPosttoTwitter* +:BackTwitter twitvim.txt /*:BackTwitter* +:BitLy twitvim.txt /*:BitLy* +:CPosttoTwitter twitvim.txt /*:CPosttoTwitter* +:Cligs twitvim.txt /*:Cligs* +:DMSentTwitter twitvim.txt /*:DMSentTwitter* +:DMTwitter twitvim.txt /*:DMTwitter* +:ForwardTwitter twitvim.txt /*:ForwardTwitter* +:FriendsTwitter twitvim.txt /*:FriendsTwitter* +:IsGd twitvim.txt /*:IsGd* +:ListTwitter twitvim.txt /*:ListTwitter* +:LocationTwitter twitvim.txt /*:LocationTwitter* +:Metamark twitvim.txt /*:Metamark* :NERDTree NERD_tree.txt /*:NERDTree* :NERDTreeClose NERD_tree.txt /*:NERDTreeClose* :NERDTreeFind NERD_tree.txt /*:NERDTreeFind* :NERDTreeFromBookmark NERD_tree.txt /*:NERDTreeFromBookmark* :NERDTreeMirror NERD_tree.txt /*:NERDTreeMirror* :NERDTreeToggle NERD_tree.txt /*:NERDTreeToggle* +:NextTwitter twitvim.txt /*:NextTwitter* +:PBitLy twitvim.txt /*:PBitLy* +:PCligs twitvim.txt /*:PCligs* +:PIsGd twitvim.txt /*:PIsGd* +:PMetamark twitvim.txt /*:PMetamark* +:PSnipurl twitvim.txt /*:PSnipurl* +:PTinyURL twitvim.txt /*:PTinyURL* +:PTrim twitvim.txt /*:PTrim* +:PTweetburner twitvim.txt /*:PTweetburner* +:PUrlBorg twitvim.txt /*:PUrlBorg* +:PZima twitvim.txt /*:PZima* +:PosttoTwitter twitvim.txt /*:PosttoTwitter* +:PreviousTwitter twitvim.txt /*:PreviousTwitter* +:ProfileTwitter twitvim.txt /*:ProfileTwitter* +:PublicTwitter twitvim.txt /*:PublicTwitter* +:RateLimitTwitter twitvim.txt /*:RateLimitTwitter* +:RefreshTwitter twitvim.txt /*:RefreshTwitter* +:RepliesTwitter twitvim.txt /*:RepliesTwitter* +:ResetLoginTwitter twitvim.txt /*:ResetLoginTwitter* 
+:RetweetedByMeTwitter twitvim.txt /*:RetweetedByMeTwitter* +:RetweetedToMeTwitter twitvim.txt /*:RetweetedToMeTwitter* +:SearchTwitter twitvim.txt /*:SearchTwitter* +:SendDMTwitter twitvim.txt /*:SendDMTwitter* +:SetLoginTwitter twitvim.txt /*:SetLoginTwitter* +:Snipurl twitvim.txt /*:Snipurl* :TComment tComment.txt /*:TComment* :TCommentAs tComment.txt /*:TCommentAs* :TCommentBlock tComment.txt /*:TCommentBlock* :TCommentInline tComment.txt /*:TCommentInline* :TCommentRight tComment.txt /*:TCommentRight* +:TinyURL twitvim.txt /*:TinyURL* +:Trim twitvim.txt /*:Trim* +:Tweetburner twitvim.txt /*:Tweetburner* +:UrlBorg twitvim.txt /*:UrlBorg* +:UserTwitter twitvim.txt /*:UserTwitter* +:Zima twitvim.txt /*:Zima* ExtractSnips() snipMate.txt /*ExtractSnips()* ExtractSnipsFile() snipMate.txt /*ExtractSnipsFile()* Filename() snipMate.txt /*Filename()* NERDTree NERD_tree.txt /*NERDTree* NERDTree-? NERD_tree.txt /*NERDTree-?* NERDTree-A NERD_tree.txt /*NERDTree-A* NERDTree-B NERD_tree.txt /*NERDTree-B* NERDTree-C NERD_tree.txt /*NERDTree-C* NERDTree-C-J NERD_tree.txt /*NERDTree-C-J* NERDTree-C-K NERD_tree.txt /*NERDTree-C-K* NERDTree-D NERD_tree.txt /*NERDTree-D* NERDTree-F NERD_tree.txt /*NERDTree-F* NERDTree-I NERD_tree.txt /*NERDTree-I* NERDTree-J NERD_tree.txt /*NERDTree-J* NERDTree-K NERD_tree.txt /*NERDTree-K* NERDTree-O NERD_tree.txt /*NERDTree-O* NERDTree-P NERD_tree.txt /*NERDTree-P* NERDTree-R NERD_tree.txt /*NERDTree-R* NERDTree-T NERD_tree.txt /*NERDTree-T* NERDTree-U NERD_tree.txt /*NERDTree-U* NERDTree-X NERD_tree.txt /*NERDTree-X* NERDTree-cd NERD_tree.txt /*NERDTree-cd* NERDTree-contents NERD_tree.txt /*NERDTree-contents* NERDTree-e NERD_tree.txt /*NERDTree-e* NERDTree-f NERD_tree.txt /*NERDTree-f* NERDTree-gi NERD_tree.txt /*NERDTree-gi* NERDTree-go NERD_tree.txt /*NERDTree-go* NERDTree-gs NERD_tree.txt /*NERDTree-gs* NERDTree-i NERD_tree.txt /*NERDTree-i* NERDTree-m NERD_tree.txt /*NERDTree-m* NERDTree-o NERD_tree.txt /*NERDTree-o* NERDTree-p 
NERD_tree.txt /*NERDTree-p* NERDTree-q NERD_tree.txt /*NERDTree-q* NERDTree-r NERD_tree.txt /*NERDTree-r* NERDTree-s NERD_tree.txt /*NERDTree-s* NERDTree-t NERD_tree.txt /*NERDTree-t* NERDTree-u NERD_tree.txt /*NERDTree-u* NERDTree-x NERD_tree.txt /*NERDTree-x* NERDTreeAPI NERD_tree.txt /*NERDTreeAPI* NERDTreeAbout NERD_tree.txt /*NERDTreeAbout* NERDTreeAddKeyMap() NERD_tree.txt /*NERDTreeAddKeyMap()* NERDTreeAddMenuItem() NERD_tree.txt /*NERDTreeAddMenuItem()* NERDTreeAddMenuSeparator() NERD_tree.txt /*NERDTreeAddMenuSeparator()* NERDTreeAddSubmenu() NERD_tree.txt /*NERDTreeAddSubmenu()* NERDTreeBookmarkCommands NERD_tree.txt /*NERDTreeBookmarkCommands* NERDTreeBookmarkTable NERD_tree.txt /*NERDTreeBookmarkTable* NERDTreeBookmarks NERD_tree.txt /*NERDTreeBookmarks* NERDTreeChangelog NERD_tree.txt /*NERDTreeChangelog* NERDTreeCredits NERD_tree.txt /*NERDTreeCredits* NERDTreeFunctionality NERD_tree.txt /*NERDTreeFunctionality* NERDTreeGlobalCommands NERD_tree.txt /*NERDTreeGlobalCommands* NERDTreeInvalidBookmarks NERD_tree.txt /*NERDTreeInvalidBookmarks* NERDTreeKeymapAPI NERD_tree.txt /*NERDTreeKeymapAPI* NERDTreeLicense NERD_tree.txt /*NERDTreeLicense* NERDTreeMappings NERD_tree.txt /*NERDTreeMappings* NERDTreeMenu NERD_tree.txt /*NERDTreeMenu* NERDTreeMenuAPI NERD_tree.txt /*NERDTreeMenuAPI* NERDTreeOptionDetails NERD_tree.txt /*NERDTreeOptionDetails* NERDTreeOptionSummary NERD_tree.txt /*NERDTreeOptionSummary* NERDTreeOptions NERD_tree.txt /*NERDTreeOptions* NERDTreeRender() NERD_tree.txt /*NERDTreeRender()* NERD_tree.txt NERD_tree.txt /*NERD_tree.txt* ResetSnippets() snipMate.txt /*ResetSnippets()* TCommentDefineType() tComment.txt /*TCommentDefineType()* +TwitVim twitvim.txt /*TwitVim* +TwitVim-A-d twitvim.txt /*TwitVim-A-d* +TwitVim-A-g twitvim.txt /*TwitVim-A-g* +TwitVim-A-r twitvim.txt /*TwitVim-A-r* +TwitVim-A-t twitvim.txt /*TwitVim-A-t* +TwitVim-C-PageDown twitvim.txt /*TwitVim-C-PageDown* +TwitVim-C-PageUp twitvim.txt /*TwitVim-C-PageUp* +TwitVim-C-i 
twitvim.txt /*TwitVim-C-i* +TwitVim-C-o twitvim.txt /*TwitVim-C-o* +TwitVim-C-t twitvim.txt /*TwitVim-C-t* +TwitVim-Leader-@ twitvim.txt /*TwitVim-Leader-@* +TwitVim-Leader-C-r twitvim.txt /*TwitVim-Leader-C-r* +TwitVim-Leader-Leader twitvim.txt /*TwitVim-Leader-Leader* +TwitVim-Leader-S-r twitvim.txt /*TwitVim-Leader-S-r* +TwitVim-Leader-X twitvim.txt /*TwitVim-Leader-X* +TwitVim-Leader-d twitvim.txt /*TwitVim-Leader-d* +TwitVim-Leader-e twitvim.txt /*TwitVim-Leader-e* +TwitVim-Leader-g twitvim.txt /*TwitVim-Leader-g* +TwitVim-Leader-p twitvim.txt /*TwitVim-Leader-p* +TwitVim-Leader-r twitvim.txt /*TwitVim-Leader-r* +TwitVim-LongURL twitvim.txt /*TwitVim-LongURL* +TwitVim-add twitvim.txt /*TwitVim-add* +TwitVim-cURL twitvim.txt /*TwitVim-cURL* +TwitVim-contents twitvim.txt /*TwitVim-contents* +TwitVim-credits twitvim.txt /*TwitVim-credits* +TwitVim-delete twitvim.txt /*TwitVim-delete* +TwitVim-direct-message twitvim.txt /*TwitVim-direct-message* +TwitVim-goto twitvim.txt /*TwitVim-goto* +TwitVim-highlight twitvim.txt /*TwitVim-highlight* +TwitVim-history twitvim.txt /*TwitVim-history* +TwitVim-hotkeys twitvim.txt /*TwitVim-hotkeys* +TwitVim-inreplyto twitvim.txt /*TwitVim-inreplyto* +TwitVim-install twitvim.txt /*TwitVim-install* +TwitVim-intro twitvim.txt /*TwitVim-intro* +TwitVim-line-length twitvim.txt /*TwitVim-line-length* +TwitVim-login-base64 twitvim.txt /*TwitVim-login-base64* +TwitVim-manual twitvim.txt /*TwitVim-manual* +TwitVim-mappings twitvim.txt /*TwitVim-mappings* +TwitVim-next twitvim.txt /*TwitVim-next* +TwitVim-non-cURL twitvim.txt /*TwitVim-non-cURL* +TwitVim-previous twitvim.txt /*TwitVim-previous* +TwitVim-profile twitvim.txt /*TwitVim-profile* +TwitVim-refresh twitvim.txt /*TwitVim-refresh* +TwitVim-reply twitvim.txt /*TwitVim-reply* +TwitVim-reply-all twitvim.txt /*TwitVim-reply-all* +TwitVim-retweet twitvim.txt /*TwitVim-retweet* +TwitVim-ssl twitvim.txt /*TwitVim-ssl* +TwitVim-ssl-curl twitvim.txt /*TwitVim-ssl-curl* +TwitVim-ssl-perl 
twitvim.txt /*TwitVim-ssl-perl* +TwitVim-ssl-python twitvim.txt /*TwitVim-ssl-python* +TwitVim-ssl-ruby twitvim.txt /*TwitVim-ssl-ruby* +TwitVim-switch twitvim.txt /*TwitVim-switch* +TwitVim-timeline-commands twitvim.txt /*TwitVim-timeline-commands* +TwitVim-tips twitvim.txt /*TwitVim-tips* +TwitVim-update-commands twitvim.txt /*TwitVim-update-commands* +TwitVim-utility twitvim.txt /*TwitVim-utility* g:SuperTabCompletionContexts supertab.txt /*g:SuperTabCompletionContexts* g:SuperTabContextDefaultCompletionType supertab.txt /*g:SuperTabContextDefaultCompletionType* g:SuperTabDefaultCompletionType supertab.txt /*g:SuperTabDefaultCompletionType* g:SuperTabLongestHighlight supertab.txt /*g:SuperTabLongestHighlight* g:SuperTabMappingBackward supertab.txt /*g:SuperTabMappingBackward* g:SuperTabMappingForward supertab.txt /*g:SuperTabMappingForward* g:SuperTabMappingTabLiteral supertab.txt /*g:SuperTabMappingTabLiteral* g:SuperTabMidWordCompletion supertab.txt /*g:SuperTabMidWordCompletion* g:SuperTabRetainCompletionDuration supertab.txt /*g:SuperTabRetainCompletionDuration* g:snippets_dir snipMate.txt /*g:snippets_dir* g:snips_author snipMate.txt /*g:snips_author* g:tcommentMapLeader1 tComment.txt /*g:tcommentMapLeader1* g:tcommentMapLeader2 tComment.txt /*g:tcommentMapLeader2* g:tcommentMapLeaderOp1 tComment.txt /*g:tcommentMapLeaderOp1* g:tcommentMapLeaderOp2 tComment.txt /*g:tcommentMapLeaderOp2* g:tcommentOpModeExtra tComment.txt /*g:tcommentOpModeExtra* +hl-twitterLink twitvim.txt /*hl-twitterLink* +hl-twitterReply twitvim.txt /*hl-twitterReply* +hl-twitterTime twitvim.txt /*hl-twitterTime* +hl-twitterTitle twitvim.txt /*hl-twitterTitle* +hl-twitterUser twitvim.txt /*hl-twitterUser* i_CTRL-R_<Tab> snipMate.txt /*i_CTRL-R_<Tab>* list-snippets snipMate.txt /*list-snippets* multi_snip snipMate.txt /*multi_snip* snipMate snipMate.txt /*snipMate* snipMate-$# snipMate.txt /*snipMate-$#* snipMate-${#:} snipMate.txt /*snipMate-${#:}* snipMate-${#} snipMate.txt 
/*snipMate-${#}* snipMate-author snipMate.txt /*snipMate-author* snipMate-commands snipMate.txt /*snipMate-commands* snipMate-contact snipMate.txt /*snipMate-contact* snipMate-description snipMate.txt /*snipMate-description* snipMate-disadvantages snipMate.txt /*snipMate-disadvantages* snipMate-expandtab snipMate.txt /*snipMate-expandtab* snipMate-features snipMate.txt /*snipMate-features* snipMate-filename snipMate.txt /*snipMate-filename* snipMate-indenting snipMate.txt /*snipMate-indenting* snipMate-placeholders snipMate.txt /*snipMate-placeholders* snipMate-remap snipMate.txt /*snipMate-remap* snipMate-settings snipMate.txt /*snipMate-settings* snipMate-usage snipMate.txt /*snipMate-usage* snipMate.txt snipMate.txt /*snipMate.txt* snippet snipMate.txt /*snippet* snippet-syntax snipMate.txt /*snippet-syntax* snippets snipMate.txt /*snippets* supertab supertab.txt /*supertab* supertab-completioncontexts supertab.txt /*supertab-completioncontexts* supertab-contextdefault supertab.txt /*supertab-contextdefault* supertab-contextdiscover supertab.txt /*supertab-contextdiscover* supertab-contextexample supertab.txt /*supertab-contextexample* supertab-contexttext supertab.txt /*supertab-contexttext* supertab-defaultcompletion supertab.txt /*supertab-defaultcompletion* supertab-duration supertab.txt /*supertab-duration* supertab-forwardbackward supertab.txt /*supertab-forwardbackward* supertab-intro supertab.txt /*supertab-intro* supertab-longesthighlight supertab.txt /*supertab-longesthighlight* supertab-mappingtabliteral supertab.txt /*supertab-mappingtabliteral* supertab-midword supertab.txt /*supertab-midword* supertab-options supertab.txt /*supertab-options* supertab-usage supertab.txt /*supertab-usage* supertab.txt supertab.txt /*supertab.txt* tComment-Installation tComment.txt /*tComment-Installation* tComment-Key-Bindings tComment.txt /*tComment-Key-Bindings* tComment-Usage tComment.txt /*tComment-Usage* tComment-commands tComment.txt /*tComment-commands* 
tComment.txt tComment.txt /*tComment.txt* +twitvim-identi.ca twitvim.txt /*twitvim-identi.ca* +twitvim.txt twitvim.txt /*twitvim.txt* +twitvim_api_root twitvim.txt /*twitvim_api_root* +twitvim_bitly_key twitvim.txt /*twitvim_bitly_key* +twitvim_bitly_user twitvim.txt /*twitvim_bitly_user* +twitvim_browser_cmd twitvim.txt /*twitvim_browser_cmd* +twitvim_cert_insecure twitvim.txt /*twitvim_cert_insecure* +twitvim_cligs_key twitvim.txt /*twitvim_cligs_key* +twitvim_count twitvim.txt /*twitvim_count* +twitvim_enable_perl twitvim.txt /*twitvim_enable_perl* +twitvim_enable_python twitvim.txt /*twitvim_enable_python* +twitvim_enable_ruby twitvim.txt /*twitvim_enable_ruby* +twitvim_enable_tcl twitvim.txt /*twitvim_enable_tcl* +twitvim_login twitvim.txt /*twitvim_login* +twitvim_login_b64 twitvim.txt /*twitvim_login_b64* +twitvim_old_retweet twitvim.txt /*twitvim_old_retweet* +twitvim_proxy twitvim.txt /*twitvim_proxy* +twitvim_proxy_login twitvim.txt /*twitvim_proxy_login* +twitvim_proxy_login_b64 twitvim.txt /*twitvim_proxy_login_b64* +twitvim_retweet_format twitvim.txt /*twitvim_retweet_format* +twitvim_trim_login twitvim.txt /*twitvim_trim_login* +twitvim_urlborg_key twitvim.txt /*twitvim_urlborg_key* xml-plugin-callbacks xml-plugin.txt /*xml-plugin-callbacks* xml-plugin-html xml-plugin.txt /*xml-plugin-html* xml-plugin-mappings xml-plugin.txt /*xml-plugin-mappings* xml-plugin-settings xml-plugin.txt /*xml-plugin-settings* xml-plugin.txt xml-plugin.txt /*xml-plugin.txt* diff --git a/vim/doc/twitvim.txt b/vim/doc/twitvim.txt new file mode 100644 index 0000000..6dd5b7d --- /dev/null +++ b/vim/doc/twitvim.txt @@ -0,0 +1,1468 @@ +*twitvim.txt* Twitter client for Vim + + --------------------------------- + TwitVim: A Twitter client for Vim + --------------------------------- + +Author: Po Shan Cheah <[email protected]> + http://twitter.com/mortonfox + +License: The Vim License applies to twitvim.vim and twitvim.txt (see + |copyright|) except use "TwitVim" instead of "Vim". 
No warranty, + express or implied. Use at your own risk. + + +============================================================================== +1. Contents *TwitVim* *TwitVim-contents* + + 1. Contents...............................: |TwitVim-contents| + 2. Introduction...........................: |TwitVim-intro| + 3. Installation...........................: |TwitVim-install| + cURL...................................: |TwitVim-cURL| + twitvim.vim............................: |TwitVim-add| + twitvim_login..........................: |twitvim_login| + twitvim_proxy..........................: |twitvim_proxy| + twitvim_proxy_login....................: |twitvim_proxy_login| + twitvim_api_root.......................: |twitvim_api_root| + twitvim-identi.ca......................: |twitvim-identi.ca| + 3.1. Base64-Encoded Login.................: |TwitVim-login-base64| + twitvim_login_b64....................: |twitvim_login_b64| + twitvim_proxy_login_b64..............: |twitvim_proxy_login_b64| + 3.2. Alternatives to cURL.................: |TwitVim-non-cURL| + twitvim_enable_perl..................: |twitvim_enable_perl| + twitvim_enable_python................: |twitvim_enable_python| + twitvim_enable_ruby..................: |twitvim_enable_ruby| + twitvim_enable_tcl...................: |twitvim_enable_tcl| + 3.3. Using Twitter SSL API................: |TwitVim-ssl| + Twitter SSL via cURL.................: |TwitVim-ssl-curl| + twitvim_cert_insecure................: |twitvim_cert_insecure| + Twitter SSL via Perl interface.......: |TwitVim-ssl-perl| + Twitter SSL via Ruby interface.......: |TwitVim-ssl-ruby| + Twitter SSL via Python interface.....: |TwitVim-ssl-python| + 4. Manual.................................: |TwitVim-manual| + 4.1. 
Update Commands......................: |TwitVim-update-commands| + :PosttoTwitter.......................: |:PosttoTwitter| + :CPosttoTwitter......................: |:CPosttoTwitter| + :BPosttoTwitter......................: |:BPosttoTwitter| + :SendDMTwitter.......................: |:SendDMTwitter| + 4.2. Timeline Commands....................: |TwitVim-timeline-commands| + :UserTwitter.........................: |:UserTwitter| + twitvim_count........................: |twitvim_count| + :FriendsTwitter......................: |:FriendsTwitter| + :RepliesTwitter......................: |:RepliesTwitter| + :PublicTwitter.......................: |:PublicTwitter| + :DMTwitter...........................: |:DMTwitter| + :DMSentTwitter.......................: |:DMSentTwitter| + :ListTwitter.........................: |:ListTwitter| + :RetweetedToMeTwitter................: |:RetweetedToMeTwitter| + :RetweetedByMeTwitter................: |:RetweetedByMeTwitter| + :BackTwitter.........................: |:BackTwitter| + :ForwardTwitter......................: |:ForwardTwitter| + :RefreshTwitter......................: |:RefreshTwitter| + :NextTwitter.........................: |:NextTwitter| + :PreviousTwitter.....................: |:PreviousTwitter| + :SetLoginTwitter.....................: |:SetLoginTwitter| + :ResetLoginTwitter...................: |:ResetLoginTwitter| + 4.3. 
Mappings.............................: |TwitVim-mappings| + Alt-T................................: |TwitVim-A-t| + Ctrl-T...............................: |TwitVim-C-t| + Reply Feature........................: |TwitVim-reply| + Alt-R................................: |TwitVim-A-r| + <Leader>r............................: |TwitVim-Leader-r| + Reply to all Feature.................: |TwitVim-reply-all| + <Leader>Ctrl-R.......................: |TwitVim-Leader-C-r| + Retweet Feature......................: |TwitVim-retweet| + <Leader>R............................: |TwitVim-Leader-S-r| + Old-style retweets...................: |twitvim_old_retweet| + twitvim_retweet_format...............: |twitvim_retweet_format| + Direct Message Feature...............: |TwitVim-direct-message| + Alt-D................................: |TwitVim-A-d| + <Leader>d............................: |TwitVim-Leader-d| + Goto Feature.........................: |TwitVim-goto| + Alt-G................................: |TwitVim-A-g| + <Leader>g............................: |TwitVim-Leader-g| + twitvim_browser_cmd..................: |twitvim_browser_cmd| + LongURL Feature......................: |TwitVim-LongURL| + <Leader>e............................: |TwitVim-Leader-e| + User Profiles........................: |TwitVim-profile| + <Leader>p............................: |TwitVim-Leader-p| + In-reply-to..........................: |TwitVim-inreplyto| + <Leader>@............................: |TwitVim-Leader-@| + Delete...............................: |TwitVim-delete| + <Leader>X............................: |TwitVim-Leader-X| + Ctrl-O...............................: |TwitVim-C-o| + Ctrl-I...............................: |TwitVim-C-i| + Refresh..............................: |TwitVim-refresh| + <Leader><Leader>.....................: |TwitVim-Leader-Leader| + Next page............................: |TwitVim-next| + Ctrl-PageDown........................: |TwitVim-C-PageDown| + Previous page........................: 
|TwitVim-previous| + Ctrl-PageUp..........................: |TwitVim-C-PageUp| + 4.4. Utility Commands.....................: |TwitVim-utility| + :Tweetburner.........................: |:Tweetburner| + :ATweetburner........................: |:ATweetburner| + :PTweetburner........................: |:PTweetburner| + :Snipurl.............................: |:Snipurl| + :ASnipurl............................: |:ASnipurl| + :PSnipurl............................: |:PSnipurl| + :Metamark............................: |:Metamark| + :AMetamark...........................: |:AMetamark| + :PMetamark...........................: |:PMetamark| + :TinyURL.............................: |:TinyURL| + :ATinyURL............................: |:ATinyURL| + :PTinyURL............................: |:PTinyURL| + :BitLy...............................: |:BitLy| + twitvim_bitly_user...................: |twitvim_bitly_user| + twitvim_bitly_key....................: |twitvim_bitly_key| + :ABitLy..............................: |:ABitLy| + :PBitLy..............................: |:PBitLy| + :IsGd................................: |:IsGd| + :AIsGd...............................: |:AIsGd| + :PIsGd...............................: |:PIsGd| + :UrlBorg.............................: |:UrlBorg| + twitvim_urlborg_key..................: |twitvim_urlborg_key| + :AUrlBorg............................: |:AUrlBorg| + :PUrlBorg............................: |:PUrlBorg| + :Trim................................: |:Trim| + twitvim_trim_login...................: |twitvim_trim_login| + :ATrim...............................: |:ATrim| + :PTrim...............................: |:PTrim| + :Cligs...............................: |:Cligs| + twitvim_cligs_key....................: |twitvim_cligs_key| + :ACligs..............................: |:ACligs| + :PCligs..............................: |:PCligs| + :Zima................................: |:Zima| + :AZima...............................: |:AZima| + :PZima...............................: 
|:PZima| + :SearchTwitter.......................: |:SearchTwitter| + :RateLimitTwitter....................: |:RateLimitTwitter| + :ProfileTwitter......................: |:ProfileTwitter| + :LocationTwitter.....................: |:LocationTwitter| + 5. Timeline Highlighting..................: |TwitVim-highlight| + twitterUser............................: |hl-twitterUser| + twitterTime............................: |hl-twitterTime| + twitterTitle...........................: |hl-twitterTitle| + twitterLink............................: |hl-twitterLink| + twitterReply...........................: |hl-twitterReply| + 6. Tips and Tricks........................: |TwitVim-tips| + 6.1. Timeline Hotkeys.....................: |TwitVim-hotkeys| + 6.2. Switching between services...........: |TwitVim-switch| + 6.3. Line length in status line...........: |TwitVim-line-length| + 7. History................................: |TwitVim-history| + 8. Credits................................: |TwitVim-credits| + + +============================================================================== +2. Introduction *TwitVim-intro* + + TwitVim is a plugin that allows you to post to Twitter, a + microblogging service at http://www.twitter.com. + + Since version 0.2.19, TwitVim also supports other microblogging + services, such as identi.ca, that offer Twitter-compatible APIs. See + |twitvim_api_root| for information on configuring TwitVim for those + services. + + +============================================================================== +3. Installation *TwitVim-install* + + 1. Install cURL. *TwitVim-cURL* + + If you don't already have cURL on your system, download it from + http://curl.haxx.se/. Make sure that the curl executable is in a + directory listed in your PATH environment variable, or the equivalent + for your system. + + If you have the Perl, Python, Ruby, or Tcl interfaces, you may use one + of those instead of installing cURL. See |TwitVim-non-cURL| for + setup details. + + + 2. 
twitvim.vim *TwitVim-add* + + Add twitvim.vim to your plugins directory. The location depends on + your operating system. See |add-global-plugin| for details. + + If you installed from the Vimball (.vba) file, twitvim.vim should + already be in its correct place. + + + 3. twitvim_login *twitvim_login* + + Add the following to your vimrc: + + let twitvim_login = "USER:PASS" + + Replace USER with your Twitter user name and PASS with your Twitter + password. + + It is possible to avoid having your Twitter password in plaintext in + your vimrc. See |TwitVim-login-base64| for details. + + + If you haven't configured twitvim_login, the first TwitVim command + that needs Twitter authentication will prompt you to log into Twitter. + This alternative is provided to allow TwitVim use on a public, shared, + or non-secure system where storing passwords in the vimrc is not + advisable. + + If you make a mistake when entering your Twitter login information, + simply run |:SetLoginTwitter| to reenter your Twitter login + information or use |:ResetLoginTwitter| to remove the login + information so that the next TwitVim command will prompt you to log in + again. + + + 4. twitvim_proxy *twitvim_proxy* + + This step is only needed if you access the web through a HTTP proxy. + If you use a HTTP proxy, add the following to your vimrc: + + let twitvim_proxy = "proxyserver:proxyport" + + Replace proxyserver with the address of the HTTP proxy and proxyport + with the port number of the HTTP proxy. + + + 5. twitvim_proxy_login *twitvim_proxy_login* + + If the HTTP proxy requires authentication, add the following to your + vimrc: + + let twitvim_proxy_login = "proxyuser:proxypassword" + + Where proxyuser is your proxy user and proxypassword is your proxy + password. + + It is possible to avoid having your proxy password in plaintext in + your vimrc. See |TwitVim-login-base64| for details. + + + 6. 
twitvim_api_root *twitvim_api_root* + + This setting allows you to configure TwitVim to communicate with + servers other than twitter.com that implement a Twitter-compatible + API. + + *twitvim-identi.ca* + For instance, to use identi.ca instead of Twitter, add this to your + vimrc: + + let twitvim_api_root = "http://identi.ca/api" + + A server implementing a Twitter-compatible API may not support all of + Twitter's features, so some TwitVim commands may not work. + + +------------------------------------------------------------------------------ +3.1. Base64-Encoded Login *TwitVim-login-base64* + + For safety purposes, TwitVim allows you to specify your Twitter login + and proxy login information preencoded in base64. This is not truly + secure as it is not encryption but it can stop casual onlookers + from reading off your password when you edit your vimrc. + + *twitvim_login_b64* + To do that, set the following in your vimrc: + + let twitvim_login_b64 = "base64string" + + *twitvim_proxy_login_b64* + If your HTTP proxy needs authentication, set the following: + + let twitvim_proxy_login_b64 = "base64string" + + + Where base64string is your username:password encoded in base64. + + + An example: + + Let's say Joe User has a Twitter login of "joeuser" and a password of + "joepassword". His first step is to encode "joeuser:joepassword" in + Base64. He can either use a standalone utility to do that or, in a + pinch, he can do the encoding at websites such as the following: + http://makcoder.sourceforge.net/demo/base64.php + http://www.opinionatedgeek.com/dotnet/tools/Base64Encode/ + + The result is: am9ldXNlcjpqb2VwYXNzd29yZA== + + Then he adds the following to his vimrc: + + let twitvim_login_b64 = "am9ldXNlcjpqb2VwYXNzd29yZA==" + + And his setup is ready. + + +------------------------------------------------------------------------------ +3.2. 
Alternatives to cURL *TwitVim-non-cURL* + + TwitVim supports http networking through Vim's Perl, Python, Ruby, and + Tcl interfaces, so if you have any of those interfaces compiled into + your Vim program, you can use that instead of cURL. + + Generally, it is slightly faster to use one of those scripting + interfaces for networking because it avoids running an external + program. On Windows, it also avoids a brief taskbar flash when cURL + runs. + + To find out if you have those interfaces, use the |:version| command + and check the |+feature-list|. Then to enable this special http + networking code in TwitVim, add one of the following lines to your + vimrc: + + let twitvim_enable_perl = 1 + let twitvim_enable_python = 1 + let twitvim_enable_ruby = 1 + let twitvim_enable_tcl = 1 + + You can enable more than one scripting language but TwitVim will only + use the first one it finds. + + + 1. Perl interface *twitvim_enable_perl* + + To enable TwitVim's Perl networking code, add the following to your + vimrc: + + let twitvim_enable_perl = 1 + + TwitVim requires the MIME::Base64 and LWP::UserAgent modules. If you + have ActivePerl, these modules are included in the default + installation. + + + 2. Python interface *twitvim_enable_python* + + To enable TwitVim's Python networking code, add the following to your + vimrc: + + let twitvim_enable_python = 1 + + TwitVim requires the urllib, urllib2, and base64 modules. These + modules are in the Python standard library. + + + 3. Ruby interface *twitvim_enable_ruby* + + To enable TwitVim's Ruby networking code, add the following to your + vimrc: + + let twitvim_enable_ruby = 1 + + TwitVim requires the net/http, uri, and Base64 modules. These modules + are in the Ruby standard library. + + In addition, TwitVim requires a Vim patch to fix an if_ruby networking + problem. 
See the following message: + + http://www.mail-archive.com/[email protected]/msg03693.html + + and also Bram's correction to the patch: + + http://www.mail-archive.com/[email protected]/msg03713.html + + + 3. Tcl interface *twitvim_enable_tcl* + + To enable TwitVim's Tcl networking code, add the following to your + vimrc: + + let twitvim_enable_tcl = 1 + + TwitVim requires the http, uri, and base64 modules. uri and base64 are + in the Tcllib library so you may need to install that. See + http://tcllib.sourceforge.net/ + + If you have ActiveTcl 8.5, the default installation does not include + Tcllib. Run the following command from the shell to add Tcllib: + + teacup install tcllib + + +------------------------------------------------------------------------------ +3.3. Using Twitter SSL API *TwitVim-ssl* + + For added security, TwitVim can use the Twitter SSL API instead of the + regular Twitter API. You configure this by setting |twitvim_api_root| + to the https version of the URL: + + let twitvim_api_root = "https://twitter.com" + + For identi.ca: + + let twitvim_api_root = "https://identi.ca/api" + + There are certain pre-requisites, as explained below. + + + 1. Twitter SSL via cURL *TwitVim-ssl-curl* + + To use SSL via cURL, you need to install the SSL libraries and an + SSL-enabled build of cURL. + + *twitvim_cert_insecure* + Even after you've done that, cURL may complain about certificates that + failed verification. If you need to override certificate checking, set + twitvim_cert_insecure: + + let twitvim_cert_insecure = 1 + + + 2. Twitter SSL via Perl interface *TwitVim-ssl-perl* + + To use SSL via the TwitVim Perl interface (See |twitvim_enable_perl|), + you need to install the SSL libraries and the Crypt::SSLeay Perl + module. + + If you are using Twitter SSL over a proxy, do not set twitvim_proxy + and twitvim_proxy_login. 
Crypt::SSLeay gets proxy information from + the environment, so do this instead: + + let $HTTPS_PROXY="http://proxyserver:proxyport" + let $HTTPS_PROXY_USERNAME="user" + let $HTTPS_PROXY_PASSWORD="password" + + Alternatively, you can set those environment variables before starting + Vim. + + + 3. Twitter SSL via Ruby interface *TwitVim-ssl-ruby* + + To use SSL via Ruby, you need to install the SSL libraries and an + SSL-enabled build of Ruby. + + If Ruby produces the error "`write': Bad file descriptor" in http.rb, + then you need to check your certificates or override certificate + checking. See |twitvim_cert_insecure|. + + Set twitvim_proxy and twitvim_proxy_login as usual if using Twitter + SSL over a proxy. + + + 4. Twitter SSL via Python interface *TwitVim-ssl-python* + + To use SSL via Python, you need to install the SSL libraries and an + SSL-enabled build of Python. + + The Python interface does not yet support Twitter SSL over a proxy. + This is due to a missing feature in urllib2. + + + 5. Twitter SSL via TCL interface + + I do not know how to make this work with Twitter SSL yet. If you + succeed, let me know what you did. + + +============================================================================== +4. TwitVim Manual *TwitVim-manual* + +------------------------------------------------------------------------------ +4.1. Update Commands *TwitVim-update-commands* + + These commands post an update to your Twitter account. If the friends, + user, or public timeline is visible, TwitVim will insert the update + into the timeline view after posting it. + + :PosttoTwitter *:PosttoTwitter* + + This command will prompt you for a message and post it to Twitter. + + :CPosttoTwitter *:CPosttoTwitter* + + This command posts the current line in the current buffer to Twitter. + + :BPosttoTwitter *:BPosttoTwitter* + + This command posts the contents of the current buffer to Twitter. 
+ + :SendDMTwitter {username} *:SendDMTwitter* + + This command will prompt you for a direct message to send to user + {username}. + +------------------------------------------------------------------------------ +4.2. Timeline Commands *TwitVim-timeline-commands* + + These commands retrieve a Twitter timeline and display it in a special + Twitter buffer. TwitVim applies syntax highlighting to highlight + certain elements in the timeline view. See |TwitVim-highlight| for a + list of highlighting groups it uses. + + + :[count]UserTwitter *:UserTwitter* + :[count]UserTwitter {username} + + This command displays your Twitter timeline. + + If you specify a {username}, this command displays the timeline for + that user. + + If you specify [count], that number is used as the page number. For + example, :2UserTwitter displays the second page from your user + timeline. + + *twitvim_count* + You can configure the number of tweets returned by :UserTwitter by + setting twitvim_count. For example, + + let twitvim_count = 50 + + will make :UserTwitter return 50 tweets instead of the default of 20. + You can set twitvim_count to any integer from 1 to 200. + + + :[count]FriendsTwitter *:FriendsTwitter* + + This command displays your Twitter timeline with updates from friends + merged in. + + If you specify [count], that number is used as the page number. For + example, :2FriendsTwitter displays the second page from your friends + timeline. + + You can configure the number of tweets returned by :FriendsTwitter by + setting |twitvim_count|. + + + :[count]RepliesTwitter *:RepliesTwitter* + + This command displays a timeline of @-replies that you've received + from other Twitter users. + + If you specify [count], that number is used as the page number. For + example, :2RepliesTwitter displays the second page from your replies + timeline. + + + :PublicTwitter *:PublicTwitter* + + This command displays the public timeline. 
+ + + :[count]DMTwitter *:DMTwitter* + + This command displays direct messages that you've received. + + If you specify [count], that number is used as the page number. For + example, :2DMTwitter displays the second page from your direct + messages timeline. + + + :[count]DMSentTwitter *:DMSentTwitter* + + This command displays direct messages that you've sent. + + If you specify [count], that number is used as the page number. For + example, :2DMSentTwitter displays the second page from your direct + messages sent timeline. + + + :[count]ListTwitter {list} *:ListTwitter* + :[count]ListTwitter {user} {list} + + This command displays a Twitter list timeline. + + In the first form, {user} is assumed to be you so the command will + display a list of yours named {list}. + + In the second form, the command displays list {list} from user + {user}. + + If you specify [count], that number is used as the page number. For + example, :2ListTwitter list1 displays the second page from the list1 + list timeline. + + + :[count]RetweetedToMeTwitter *:RetweetedToMeTwitter* + + This command displays a timeline of retweets by others to you. + + If you specify [count], that number is used as the page number. For + example, :2RetweetedToMeTwitter displays the second page from the + retweets timeline. + + + :[count]RetweetedByMeTwitter *:RetweetedByMeTwitter* + + This command displays a timeline of retweets by you. + + If you specify [count], that number is used as the page number. For + example, :2RetweetedByMeTwitter displays the second page from the + retweets timeline. + + + :BackTwitter *:BackTwitter* + + This command takes you back to the previous timeline in the timeline + stack. TwitVim saves a limited number of timelines. This command + will display a warning if you attempt to go beyond the oldest saved + timeline. See |TwitVim-C-o|. + + + :ForwardTwitter *:ForwardTwitter* + + This command takes you to the next timeline in the timeline stack. 
+ It will display a warning if you attempt to go past the newest saved + timeline so this command can only be used after :BackTwitter. + See |TwitVim-C-i|. + + + :RefreshTwitter *:RefreshTwitter* + + This command refreshes the timeline. See |TwitVim-Leader-Leader|. + + + :NextTwitter *:NextTwitter* + + This command loads the next (older) page in the timeline. + See |TwitVim-C-PageDown|. + + + :PreviousTwitter *:PreviousTwitter* + + This command loads the previous (newer) page in the timeline. If the + timeline is on the first page, it issues a warning and doesn't do + anything. See |TwitVim-C-PageUp|. + + + :SetLoginTwitter *:SetLoginTwitter* + + This command prompts you for a Twitter username and password, which + will then be used for subsequent TwitVim commands. This is an + alternative to configuring |twitvim_login| in your vimrc. + + If you make a mistake when entering your Twitter login information, + simply rerun |:SetLoginTwitter| to reenter your Twitter login + information or use |:ResetLoginTwitter| to remove the login + information so that the next TwitVim command will prompt you to log in + again. + + + :ResetLoginTwitter *:ResetLoginTwitter* + + This command removes stored Twitter login information. The next + TwitVim command that needs Twitter authentication will then prompt you + log in again. + + +------------------------------------------------------------------------------ +4.3. Mappings *TwitVim-mappings* + + Alt-T *TwitVim-A-t* + Ctrl-T *TwitVim-C-t* + + In visual mode, Alt-T posts the highlighted text to Twitter. + + Ctrl-T is an alternative to the Alt-T mapping. If the menu bar is + enabled, Alt-T pulls down the Tools menu. So use Ctrl-T instead. + + + *TwitVim-reply* + Alt-R *TwitVim-A-r* + <Leader>r *TwitVim-Leader-r* + + This mapping is local to the timeline buffer. In the timeline buffer, + it starts composing an @-reply on the command line to the author of + the tweet on the current line. 
+ + Under Cygwin, Alt-R is not recognized so you can use <Leader>r as an + alternative. The <Leader> character defaults to \ (backslash) but see + |mapleader| for information on customizing that. + + + *TwitVim-reply-all* + <Leader>Ctrl-R *TwitVim-Leader-C-r* + + This mapping is local to the timeline buffer. It starts composing a + reply to all, i.e. a reply to the tweet author and also to everyone + mentioned in @-replies on the current line. + + + *TwitVim-retweet* + <Leader>R *TwitVim-Leader-S-r* + + This mapping (Note: uppercase 'R' instead of 'r'.) is local to the + timeline buffer. It is similar to the retweet feature in popular + Twitter clients. In the timeline buffer, it retweets the current line. + + + *twitvim_old_retweet* + If you prefer old-style retweets, add this to your vimrc: + + let twitvim_old_retweet = 1 + + The difference is an old-style retweet does not use the retweet API. + Instead, it sends the current line to the command line so that you can + repost it as a new tweet and optionally edit it or add your own + comments. + + *twitvim_retweet_format* + If you use old-style retweets, you can configure the retweet format. + By default, TwitVim retweets tweets in the following format: + + RT @user: text of the tweet + + You can customize the retweet format by adding the following to your + vimrc, for example: + + let twitvim_retweet_format = 'Retweet from %s: %t' + + let twitvim_retweet_format = '%t (retweeted from %s)' + + When you retweet a tweet, TwitVim will replace "%s" in + twitvim_retweet_format with the user name of the original poster and + "%t" with the text of the tweet. + + The default setting of twitvim_retweet_format is "RT %s: %t" + + + *TwitVim-direct-message* + Alt-D *TwitVim-A-d* + <Leader>d *TwitVim-Leader-d* + + This mapping is local to the timeline buffer. In the timeline buffer, + it starts composing a direct message on the command line to the author + of the tweet on the current line. 
+ + Under Cygwin, Alt-D is not recognized so you can use <Leader>d as an + alternative. The <Leader> character defaults to \ (backslash) but see + |mapleader| for information on customizing that. + + + *TwitVim-goto* + Alt-G *TwitVim-A-g* + <Leader>g *TwitVim-Leader-g* + + This mapping is local to the timeline and user profile buffers. It + launches the web browser with the URL at the cursor position. If you + visually select text before invoking this mapping, it launches the web + browser with the selected text as is. + + As a special case, if the cursor is on a word of the form @user or + user:, TwitVim will display that user's timeline in the timeline + buffer. This will not launch the web browser. + + In addition, if the cursor is on a word of the form #hashtag, TwitVim + will do a Twitter Search for that #hashtag. This too will not launch + the web browser. + + *twitvim_browser_cmd* + Before using this command, you need to tell TwitVim how to launch your + browser. For example, you can add the following to your vimrc: + + let twitvim_browser_cmd = 'firefox.exe' + + Of course, replace firefox.exe with the browser of your choice. + + + *TwitVim-LongURL* + <Leader>e *TwitVim-Leader-e* + + This mapping is local to the timeline and user profile buffers. It + calls the LongURL API (see http://longurl.org/) to expand the short + URL at the cursor position. A short URL is a URL from a URL shortening + service such as TinyURL, SnipURL, etc. Use this feature if you wish to + preview a URL before browsing to it with |TwitVim-goto|. + + If you visually select text before invoking this mapping, it calls the + LongURL API with the selected text as is. + + If successful, TwitVim will display the result from LongURL in the + message area. + + + *TwitVim-profile* + <Leader>p *TwitVim-Leader-p* + + This mapping is local to the timeline and user profile buffers. It + calls the Twitter API to retrieve user profile information (e.g. 
name, + location, bio, update count) for the user name at the cursor position. + It displays the information in a user profile buffer. + + If you visually select text before invoking this mapping, it uses the + selected text as is for the user name. + + See also |:ProfileTwitter|. + + + *TwitVim-inreplyto* + <Leader>@ *TwitVim-Leader-@* + + This mapping is local to the timeline buffer. If the current line is + an @-reply tweet, it calls the Twitter API to retrieve the tweet to + which this one is replying. Then it will display that predecessor + tweet below the current one. + + If there is no in-reply-to information, it will show a warning and do + nothing. + + This mapping is useful in the replies timeline. See |:RepliesTwitter|. + + + *TwitVim-delete* + <Leader>X *TwitVim-Leader-X* + + This mapping is local to the timeline buffer. The 'X' in the mapping + is uppercase. It calls the Twitter API to delete the tweet or message + on the current line. + + Note: You have to be the author of the tweet in order to delete it. + You can delete direct messages that you sent or received. + + + Ctrl-O *TwitVim-C-o* + + This mapping takes you to the previous timeline in the timeline stack. + See |:BackTwitter|. + + Ctrl-I *TwitVim-C-i* + + This mapping takes you to the next timeline in the timeline stack. + See |:ForwardTwitter|. + + + *TwitVim-refresh* + <Leader><Leader> *TwitVim-Leader-Leader* + + This mapping refreshes the timeline. See |:RefreshTwitter|. + + + *TwitVim-next* + Ctrl-PageDown *TwitVim-C-PageDown* + + This mapping loads the next (older) page in the timeline. + See |:NextTwitter|. + + + *TwitVim-previous* + Ctrl-PageUp *TwitVim-C-PageUp* + + This command loads the previous (newer) page in the timeline. If the + timeline is on the first page, it issues a warning and doesn't do + anything. See |:PreviousTwitter|. + + +------------------------------------------------------------------------------ +4.4. 
Utility Commands *TwitVim-utility* + + :Tweetburner *:Tweetburner* + :Tweetburner {url} + + Tweetburner is a URL forwarding and shortening service. See + http://tweetburner.com/ + + This command calls the Tweetburner API to get a short URL in place of + <url>. If {url} is not provided on the command line, the command will + prompt you to enter a URL. The short URL is then inserted into the + current buffer at the current position. + + :ATweetburner *:ATweetburner* + :ATweetburner {url} + + Same as :Tweetburner but appends, i.e. inserts after the current + position instead of at the current position, the short URL instead. + + :PTweetburner *:PTweetburner* + :PTweetburner {url} + + Same as :Tweetburner but prompts for a tweet on the command line with + the short URL already inserted. + + + :Snipurl *:Snipurl* + :Snipurl {url} + + SnipURL is a URL forwarding and shortening service. See + http://www.snipurl.com/ + + This command calls the SnipURL API to get a short URL in place of + <url>. If {url} is not provided on the command line, the command will + prompt you to enter a URL. The short URL is then inserted into the + current buffer at the current position. + + :ASnipurl *:ASnipurl* + :ASnipurl {url} + + Same as :Snipurl but appends, i.e. inserts after the current + position instead of at the current position, the short URL instead. + + :PSnipurl *:PSnipurl* + :PSnipurl {url} + + Same as :Snipurl but prompts for a tweet on the command line with + the short URL already inserted. + + + :Metamark *:Metamark* + :Metamark {url} + + Metamark is a URL forwarding and shortening service. See + http://metamark.net/ + + This command calls the Metamark API to get a short URL in place of + <url>. If {url} is not provided on the command line, the command will + prompt you to enter a URL. The short URL is then inserted into the + current buffer at the current position. + + :AMetamark *:AMetamark* + :AMetamark {url} + + Same as :Metamark but appends, i.e. 
inserts after the current + position instead of at the current position, the short URL instead. + + :PMetamark *:PMetamark* + :PMetamark {url} + + Same as :Metamark but prompts for a tweet on the command line with + the short URL already inserted. + + + :TinyURL *:TinyURL* + :TinyURL {url} + + TinyURL is a URL forwarding and shortening service. See + http://tinyurl.com + + This command calls the TinyURL API to get a short URL in place of + <url>. If {url} is not provided on the command line, the command will + prompt you to enter a URL. The short URL is then inserted into the + current buffer at the current position. + + :ATinyURL *:ATinyURL* + :ATinyURL {url} + + Same as :TinyURL but appends, i.e. inserts after the current + position instead of at the current position, the short URL instead. + + :PTinyURL *:PTinyURL* + :PTinyURL {url} + + Same as :TinyURL but prompts for a tweet on the command line with + the short URL already inserted. + + + :BitLy *:BitLy* + :BitLy {url} + + bit.ly is a URL forwarding and shortening service. See + http://bit.ly/ + + This command calls the bit.ly API to get a short URL in place of + <url>. If {url} is not provided on the command line, the command will + prompt you to enter a URL. The short URL is then inserted into the + current buffer at the current position. + + The bit.ly API requires a bit.ly login and a bit.ly API key. A default + login and key pair is provided with TwitVim and no configuration is + needed. However, if you wish to supply your own login and key to track + your bit.ly history and stats, visit + http://bit.ly/account/your_api_key to retrieve your API info and add + the following to your vimrc: + + *twitvim_bitly_user* + *twitvim_bitly_key* + let twitvim_bitly_user = "username" + let twitvim_bitly_key = "R_123456789" + + Replace username with your bit.ly login and R_123456789 with your + bit.ly API key. + + :ABitLy *:ABitLy* + :ABitLy {url} + + Same as :BitLy but appends, i.e. 
inserts after the current + position instead of at the current position, the short URL instead. + + :PBitLy *:PBitLy* + :PBitLy {url} + + Same as :BitLy but prompts for a tweet on the command line with + the short URL already inserted. + + + :IsGd *:IsGd* + :IsGd {url} + + is.gd is a URL forwarding and shortening service. See + http://is.gd + + This command calls the is.gd API to get a short URL in place of <url>. + If {url} is not provided on the command line, the command will prompt + you to enter a URL. The short URL is then inserted into the current + buffer at the current position. + + :AIsGd *:AIsGd* + :AIsGd {url} + + Same as :IsGd but appends, i.e. inserts after the current position + instead of at the current position, the short URL instead. + + :PIsGd *:PIsGd* + :PIsGd {url} + + Same as :IsGd but prompts for a tweet on the command line with the + short URL already inserted. + + + :UrlBorg *:UrlBorg* + :UrlBorg {url} + + urlBorg is a URL forwarding and shortening service. See + http://urlborg.com + + This command calls the urlBorg API to get a short URL in place of + <url>. If {url} is not provided on the command line, the command will + prompt you to enter a URL. The short URL is then inserted into the + current buffer at the current position. + + The urlBorg API requires an API key. A default API key is provided + with TwitVim and no configuration is needed. However, if you wish to + supply your own key in order to track your urlBorg history and stats, + visit http://urlborg.com/a/account/ to retrieve your API key and then + add the following to your vimrc: + + *twitvim_urlborg_key* + let twitvim_urlborg_key = "12345-6789" + + Replace 12345-6789 with your API key. + + :AUrlBorg *:AUrlBorg* + :AUrlBorg {url} + + Same as :UrlBorg but appends, i.e. inserts after the current position + instead of at the current position, the short URL instead. 
+ + :PUrlBorg *:PUrlBorg* + :PUrlBorg {url} + + Same as :UrlBorg but prompts for a tweet on the command line with the + short URL already inserted. + + + :Trim *:Trim* + :Trim {url} + + tr.im is a URL forwarding and shortening service. See http://tr.im/ + + This command calls the tr.im API to get a short URL in place of + <url>. If {url} is not provided on the command line, the command will + prompt you to enter a URL. The short URL is then inserted into the + current buffer at the current position. + + If you login to the tr.im API, tr.im will keep track + of URLs that you have shortened. In order to do that, add the + following to your vimrc: + + *twitvim_trim_login* + let twitvim_trim_login = "trimuser:trimpassword" + + Where trimuser and trimpassword are your tr.im account user name and + password. + + You may also specify trimuser:trimpassword as a base64 encoded string: + + let twitvim_trim_login = "base64string" + + See |TwitVim-login-base64| for information on generating base64 + strings. + + :ATrim *:ATrim* + :ATrim {url} + + Same as :Trim but appends, i.e. inserts after the current position + instead of at the current position, the short URL instead. + + :PTrim *:PTrim* + :PTrim {url} + + Same as :Trim but prompts for a tweet on the command line with the + short URL already inserted. + + + :Cligs *:Cligs* + :Cligs {url} + + Cligs is a URL forwarding and shortening service. See http://cli.gs/ + + This command calls the Cligs API to get a short URL in place of + <url>. If {url} is not provided on the command line, the command will + prompt you to enter a URL. The short URL is then inserted into the + current buffer at the current position. + + If you supply a Cligs API key, Cligs will keep track of URLs that you + have shortened. In order to do that, add the following to your vimrc: + + *twitvim_cligs_key* + let twitvim_cligs_key = "hexstring" + + where hexstring is the API key. 
You can get an API key by registering + for a user account at Cligs and then visiting http://cli.gs/user/api + + :ACligs *:ACligs* + :ACligs {url} + + Same as :Cligs but appends, i.e. inserts after the current position + instead of at the current position, the short URL instead. + + :PCligs *:PCligs* + :PCligs {url} + + Same as :Cligs but prompts for a tweet on the command line with the + short URL already inserted. + + + :Zima *:Zima* + :Zima {url} + + Zima is a URL forwarding and shortening service. See http://zi.ma/ + + This command calls the Zi.ma API to get a short URL in place of + <url>. If {url} is not provided on the command line, the command will + prompt you to enter a URL. The short URL is then inserted into the + current buffer at the current position. + + :AZima *:AZima* + :AZima {url} + + Same as :Zima but appends, i.e. inserts after the current position + instead of at the current position, the short URL instead. + + :PZima *:PZima* + :PZima {url} + + Same as :Zima but prompts for a tweet on the command line with the + short URL already inserted. + + + :[count]SearchTwitter *:SearchTwitter* + :[count]SearchTwitter {query} + + This command calls the Twitter Search API to search for {query}. If + {query} is not provided on the command line, the command will prompt + you for it. Search results are then displayed in the timeline buffer. + + All of the Twitter Search operators are supported implicitly. See + http://search.twitter.com/operators for a list of search operators. + + If you specify [count], that number is used as the page number. For + example, :2SearchTwitter hello displays the second page of search + results for the word hello. + + You can configure the number of tweets returned by :SearchTwitter by + setting |twitvim_count|. + + + :RateLimitTwitter *:RateLimitTwitter* + + This command calls the Twitter API to retrieve rate limit information. 
+ It shows the current hourly limit, how many API calls you have + remaining, and when your quota will be reset. You can use it to check + if you have been temporarily locked out of Twitter for hitting the + rate limit. This command does not work on identi.ca. + + + :ProfileTwitter {username} *:ProfileTwitter* + + This command calls the Twitter API to retrieve user profile + information (e.g. name, location, bio, update count) for the specified + user. It displays the information in a user profile buffer. + + See also |TwitVim-Leader-p|. + + + :LocationTwitter {location} *:LocationTwitter* + + This command calls the Twitter API to set the location field in your + profile. There is no mandatory format for the location. It could be a + zip code, a town, coordinates, or pretty much anything. + + For example: + :LocationTwitter 10027 + :LocationTwitter New York, NY, USA + :LocationTwitter 40.811583, -73.954486 + + +============================================================================== +5. Timeline Highlighting *TwitVim-highlight* + + TwitVim uses a number of highlighting groups to highlight certain + elements in the Twitter timeline views. See |:highlight| for details + on how to customize these highlighting groups. + + twitterUser *hl-twitterUser* + + The Twitter user name at the beginning of each line. + + twitterTime *hl-twitterTime* + + The time a Twitter update was posted. + + twitterTitle *hl-twitterTitle* + + The header at the top of the timeline view. + + twitterLink *hl-twitterLink* + + Link URLs and #hashtags in a Twitter status. + + twitterReply *hl-twitterReply* + + @-reply in a Twitter status. + + +============================================================================== +6. Tips and Tricks *TwitVim-tips* + + Here are a few tips for using TwitVim more efficiently. + + +------------------------------------------------------------------------------ +6.1. Timeline Hotkeys *TwitVim-hotkeys* + + TwitVim does not autorefresh. 
However, you can make refreshing your + timeline easier by mapping keys to the timeline commands. For example, + I use the <F8> key for that: + + nnoremap <F8> :FriendsTwitter<cr> + nnoremap <S-F8> :UserTwitter<cr> + nnoremap <A-F8> :RepliesTwitter<cr> + nnoremap <C-F8> :DMTwitter<cr> + + +------------------------------------------------------------------------------ +6.2. Switching between services *TwitVim-switch* + + I have user accounts on both Twitter and identi.ca. Here is what I + added to my vimrc to make it easy to switch between the two services + within the same TwitVim session: + + function! Switch_to_twitter() + let g:twitvim_api_root = "http://twitter.com" + + let g:twitvim_login_b64 = "Twitter Base64 login" + + FriendsTwitter + endfunction + + function! Switch_to_identica() + let g:twitvim_api_root = "http://identi.ca/api" + + let g:twitvim_login_b64 = "identi.ca Base64 login" + + FriendsTwitter + endfunction + + command! ToTwitter :call Switch_to_twitter() + command! ToIdentica :call Switch_to_identica() + + With that in place, I can use :ToTwitter and :ToIdentica to switch + between services. I added a call to FriendsTwitter at the end of each + function so that I'll have a fresh timeline view after switching. You + may also use this technique to switch between different user accounts + on the same service. + + +------------------------------------------------------------------------------ +6.3. Line length in status line *TwitVim-line-length* + + Add the following to your |'statusline'| to display the length of the + current line: + + %{strlen(getline('.'))} + + This is useful if you compose tweets in a separate buffer and post + them with |:CPosttoTwitter|. With the line length in your status line, + you will know when you've reached the 140-character boundary. + +============================================================================== +7. TwitVim History *TwitVim-history* + + 0.4.5 : 2009-12-20 * Prompt for login info if not configured. 
+ |:SetLoginTwitter| |:ResetLoginTwitter| + * Reintroduced old-style retweet via + |twitvim_old_retweet|. + 0.4.4 : 2009-12-13 * Upgraded bit.ly API support to version 2.0.1 + with configurable user login and key. + * Added support for Zima. |:Zima| + * Fixed :BackTwitter behavior when browsing + multiple lists. + * Added support for displaying retweets in + friends timeline. + * Use Twitter Retweet API to retweet. + * Added commands to display retweets to you or + by you. |:RetweetedToMeTwitter| + |:RetweetedByMeTwitter| + 0.4.3 : 2009-11-27 * Fixed some minor breakage in LongURL support. + * Added |:ListTwitter| + * Omit author's name from the list when doing a + reply to all. |TwitVim-reply-all| + 0.4.2 : 2009-06-22 * Bugfix: Reset syntax items in Twitter window. + * Bugfix: Show progress message before querying + for in-reply-to tweet. + * Added reply to all feature. |TwitVim-reply-all| + 0.4.1 : 2009-03-30 * Fixed a problem with usernames and search terms + that begin with digits. + 0.4.0 : 2009-03-09 * Added |:SendDMTwitter| to send direct messages + through API without relying on the "d user ..." + syntax. + * Modified Alt-D mapping in timeline to use + the :SendDMTwitter code. + * Added |:BackTwitter| and |:ForwardTwitter| + commands, Ctrl-O and Ctrl-I mappings to move back + and forth in the timeline stack. + * Improvements in window handling. TwitVim commands + will restore the cursor to the original window + when possible. + * Wrote some notes on using TwitVim with Twitter + SSL API. + * Added mapping to show predecessor tweet for an + @-reply. |TwitVim-inreplyto| + * Added mapping to delete a tweet or message. + |TwitVim-delete| + * Added commands and mappings to refresh the + timeline and load the next or previous page. + |TwitVim-refresh|, |TwitVim-next|, + |TwitVim-previous|. + 0.3.5 : 2009-01-30 * Added support for pagination and page length to + :SearchTwitter. + * Shortened default retweet prefix to "RT". 
+ 0.3.4 : 2008-11-11 * Added |twitvim_count| option to allow user to + configure the number of tweets returned by + :FriendsTwitter and :UserTwitter. + 0.3.3 : 2008-10-06 * Added support for Cligs. |:Cligs| + * Fixed a problem with not being able to unset + the proxy if using Tcl http. + 0.3.2 : 2008-09-30 * Added command to display rate limit info. + |:RateLimitTwitter| + * Improved error reporting for :UserTwitter. + * Added command and mapping to display user + profile information. |:ProfileTwitter| + |TwitVim-Leader-p| + * Added command for updating location. + |:LocationTwitter| + * Added support for tr.im. |:Trim| + * Fixed error reporting in Tcl http code. + 0.3.1 : 2008-09-18 * Added support for LongURL. |TwitVim-LongURL| + * Added support for posting multibyte/Unicode + tweets in cURL mode. + * Remove newlines from text before retweeting. + 0.3.0 : 2008-09-12 * Added support for http networking through Vim's + Perl, Python, Ruby, and Tcl interfaces, as + alternatives to cURL. |TwitVim-non-cURL| + * Removed UrlTea support. + 0.2.24 : 2008-08-28 * Added retweet feature. See |TwitVim-retweet| + 0.2.23 : 2008-08-25 * Support in_reply_to_status_id parameter. + * Added tip on line length in statusline. + * Report browser launch errors. + * Set syntax highlighting on every timeline refresh. + 0.2.22 : 2008-08-13 * Rewrote time conversion code in Vim script + so we don't need Perl or Python any more. + * Do not URL-encode digits 0 to 9. + 0.2.21 : 2008-08-12 * Added tips section to documentation. + * Use create_or_reuse instead of create in UrlBorg + API so that it will always generate the same + short URL for the same long URL. + * Added support for highlighting #hashtags and + jumping to Twitter Searches for #hashtags. + * Added Python code to convert Twitter timestamps + to local time and simplify them. + 0.2.20 : 2008-07-24 * Switched from Summize to Twitter Search. 
+ |:SearchTwitter| + 0.2.19 : 2008-07-23 * Added support for non-Twitter servers + implementing the Twitter API. This is for + identi.ca support. See |twitvim-identi.ca|. + 0.2.18 : 2008-07-14 * Added support for urlBorg API. |:UrlBorg| + 0.2.17 : 2008-07-11 * Added command to show DM Sent Timeline. + |:DMSentTwitter| + * Added support for pagination in Friends, User, + Replies, DM, and DM Sent timelines. + * Added support for bit.ly API and is.gd API. + |:BitLy| |:IsGd| + 0.2.16 : 2008-05-16 * Removed quotes around browser launch URL. + * Escape ! character in browser launch URL. + 0.2.15 : 2008-05-13 * Extend :UserTwitter and :FriendsTwitter to show + another user's timeline if argument supplied. + * Extend Alt-G mapping to jump to another user's + timeline if invoked over @user or user: + * Escape special Vim shell characters in URL when + launching web browser. + 0.2.14 : 2008-05-12 * Added support for Summize search API. + 0.2.13 : 2008-05-07 * Added mappings to launch web browser on URLs in + timeline. + 0.2.12 : 2008-05-05 * Allow user to specify Twitter login info and + proxy login info preencoded in base64. + |twitvim_login_b64| |twitvim_proxy_login_b64| + 0.2.11 : 2008-05-02 * Scroll to top in timeline window after adding + an update line. + * Add <Leader>r and <Leader>d mappings as + alternative to Alt-R and Alt-D because the + latter are not valid key combos under Cygwin. + 0.2.10 : 2008-04-25 * Shortened snipurl.com to snipr.com + * Added support for proxy authentication. + |twitvim_proxy_login| + * Handle Perl module load failure. Not that I + expect those modules to ever be missing. + 0.2.9 : 2008-04-23 * Added some status messages. + * Added menu items under Plugin menu. + * Allow Ctrl-T as an alternative to Alt-T to avoid + conflict with the menu bar. + * Added support for UrlTea API. + * Generalize URL encoding to all non-alpha chars. + 0.2.8 : 2008-04-22 * Encode URLs sent to URL-shortening services. 
+ 0.2.7 : 2008-04-21 * Add support for TinyURL API. |:TinyURL| + * Add quick direct message feature. + |TwitVim-direct-message| + 0.2.6 : 2008-04-15 * Delete Twitter buffer to the blackhole register + to avoid stepping on registers unnecessarily. + * Quote login and proxy arguments before sending to + cURL. + * Added support for SnipURL API and Metamark API. + |:Snipurl| |:Metamark| + 0.2.5 : 2008-04-14 * Escape the "+" character in sent tweets. + * Added Perl code to convert Twitter timestamps to + local time and simplify them. + * Fix for timestamp highlight when the "|" + character appears in a tweet. + 0.2.4 : 2008-04-13 * Use <q-args> in Tweetburner commands. + * Improve XML parsing so that order of elements + does not matter. + * Changed T mapping to Alt-T to avoid overriding + the |T| command. + 0.2.3 : 2008-04-12 * Added more Tweetburner commands. + 0.2.2 : 2008-04-11 * Added quick reply feature. + * Added Tweetburner support. |:Tweetburner| + * Changed client ident to "from twitvim". + 0.2.1 : 2008-04-10 * Bug fix for Chinese characters in timeline. + Thanks to Leiyue. + * Scroll up to newest tweet after refreshing + timeline. + * Changed Twitter window name to avoid unsafe + special characters and clashes with file names. + 0.2.0 : 2008-04-09 * Added views for public, friends, user timelines, + replies, and direct messages. + * Automatically insert user's posts into + public, friends, or user timeline, if visible. + * Added syntax highlighting for timeline view. + 0.1.2 : 2008-04-03 * Make plugin conform to guidelines in + |write-plugin|. + * Add help documentation. + 0.1.1 : 2008-04-01 * Add error reporting for cURL problems. + 0.1 : 2008-03-28 * Initial release. + + +============================================================================== +8. 
TwitVim Credits *TwitVim-credits* + + Thanks to Travis Jeffery, the author of the original VimTwitter script + (vimscript #2124), who came up with the idea of running cURL from Vim + to access the Twitter API. + + Techniques for managing the Twitter buffer were adapted from the NERD + Tree plugin (vimscript #1658) by Marty Grenfell. + + +============================================================================== +vim:tw=78:ts=8:ft=help:norl: diff --git a/vim/plugin/twitvim.vim b/vim/plugin/twitvim.vim new file mode 100644 index 0000000..af138e8 --- /dev/null +++ b/vim/plugin/twitvim.vim @@ -0,0 +1,2828 @@ +" ============================================================== +" TwitVim - Post to Twitter from Vim +" Based on Twitter Vim script by Travis Jeffery <[email protected]> +" +" Version: 0.4.5 +" License: Vim license. See :help license +" Language: Vim script +" Maintainer: Po Shan Cheah <[email protected]> +" Created: March 28, 2008 +" Last updated: December 26, 2009 +" +" GetLatestVimScripts: 2204 1 twitvim.vim +" ============================================================== + +" Load this module only once. +if exists('loaded_twitvim') + finish +endif +let loaded_twitvim = 1 + +" Avoid side-effects from cpoptions setting. +let s:save_cpo = &cpo +set cpo&vim + +" The extended character limit is 246. Twitter will display a tweet longer than +" 140 characters in truncated form with a link to the full tweet. If that is +" undesirable, set s:char_limit to 140. +let s:char_limit = 246 + +" Allow the user to override the API root, e.g. for identi.ca, which offers a +" Twitter-compatible API. +function! s:get_api_root() + return exists('g:twitvim_api_root') ? g:twitvim_api_root : "http://api.twitter.com/1" +endfunction + +" Allow user to set the format for retweets. +function! s:get_retweet_fmt() + return exists('g:twitvim_retweet_format') ? 
g:twitvim_retweet_format : "RT %s: %t" +endfunction + +" Allow user to enable Python networking code by setting twitvim_enable_python. +function! s:get_enable_python() + return exists('g:twitvim_enable_python') ? g:twitvim_enable_python : 0 +endfunction + +" Allow user to enable Perl networking code by setting twitvim_enable_perl. +function! s:get_enable_perl() + return exists('g:twitvim_enable_perl') ? g:twitvim_enable_perl : 0 +endfunction + +" Allow user to enable Ruby code by setting twitvim_enable_ruby. +function! s:get_enable_ruby() + return exists('g:twitvim_enable_ruby') ? g:twitvim_enable_ruby : 0 +endfunction + +" Allow user to enable Tcl code by setting twitvim_enable_tcl. +function! s:get_enable_tcl() + return exists('g:twitvim_enable_tcl') ? g:twitvim_enable_tcl : 0 +endfunction + +" Get proxy setting from twitvim_proxy in .vimrc or _vimrc. +" Format is proxysite:proxyport +function! s:get_proxy() + return exists('g:twitvim_proxy') ? g:twitvim_proxy : '' +endfunction + +" If twitvim_proxy_login exists, use that as the proxy login. +" Format is proxyuser:proxypassword +" If twitvim_proxy_login_b64 exists, use that instead. This is the proxy +" user:password in base64 encoding. +function! s:get_proxy_login() + if exists('g:twitvim_proxy_login_b64') && g:twitvim_proxy_login_b64 != '' + return g:twitvim_proxy_login_b64 + else + return exists('g:twitvim_proxy_login') ? g:twitvim_proxy_login : '' + endif +endfunction + +" Get twitvim_count, if it exists. This will be the number of tweets returned +" by :FriendsTwitter, :UserTwitter, and :SearchTwitter. +function! s:get_count() + if exists('g:twitvim_count') + if g:twitvim_count < 1 + return 1 + elseif g:twitvim_count > 200 + return 200 + else + return g:twitvim_count + endif + endif + return 0 +endfunction + +" Display an error message in the message area. +function! s:errormsg(msg) + redraw + echohl ErrorMsg + echomsg a:msg + echohl None +endfunction + +" Display a warning message in the message area. 
+function! s:warnmsg(msg) + redraw + echohl WarningMsg + echo a:msg + echohl None +endfunction + +" Get Twitter login info from twitvim_login in .vimrc or _vimrc. +" Format is username:password +" If twitvim_login_b64 exists, use that instead. This is the user:password +" in base64 encoding. +" Use this function if the API call doesn't require authentication but +" can use it if available. +function! s:get_twitvim_login_noerror() + if exists('g:twitvim_login_b64') && g:twitvim_login_b64 != '' + return g:twitvim_login_b64 + elseif exists('g:twitvim_login') && g:twitvim_login != '' + return g:twitvim_login + else + return '' + endif +endfunction + +" Reset login info. +function! s:reset_twitvim_login() + unlet! g:twitvim_login + unlet! g:twitvim_login_b64 +endfunction + +" Verify login info. This will be used to check whether a username and password +" pair entered by the user is a valid login. +" +" Returns 1 if login succeeded, 0 if login failed, <0 for other errors. +function! s:check_twitvim_login(user, password) + let login = a:user.':'.a:password + + redraw + echo "Logging into Twitter..." + + let url = s:get_api_root()."/account/verify_credentials.xml" + let [error, output] = s:run_curl(url, login, s:get_proxy(), s:get_proxy_login(), {}) + if error =~ '401' + return 0 + endif + + if error != '' + call s:errormsg("Error logging into Twitter: ".error) + return -1 + endif + + " The following check should not be required because Twitter is supposed to + " return a 401 HTTP status on login failure, but you never know with + " Twitter. + let error = s:xml_get_element(output, 'error') + if error =~ '\ccould not authenticate' + return 0 + endif + + if error != '' + call s:errormsg("Error logging into Twitter: ".error) + return -1 + endif + + redraw + echo "Twitter login succeeded." + + return 1 +endfunction + +" Ask user for Twitter login info. +" Returns user:password. Also saves it in g:twitvim_login for future use. 
+" Returns empty string if login canceled or failed. +function! s:prompt_twitvim_login() + let failed = 0 + + while 1 + call inputsave() + redraw + let user = input((failed ? 'Login failed. Try again. ' : 'Please log in. ')."Twitter username (Esc=exit): ") + call inputrestore() + + if user == '' + call s:warnmsg("Twitter login not set.") + return '' + endif + + call inputsave() + redraw + let pass = inputsecret("Twitter password (Esc=exit): ") + call inputrestore() + + if pass == '' + call s:warnmsg("Twitter login not set.") + return '' + endif + + let result = s:check_twitvim_login(user, pass) + if result < 0 + " Login didn't succeed or fail but there was some kind of error. + return '' + endif + + if result > 0 + " Login succeeded. + break + endif + + let failed = 1 + endwhile + + call s:reset_twitvim_login() + let g:twitvim_login = user.':'.pass + return g:twitvim_login +endfunction + +" Get Twitter login info from twitvim_login in .vimrc or _vimrc. +" Format is username:password +" If twitvim_login_b64 exists, use that instead. This is the user:password +" in base64 encoding. +function! s:get_twitvim_login() + let login = s:get_twitvim_login_noerror() + if login == '' + + " Prompt user to enter login info if not already configured. + let login = s:prompt_twitvim_login() + if login == '' + return '' + endif + + " Beep and error-highlight + " execute "normal \<Esc>" + " call s:errormsg('Twitter login not set. Please add to .vimrc: let twitvim_login="USER:PASS"') + " return '' + endif + return login +endfunction + +let s:cached_login = '' +let s:cached_username = '' + +" Get Twitter user name by verifying login credentials +function! s:get_twitvim_username() + let login = s:get_twitvim_login() + if login == '' + return '' + endif + + " If we already got the info, no need to get it again. + if login == s:cached_login + return s:cached_username + endif + + redraw + echo "Verifying login credentials with Twitter..." 
+ + let url = s:get_api_root()."/account/verify_credentials.xml" + let [error, output] = s:run_curl(url, login, s:get_proxy(), s:get_proxy_login(), {}) + if error != '' + call s:errormsg("Error verifying login credentials: ".error) + return + endif + + let error = s:xml_get_element(output, 'error') + if error != '' + call s:errormsg("Error verifying login credentials: ".error) + return + endif + + redraw + echo "Twitter login credentials verified." + + let username = s:xml_get_element(output, 'screen_name') + + " Save it so we don't have to do it again unless the user switches to + " a different login. + let s:cached_login = login + let s:cached_username = username + + return username +endfunction + +" If set, twitvim_cert_insecure turns off certificate verification if using +" https Twitter API over cURL or Ruby. +function! s:get_twitvim_cert_insecure() + return exists('g:twitvim_cert_insecure') ? g:twitvim_cert_insecure : 0 +endfunction + +" === XML helper functions === + +" Get the content of the n'th element in a series of elements. +function! s:xml_get_nth(xmlstr, elem, n) + let matchres = matchlist(a:xmlstr, '<'.a:elem.'\%( [^>]*\)\?>\(.\{-}\)</'.a:elem.'>', -1, a:n) + return matchres == [] ? "" : matchres[1] +endfunction + +" Get the content of the specified element. +function! s:xml_get_element(xmlstr, elem) + return s:xml_get_nth(a:xmlstr, a:elem, 1) +endfunction + +" Remove any number of the specified element from the string. Used for removing +" sub-elements so that you can parse the remaining elements safely. +function! s:xml_remove_elements(xmlstr, elem) + return substitute(a:xmlstr, '<'.a:elem.'>.\{-}</'.a:elem.'>', '', "g") +endfunction + +" Get the attributes of the n'th element in a series of elements. +function! 
s:xml_get_attr_nth(xmlstr, elem, n) + let matchres = matchlist(a:xmlstr, '<'.a:elem.'\s\+\([^>]*\)>', -1, a:n) + if matchres == [] + return {} + endif + + let matchcount = 1 + let attrstr = matchres[1] + let attrs = {} + + while 1 + let matchres = matchlist(attrstr, '\(\w\+\)="\([^"]*\)"', -1, matchcount) + if matchres == [] + break + endif + + let attrs[matchres[1]] = matchres[2] + let matchcount += 1 + endwhile + + return attrs +endfunction + +" Get attributes of the specified element. +function! s:xml_get_attr(xmlstr, elem) + return s:xml_get_attr_nth(a:xmlstr, a:elem, 1) +endfunction + +" === End of XML helper functions === + +" === Time parser === + +" Convert date to Julian date. +function! s:julian(year, mon, mday) + let month = (a:mon - 1 + 10) % 12 + let year = a:year - month / 10 + return a:mday + 365 * year + year / 4 - year / 100 + year / 400 + ((month * 306) + 5) / 10 +endfunction + +" Calculate number of days since UNIX Epoch. +function! s:daygm(year, mon, mday) + return s:julian(a:year, a:mon, a:mday) - s:julian(1970, 1, 1) +endfunction + +" Convert date/time to UNIX time. (seconds since Epoch) +function! s:timegm(year, mon, mday, hour, min, sec) + return a:sec + a:min * 60 + a:hour * 60 * 60 + s:daygm(a:year, a:mon, a:mday) * 60 * 60 * 24 +endfunction + +" Convert abbreviated month name to month number. +function! s:conv_month(s) + let monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'] + for mon in range(len(monthnames)) + if monthnames[mon] == tolower(a:s) + return mon + 1 + endif + endfor + return 0 +endfunction + +function! s:timegm2(matchres, indxlist) + let args = [] + for i in a:indxlist + if i < 0 + let mon = s:conv_month(a:matchres[-i]) + if mon == 0 + return -1 + endif + let args = add(args, mon) + else + let args = add(args, a:matchres[i] + 0) + endif + endfor + return call('s:timegm', args) +endfunction + +" Parse a Twitter time string. +function! 
s:parse_time(str) + " This timestamp format is used by Twitter in timelines. + let matchres = matchlist(a:str, '^\w\+,\s\+\(\d\+\)\s\+\(\w\+\)\s\+\(\d\+\)\s\+\(\d\+\):\(\d\+\):\(\d\+\)\s\++0000$') + if matchres != [] + return s:timegm2(matchres, [3, -2, 1, 4, 5, 6]) + endif + + " This timestamp format is used by Twitter in response to an update. + let matchres = matchlist(a:str, '^\w\+\s\+\(\w\+\)\s\+\(\d\+\)\s\+\(\d\+\):\(\d\+\):\(\d\+\)\s\++0000\s\+\(\d\+\)$') + if matchres != [] + return s:timegm2(matchres, [6, -1, 2, 3, 4, 5]) + endif + + " This timestamp format is used by Twitter Search. + let matchres = matchlist(a:str, '^\(\d\+\)-\(\d\+\)-\(\d\+\)T\(\d\+\):\(\d\+\):\(\d\+\)Z$') + if matchres != [] + return s:timegm2(matchres, range(1, 6)) + endif + + " This timestamp format is used by Twitter Rate Limit. + let matchres = matchlist(a:str, '^\(\d\+\)-\(\d\+\)-\(\d\+\)T\(\d\+\):\(\d\+\):\(\d\+\)+00:00$') + if matchres != [] + return s:timegm2(matchres, range(1, 6)) + endif + + return -1 +endfunction + +" Convert the Twitter timestamp to local time and simplify it. +function s:time_filter(str) + if !exists("*strftime") + return a:str + endif + let t = s:parse_time(a:str) + return t < 0 ? a:str : strftime('%I:%M %p %b %d, %Y', t) +endfunction + +" === End of time parser === + +" === Networking code === + +function! s:url_encode_char(c) + let utf = iconv(a:c, &encoding, "utf-8") + if utf == "" + return a:c + else + let s = "" + for i in range(strlen(utf)) + let s .= printf("%%%02X", char2nr(utf[i])) + endfor + return s + endif +endfunction + +" URL-encode a string. +function! s:url_encode(str) + return substitute(a:str, '[^a-zA-Z0-9_-]', '\=s:url_encode_char(submatch(0))', 'g') +endfunction + +" Use curl to fetch a web page. +function! 
s:curl_curl(url, login, proxy, proxylogin, parms) + let error = "" + let output = "" + + let curlcmd = "curl -s -f -S " + + if s:get_twitvim_cert_insecure() + let curlcmd .= "-k " + endif + + if a:proxy != "" + let curlcmd .= '-x "'.a:proxy.'" ' + endif + + if a:proxylogin != "" + if stridx(a:proxylogin, ':') != -1 + let curlcmd .= '-U "'.a:proxylogin.'" ' + else + let curlcmd .= '-H "Proxy-Authorization: Basic '.a:proxylogin.'" ' + endif + endif + + if a:login != "" + if stridx(a:login, ':') != -1 + let curlcmd .= '-u "'.a:login.'" ' + else + let curlcmd .= '-H "Authorization: Basic '.a:login.'" ' + endif + endif + + for [k, v] in items(a:parms) + let curlcmd .= '-d "'.s:url_encode(k).'='.s:url_encode(v).'" ' + endfor + + let curlcmd .= '"'.a:url.'"' + + let output = system(curlcmd) + if v:shell_error != 0 + let error = output + endif + + return [ error, output ] +endfunction + +" Check if we can use Python. +function! s:check_python() + let can_python = 1 + python <<EOF +import vim +try: + import urllib + import urllib2 + import base64 +except: + vim.command('let can_python = 0') +EOF + return can_python +endfunction + +" Use Python to fetch a web page. +function! 
s:python_curl(url, login, proxy, proxylogin, parms) + let error = "" + let output = "" + python <<EOF +import urllib +import urllib2 +import base64 +import vim + +def make_base64(s): + if s.find(':') != -1: + s = base64.b64encode(s) + return s + +try: + url = vim.eval("a:url") + parms = vim.eval("a:parms") + req = parms == {} and urllib2.Request(url) or urllib2.Request(url, urllib.urlencode(parms)) + + login = vim.eval("a:login") + if login != "": + req.add_header('Authorization', 'Basic %s' % make_base64(login)) + + proxy = vim.eval("a:proxy") + if proxy != "": + req.set_proxy(proxy, 'http') + + proxylogin = vim.eval("a:proxylogin") + if proxylogin != "": + req.add_header('Proxy-Authorization', 'Basic %s' % make_base64(proxylogin)) + + f = urllib2.urlopen(req) + out = ''.join(f.readlines()) +except urllib2.HTTPError, (httperr): + vim.command("let error='%s'" % str(httperr).replace("'", "''")) +else: + vim.command("let output='%s'" % out.replace("'", "''")) +EOF + + return [ error, output ] +endfunction + +" Check if we can use Perl. +function! s:check_perl() + let can_perl = 1 + perl <<EOF +eval { + require MIME::Base64; + MIME::Base64->import; + + require LWP::UserAgent; + LWP::UserAgent->import; +}; + +if ($@) { + VIM::DoCommand('let can_perl = 0'); +} +EOF + return can_perl +endfunction + +" Use Perl to fetch a web page. +function! s:perl_curl(url, login, proxy, proxylogin, parms) + let error = "" + let output = "" + + perl <<EOF +require MIME::Base64; +MIME::Base64->import; + +require LWP::UserAgent; +LWP::UserAgent->import; + +sub make_base64 { + my $s = shift; + $s =~ /:/ ? 
encode_base64($s) : $s; +} + +my $ua = LWP::UserAgent->new; + +my $url = VIM::Eval('a:url'); + +my $login = VIM::Eval('a:login'); +$login ne '' and $ua->default_header('Authorization' => 'Basic '.make_base64($login)); + +my $proxy = VIM::Eval('a:proxy'); +$proxy ne '' and $ua->proxy('http', "http://$proxy"); + +my $proxylogin = VIM::Eval('a:proxylogin'); +$proxylogin ne '' and $ua->default_header('Proxy-Authorization' => 'Basic '.make_base64($proxylogin)); + +my %parms = (); +my $keys = VIM::Eval('keys(a:parms)'); +for $k (split(/\n/, $keys)) { + $parms{$k} = VIM::Eval("a:parms['$k']"); +} + +my $response = %parms ? $ua->post($url, \%parms) : $ua->get($url); +if ($response->is_success) { + my $output = $response->content; + $output =~ s/'/''/g; + VIM::DoCommand("let output ='$output'"); +} +else { + my $error = $response->status_line; + $error =~ s/'/''/g; + VIM::DoCommand("let error ='$error'"); +} +EOF + + return [ error, output ] +endfunction + +" Check if we can use Ruby. +" +" Note: Before the networking code will function in Ruby under Windows, you +" need the patch from here: +" http://www.mail-archive.com/[email protected]/msg03693.html +" +" and Bram's correction to the patch from here: +" http://www.mail-archive.com/[email protected]/msg03713.html +" +function! s:check_ruby() + let can_ruby = 1 + ruby <<EOF +begin + require 'net/http' + require 'net/https' + require 'uri' + require 'Base64' +rescue LoadError + VIM.command('let can_ruby = 0') +end +EOF + return can_ruby +endfunction + +" Use Ruby to fetch a web page. +function! s:ruby_curl(url, login, proxy, proxylogin, parms) + let error = "" + let output = "" + + ruby <<EOF +require 'net/http' +require 'net/https' +require 'uri' +require 'Base64' + +def make_base64(s) + s =~ /:/ ? Base64.encode64(s) : s +end + +def parse_user_password(s) + (s =~ /:/ ? 
s : Base64.decode64(s)).split(':', 2) +end + +url = URI.parse(VIM.evaluate('a:url')) +httpargs = [ url.host, url.port ] + +proxy = VIM.evaluate('a:proxy') +if proxy != '' + prox = URI.parse("http://#{proxy}") + httpargs += [ prox.host, prox.port ] +end + +proxylogin = VIM.evaluate('a:proxylogin') +if proxylogin != '' + httpargs += parse_user_password(proxylogin) +end + +net = Net::HTTP.new(*httpargs) + +net.use_ssl = (url.scheme == 'https') + +# Disable certificate verification if user sets this variable. +cert_insecure = VIM.evaluate('s:get_twitvim_cert_insecure()') +if cert_insecure != '0' + net.verify_mode = OpenSSL::SSL::VERIFY_NONE +end + +parms = {} +keys = VIM.evaluate('keys(a:parms)') +keys.split(/\n/).each { |k| + parms[k] = VIM.evaluate("a:parms['#{k}']") +} + +res = net.start { |http| + path = "#{url.path}?#{url.query}" + if parms == {} + req = Net::HTTP::Get.new(path) + else + req = Net::HTTP::Post.new(path) + req.set_form_data(parms) + end + + login = VIM.evaluate('a:login') + if login != '' + req.add_field 'Authorization', "Basic #{make_base64(login)}" + end + + # proxylogin = VIM.evaluate('a:proxylogin') + # if proxylogin != '' + # req.add_field 'Proxy-Authorization', "Basic #{make_base64(proxylogin)}" + # end + + http.request(req) +} +case res +when Net::HTTPSuccess + output = res.body.gsub("'", "''") + VIM.command("let output='#{output}'") +else + error = "#{res.code} #{res.message}".gsub("'", "''") + VIM.command("let error='#{error}'") +end +EOF + + return [error, output] +endfunction + +" Check if we can use Tcl. +" +" Note: ActiveTcl 8.5 doesn't include Tcllib in the download. You need to run the following after installing ActiveTcl: +" +" teacup install tcllib +" +function! s:check_tcl() + let can_tcl = 1 + tcl <<EOF +if [catch { + package require http + package require uri + package require base64 +} result] { + ::vim::command "let can_tcl = 0" +} +EOF + return can_tcl +endfunction + +" Use Tcl to fetch a web page. +function! 
s:tcl_curl(url, login, proxy, proxylogin, parms) + let error = "" + let output = "" + + tcl << EOF +package require http +package require uri +package require base64 + +proc make_base64 {s} { + if { [string first : $s] >= 0 } { + return [base64::encode $s] + } + return $s +} + +set url [::vim::expr a:url] + +set headers [list] + +::http::config -proxyhost "" +set proxy [::vim::expr a:proxy] +if { $proxy != "" } { + array set prox [uri::split "http://$proxy"] + ::http::config -proxyhost $prox(host) + ::http::config -proxyport $prox(port) +} + +set proxylogin [::vim::expr a:proxylogin] +if { $proxylogin != "" } { + lappend headers "Proxy-Authorization" "Basic [make_base64 $proxylogin]" +} + +set login [::vim::expr a:login] +if { $login != "" } { + lappend headers "Authorization" "Basic [make_base64 $login]" +} + +set parms [list] +set keys [split [::vim::expr "keys(a:parms)"] "\n"] +if { [llength $keys] > 0 } { + foreach key $keys { + lappend parms $key [::vim::expr "a:parms\['$key']"] + } + set query [eval [concat ::http::formatQuery $parms]] + set res [::http::geturl $url -headers $headers -query $query] +} else { + set res [::http::geturl $url -headers $headers] +} + +upvar #0 $res state + +if { $state(status) == "ok" } { + if { [ ::http::ncode $res ] >= 400 } { + set error $state(http) + ::vim::command "let error = '$error'" + } else { + set output [string map {' ''} $state(body)] + ::vim::command "let output = '$output'" + } +} else { + if { [ info exists state(error) ] } { + set error [string map {' ''} $state(error)] + } else { + set error "$state(status) error" + } + ::vim::command "let error = '$error'" +} + +::http::cleanup $res +EOF + + return [error, output] +endfunction + +" Find out which method we can use to fetch a web page. +function! 
s:get_curl_method() + if !exists('s:curl_method') + let s:curl_method = 'curl' + + if s:get_enable_perl() && has('perl') + if s:check_perl() + let s:curl_method = 'perl' + endif + elseif s:get_enable_python() && has('python') + if s:check_python() + let s:curl_method = 'python' + endif + elseif s:get_enable_ruby() && has('ruby') + if s:check_ruby() + let s:curl_method = 'ruby' + endif + elseif s:get_enable_tcl() && has('tcl') + if s:check_tcl() + let s:curl_method = 'tcl' + endif + endif + endif + + return s:curl_method +endfunction + +function! s:run_curl(url, login, proxy, proxylogin, parms) + return s:{s:get_curl_method()}_curl(a:url, a:login, a:proxy, a:proxylogin, a:parms) +endfunction + +function! s:reset_curl_method() + if exists('s:curl_method') + unlet s:curl_method + endif +endfunction + +function! s:show_curl_method() + echo 'Method:' s:get_curl_method() +endfunction + +" For debugging. Reset networking method. +if !exists(":TwitVimResetMethod") + command TwitVimResetMethod :call <SID>reset_curl_method() +endif + +" For debugging. Show current networking method. +if !exists(":TwitVimShowMethod") + command TwitVimShowMethod :call <SID>show_curl_method() +endif + +" === End of networking code === + +" === Buffer stack code === + +" Each buffer record holds the following fields: +" +" buftype: Buffer type = dmrecv, dmsent, search, public, friends, user, replies, list +" user: For user buffers if other than current user +" list: List slug if displaying a Twitter list. +" page: Keep track of pagination. +" statuses: Tweet IDs. For use by in_reply_to_status_id +" inreplyto: IDs of predecessor messages for @-replies. +" dmids: Direct Message IDs. +" buffer: The buffer text + +let s:curbuffer = {} + +let s:bufstack = [] + +" Maximum items in the buffer stack. Adding a new item after this limit will +" get rid of the first item. +let s:bufstackmax = 10 + +" Buffer stack pointer. -1 if no items yet. 
May not point to the end of the +" list if user has gone back one or more buffers. +let s:bufstackptr = -1 + +" Add current buffer to the buffer stack at the next position after current. +" Remove all buffers after that. +function! s:add_buffer() + + " If stack is already full, remove the buffer at the bottom of the stack to + " make room. + if s:bufstackptr >= s:bufstackmax + call remove(s:bufstack, 0) + let s:bufstackptr -= 1 + endif + + let s:bufstackptr += 1 + + " Suppress errors because there may not be anything to remove after current + " position. + silent! call remove(s:bufstack, s:bufstackptr, -1) + + call add(s:bufstack, s:curbuffer) +endfunction + +" If current buffer is same type as the buffer at the buffer stack pointer then +" just copy it into the buffer stack. Otherwise, add it to buffer stack. +function! s:save_buffer() + if s:curbuffer == {} + return + endif + + " Save buffer contents and cursor position. + let twit_bufnr = bufwinnr('^'.s:twit_winname.'$') + if twit_bufnr > 0 + let curwin = winnr() + execute twit_bufnr . "wincmd w" + let s:curbuffer.buffer = getline(1, '$') + let s:curbuffer.view = winsaveview() + execute curwin . "wincmd w" + endif + + " If current buffer is the same type as buffer at the top of the stack, + " then just copy it. + if s:bufstackptr >= 0 && s:curbuffer.buftype == s:bufstack[s:bufstackptr].buftype && s:curbuffer.list == s:bufstack[s:bufstackptr].list && s:curbuffer.user == s:bufstack[s:bufstackptr].user && s:curbuffer.page == s:bufstack[s:bufstackptr].page + + let s:bufstack[s:bufstackptr] = deepcopy(s:curbuffer) + return + endif + + " Otherwise, push the current buffer onto the stack. + call s:add_buffer() +endfunction + +" Go back one buffer in the buffer stack. +function! s:back_buffer() + call s:save_buffer() + + if s:bufstackptr < 1 + call s:warnmsg("Already at oldest buffer. 
Can't go back further.") + return -1 + endif + + let s:bufstackptr -= 1 + let s:curbuffer = deepcopy(s:bufstack[s:bufstackptr]) + + call s:twitter_wintext_view(s:curbuffer.buffer, "timeline", s:curbuffer.view) + return 0 +endfunction + +" Go forward one buffer in the buffer stack. +function! s:fwd_buffer() + call s:save_buffer() + + if s:bufstackptr + 1 >= len(s:bufstack) + call s:warnmsg("Already at newest buffer. Can't go forward.") + return -1 + endif + + let s:bufstackptr += 1 + let s:curbuffer = deepcopy(s:bufstack[s:bufstackptr]) + + call s:twitter_wintext_view(s:curbuffer.buffer, "timeline", s:curbuffer.view) + return 0 +endfunction + +if !exists(":BackTwitter") + command BackTwitter :call <SID>back_buffer() +endif +if !exists(":ForwardTwitter") + command ForwardTwitter :call <SID>fwd_buffer() +endif + +" For debugging. Show the buffer stack. +function! s:show_bufstack() + for i in range(len(s:bufstack) - 1, 0, -1) + echo i.':' 'type='.s:bufstack[i].buftype 'user='.s:bufstack[i].user 'page='.s:bufstack[i].page + endfor +endfunction + +if !exists(":TwitVimShowBufstack") + command TwitVimShowBufstack :call <SID>show_bufstack() +endif + +" For debugging. Show curbuffer variable. +if !exists(":TwitVimShowCurbuffer") + command TwitVimShowCurbuffer :echo s:curbuffer +endif + +" === End of buffer stack code === + +" Add update to Twitter buffer if public, friends, or user timeline. +function! s:add_update(output) + if has_key(s:curbuffer, 'buftype') && (s:curbuffer.buftype == "public" || s:curbuffer.buftype == "friends" || s:curbuffer.buftype == "user" || s:curbuffer.buftype == "replies" || s:curbuffer.buftype == "list" || s:curbuffer.buftype == "retweeted_by_me" || s:curbuffer.buftype == "retweeted_to_me") + + " Parse the output from the Twitter update call. + let line = s:format_status_xml(a:output) + + " Add the status ID to the current buffer's statuses list. 
+ call insert(s:curbuffer.statuses, s:xml_get_element(a:output, 'id'), 3) + + " Add in-reply-to ID to current buffer's in-reply-to list. + call insert(s:curbuffer.inreplyto, s:xml_get_element(a:output, 'in_reply_to_status_id'), 3) + + let twit_bufnr = bufwinnr('^'.s:twit_winname.'$') + if twit_bufnr > 0 + let curwin = winnr() + execute twit_bufnr . "wincmd w" + set modifiable + call append(2, line) + normal 3G + set nomodifiable + execute curwin . "wincmd w" + endif + endif +endfunction + +" Count number of characters in a multibyte string. Use technique from +" :help strlen(). +function! s:mbstrlen(s) + return strlen(substitute(a:s, ".", "x", "g")) +endfunction + +" Common code to post a message to Twitter. +function! s:post_twitter(mesg, inreplyto) + let login = s:get_twitvim_login() + if login == '' + return -1 + endif + + let parms = {} + + " Add in_reply_to_status_id if status ID is available. + if a:inreplyto != 0 + let parms["in_reply_to_status_id"] = a:inreplyto + endif + + let mesg = a:mesg + + " Remove trailing newline. You see that when you visual-select an entire + " line. Don't let it count towards the tweet length. + let mesg = substitute(mesg, '\n$', '', "") + + " Convert internal newlines to spaces. + let mesg = substitute(mesg, '\n', ' ', "g") + + let mesglen = s:mbstrlen(mesg) + + " Check tweet length. Note that the tweet length should be checked before + " URL-encoding the special characters because URL-encoding increases the + " string length. + if mesglen > s:char_limit + call s:warnmsg("Your tweet has ".(mesglen - s:char_limit)." too many characters. It was not sent.") + elseif mesglen < 1 + call s:warnmsg("Your tweet was empty. It was not sent.") + else + redraw + echo "Sending update to Twitter..." 
+ + let url = s:get_api_root()."/statuses/update.xml?source=twitvim" + let parms["status"] = mesg + + let [error, output] = s:run_curl(url, login, s:get_proxy(), s:get_proxy_login(), parms) + + if error != '' + call s:errormsg("Error posting your tweet: ".error) + else + call s:add_update(output) + redraw + echo "Your tweet was sent. You used ".mesglen." characters." + endif + endif +endfunction + +" Prompt user for tweet and then post it. +" If initstr is given, use that as the initial input. +function! s:CmdLine_Twitter(initstr, inreplyto) + " Do this here too to check for twitvim_login. This is to avoid having the + " user type in the message only to be told that his configuration is + " incomplete. + let login = s:get_twitvim_login() + if login == '' + return -1 + endif + + call inputsave() + redraw + let mesg = input("Your Twitter: ", a:initstr) + call inputrestore() + call s:post_twitter(mesg, a:inreplyto) +endfunction + +" Extract the user name from a line in the timeline. +function! s:get_user_name(line) + let line = substitute(a:line, '^+ ', '', '') + let matchres = matchlist(line, '^\(\w\+\):') + return matchres != [] ? matchres[1] : "" +endfunction + +" This is for a local mapping in the timeline. Start an @-reply on the command +" line to the author of the tweet on the current line. +function! s:Quick_Reply() + let username = s:get_user_name(getline('.')) + if username != "" + " If the status ID is not available, get() will return 0 and + " post_twitter() won't add in_reply_to_status_id to the update. + call s:CmdLine_Twitter('@'.username.' ', get(s:curbuffer.statuses, line('.'))) + endif +endfunction + +" Extract all user names from a line in the timeline. Return the poster's name as well as names from all the @replies. +function! 
s:get_all_names(line) + let names = [] + let dictnames = {} + + let username = s:get_user_name(getline('.')) + if username != "" + " Add this to the beginning of the list because we want the tweet + " author to be the main addressee in the reply to all. + let names = [ username ] + let dictnames[tolower(username)] = 1 + endif + + let matchcount = 1 + while 1 + let matchres = matchlist(a:line, '@\(\w\+\)', -1, matchcount) + if matchres == [] + break + endif + let name = matchres[1] + " Don't add duplicate names. + if !has_key(dictnames, tolower(name)) + call add(names, name) + let dictnames[tolower(name)] = 1 + endif + let matchcount += 1 + endwhile + + return names +endfunction + +" Reply to everyone mentioned on a line in the timeline. +function! s:Reply_All() + let names = s:get_all_names(getline('.')) + + " Remove the author from the reply list so that he doesn't end up replying + " to himself. + let user = s:get_twitvim_username() + let names2 = [] + for name in names + if name != user + call add(names2, name) + endif + endfor + + let replystr = '@'.join(names2, ' @').' ' + + if names != [] + " If the status ID is not available, get() will return 0 and + " post_twitter() won't add in_reply_to_status_id to the update. + call s:CmdLine_Twitter(replystr, get(s:curbuffer.statuses, line('.'))) + endif +endfunction + +" This is for a local mapping in the timeline. Start a direct message on the +" command line to the author of the tweet on the current line. +function! s:Quick_DM() + let username = s:get_user_name(getline('.')) + if username != "" + " call s:CmdLine_Twitter('d '.username.' ', 0) + call s:send_dm(username, '') + endif +endfunction + +" Allow user to switch to old-style retweets by setting twitvim_old_retweet. +function! s:get_old_retweet() + return exists('g:twitvim_old_retweet') ? g:twitvim_old_retweet : 0 +endfunction + +" Extract the tweet text from a timeline buffer line. +function! 
s:get_tweet(line) + let line = substitute(a:line, '^\w\+:\s\+', '', '') + let line = substitute(line, '\s\+|[^|]\+|$', '', '') + + " Remove newlines. + let line = substitute(line, "\n", '', 'g') + + return line +endfunction + +" Retweet is for replicating a tweet from another user. +function! s:Retweet() + let line = getline('.') + let username = s:get_user_name(line) + if username != "" + let retweet = substitute(s:get_retweet_fmt(), '%s', '@'.username, '') + let retweet = substitute(retweet, '%t', s:get_tweet(line), '') + call s:CmdLine_Twitter(retweet, 0) + endif +endfunction + +" Use new-style retweet API to retweet a tweet from another user. +function! s:Retweet_2() + + " Do an old-style retweet if user has set twitvim_old_retweet. + if s:get_old_retweet() + call s:Retweet() + return + endif + + let status = get(s:curbuffer.statuses, line('.')) + if status == 0 + " Fall back to old-style retweeting if we can't get this tweet's status + " ID. + call s:Retweet() + return + endif + + let login = s:get_twitvim_login() + if login == '' + return -1 + endif + + let parms = {} + + " Force POST instead of GET. + let parms["dummy"] = "dummy1" + + let url = s:get_api_root()."/statuses/retweet/".status.".xml" + + redraw + echo "Retweeting..." + + let [error, output] = s:run_curl(url, login, s:get_proxy(), s:get_proxy_login(), parms) + if error != '' + call s:errormsg("Error retweeting: ".error) + else + call s:add_update(output) + redraw + echo "Retweeted." + endif +endfunction + +" Show which tweet this one is replying to below the current line. +function! s:show_inreplyto() + let lineno = line('.') + + let inreplyto = get(s:curbuffer.inreplyto, lineno) + if inreplyto == 0 + call s:warnmsg("No in-reply-to information for current line.") + return + endif + + let login = s:get_twitvim_login() + if login == '' + return -1 + endif + + redraw + echo "Querying Twitter for in-reply-to tweet..." 
+ + let url = s:get_api_root()."/statuses/show/".inreplyto.".xml" + let [error, output] = s:run_curl(url, login, s:get_proxy(), s:get_proxy_login(), {}) + if error != '' + call s:errormsg("Error getting in-reply-to tweet: ".error) + return + endif + + let error = s:xml_get_element(output, 'error') + if error != '' + call s:errormsg("Error getting in-reply-to tweet: ".error) + return + endif + + let line = s:format_status_xml(output) + + " Add the status ID to the current buffer's statuses list. + call insert(s:curbuffer.statuses, s:xml_get_element(output, 'id'), lineno + 1) + + " Add in-reply-to ID to current buffer's in-reply-to list. + call insert(s:curbuffer.inreplyto, s:xml_get_element(output, 'in_reply_to_status_id'), lineno + 1) + + " Already in the correct buffer so no need to search or switch buffers. + set modifiable + call append(lineno, '+ '.line) + set nomodifiable + + redraw + echo "In-reply-to tweet found." +endfunction + +" Truncate a string. Add '...' to the end of string was longer than +" the specified number of characters. +function! s:strtrunc(s, len) + let slen = strlen(substitute(a:s, ".", "x", "g")) + let s = substitute(a:s, '^\(.\{,'.a:len.'}\).*$', '\1', '') + if slen > a:len + let s .= '...' + endif + return s +endfunction + +" Delete tweet or DM on current line. +function! s:do_delete_tweet() + let lineno = line('.') + + let isdm = (s:curbuffer.buftype == "dmrecv" || s:curbuffer.buftype == "dmsent") + let obj = isdm ? "message" : "tweet" + let uobj = isdm ? "Message" : "Tweet" + + let id = get(isdm ? s:curbuffer.dmids : s:curbuffer.statuses, lineno) + + let login = s:get_twitvim_login() + if login == '' + return -1 + endif + + " The delete API call requires POST, not GET, so we supply a fake parameter + " to force run_curl() to use POST. + let parms = {} + let parms["id"] = id + + let url = s:get_api_root().'/'.(isdm ? 
"direct_messages" : "statuses")."/destroy/".id.".xml" + let [error, output] = s:run_curl(url, login, s:get_proxy(), s:get_proxy_login(), parms) + if error != '' + call s:errormsg("Error deleting ".obj.": ".error) + return + endif + + let error = s:xml_get_element(output, 'error') + if error != '' + call s:errormsg("Error deleting ".obj.": ".error) + return + endif + + if isdm + call remove(s:curbuffer.dmids, lineno) + else + call remove(s:curbuffer.statuses, lineno) + call remove(s:curbuffer.inreplyto, lineno) + endif + + " Already in the correct buffer so no need to search or switch buffers. + set modifiable + normal dd + set nomodifiable + + redraw + echo uobj "deleted." +endfunction + +" Delete tweet or DM on current line. +function! s:delete_tweet() + let lineno = line('.') + + let isdm = (s:curbuffer.buftype == "dmrecv" || s:curbuffer.buftype == "dmsent") + let obj = isdm ? "message" : "tweet" + let uobj = isdm ? "Message" : "Tweet" + + let id = get(isdm ? s:curbuffer.dmids : s:curbuffer.statuses, lineno) + if id == 0 + call s:warnmsg("No erasable ".obj." on current line.") + return + endif + + call inputsave() + let answer = input('Delete "'.s:strtrunc(getline('.'), 40).'"? (y/n) ') + call inputrestore() + if answer == 'y' || answer == 'Y' + call s:do_delete_tweet() + else + redraw + echo uobj "not deleted." + endif +endfunction + +" Prompt user for tweet. +if !exists(":PosttoTwitter") + command PosttoTwitter :call <SID>CmdLine_Twitter('', 0) +endif + +nnoremenu Plugin.TwitVim.Post\ from\ cmdline :call <SID>CmdLine_Twitter('', 0)<cr> + +" Post current line to Twitter. +if !exists(":CPosttoTwitter") + command CPosttoTwitter :call <SID>post_twitter(getline('.'), 0) +endif + +nnoremenu Plugin.TwitVim.Post\ current\ line :call <SID>post_twitter(getline('.'), 0)<cr> + +" Post entire buffer to Twitter. +if !exists(":BPosttoTwitter") + command BPosttoTwitter :call <SID>post_twitter(join(getline(1, "$")), 0) +endif + +" Post visual selection to Twitter. 
+noremap <SID>Visual y:call <SID>post_twitter(@", 0)<cr> +noremap <unique> <script> <Plug>TwitvimVisual <SID>Visual +if !hasmapto('<Plug>TwitvimVisual') + vmap <unique> <A-t> <Plug>TwitvimVisual + + " Allow Ctrl-T as an alternative to Alt-T. + " Alt-T pulls down the Tools menu if the menu bar is enabled. + vmap <unique> <C-t> <Plug>TwitvimVisual +endif + +vmenu Plugin.TwitVim.Post\ selection <Plug>TwitvimVisual + +" Launch web browser with the given URL. +function! s:launch_browser(url) + if !exists('g:twitvim_browser_cmd') || g:twitvim_browser_cmd == '' + " Beep and error-highlight + execute "normal \<Esc>" + call s:errormsg('Browser cmd not set. Please add to .vimrc: let twitvim_browser_cmd="browsercmd"') + return -1 + endif + + let startcmd = has("win32") || has("win64") ? "!start " : "! " + let endcmd = has("unix") ? "&" : "" + + " Escape characters that have special meaning in the :! command. + let url = substitute(a:url, '!\|#\|%', '\\&', 'g') + + redraw + echo "Launching web browser..." + let v:errmsg = "" + silent! execute startcmd g:twitvim_browser_cmd url endcmd + if v:errmsg == "" + redraw + echo "Web browser launched." + else + call s:errormsg('Error launching browser: '.v:errmsg) + endif +endfunction + +" Launch web browser with the URL at the cursor position. If possible, this +" function will try to recognize a URL within the current word. Otherwise, +" it'll just use the whole word. +" If the cWORD happens to be @user or user:, show that user's timeline. +function! s:launch_url_cword() + let s = expand("<cWORD>") + + " Handle @-replies by showing that user's timeline. + let matchres = matchlist(s, '^@\(\w\+\)') + if matchres != [] + call s:get_timeline("user", matchres[1], 1) + return + endif + + " Handle username: at the beginning of the line by showing that user's + " timeline. 
+ let matchres = matchlist(s, '^\(\w\+\):$') + if matchres != [] + call s:get_timeline("user", matchres[1], 1) + return + endif + + " Handle #-hashtags by showing the Twitter Search for that hashtag. + let matchres = matchlist(s, '^\(#\w\+\)') + if matchres != [] + call s:get_summize(matchres[1], 1) + return + endif + + let s = substitute(s, '.*\<\(\(http\|https\|ftp\)://\S\+\)', '\1', "") + call s:launch_browser(s) +endfunction + +" Call LongURL API on a shorturl to expand it. +function! s:call_longurl(url) + redraw + echo "Sending request to LongURL..." + + let url = 'http://api.longurl.org/v1/expand?url='.s:url_encode(a:url) + let [error, output] = s:run_curl(url, '', s:get_proxy(), s:get_proxy_login(), {}) + if error != '' + call s:errormsg("Error calling LongURL API: ".error) + return "" + else + redraw + echo "Received response from LongURL." + + let longurl = s:xml_get_element(output, 'long_url') + if longurl != "" + return substitute(longurl, '<!\[CDATA\[\(.*\)]]>', '\1', '') + endif + + let errormsg = s:xml_get_element(output, 'error') + if errormsg != "" + call s:errormsg("LongURL error: ".errormsg) + return "" + endif + + call s:errormsg("Unknown response from LongURL: ".output) + return "" + endif +endfunction + +" Call LongURL API on the given string. If no string is provided, use the +" current word. In the latter case, this function will try to recognize a URL +" within the word. Otherwise, it'll just use the whole word. +function! s:do_longurl(s) + let s = a:s + if s == "" + let s = expand("<cWORD>") + let s = substitute(s, '.*\<\(\(http\|https\|ftp\)://\S\+\)', '\1', "") + endif + let result = s:call_longurl(s) + if result != "" + redraw + echo s.' expands to '.result + endif +endfunction + +" Get info on the given user. If no user is provided, use the current word and +" strip off the @ or : if the current word is @user or user:. +function! s:do_user_info(s) + let s = a:s + if s == '' + let s = expand("<cword>") + + " Handle @-replies. 
+ let matchres = matchlist(s, '^@\(\w\+\)') + if matchres != [] + let s = matchres[1] + else + " Handle username: at the beginning of the line. + let matchres = matchlist(s, '^\(\w\+\):$') + if matchres != [] + let s = matchres[1] + endif + endif + endif + + call s:get_user_info(s) +endfunction + +" Decode HTML entities. Twitter gives those to us a little weird. For example, +" a '<' character comes to us as &amp;lt; +function! s:convert_entity(str) + let s = a:str + let s = substitute(s, '&amp;', '\&', 'g') + let s = substitute(s, '&lt;', '<', 'g') + let s = substitute(s, '&gt;', '>', 'g') + let s = substitute(s, '&quot;', '"', 'g') + let s = substitute(s, '&#\(\d\+\);','\=nr2char(submatch(1))', 'g') + return s +endfunction + +let s:twit_winname = "Twitter_".localtime() + +" Set syntax highlighting in timeline window. +function! s:twitter_win_syntax(wintype) + " Beautify the Twitter window with syntax highlighting. + if has("syntax") && exists("g:syntax_on") + " Reset syntax items in case there are any predefined in the new buffer. + syntax clear + + " Twitter user name: from start of line to first colon. + syntax match twitterUser /^.\{-1,}:/ + + " Use the bars to recognize the time but hide the bars. + syntax match twitterTime /|[^|]\+|$/ contains=twitterTimeBar + syntax match twitterTimeBar /|/ contained + + " Highlight links in tweets. + syntax match twitterLink "\<http://\S\+" + syntax match twitterLink "\<https://\S\+" + syntax match twitterLink "\<ftp://\S\+" + + " An @-reply must be preceded by whitespace and ends at a non-word + " character. + syntax match twitterReply "\S\@<!@\w\+" + + " A #-hashtag must be preceded by whitespace and ends at a non-word + " character. + syntax match twitterLink "\S\@<!#\w\+" + + if a:wintype != "userinfo" + " Use the extra star at the end to recognize the title but hide the + " star. 
+ syntax match twitterTitle /^.\+\*$/ contains=twitterTitleStar + syntax match twitterTitleStar /\*$/ contained + endif + + highlight default link twitterUser Identifier + highlight default link twitterTime String + highlight default link twitterTimeBar Ignore + highlight default link twitterTitle Title + highlight default link twitterTitleStar Ignore + highlight default link twitterLink Underlined + highlight default link twitterReply Label + endif +endfunction + +" Switch to the Twitter window if there is already one or open a new window for +" Twitter. +" Returns 1 if new window created, 0 otherwise. +function! s:twitter_win(wintype) + let winname = a:wintype == "userinfo" ? s:user_winname : s:twit_winname + let newwin = 0 + + let twit_bufnr = bufwinnr('^'.winname.'$') + if twit_bufnr > 0 + execute twit_bufnr . "wincmd w" + else + let newwin = 1 + execute "new " . winname + setlocal noswapfile + setlocal buftype=nofile + setlocal bufhidden=delete + setlocal foldcolumn=0 + setlocal nobuflisted + setlocal nospell + + " Launch browser with URL in visual selection or at cursor position. + nnoremap <buffer> <silent> <A-g> :call <SID>launch_url_cword()<cr> + nnoremap <buffer> <silent> <Leader>g :call <SID>launch_url_cword()<cr> + vnoremap <buffer> <silent> <A-g> y:call <SID>launch_browser(@")<cr> + vnoremap <buffer> <silent> <Leader>g y:call <SID>launch_browser(@")<cr> + + " Get user info for current word or selection. + nnoremap <buffer> <silent> <Leader>p :call <SID>do_user_info("")<cr> + vnoremap <buffer> <silent> <Leader>p y:call <SID>do_user_info(@")<cr> + + " Call LongURL API on current word or selection. + nnoremap <buffer> <silent> <Leader>e :call <SID>do_longurl("")<cr> + vnoremap <buffer> <silent> <Leader>e y:call <SID>do_longurl(@")<cr> + + if a:wintype != "userinfo" + + " Quick reply feature for replying from the timeline. 
+ nnoremap <buffer> <silent> <A-r> :call <SID>Quick_Reply()<cr> + nnoremap <buffer> <silent> <Leader>r :call <SID>Quick_Reply()<cr> + + " Quick DM feature for direct messaging from the timeline. + nnoremap <buffer> <silent> <A-d> :call <SID>Quick_DM()<cr> + nnoremap <buffer> <silent> <Leader>d :call <SID>Quick_DM()<cr> + + " Retweet feature for replicating another user's tweet. + nnoremap <buffer> <silent> <Leader>R :call <SID>Retweet_2()<cr> + + " Reply to all feature. + nnoremap <buffer> <silent> <Leader><C-r> :call <SID>Reply_All()<cr> + + " Show in-reply-to for current tweet. + nnoremap <buffer> <silent> <Leader>@ :call <SID>show_inreplyto()<cr> + + " Delete tweet or message on current line. + nnoremap <buffer> <silent> <Leader>X :call <SID>delete_tweet()<cr> + + " Refresh timeline. + nnoremap <buffer> <silent> <Leader><Leader> :call <SID>RefreshTimeline()<cr> + + " Next page in timeline. + nnoremap <buffer> <silent> <C-PageDown> :call <SID>NextPageTimeline()<cr> + + " Previous page in timeline. + nnoremap <buffer> <silent> <C-PageUp> :call <SID>PrevPageTimeline()<cr> + + endif + + " Go back and forth through buffer stack. + nnoremap <buffer> <silent> <C-o> :call <SID>back_buffer()<cr> + nnoremap <buffer> <silent> <C-i> :call <SID>fwd_buffer()<cr> + endif + + call s:twitter_win_syntax(a:wintype) + return newwin +endfunction + +" Get a Twitter window and stuff text into it. If view is not an empty +" dictionary then restore the cursor position to the saved view. +function! s:twitter_wintext_view(text, wintype, view) + let curwin = winnr() + let newwin = s:twitter_win(a:wintype) + + set modifiable + + " Overwrite the entire buffer. + " Need to use 'silent' or a 'No lines in buffer' message will appear. + " Delete to the blackhole register "_ so that we don't affect registers. + silent %delete _ + call setline('.', a:text) + normal 1G + + set nomodifiable + + " Restore the saved view if provided. 
+ if a:view != {} + call winrestview(a:view) + endif + + " Go back to original window after updating buffer. If a new window is + " created then our saved curwin number is wrong so the best we can do is to + " take the user back to the last-accessed window using 'wincmd p'. + if newwin + wincmd p + else + execute curwin . "wincmd w" + endif +endfunction + +" Get a Twitter window and stuff text into it. +function! s:twitter_wintext(text, wintype) + call s:twitter_wintext_view(a:text, a:wintype, {}) +endfunction + +" Format XML status as a display line. +function! s:format_status_xml(item) + let item = a:item + + " Quick hack. Even though we're getting new-style retweets in the timeline + " XML, we'll still use the old-style retweet text from it. + let item = s:xml_remove_elements(item, 'retweeted_status') + + let user = s:xml_get_element(item, 'screen_name') + let text = s:convert_entity(s:xml_get_element(item, 'text')) + let pubdate = s:time_filter(s:xml_get_element(item, 'created_at')) + + return user.': '.text.' |'.pubdate.'|' +endfunction + +" Show a timeline from XML stream data. +function! s:show_timeline_xml(timeline, tline_name, username, page) + let matchcount = 1 + let text = [] + + " Index of first status will be 3 to match line numbers in timeline display. + let s:curbuffer.statuses = [0, 0, 0] + let s:curbuffer.inreplyto = [0, 0, 0] + + let s:curbuffer.dmids = [] + + " Construct page title. + + let title = substitute(a:tline_name, '^.', '\u&', '')." timeline" + if a:username != '' + let title .= " for ".a:username + endif + + " Special case titles for Retweets. + if a:tline_name == "retweeted_to_me" + let title = "Retweets by others" + elseif a:tline_name == "retweeted_by_me" + let title = "Retweets by you" + endif + + if a:page > 1 + let title .= ' (page '.a:page.')' + endif + + " The extra stars at the end are for the syntax highlighter to recognize + " the title. 
Then the syntax highlighter hides the stars by coloring them + " the same as the background. It is a bad hack. + call add(text, title.'*') + call add(text, repeat('=', s:mbstrlen(title)).'*') + + while 1 + let item = s:xml_get_nth(a:timeline, 'status', matchcount) + if item == "" + break + endif + + call add(s:curbuffer.statuses, s:xml_get_element(item, 'id')) + call add(s:curbuffer.inreplyto, s:xml_get_element(item, 'in_reply_to_status_id')) + + let line = s:format_status_xml(item) + call add(text, line) + + let matchcount += 1 + endwhile + call s:twitter_wintext(text, "timeline") +endfunction + +" Generic timeline retrieval function. +function! s:get_timeline(tline_name, username, page) + let gotparam = 0 + + if a:tline_name == "public" + " No authentication is needed for public timeline. + let login = '' + else + let login = s:get_twitvim_login() + if login == '' + return -1 + endif + endif + + " Twitter API allows you to specify a username for user_timeline to + " retrieve another user's timeline. + let user = a:username == '' ? '' : '/'.a:username + + let url_fname = (a:tline_name == "replies" || a:tline_name == "retweeted_to_me" || a:tline_name == "retweeted_by_me") ? a:tline_name.".xml" : a:tline_name == "friends" ? "home_timeline.xml" : a:tline_name."_timeline".user.".xml" + + " Support pagination. + if a:page > 1 + let url_fname .= '?page='.a:page + let gotparam = 1 + endif + + " Support count parameter in friends, user, and retweet timelines. + if a:tline_name == 'friends' || a:tline_name == 'user' || a:tline_name == 'retweeted_to_me' || a:tline_name == 'retweeted_by_me' + let tcount = s:get_count() + if tcount > 0 + let url_fname .= (gotparam ? '&' : '?').'count='.tcount + let gotparam = 1 + endif + endif + + redraw + echo "Sending" a:tline_name "timeline request to Twitter..." 
+ + let url = s:get_api_root()."/statuses/".url_fname + + let [error, output] = s:run_curl(url, login, s:get_proxy(), s:get_proxy_login(), {}) + + if error != '' + call s:errormsg("Error getting Twitter ".a:tline_name." timeline: ".error) + return + endif + + let error = s:xml_get_element(output, 'error') + if error != '' + call s:errormsg("Error getting Twitter ".a:tline_name." timeline: ".error) + return + endif + + call s:save_buffer() + let s:curbuffer = {} + call s:show_timeline_xml(output, a:tline_name, a:username, a:page) + let s:curbuffer.buftype = a:tline_name + let s:curbuffer.user = a:username + let s:curbuffer.list = '' + let s:curbuffer.page = a:page + redraw + + let foruser = a:username == '' ? '' : ' for user '.a:username + + " Uppercase the first letter in the timeline name. + echo substitute(a:tline_name, '^.', '\u&', '') "timeline updated".foruser."." +endfunction + +" Retrieve a Twitter list timeline. +function! s:get_list_timeline(username, listname, page) + let gotparam = 0 + + let login = s:get_twitvim_login_noerror() + " No login is no problem because the list statuses API is documented + " to not require authentication. However, you won't see tweets from + " protected timelines. + + let user = a:username + if user == '' + let user = s:get_twitvim_username() + if user == '' + call s:errormsg('Twitter login not set. Please specify a username.') + return -1 + endif + endif + + let url = "/".user."/lists/".a:listname."/statuses.xml" + + " Support pagination. + if a:page > 1 + let url .= '?page='.a:page + let gotparam = 1 + endif + + " Support count parameter. + let tcount = s:get_count() + if tcount > 0 + let url .= (gotparam ? '&' : '?').'count='.tcount + let gotparam = 1 + endif + + redraw + echo "Sending list timeline request to Twitter..." 
+ + let url = s:get_api_root().url + + let [error, output] = s:run_curl(url, login, s:get_proxy(), s:get_proxy_login(), {}) + + if error != '' + call s:errormsg("Error getting Twitter list timeline: ".error) + return + endif + + let error = s:xml_get_element(output, 'error') + if error != '' + call s:errormsg("Error getting Twitter list timeline: ".error) + return + endif + + call s:save_buffer() + let s:curbuffer = {} + call s:show_timeline_xml(output, "list", user."/".a:listname, a:page) + let s:curbuffer.buftype = "list" + let s:curbuffer.user = user + let s:curbuffer.list = a:listname + let s:curbuffer.page = a:page + redraw + + " Uppercase the first letter in the timeline name. + echo "List timeline updated for ".user."/".a:listname +endfunction + +" Show direct message sent or received by user. First argument should be 'sent' +" or 'received' depending on which timeline we are displaying. +function! s:show_dm_xml(sent_or_recv, timeline, page) + let matchcount = 1 + let text = [] + + "No status IDs in direct messages. + let s:curbuffer.statuses = [] + let s:curbuffer.inreplyto = [] + + " Index of first dmid will be 3 to match line numbers in timeline display. + let s:curbuffer.dmids = [0, 0, 0] + + let title = 'Direct messages '.a:sent_or_recv + + if a:page > 1 + let title .= ' (page '.a:page.')' + endif + + " The extra stars at the end are for the syntax highlighter to recognize + " the title. Then the syntax highlighter hides the stars by coloring them + " the same as the background. It is a bad hack. + call add(text, title.'*') + call add(text, repeat('=', s:mbstrlen(title)).'*') + + while 1 + let item = s:xml_get_nth(a:timeline, 'direct_message', matchcount) + if item == "" + break + endif + + call add(s:curbuffer.dmids, s:xml_get_element(item, 'id')) + + let user = s:xml_get_element(item, a:sent_or_recv == 'sent' ? 
'recipient_screen_name' : 'sender_screen_name') + let mesg = s:xml_get_element(item, 'text') + let date = s:time_filter(s:xml_get_element(item, 'created_at')) + + call add(text, user.": ".s:convert_entity(mesg).' |'.date.'|') + + let matchcount += 1 + endwhile + call s:twitter_wintext(text, "timeline") +endfunction + +" Get direct messages sent to or received by user. +function! s:Direct_Messages(mode, page) + let sent = (a:mode == "dmsent") + let s_or_r = (sent ? "sent" : "received") + + let login = s:get_twitvim_login() + if login == '' + return -1 + endif + + " Support pagination. + let pagearg = '' + if a:page > 1 + let pagearg = '?page='.a:page + endif + + redraw + echo "Sending direct messages ".s_or_r." timeline request to Twitter..." + + let url = s:get_api_root()."/direct_messages".(sent ? "/sent" : "").".xml".pagearg + + let [error, output] = s:run_curl(url, login, s:get_proxy(), s:get_proxy_login(), {}) + + if error != '' + call s:errormsg("Error getting Twitter direct messages ".s_or_r." timeline: ".error) + return + endif + + call s:save_buffer() + let s:curbuffer = {} + call s:show_dm_xml(s_or_r, output, a:page) + let s:curbuffer.buftype = a:mode + let s:curbuffer.user = '' + let s:curbuffer.list = '' + let s:curbuffer.page = a:page + redraw + echo "Direct messages ".s_or_r." timeline updated." +endfunction + +" Function to load a timeline from the given parameters. For use by refresh and +" next/prev pagination commands. +function! 
s:load_timeline(buftype, user, list, page) + if a:buftype == "public" || a:buftype == "friends" || a:buftype == "user" || a:buftype == "replies" || a:buftype == "retweeted_by_me" || a:buftype == "retweeted_to_me" + call s:get_timeline(a:buftype, a:user, a:page) + elseif a:buftype == "list" + call s:get_list_timeline(a:user, a:list, a:page) + elseif a:buftype == "dmsent" || a:buftype == "dmrecv" + call s:Direct_Messages(a:buftype, a:page) + elseif a:buftype == "search" + call s:get_summize(a:user, a:page) + endif +endfunction + +" Refresh the timeline buffer. +function! s:RefreshTimeline() + if s:curbuffer != {} + call s:load_timeline(s:curbuffer.buftype, s:curbuffer.user, s:curbuffer.list, s:curbuffer.page) + else + call s:warnmsg("No timeline buffer to refresh.") + endif +endfunction + +" Go to next page in timeline. +function! s:NextPageTimeline() + if s:curbuffer != {} + call s:load_timeline(s:curbuffer.buftype, s:curbuffer.user, s:curbuffer.list, s:curbuffer.page + 1) + else + call s:warnmsg("No timeline buffer.") + endif +endfunction + +" Go to previous page in timeline. +function! s:PrevPageTimeline() + if s:curbuffer != {} + if s:curbuffer.page <= 1 + call s:warnmsg("Timeline is already on first page.") + else + call s:load_timeline(s:curbuffer.buftype, s:curbuffer.user, s:curbuffer.list, s:curbuffer.page - 1) + endif + else + call s:warnmsg("No timeline buffer.") + endif +endfunction + +" Get a Twitter list. Need to do a little fiddling because the +" username argument is optional. +function! s:DoList(page, arg1, ...) + let user = '' + let list = a:arg1 + if a:0 > 0 + let user = a:arg1 + let list = a:1 + endif + call s:get_list_timeline(user, list, a:page) +endfunction + +if !exists(":PublicTwitter") + command PublicTwitter :call <SID>get_timeline("public", '', 1) +endif +if !exists(":FriendsTwitter") + command -count=1 FriendsTwitter :call <SID>get_timeline("friends", '', <count>) +endif +if !exists(":UserTwitter") + command -range=1 -nargs=? 
UserTwitter :call <SID>get_timeline("user", <q-args>, <count>) +endif +if !exists(":RepliesTwitter") + command -count=1 RepliesTwitter :call <SID>get_timeline("replies", '', <count>) +endif +if !exists(":DMTwitter") + command -count=1 DMTwitter :call <SID>Direct_Messages("dmrecv", <count>) +endif +if !exists(":DMSentTwitter") + command -count=1 DMSentTwitter :call <SID>Direct_Messages("dmsent", <count>) +endif +if !exists(":ListTwitter") + command -range=1 -nargs=+ ListTwitter :call <SID>DoList(<count>, <f-args>) +endif +if !exists(":RetweetedByMeTwitter") + command -count=1 RetweetedByMeTwitter :call <SID>get_timeline("retweeted_by_me", '', <count>) +endif +if !exists(":RetweetedToMeTwitter") + command -count=1 RetweetedToMeTwitter :call <SID>get_timeline("retweeted_to_me", '', <count>) +endif + +nnoremenu Plugin.TwitVim.-Sep1- : +nnoremenu Plugin.TwitVim.&Friends\ Timeline :call <SID>get_timeline("friends", '', 1)<cr> +nnoremenu Plugin.TwitVim.&User\ Timeline :call <SID>get_timeline("user", '', 1)<cr> +nnoremenu Plugin.TwitVim.&Replies\ Timeline :call <SID>get_timeline("replies", '', 1)<cr> +nnoremenu Plugin.TwitVim.&Direct\ Messages :call <SID>Direct_Messages("dmrecv", 1)<cr> +nnoremenu Plugin.TwitVim.Direct\ Messages\ &Sent :call <SID>Direct_Messages("dmsent", 1)<cr> +nnoremenu Plugin.TwitVim.&Public\ Timeline :call <SID>get_timeline("public", '', 1)<cr> + +nnoremenu Plugin.TwitVim.Retweeted\ &By\ Me :call <SID>get_timeline("retweeted_by_me", '', 1)<cr> +nnoremenu Plugin.TwitVim.Retweeted\ &To\ Me :call <SID>get_timeline("retweeted_to_me", '', 1)<cr> + +if !exists(":RefreshTwitter") + command RefreshTwitter :call <SID>RefreshTimeline() +endif +if !exists(":NextTwitter") + command NextTwitter :call <SID>NextPageTimeline() +endif +if !exists(":PreviousTwitter") + command PreviousTwitter :call <SID>PrevPageTimeline() +endif + +if !exists(":SetLoginTwitter") + command SetLoginTwitter :call <SID>prompt_twitvim_login() +endif +if !exists(":ResetLoginTwitter") + 
command ResetLoginTwitter :call <SID>reset_twitvim_login() +endif + +nnoremenu Plugin.TwitVim.-Sep2- : +nnoremenu Plugin.TwitVim.Set\ Twitter\ Login :call <SID>prompt_twitvim_login()<cr> +nnoremenu Plugin.TwitVim.Reset\ Twitter\ Login :call <SID>reset_twitvim_login()<cr> + + +" Send a direct message. +function! s:do_send_dm(user, mesg) + let login = s:get_twitvim_login() + if login == '' + return -1 + endif + + let mesg = a:mesg + + " Remove trailing newline. You see that when you visual-select an entire + " line. Don't let it count towards the message length. + let mesg = substitute(mesg, '\n$', '', "") + + " Convert internal newlines to spaces. + let mesg = substitute(mesg, '\n', ' ', "g") + + let mesglen = s:mbstrlen(mesg) + + " Check message length. Note that the message length should be checked + " before URL-encoding the special characters because URL-encoding increases + " the string length. + if mesglen > s:char_limit + call s:warnmsg("Your message has ".(mesglen - s:char_limit)." too many characters. It was not sent.") + elseif mesglen < 1 + call s:warnmsg("Your message was empty. It was not sent.") + else + redraw + echo "Sending message to ".a:user."..." + + let url = s:get_api_root()."/direct_messages/new.xml?source=twitvim" + let parms = { "user" : a:user, "text" : mesg } + + let [error, output] = s:run_curl(url, login, s:get_proxy(), s:get_proxy_login(), parms) + + if error != '' + call s:errormsg("Error sending your message: ".error) + else + redraw + echo "Your message was sent to ".a:user.". You used ".mesglen." characters." + endif + endif +endfunction + +" Send a direct message. Prompt user for message if not given. +function! s:send_dm(user, mesg) + if a:user == "" + call s:warnmsg("No recipient specified for direct message.") + return + endif + + let mesg = a:mesg + if mesg == "" + call inputsave() + let mesg = input("DM ".a:user.": ") + call inputrestore() + endif + + if mesg == "" + call s:warnmsg("Your message was empty. 
It was not sent.") + return + endif + + call s:do_send_dm(a:user, mesg) +endfunction + +if !exists(":SendDMTwitter") + command -nargs=1 SendDMTwitter :call <SID>send_dm(<q-args>, '') +endif + +" Call Twitter API to get rate limit information. +function! s:get_rate_limit() + let login = s:get_twitvim_login() + if login == '' + return -1 + endif + + redraw + echo "Querying Twitter for rate limit information..." + + let url = s:get_api_root()."/account/rate_limit_status.xml" + let [error, output] = s:run_curl(url, login, s:get_proxy(), s:get_proxy_login(), {}) + if error != '' + call s:errormsg("Error getting rate limit info: ".error) + return + endif + + let error = s:xml_get_element(output, 'error') + if error != '' + call s:errormsg("Error getting rate limit info: ".error) + return + endif + + let remaining = s:xml_get_element(output, 'remaining-hits') + let resettime = s:time_filter(s:xml_get_element(output, 'reset-time')) + let limit = s:xml_get_element(output, 'hourly-limit') + + redraw + echo "Rate limit: ".limit." Remaining: ".remaining." Reset at: ".resettime +endfunction + +if !exists(":RateLimitTwitter") + command RateLimitTwitter :call <SID>get_rate_limit() +endif + +" Set location field on Twitter profile. +function! s:set_location(loc) + let login = s:get_twitvim_login() + if login == '' + return -1 + endif + + redraw + echo "Setting location on Twitter profile..." 
+ + let url = s:get_api_root()."/account/update_location.xml" + let parms = { 'location' : a:loc } + + let [error, output] = s:run_curl(url, login, s:get_proxy(), s:get_proxy_login(), parms) + if error != '' + call s:errormsg("Error setting location: ".error) + return + endif + + let error = s:xml_get_element(output, 'error') + if error != '' + call s:errormsg("Error setting location: ".error) + return + endif + + redraw + echo "Location: ".s:xml_get_element(output, 'location') +endfunction + +if !exists(":LocationTwitter") + command -nargs=+ LocationTwitter :call <SID>set_location(<q-args>) +endif + +let s:user_winname = "TwitterUserInfo_".localtime() + +" Process/format the user information. +function! s:format_user_info(output) + let text = [] + let output = a:output + + let name = s:xml_get_element(output, 'name') + let screen = s:xml_get_element(output, 'screen_name') + call add(text, 'Name: '.screen.' ('.name.')') + + call add(text, 'Location: '.s:xml_get_element(output, 'location')) + call add(text, 'Website: '.s:xml_get_element(output, 'url')) + call add(text, 'Bio: '.s:xml_get_element(output, 'description')) + call add(text, '') + call add(text, 'Following: '.s:xml_get_element(output, 'friends_count')) + call add(text, 'Followers: '.s:xml_get_element(output, 'followers_count')) + call add(text, 'Updates: '.s:xml_get_element(output, 'statuses_count')) + call add(text, '') + + let status = s:xml_get_element(output, 'text') + let pubdate = s:time_filter(s:xml_get_element(output, 'created_at')) + call add(text, 'Status: '.s:convert_entity(status).' |'.pubdate.'|') + return text +endfunction + +" Call Twitter API to get user's info. +function! s:get_user_info(username) + let login = s:get_twitvim_login() + if login == '' + return -1 + endif + + if a:username == '' + call s:errormsg("Please specify a user name to retrieve info on.") + return + endif + + redraw + echo "Querying Twitter for user information..." 
+ + let url = s:get_api_root()."/users/show/".a:username.".xml" + let [error, output] = s:run_curl(url, login, s:get_proxy(), s:get_proxy_login(), {}) + if error != '' + call s:errormsg("Error getting user info: ".error) + return + endif + + let error = s:xml_get_element(output, 'error') + if error != '' + call s:errormsg("Error getting user info: ".error) + return + endif + + call s:twitter_wintext(s:format_user_info(output), "userinfo") + + redraw + echo "User information retrieved." +endfunction + +if !exists(":ProfileTwitter") + command -nargs=1 ProfileTwitter :call <SID>get_user_info(<q-args>) +endif + + +" Call Tweetburner API to shorten a URL. +function! s:call_tweetburner(url) + redraw + echo "Sending request to Tweetburner..." + + let [error, output] = s:run_curl('http://tweetburner.com/links', '', s:get_proxy(), s:get_proxy_login(), {'link[url]' : a:url}) + + if error != '' + call s:errormsg("Error calling Tweetburner API: ".error) + return "" + else + redraw + echo "Received response from Tweetburner." + return output + endif +endfunction + +" Call SnipURL API to shorten a URL. +function! s:call_snipurl(url) + redraw + echo "Sending request to SnipURL..." + + let url = 'http://snipr.com/site/snip?r=simple&link='.s:url_encode(a:url) + + let [error, output] = s:run_curl(url, '', s:get_proxy(), s:get_proxy_login(), {}) + + if error != '' + call s:errormsg("Error calling SnipURL API: ".error) + return "" + else + redraw + echo "Received response from SnipURL." + " Get rid of extraneous newline at the beginning of SnipURL's output. + return substitute(output, '^\n', '', '') + endif +endfunction + +" Call Metamark API to shorten a URL. +function! s:call_metamark(url) + redraw + echo "Sending request to Metamark..." 
+ + let [error, output] = s:run_curl('http://metamark.net/api/rest/simple', '', s:get_proxy(), s:get_proxy_login(), {'long_url' : a:url}) + + if error != '' + call s:errormsg("Error calling Metamark API: ".error) + return "" + else + redraw + echo "Received response from Metamark." + return output + endif +endfunction + +" Call TinyURL API to shorten a URL. +function! s:call_tinyurl(url) + redraw + echo "Sending request to TinyURL..." + + let url = 'http://tinyurl.com/api-create.php?url='.a:url + let [error, output] = s:run_curl(url, '', s:get_proxy(), s:get_proxy_login(), {}) + + if error != '' + call s:errormsg("Error calling TinyURL API: ".error) + return "" + else + redraw + echo "Received response from TinyURL." + return output + endif +endfunction + +" Get bit.ly username and api key if configured by the user. Otherwise, use a +" default username and api key. +function! s:get_bitly_key() + if exists('g:twitvim_bitly_user') && exists('g:twitvim_bitly_key') + return [ g:twitvim_bitly_user, g:twitvim_bitly_key ] + endif + return [ 'twitvim', 'R_a53414d2f36a90c3e189299c967e6efc' ] +endfunction + +" Call bit.ly API to shorten a URL. +function! s:call_bitly(url) + let [ user, key ] = s:get_bitly_key() + + redraw + echo "Sending request to bit.ly..." + + let url = 'http://api.bit.ly/shorten?version=2.0.1' + let url .= '&longUrl='.s:url_encode(a:url) + let url .= '&login='.user + let url .= '&apiKey='.key.'&format=xml&history=1' + let [error, output] = s:run_curl(url, '', s:get_proxy(), s:get_proxy_login(), {}) + + if error != '' + call s:errormsg("Error calling bit.ly API: ".error) + return "" + endif + + let status = s:xml_get_element(output, 'statusCode') + if status != 'OK' + let errorcode = s:xml_get_element(output, 'errorCode') + let errormsg = s:xml_get_element(output, 'errorMessage') + if errorcode == 0 + " For reasons unknown, bit.ly sometimes return two error codes and + " the first one is 0. 
+ let errorcode = s:xml_get_nth(output, 'errorCode', 2) + let errormsg = s:xml_get_nth(output, 'errorMessage', 2) + endif + call s:errormsg("Error from bit.ly: ".errorcode." ".errormsg) + return "" + endif + + let shorturl = s:xml_get_element(output, 'shortUrl') + redraw + echo "Received response from bit.ly." + return shorturl +endfunction + +" Call is.gd API to shorten a URL. +function! s:call_isgd(url) + redraw + echo "Sending request to is.gd..." + + let url = 'http://is.gd/api.php?longurl='.s:url_encode(a:url) + let [error, output] = s:run_curl(url, '', s:get_proxy(), s:get_proxy_login(), {}) + + if error != '' + call s:errormsg("Error calling is.gd API: ".error) + return "" + else + redraw + echo "Received response from is.gd." + return output + endif +endfunction + + +" Get urlBorg API key if configured by the user. Otherwise, use a default API +" key. +function! s:get_urlborg_key() + return exists('g:twitvim_urlborg_key') ? g:twitvim_urlborg_key : '26361-80ab' +endfunction + +" Call urlBorg API to shorten a URL. +function! s:call_urlborg(url) + let key = s:get_urlborg_key() + redraw + echo "Sending request to urlBorg..." + + let url = 'http://urlborg.com/api/'.key.'/create_or_reuse/'.s:url_encode(a:url) + let [error, output] = s:run_curl(url, '', s:get_proxy(), s:get_proxy_login(), {}) + + if error != '' + call s:errormsg("Error calling urlBorg API: ".error) + return "" + else + if output !~ '\c^http' + call s:errormsg("urlBorg error: ".output) + return "" + endif + + redraw + echo "Received response from urlBorg." + return output + endif +endfunction + + +" Get tr.im login info if configured by the user. +function! s:get_trim_login() + return exists('g:twitvim_trim_login') ? g:twitvim_trim_login : '' +endfunction + +" Call tr.im API to shorten a URL. +function! s:call_trim(url) + let login = s:get_trim_login() + + redraw + echo "Sending request to tr.im..." 
+ + let url = 'http://tr.im/api/trim_url.xml?url='.s:url_encode(a:url) + + let [error, output] = s:run_curl(url, login, s:get_proxy(), s:get_proxy_login(), {}) + + if error != '' + call s:errormsg("Error calling tr.im API: ".error) + return "" + endif + + let statusattr = s:xml_get_attr(output, 'status') + + let trimmsg = statusattr['code'].' '.statusattr['message'] + + if statusattr['result'] == "OK" + return s:xml_get_element(output, 'url') + elseif statusattr['result'] == "ERROR" + call s:errormsg("tr.im error: ".trimmsg) + return "" + else + call s:errormsg("Unknown result from tr.im: ".trimmsg) + return "" + endif +endfunction + +" Get Cligs API key if configured by the user. +function! s:get_cligs_key() + return exists('g:twitvim_cligs_key') ? g:twitvim_cligs_key : '' +endfunction + +" Call Cligs API to shorten a URL. +function! s:call_cligs(url) + let url = 'http://cli.gs/api/v1/cligs/create?appid=twitvim&url='.s:url_encode(a:url) + + let key = s:get_cligs_key() + if key != '' + let url .= '&key='.key + endif + + redraw + echo "Sending request to Cligs..." + + let [error, output] = s:run_curl(url, '', s:get_proxy(), s:get_proxy_login(), {}) + if error != '' + call s:errormsg("Error calling Cligs API: ".error) + return "" + endif + + redraw + echo "Received response from Cligs." + return output +endfunction + +" Call Zi.ma API to shorten a URL. +function! s:call_zima(url) + let url = "http://zi.ma/?module=ShortURL&file=Add&mode=API&url=".s:url_encode(a:url) + + redraw + echo "Sending request to Zi.ma..." + + let [error, output] = s:run_curl(url, '', s:get_proxy(), s:get_proxy_login(), {}) + if error != '' + call s:errormsg("Error calling Zi.ma API: ".error) + return "" + endif + + let error = s:xml_get_element(output, 'h3') + if error != '' + call s:errormsg("Error from Zi.ma: ".error) + return "" + endif + + redraw + echo "Received response from Zi.ma." 
+ return output +endfunction + +" Invoke URL shortening service to shorten a URL and insert it at the current +" position in the current buffer. +function! s:GetShortURL(tweetmode, url, shortfn) + let url = a:url + + " Prompt the user to enter a URL if not provided on :Tweetburner command + " line. + if url == "" + call inputsave() + let url = input("URL to shorten: ") + call inputrestore() + endif + + if url == "" + call s:warnmsg("No URL provided.") + return + endif + + let shorturl = call(function("s:".a:shortfn), [url]) + if shorturl != "" + if a:tweetmode == "cmdline" + call s:CmdLine_Twitter(shorturl." ", 0) + elseif a:tweetmode == "append" + execute "normal a".shorturl."\<esc>" + else + execute "normal i".shorturl." \<esc>" + endif + endif +endfunction + +if !exists(":Tweetburner") + command -nargs=? Tweetburner :call <SID>GetShortURL("insert", <q-args>, "call_tweetburner") +endif +if !exists(":ATweetburner") + command -nargs=? ATweetburner :call <SID>GetShortURL("append", <q-args>, "call_tweetburner") +endif +if !exists(":PTweetburner") + command -nargs=? PTweetburner :call <SID>GetShortURL("cmdline", <q-args>, "call_tweetburner") +endif + +if !exists(":Snipurl") + command -nargs=? Snipurl :call <SID>GetShortURL("insert", <q-args>, "call_snipurl") +endif +if !exists(":ASnipurl") + command -nargs=? ASnipurl :call <SID>GetShortURL("append", <q-args>, "call_snipurl") +endif +if !exists(":PSnipurl") + command -nargs=? PSnipurl :call <SID>GetShortURL("cmdline", <q-args>, "call_snipurl") +endif + +if !exists(":Metamark") + command -nargs=? Metamark :call <SID>GetShortURL("insert", <q-args>, "call_metamark") +endif +if !exists(":AMetamark") + command -nargs=? AMetamark :call <SID>GetShortURL("append", <q-args>, "call_metamark") +endif +if !exists(":PMetamark") + command -nargs=? PMetamark :call <SID>GetShortURL("cmdline", <q-args>, "call_metamark") +endif + +if !exists(":TinyURL") + command -nargs=? 
TinyURL :call <SID>GetShortURL("insert", <q-args>, "call_tinyurl") +endif +if !exists(":ATinyURL") + command -nargs=? ATinyURL :call <SID>GetShortURL("append", <q-args>, "call_tinyurl") +endif +if !exists(":PTinyURL") + command -nargs=? PTinyURL :call <SID>GetShortURL("cmdline", <q-args>, "call_tinyurl") +endif + +if !exists(":BitLy") + command -nargs=? BitLy :call <SID>GetShortURL("insert", <q-args>, "call_bitly") +endif +if !exists(":ABitLy") + command -nargs=? ABitLy :call <SID>GetShortURL("append", <q-args>, "call_bitly") +endif +if !exists(":PBitLy") + command -nargs=? PBitLy :call <SID>GetShortURL("cmdline", <q-args>, "call_bitly") +endif + +if !exists(":IsGd") + command -nargs=? IsGd :call <SID>GetShortURL("insert", <q-args>, "call_isgd") +endif +if !exists(":AIsGd") + command -nargs=? AIsGd :call <SID>GetShortURL("append", <q-args>, "call_isgd") +endif +if !exists(":PIsGd") + command -nargs=? PIsGd :call <SID>GetShortURL("cmdline", <q-args>, "call_isgd") +endif + +if !exists(":UrlBorg") + command -nargs=? UrlBorg :call <SID>GetShortURL("insert", <q-args>, "call_urlborg") +endif +if !exists(":AUrlBorg") + command -nargs=? AUrlBorg :call <SID>GetShortURL("append", <q-args>, "call_urlborg") +endif +if !exists(":PUrlBorg") + command -nargs=? PUrlBorg :call <SID>GetShortURL("cmdline", <q-args>, "call_urlborg") +endif + +if !exists(":Trim") + command -nargs=? Trim :call <SID>GetShortURL("insert", <q-args>, "call_trim") +endif +if !exists(":ATrim") + command -nargs=? ATrim :call <SID>GetShortURL("append", <q-args>, "call_trim") +endif +if !exists(":PTrim") + command -nargs=? PTrim :call <SID>GetShortURL("cmdline", <q-args>, "call_trim") +endif + +if !exists(":Cligs") + command -nargs=? Cligs :call <SID>GetShortURL("insert", <q-args>, "call_cligs") +endif +if !exists(":ACligs") + command -nargs=? ACligs :call <SID>GetShortURL("append", <q-args>, "call_cligs") +endif +if !exists(":PCligs") + command -nargs=? 
PCligs :call <SID>GetShortURL("cmdline", <q-args>, "call_cligs") +endif + +if !exists(":Zima") + command -nargs=? Zima :call <SID>GetShortURL("insert", <q-args>, "call_zima") +endif +if !exists(":AZima") + command -nargs=? AZima :call <SID>GetShortURL("append", <q-args>, "call_zima") +endif +if !exists(":PZima") + command -nargs=? PZima :call <SID>GetShortURL("cmdline", <q-args>, "call_zima") +endif + +" Parse and format search results from Twitter Search API. +function! s:show_summize(searchres, page) + let text = [] + let matchcount = 1 + + " Index of first status will be 3 to match line numbers in timeline display. + let s:curbuffer.statuses = [0, 0, 0] + let s:curbuffer.inreplyto = [0, 0, 0] + + let s:curbuffer.dmids = [] + + let channel = s:xml_remove_elements(a:searchres, 'entry') + let title = s:xml_get_element(channel, 'title') + + if a:page > 1 + let title .= ' (page '.a:page.')' + endif + + " The extra stars at the end are for the syntax highlighter to recognize + " the title. Then the syntax highlighter hides the stars by coloring them + " the same as the background. It is a bad hack. + call add(text, title.'*') + call add(text, repeat('=', strlen(title)).'*') + + while 1 + let item = s:xml_get_nth(a:searchres, 'entry', matchcount) + if item == "" + break + endif + + let title = s:xml_get_element(item, 'title') + let pubdate = s:time_filter(s:xml_get_element(item, 'updated')) + let sender = substitute(s:xml_get_element(item, 'uri'), 'http://twitter.com/', '', '') + + " Parse and save the status ID. + let status = substitute(s:xml_get_element(item, 'id'), '^.*:', '', '') + call add(s:curbuffer.statuses, status) + + call add(text, sender.": ".s:convert_entity(title).' |'.pubdate.'|') + + let matchcount += 1 + endwhile + call s:twitter_wintext(text, "timeline") +endfunction + +" Query Twitter Search API and retrieve results +function! s:get_summize(query, page) + redraw + echo "Sending search request to Twitter Search..." 
+ + let param = '' + + " Support pagination. + if a:page > 1 + let param .= 'page='.a:page.'&' + endif + + " Support count parameter in search results. + let tcount = s:get_count() + if tcount > 0 + let param .= 'rpp='.tcount.'&' + endif + + let url = 'http://search.twitter.com/search.atom?'.param.'q='.s:url_encode(a:query) + let [error, output] = s:run_curl(url, '', s:get_proxy(), s:get_proxy_login(), {}) + + if error != '' + call s:errormsg("Error querying Twitter Search: ".error) + return + endif + + call s:save_buffer() + let s:curbuffer = {} + call s:show_summize(output, a:page) + let s:curbuffer.buftype = "search" + + " Stick the query in here to differentiate between sets of search results. + let s:curbuffer.user = a:query + + let s:curbuffer.list = '' + let s:curbuffer.page = a:page + redraw + echo "Received search results from Twitter Search." +endfunction + +" Prompt user for Twitter Search query string if not entered on command line. +function! s:Summize(query, page) + let query = a:query + + " Prompt the user to enter a query if not provided on :SearchTwitter + " command line. + if query == "" + call inputsave() + let query = input("Search Twitter: ") + call inputrestore() + endif + + if query == "" + call s:warnmsg("No query provided for Twitter Search.") + return + endif + + call s:get_summize(query, a:page) +endfunction + +if !exists(":Summize") + command -range=1 -nargs=? Summize :call <SID>Summize(<q-args>, <count>) +endif +if !exists(":SearchTwitter") + command -range=1 -nargs=? 
SearchTwitter :call <SID>Summize(<q-args>, <count>) +endif + +let &cpo = s:save_cpo +finish + +" vim:set tw=0: diff --git a/vimrc b/vimrc index e14330b..f8c33f1 100644 --- a/vimrc +++ b/vimrc @@ -1,362 +1,376 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files requird workit +" +" TwitVim +" <F7>/<F8> - load timelines +" :Bpost... 
- post +" ,g - load user's timeline +" ,d - direct message +" ,@ - load the parent to this post +" :IsGd {url} - shorten the url syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme hornet " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... 
" move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " Y yanks to the end of the line nmap Y y$ " shortcuts for copying to clipboard nmap <leader>y "*y " copy the current line to the clipboard nmap <leader>Y "*yy nmap <leader>p "*p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . 
a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! 
and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" | if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading 
" PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 +" TwitVim +" http://vim.sourceforge.net/scripts/script.php?script_id=2204 +" Twitter/Identica client for vim +" F7/F8 for loading identica/twitter +source ~/.vim/twitvim.vim + " ================================================== " Custom Functions " ================================================== function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
bf26ed3a9f69c4017ba3611182e51bf27a1e7bc3
* add a shortcut for copying current line to clipboard
diff --git a/vimrc b/vimrc index 27aeb29..e14330b 100644 --- a/vimrc +++ b/vimrc @@ -1,359 +1,362 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard +" ,Y - yank current line to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. 
- move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " -" :PG XXXX php - vimgrep the project for XXXX in .php files +" :PG XXXX php - vimgrep the project for XXXX in .php files requird workit syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme hornet " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set 
expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> -" Y yanks to the end of the line -nmap Y y$ - " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> +" Y yanks to the end of the line +nmap Y y$ + " shortcuts for copying to clipboard nmap <leader>y "*y +" copy the current line to the clipboard +nmap <leader>Y "*yy nmap <leader>p "*p " shortcut to toggle spelling nmap <leader>s :setlocal spell! 
spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" 
| if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search 
python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " ================================================== " Custom Functions " ================================================== function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") :exe 'cd '.proj_path let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
beb70abd72291806cb3349a0236661119c717866
* update the PG function to cd first so the filenames in the quickfix are smaller/more local
diff --git a/vimrc b/vimrc index 8489ee7..27aeb29 100644 --- a/vimrc +++ b/vimrc @@ -1,358 +1,359 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. 
- move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme hornet " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show 
matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Y yanks to the end of the line nmap Y y$ " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " shortcuts for copying to clipboard nmap <leader>y "*y nmap <leader>p "*p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . 
a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! 
and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" | if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " XML Completion " http://www.vim.org/scripts/script.php?script_id=301 " close xml/html tags like <div> " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading 
" PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " ================================================== " Custom Functions " ================================================== function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") + :exe 'cd '.proj_path - let search_path = proj_path . "/**/*." . ext + let search_path = "**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
eb84e35600176bf0c55c16202ee5b30443a84279
* update the docs on the xml.vim plugin we're using
diff --git a/vimrc b/vimrc index 4c5d433..8489ee7 100644 --- a/vimrc +++ b/vimrc @@ -1,354 +1,358 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. 
- move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) " " :PG XXXX php - vimgrep the project for XXXX in .php files syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme hornet " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show 
matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Y yanks to the end of the line nmap Y y$ " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " shortcuts for copying to clipboard nmap <leader>y "*y nmap <leader>p "*p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . 
a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! 
and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" | if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line +" XML Completion +" http://www.vim.org/scripts/script.php?script_id=301 +" close xml/html tags like <div> + " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto 
loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 " ================================================== " Custom Functions " ================================================== function! PGrep(pattern, ...) let pattern = a:pattern if a:0 == 0 let ext = '*' else let ext = a:1 endif let proj_path = system("echo $PROJ_PATH | tr -d '\n'") let search_path = proj_path . "/**/*." . ext :execute "vimgrep /" . pattern . "/j " search_path | :copen endfunction command! -nargs=* PG :call PGrep(<f-args>)
mitechie/pyvim
d4c7bb5dc660fc5f82575825f4afc78d74a7fd9d
* add a :PG function for easier vimgrep using workit variable
diff --git a/vimrc b/vimrc index b88a38d..4c5d433 100644 --- a/vimrc +++ b/vimrc @@ -1,330 +1,354 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. 
- move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) +" +" :PG XXXX php - vimgrep the project for XXXX in .php files syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme hornet " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " 
show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Y yanks to the end of the line nmap Y y$ " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " shortcuts for copying to clipboard nmap <leader>y "*y nmap <leader>p "*p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . 
a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " make the smarty .tpl files html files for our purposes au BufNewFile,BufRead *.tpl set filetype=html " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! 
and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" | if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " 
<leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1 +" ================================================== +" Custom Functions +" ================================================== + +function! PGrep(pattern, ...) + let pattern = a:pattern + + if a:0 == 0 + let ext = '*' + else + let ext = a:1 + endif + + let proj_path = system("echo $PROJ_PATH | tr -d '\n'") + + let search_path = proj_path . "/**/*." . ext + + :execute "vimgrep /" . pattern . "/j " search_path | :copen +endfunction +command! -nargs=* PG :call PGrep(<f-args>) + + diff --git a/vimsync.sh b/vimsync.sh new file mode 100755 index 0000000..6fe1ba7 --- /dev/null +++ b/vimsync.sh @@ -0,0 +1,44 @@ +#! /bin/zsh + +# add to ~/bin: ln -s ~/configs/pyvim/vimsync.sh ~/bin/vimsync.sh + +# Sync my vim config to a remote host specified. +# Steps: +# 1. cd ~/configs/pyvim +# 2. git co portable +# 3. rsync -avz --delete -e ssh ~/configs/pyvim dc:~ +# 4. ln -s vim/vim .vim +# 5. 
ln -s vim/.vimrc .vimrc + +# Notes: +# Using a git branch since some stuff I run locally won't be on remote hosts +# This setups the files in a directory on the host called vim and then it +# symlinks the .vimrc and vim directory to the user's home dir +# This currently syncs the git stuff as well, at some point should probably do +# some fancy export to a tmp dir and rsync those files over instead + +VIMCONF="/home/rharding/configs/pyvim" +VIMBRANCH="portable" + +cd $VIMCONF +git checkout $VIMBRANCH + +# get the hostname +if [ $# -ne 1 ] +then + echo "Usage: vimsync HOSTNAME" + return 65 +fi + +HOSTNAME=$1 + +rsync -avz --delete -e ssh ~/configs/pyvim $HOSTNAME:~ + +ssh $HOSTNAME 'ln -s vim/vim .vim && ln -s vim/vimrc .vimrc' + +# make sure we restore our local vim config to master +git checkout master + + +# @todo move the above into a shell function, setup a list of hosts, and loop +# through them to sync all hosts at once
mitechie/pyvim
f268f03927203b0aa27dce4fd456611ebd631d4d
* make the xml completion plugin work with html files
diff --git a/vim/ftplugin/html.vim b/vim/ftplugin/html.vim new file mode 100755 index 0000000..7117c9f --- /dev/null +++ b/vim/ftplugin/html.vim @@ -0,0 +1,953 @@ +" Vim script file vim600:fdm=marker: +" FileType: XML +" Author: Devin Weaver <suki (at) tritarget.com> +" Maintainer: Devin Weaver <suki (at) tritarget.com> +" Last Change: Tue Apr 07 11:12:08 EDT 2009 +" Version: 1.84 +" Location: http://www.vim.org/scripts/script.php?script_id=301 +" Licence: This program is free software; you can redistribute it +" and/or modify it under the terms of the GNU General Public +" License. See http://www.gnu.org/copyleft/gpl.txt +" Credits: Brad Phelan <bphelan (at) mathworks.co.uk> for completing +" tag matching and visual tag completion. +" Ma, Xiangjiang <Xiangjiang.Ma (at) broadvision.com> for +" pointing out VIM 6.0 map <buffer> feature. +" Luc Hermitte <hermitte (at) free.fr> for testing the self +" install documentation code and providing good bug fixes. +" Guo-Peng Wen for the self install documentation code. +" Shawn Boles <ickybots (at) gmail.com> for fixing the +" <Leader>x cancelation bug. +" Martijn van der Kwast <[email protected]> for patching +" problems with multi-languages (XML and PHP). + +" This script provides some convenience when editing XML (and some SGML) +" formated documents. + +" Section: Documentation +" ---------------------- +" +" Documentation should be available by ":help xml-plugin" command, once the +" script has been copied in you .vim/plugin directory. +" +" You still can read the documentation at the end of this file. Locate it by +" searching the "xml-plugin" string (and set ft=help to have +" appropriate syntaxic coloration). + +" Note: If you used the 5.x version of this file (xmledit.vim) you'll need to +" comment out the section where you called it since it is no longer used in +" version 6.x. + +" TODO: Revamp ParseTag to pull appart a tag a rebuild it properly. 
+" a tag like: < test nowrap testatt=foo > +" should be fixed to: <test nowrap="nowrap" testatt="foo"></test> + +"============================================================================== + +" Only do this when not done yet for this buffer +if exists("b:did_ftplugin") + finish +endif +" sboles, init these variables so vim doesn't complain on wrap cancel +let b:last_wrap_tag_used = "" +let b:last_wrap_atts_used = "" + +" WrapTag -> Places an XML tag around a visual selection. {{{1 +" Brad Phelan: Wrap the argument in an XML tag +" Added nice GUI support to the dialogs. +" Rewrote function to implement new algorythem that addresses several bugs. +if !exists("*s:WrapTag") +function s:WrapTag(text) + if (line(".") < line("'<")) + let insert_cmd = "o" + elseif (col(".") < col("'<")) + let insert_cmd = "a" + else + let insert_cmd = "i" + endif + if strlen(a:text) > 10 + let input_text = strpart(a:text, 0, 10) . '...' + else + let input_text = a:text + endif + let wraptag = inputdialog('Tag to wrap "' . input_text . '" : ') + if strlen(wraptag)==0 + if strlen(b:last_wrap_tag_used)==0 + undo + return + endif + let wraptag = b:last_wrap_tag_used + let atts = b:last_wrap_atts_used + else + let atts = inputdialog('Attributes in <' . wraptag . '> : ') + endif + if (visualmode() ==# 'V') + let text = strpart(a:text,0,strlen(a:text)-1) + if (insert_cmd ==# "o") + let eol_cmd = "" + else + let eol_cmd = "\<Cr>" + endif + else + let text = a:text + let eol_cmd = "" + endif + if strlen(atts)==0 + let text = "<".wraptag.">".text."</".wraptag.">" + let b:last_wrap_tag_used = wraptag + let b:last_wrap_atts_used = "" + else + let text = "<".wraptag." ".atts.">".text."</".wraptag.">" + let b:last_wrap_tag_used = wraptag + let b:last_wrap_atts_used = atts + endif + execute "normal! ".insert_cmd.text.eol_cmd +endfunction +endif + +" NewFileXML -> Inserts <?xml?> at top of new file. {{{1 +if !exists("*s:NewFileXML") +function s:NewFileXML( ) + " Where is g:did_xhtmlcf_inits defined? 
+ if &filetype == 'docbk' || &filetype == 'xml' || (!exists ("g:did_xhtmlcf_inits") && exists ("g:xml_use_xhtml") && (&filetype == 'html' || &filetype == 'xhtml')) + if append (0, '<?xml version="1.0"?>') + normal! G + endif + endif +endfunction +endif + + +" Callback -> Checks for tag callbacks and executes them. {{{1 +if !exists("*s:Callback") +function s:Callback( xml_tag, isHtml ) + let text = 0 + if a:isHtml == 1 && exists ("*HtmlAttribCallback") + let text = HtmlAttribCallback (a:xml_tag) + elseif exists ("*XmlAttribCallback") + let text = XmlAttribCallback (a:xml_tag) + endif + if text != '0' + execute "normal! i " . text ."\<Esc>l" + endif +endfunction +endif + + +" IsParsableTag -> Check to see if the tag is a real tag. {{{1 +if !exists("*s:IsParsableTag") +function s:IsParsableTag( tag ) + " The "Should I parse?" flag. + let parse = 1 + + " make sure a:tag has a proper tag in it and is not a instruction or end tag. + if a:tag !~ '^<[[:alnum:]_:\-].*>$' + let parse = 0 + endif + + " make sure this tag isn't already closed. + if strpart (a:tag, strlen (a:tag) - 2, 1) == '/' + let parse = 0 + endif + + return parse +endfunction +endif + + +" ParseTag -> The major work hourse for tag completion. {{{1 +if !exists("*s:ParseTag") +function s:ParseTag( ) + " Save registers + let old_reg_save = @" + let old_save_x = @x + + if (!exists("g:xml_no_auto_nesting") && strpart (getline ("."), col (".") - 2, 2) == '>>') + let multi_line = 1 + execute "normal! \"xX" + else + let multi_line = 0 + endif + + let @" = "" + execute "normal! 
\"xy%%" + let ltag = @" + if (&filetype == 'html' || &filetype == 'xhtml') && (!exists ("g:xml_no_html")) + let html_mode = 1 + let ltag = substitute (ltag, '[^[:graph:]]\+', ' ', 'g') + let ltag = substitute (ltag, '<\s*\([^[:alnum:]_:\-[:blank:]]\=\)\s*\([[:alnum:]_:\-]\+\)\>', '<\1\2', '') + else + let html_mode = 0 + endif + + if <SID>IsParsableTag (ltag) + " find the break between tag name and atributes (or closing of tag) + let index = matchend (ltag, '[[:alnum:]_:\-]\+') + + let tag_name = strpart (ltag, 1, index - 1) + if strpart (ltag, index) =~ '[^/>[:blank:]]' + let has_attrib = 1 + else + let has_attrib = 0 + endif + + " That's (index - 1) + 2, 2 for the '</' and 1 for the extra character the + " while includes (the '>' is ignored because <Esc> puts the curser on top + " of the '>' + let index = index + 2 + + " print out the end tag and place the cursor back were it left off + if html_mode && tag_name =~? '^\(img\|input\|param\|frame\|br\|hr\|meta\|link\|base\|area\)$' + if has_attrib == 0 + call <SID>Callback (tag_name, html_mode) + endif + if exists ("g:xml_use_xhtml") + execute "normal! i /\<Esc>l" + endif + else + if multi_line + " Can't use \<Tab> because that indents 'tabstop' not 'shiftwidth' + " Also >> doesn't shift on an empty line hence the temporary char 'x' + let com_save = &comments + set comments-=n:> + execute "normal! a\<Cr>\<Cr>\<Esc>kAx\<Esc>>>$\"xx" + execute "set comments=" . substitute(com_save, " ", "\\\\ ", "g") + else + if has_attrib == 0 + call <SID>Callback (tag_name, html_mode) + endif + if exists("g:xml_jump_string") + let index = index + strlen(g:xml_jump_string) + let jump_char = g:xml_jump_string + call <SID>InitEditFromJump() + else + let jump_char = "" + endif + execute "normal! a</" . tag_name . ">" . jump_char . "\<Esc>" . index . "h" + endif + endif + endif + + " restore registers + let @" = old_reg_save + let @x = old_save_x + + if multi_line + startinsert! + else + execute "normal! 
l" + startinsert + endif +endfunction +endif + + +" ParseTag2 -> Experimental function to replace ParseTag {{{1 +"if !exists("*s:ParseTag2") +"function s:ParseTag2( ) + " My thought is to pull the tag out and reformat it to a normalized tag + " and put it back. +"endfunction +"endif + + +" BuildTagName -> Grabs the tag's name for tag matching. {{{1 +if !exists("*s:BuildTagName") +function s:BuildTagName( ) + "First check to see if we Are allready on the end of the tag. The / search + "forwards command will jump to the next tag otherwise + + " Store contents of register x in a variable + let b:xreg = @x + + exec "normal! v\"xy" + if @x=='>' + " Don't do anything + else + exec "normal! />/\<Cr>" + endif + + " Now we head back to the < to reach the beginning. + exec "normal! ?<?\<Cr>" + + " Capture the tag (a > will be catured by the /$/ match) + exec "normal! v/\\s\\|$/\<Cr>\"xy" + + " We need to strip off any junk at the end. + let @x=strpart(@x, 0, match(@x, "[[:blank:]>\<C-J>]")) + + "remove <, > + let @x=substitute(@x,'^<\|>$','','') + + " remove spaces. + let @x=substitute(@x,'/\s*','/', '') + let @x=substitute(@x,'^\s*','', '') + + " Swap @x and b:xreg + let temp = @x + let @x = b:xreg + let b:xreg = temp +endfunction +endif + +" TagMatch1 -> First step in tag matching. {{{1 +" Brad Phelan: First step in tag matching. +if !exists("*s:TagMatch1") +function s:TagMatch1() + " Save registers + let old_reg_save = @" + + "Drop a marker here just in case we have a mismatched tag and + "wish to return (:mark looses column position) + normal! mz + + call <SID>BuildTagName() + + "Check to see if it is an end tag. If it is place a 1 in endtag + if match(b:xreg, '^/')==-1 + let endtag = 0 + else + let endtag = 1 + endif + + " Extract the tag from the whole tag block + " eg if the block = + " tag attrib1=blah attrib2=blah + " we will end up with + " tag + " with no trailing or leading spaces + let b:xreg=substitute(b:xreg,'^/','','g') + + " Make sure the tag is valid. 
+ " Malformed tags could be <?xml ?>, <![CDATA[]]>, etc. + if match(b:xreg,'^[[:alnum:]_:\-]') != -1 + " Pass the tag to the matching + " routine + call <SID>TagMatch2(b:xreg, endtag) + endif + " Restore registers + let @" = old_reg_save +endfunction +endif + + +" TagMatch2 -> Second step in tag matching. {{{1 +" Brad Phelan: Second step in tag matching. +if !exists("*s:TagMatch2") +function s:TagMatch2(tag,endtag) + let match_type='' + + " Build the pattern for searching for XML tags based + " on the 'tag' type passed into the function. + " Note we search forwards for end tags and + " backwards for start tags + if a:endtag==0 + "let nextMatch='normal /\(<\s*' . a:tag . '\(\s\+.\{-}\)*>\)\|\(<\/' . a:tag . '\s*>\)' + let match_type = '/' + else + "let nextMatch='normal ?\(<\s*' . a:tag . '\(\s\+.\{-}\)*>\)\|\(<\/' . a:tag . '\s*>\)' + let match_type = '?' + endif + + if a:endtag==0 + let stk = 1 + else + let stk = 1 + end + + " wrapscan must be turned on. We'll recored the value and reset it afterward. + " We have it on because if we don't we'll get a nasty error if the search hits + " BOF or EOF. + let wrapval = &wrapscan + let &wrapscan = 1 + + "Get the current location of the cursor so we can + "detect if we wrap on ourselves + let lpos = line(".") + let cpos = col(".") + + if a:endtag==0 + " If we are trying to find a start tag + " then decrement when we find a start tag + let iter = 1 + else + " If we are trying to find an end tag + " then increment when we find a start tag + let iter = -1 + endif + + "Loop until stk == 0. + while 1 + " exec search. + " Make sure to avoid />$/ as well as /\s$/ and /$/. + exec "normal! " . match_type . '<\s*\/*\s*' . a:tag . '\([[:blank:]>]\|$\)' . "\<Cr>" + + " Check to see if our match makes sence. 
+ if a:endtag == 0 + if line(".") < lpos + call <SID>MisMatchedTag (0, a:tag) + break + elseif line(".") == lpos && col(".") <= cpos + call <SID>MisMatchedTag (1, a:tag) + break + endif + else + if line(".") > lpos + call <SID>MisMatchedTag (2, '/'.a:tag) + break + elseif line(".") == lpos && col(".") >= cpos + call <SID>MisMatchedTag (3, '/'.a:tag) + break + endif + endif + + call <SID>BuildTagName() + + if match(b:xreg,'^/')==-1 + " Found start tag + let stk = stk + iter + else + " Found end tag + let stk = stk - iter + endif + + if stk == 0 + break + endif + endwhile + + let &wrapscan = wrapval +endfunction +endif + +" MisMatchedTag -> What to do if a tag is mismatched. {{{1 +if !exists("*s:MisMatchedTag") +function s:MisMatchedTag( id, tag ) + "Jump back to our formor spot + normal! `z + normal zz + echohl WarningMsg + " For debugging + "echo "Mismatched tag " . a:id . ": <" . a:tag . ">" + " For release + echo "Mismatched tag <" . a:tag . ">" + echohl None +endfunction +endif + +" DeleteTag -> Deletes surrounding tags from cursor. {{{1 +" Modifies mark z +if !exists("*s:DeleteTag") +function s:DeleteTag( ) + if strpart (getline ("."), col (".") - 1, 1) == "<" + normal! l + endif + if search ("<[^\/]", "bW") == 0 + return + endif + normal! mz + normal \5 + normal! d%`zd% +endfunction +endif + +" VisualTag -> Selects Tag body in a visual selection. {{{1 +" Modifies mark z +if !exists("*s:VisualTag") +function s:VisualTag( ) + if strpart (getline ("."), col (".") - 1, 1) == "<" + normal! l + endif + if search ("<[^\/]", "bW") == 0 + return + endif + normal! mz + normal \5 + normal! % + exe "normal! " . visualmode() + normal! `z +endfunction +endif + +" InsertGt -> close tags only if the cursor is in a HTML or XML context {{{1 +" Else continue editing +if !exists("*s:InsertGt") +function s:InsertGt( ) + let save_matchpairs = &matchpairs + set matchpairs-=<:> + execute "normal! a>" + execute "set matchpairs=" . 
save_matchpairs + " When the current char is text within a tag it will not proccess as a + " syntax'ed element and return nothing below. Since the multi line wrap + " feture relies on using the '>' char as text within a tag we must use the + " char prior to establish if it is valid html/xml + if (getline('.')[col('.') - 1] == '>') + let char_syn=synIDattr(synID(line("."), col(".") - 1, 1), "name") + endif + if -1 == match(char_syn, "xmlProcessing") && (0 == match(char_syn, 'html') || 0 == match(char_syn, 'xml') || 0 == match(char_syn, 'docbk')) + call <SID>ParseTag() + else + if col(".") == col("$") - 1 + startinsert! + else + execute "normal! l" + startinsert + endif + endif +endfunction +endif + +" InitEditFromJump -> Set some needed autocommands and syntax highlights for EditFromJump. {{{1 +if !exists("*s:InitEditFromJump") +function s:InitEditFromJump( ) + " Add a syntax highlight for the xml_jump_string. + execute "syntax match Error /\\V" . g:xml_jump_string . "/" +endfunction +endif + +" ClearJumpMarks -> Clean out extranious left over xml_jump_string garbage. {{{1 +if !exists("*s:ClearJumpMarks") +function s:ClearJumpMarks( ) + if exists("g:xml_jump_string") + if g:xml_jump_string != "" + execute ":%s/" . g:xml_jump_string . "//ge" + endif + endif +endfunction +endif + +" EditFromJump -> Jump to the end of the tag and continue editing. {{{1 +" g:xml_jump_string must be set. +if !exists("*s:EditFromJump") +function s:EditFromJump( ) + if exists("g:xml_jump_string") + if g:xml_jump_string != "" + let foo = search(g:xml_jump_string, 'csW') " Moves cursor by default + execute "normal! " . strlen(g:xml_jump_string) . "x" + if col(".") == col("$") - 1 + startinsert! + else + startinsert + endif + endif + else + echohl WarningMsg + echo "Function disabled. xml_jump_string not defined." + echohl None + endif +endfunction +endif + +" Section: Doc installation {{{1 +" Function: s:XmlInstallDocumentation(full_name, revision) {{{2 +" Install help documentation. 
+" Arguments: +" full_name: Full name of this vim plugin script, including path name. +" revision: Revision of the vim script. #version# mark in the document file +" will be replaced with this string with 'v' prefix. +" Return: +" 1 if new document installed, 0 otherwise. +" Note: Cleaned and generalized by guo-peng Wen +"''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +function! s:XmlInstallDocumentation(full_name, revision) + " Name of the document path based on the system we use: + if (has("unix")) + " On UNIX like system, using forward slash: + let l:slash_char = '/' + let l:mkdir_cmd = ':silent !mkdir -p ' + else + " On M$ system, use backslash. Also mkdir syntax is different. + " This should only work on W2K and up. + let l:slash_char = '\' + let l:mkdir_cmd = ':silent !mkdir ' + endif + + let l:doc_path = l:slash_char . 'doc' + "let l:doc_home = l:slash_char . '.vim' . l:slash_char . 'doc' + + " Figure out document path based on full name of this script: + let l:vim_plugin_path = fnamemodify(a:full_name, ':h') + "let l:vim_doc_path = fnamemodify(a:full_name, ':h:h') . l:doc_path + let l:vim_doc_path = matchstr(l:vim_plugin_path, + \ '.\{-}\ze\%(\%(ft\)\=plugin\|macros\)') . l:doc_path + if (!(filewritable(l:vim_doc_path) == 2)) + echomsg "Doc path: " . l:vim_doc_path + execute l:mkdir_cmd . l:vim_doc_path + if (!(filewritable(l:vim_doc_path) == 2)) + " Try a default configuration in user home: + "let l:vim_doc_path = expand("~") . l:doc_home + let l:vim_doc_path = matchstr(&rtp, + \ escape($HOME, '\') .'[/\\]\%(\.vim\|vimfiles\)') + if (!(filewritable(l:vim_doc_path) == 2)) + execute l:mkdir_cmd . l:vim_doc_path + if (!(filewritable(l:vim_doc_path) == 2)) + " Put a warning: + echomsg "Unable to open documentation directory" + echomsg " type :help add-local-help for more informations." 
+ return 0 + endif + endif + endif + endif + + " Exit if we have problem to access the document directory: + if (!isdirectory(l:vim_plugin_path) + \ || !isdirectory(l:vim_doc_path) + \ || filewritable(l:vim_doc_path) != 2) + return 0 + endif + + " Full name of script and documentation file: + let l:script_name = 'xml.vim' + let l:doc_name = 'xml-plugin.txt' + let l:plugin_file = l:vim_plugin_path . l:slash_char . l:script_name + let l:doc_file = l:vim_doc_path . l:slash_char . l:doc_name + + " Bail out if document file is still up to date: + if (filereadable(l:doc_file) && + \ getftime(l:plugin_file) < getftime(l:doc_file)) + return 0 + endif + + " Prepare window position restoring command: + if (strlen(@%)) + let l:go_back = 'b ' . bufnr("%") + else + let l:go_back = 'enew!' + endif + + " Create a new buffer & read in the plugin file (me): + setl nomodeline + exe 'enew!' + exe 'r ' . l:plugin_file + + setl modeline + let l:buf = bufnr("%") + setl noswapfile modifiable + + norm zR + norm gg + + " Delete from first line to a line starts with + " === START_DOC + 1,/^=\{3,}\s\+START_DOC\C/ d + + " Delete from a line starts with + " === END_DOC + " to the end of the documents: + /^=\{3,}\s\+END_DOC\C/,$ d + + " Remove fold marks: + % s/{\{3}[1-9]/ / + + " Add modeline for help doc: the modeline string is mangled intentionally + " to avoid it be recognized by VIM: + call append(line('$'), '') + call append(line('$'), ' v' . 'im:tw=78:ts=8:ft=help:norl:') + + " Replace revision: + exe "normal :1,5s/#version#/ v" . a:revision . "/\<CR>" + + " Save the help document: + exe 'w! ' . l:doc_file + exe l:go_back + exe 'bw ' . l:buf + + " Build help tags: + exe 'helptags ' . l:vim_doc_path + + return 1 +endfunction +" }}}2 + +let s:script_lines = readfile(expand("<sfile>"), "", 6) +let s:revision= + \ substitute(s:script_lines[5], '^" Version:\s*\|\s*$', '', '') +" \ substitute("$Revision: 83 $",'\$\S*: \([.0-9]\+\) \$','\1','') +silent! 
let s:install_status = + \ s:XmlInstallDocumentation(expand('<sfile>:p'), s:revision) +if (s:install_status == 1) + echom expand("<sfile>:t:r") . '-plugin v' . s:revision . + \ ': Help-documentation installed.' +endif + + +" Mappings and Settings. {{{1 +" This makes the '%' jump between the start and end of a single tag. +setlocal matchpairs+=<:> +setlocal commentstring=<!--%s--> + +" Have this as an escape incase you want a literal '>' not to run the +" ParseTag function. +if !exists("g:xml_tag_completion_map") + inoremap <buffer> <LocalLeader>. > + inoremap <buffer> <LocalLeader>> > +endif + +" Jump between the beggining and end tags. +nnoremap <buffer> <LocalLeader>5 :call <SID>TagMatch1()<Cr> +nnoremap <buffer> <LocalLeader>% :call <SID>TagMatch1()<Cr> +vnoremap <buffer> <LocalLeader>5 <Esc>:call <SID>VisualTag()<Cr> +vnoremap <buffer> <LocalLeader>% <Esc>:call <SID>VisualTag()<Cr> + +" Wrap selection in XML tag +vnoremap <buffer> <LocalLeader>x "xx:call <SID>WrapTag(@x)<Cr> +nnoremap <buffer> <LocalLeader>d :call <SID>DeleteTag()<Cr> + +" Parse the tag after pressing the close '>'. +if !exists("g:xml_tag_completion_map") + " inoremap <buffer> > ><Esc>:call <SID>ParseTag()<Cr> + inoremap <buffer> > <Esc>:call <SID>InsertGt()<Cr> +else + execute "inoremap <buffer> " . g:xml_tag_completion_map . " <Esc>:call <SID>InsertGt()<Cr>" +endif + +nnoremap <buffer> <LocalLeader><Space> :call <SID>EditFromJump()<Cr> +inoremap <buffer> <LocalLeader><Space> <Esc>:call <SID>EditFromJump()<Cr> +" Clear out all left over xml_jump_string garbage +nnoremap <buffer> <LocalLeader>w :call <SID>ClearJumpMarks()<Cr> +" The syntax files clear out any predefined syntax definitions. Recreate +" this when ever a xml_jump_string is created. (in ParseTag) + +augroup xml + au! + au BufNewFile * call <SID>NewFileXML() + " Remove left over garbage from xml_jump_string on file save. 
+ au BufWritePre <buffer> call <SID>ClearJumpMarks() +augroup END +"}}}1 +finish + +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" +" Section: Documentation content {{{1 +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" +=== START_DOC +*xml-plugin.txt* Help edit XML and SGML documents. #version# + + XML Edit {{{2 ~ + +A filetype plugin to help edit XML and SGML documents. + +This script provides some convenience when editing XML (and some SGML +including HTML) formated documents. It allows you to jump to the beginning +or end of the tag block your cursor is in. '%' will jump between '<' and '>' +within the tag your cursor is in. When in insert mode and you finish a tag +(pressing '>') the tag will be completed. If you press '>' twice it will +complete the tag and place the cursor in the middle of the tags on it's own +line (helps with nested tags). + +Usage: Place this file into your ftplugin directory. To add html support +Sym-link or copy this file to html.vim in your ftplugin directory. To activte +the script place 'filetype plugin on' in your |.vimrc| file. See |ftplugins| +for more information on this topic. + +If the file edited is of type "html" and "xml_use_html" is defined then the +following tags will not auto complete: +<img>, <input>, <param>, <frame>, <br>, <hr>, <meta>, <link>, <base>, <area> + +If the file edited is of type 'html' and 'xml_use_xhtml' is defined the above +tags will autocomplete the xml closing staying xhtml compatable. +ex. <hr> becomes <hr /> (see |xml-plugin-settings|) + +NOTE: If you used the VIM 5.x version of this file (xmledit.vim) you'll need +to comment out the section where you called it. It is no longer used in the +VIM 6.x version. + +Known Bugs {{{2 ~ + +- This script will modify registers ". and "x; register "" will be restored. +- < & > marks inside of a CDATA section are interpreted as actual XML tags + even if unmatched. 
+- Although the script can handle leading spaces such as < tag></ tag> it is + illegal XML syntax and considered very bad form. +- Placing a literal `>' in an attribute value will auto complete dispite that + the start tag isn't finished. This is poor XML anyway you should use + &gt; instead. +- The matching algorithm can handle illegal tag characters where as the tag + completion algorithm can not. + +------------------------------------------------------------------------------ + *xml-plugin-mappings* +Mappings {{{2 ~ + +<LocalLeader> is a setting in VIM that depicts a prefix for scripts and +plugins to use. By default this is the backslash key `\'. See |mapleader| +for details. + +<LocalLeader><Space> + Normal or Insert - Continue editing after the ending tag. This + option requires xml_jump_string to be set to function. When a tag + is completed it will append the xml_jump_string. Once this mapping + is ran it will delete the next xml_jump_string pattern to the right + of the curser and delete it leaving you in insert mode to continue + editing. + +<LocalLeader>w + Normal - Will clear the entire file of left over xml_jump_string garbage. + * This will also happen automatically when you save the file. * + +<LocalLeader>x + Visual - Place a custom XML tag to suround the selected text. You + need to have selected text in visual mode before you can use this + mapping. See |visual-mode| for details. + +<LocalLeader>. or <LocalLeader>> + Insert - Place a literal '>' without parsing tag. + +<LocalLeader>5 or <LocalLeader>% + Normal or Visual - Jump to the begining or end tag. + +<LocalLeader>d + Normal - Deletes the surrounding tags from the cursor. > + <tag1>outter <tag2>inner text</tag2> text</tag1> + ^ +< Turns to: > + outter <tag2>inner text</tag2> text + ^ +< + +------------------------------------------------------------------------------ + *xml-plugin-settings* +Options {{{2 ~ + +(All options must be placed in your |.vimrc| prior to the |ftplugin| +command.) 
+ +xml_tag_completion_map + Use this setting to change the default mapping to auto complete a + tag. By default typing a literal `>' will cause the tag your editing + to auto complete; pressing twice will auto nest the tag. By using + this setting the `>' will be a literal `>' and you must use the new + mapping to perform auto completion and auto nesting. For example if + you wanted Control-L to perform auto completion inmstead of typing a + `>' place the following into your .vimrc: > + let xml_tag_completion_map = "<C-l>" +< +xml_no_auto_nesting + This turns off the auto nesting feature. After a completion is made + and another `>' is typed xml-edit automatically will break the tag + accross multiple lines and indent the curser to make creating nested + tqags easier. This feature turns it off. Enter the following in your + .vimrc: > + let xml_no_auto_nesting = 1 +< +xml_use_xhtml + When editing HTML this will auto close the short tags to make valid + XML like <hr /> and <br />. Enter the following in your vimrc to + turn this option on: > + let xml_use_xhtml = 1 +< +xml_no_html + This turns off the support for HTML specific tags. Place this in your + .vimrc: > + let xml_no_html = 1 +< +xml_jump_string + This turns off the support for continuing edits after an ending tag. + xml_jump_string can be any string how ever a simple character will + suffice. Pick a character or small string that is unique and will + not interfer with your normal editing. See the <LocalLeader>Space + mapping for more. + .vimrc: > + let xml_jump_string = "`" +< +------------------------------------------------------------------------------ + *xml-plugin-callbacks* +Callback Functions {{{2 ~ + +A callback function is a function used to customize features on a per tag +basis. 
For example say you wish to have a default set of attributs when you +type an empty tag like this: + You type: <tag> + You get: <tag default="attributes"></tag> + +This is for any script programmers who wish to add xml-plugin support to +there own filetype plugins. + +Callback functions recive one attribute variable which is the tag name. The +all must return either a string or the number zero. If it returns a string +the plugin will place the string in the proper location. If it is a zero the +plugin will ignore and continue as if no callback existed. + +The following are implemented callback functions: + +HtmlAttribCallback + This is used to add default attributes to html tag. It is intended + for HTML files only. + +XmlAttribCallback + This is a generic callback for xml tags intended to add attributes. + + *xml-plugin-html* +Callback Example {{{2 ~ + +The following is an example of using XmlAttribCallback in your .vimrc +> + function XmlAttribCallback (xml_tag) + if a:xml_tag ==? "my-xml-tag" + return "attributes=\"my xml attributes\"" + else + return 0 + endif + endfunction +< +The following is a sample html.vim file type plugin you could use: +> + " Vim script file vim600:fdm=marker: + " FileType: HTML + " Maintainer: Devin Weaver <vim (at) tritarget.com> + " Location: http://www.vim.org/scripts/script.php?script_id=301 + + " This is a wrapper script to add extra html support to xml documents. + " Original script can be seen in xml-plugin documentation. + + " Only do this when not done yet for this buffer + if exists("b:did_ftplugin") + finish + endif + " Don't set 'b:did_ftplugin = 1' because that is xml.vim's responsability. + + let b:html_mode = 1 + + if !exists("*HtmlAttribCallback") + function HtmlAttribCallback( xml_tag ) + if a:xml_tag ==? "table" + return "cellpadding=\"0\" cellspacing=\"0\" border=\"0\"" + elseif a:xml_tag ==? "link" + return "href=\"/site.css\" rel=\"StyleSheet\" type=\"text/css\"" + elseif a:xml_tag ==? 
"body" + return "bgcolor=\"white\"" + elseif a:xml_tag ==? "frame" + return "name=\"NAME\" src=\"/\" scrolling=\"auto\" noresize" + elseif a:xml_tag ==? "frameset" + return "rows=\"0,*\" cols=\"*,0\" border=\"0\"" + elseif a:xml_tag ==? "img" + return "src=\"\" width=\"0\" height=\"0\" border=\"0\" alt=\"\"" + elseif a:xml_tag ==? "a" + if has("browse") + " Look up a file to fill the href. Used in local relative file + " links. typeing your own href before closing the tag with `>' + " will override this. + let cwd = getcwd() + let cwd = substitute (cwd, "\\", "/", "g") + let href = browse (0, "Link to href...", getcwd(), "") + let href = substitute (href, cwd . "/", "", "") + let href = substitute (href, " ", "%20", "g") + else + let href = "" + endif + return "href=\"" . href . "\"" + else + return 0 + endif + endfunction + endif + + " On to loading xml.vim + runtime ftplugin/xml.vim +< +=== END_DOC +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" +" vim: set tabstop=8 shiftwidth=4 softtabstop=4 smartindent +" vim600: set foldmethod=marker smarttab fileencoding=iso-8859-15 + diff --git a/vim/snippets/javascript.snippets b/vim/snippets/javascript.snippets index 51f5e05..f7599cc 100644 --- a/vim/snippets/javascript.snippets +++ b/vim/snippets/javascript.snippets @@ -1,74 +1,77 @@ # Prototype snippet proto ${1:class_name}.prototype.${2:method_name} = function(${3:first_argument}) { ${4:// body...} }; # Function snippet fun function ${1:function_name} (${2:argument}) { ${3:// body...} } # Anonymous Function snippet f function(${1}) {${2}}; # if snippet if if (${1:true}) {${2}}; # if ... else snippet ife if (${1:true}) {${2}} else{${3}}; # tertiary conditional snippet t ${1:/* condition */} ? ${2:a} : ${3:b} # switch snippet switch switch(${1:expression}) { case '${3:case}': ${4:// code} break; ${5} default: ${2:// code} } # case snippet case case '${1:case}': ${2:// code} break; ${3} # for (...) 
{...} snippet for for (var ${2:i} = 0; $2 < ${1:Things}.length; $2${3:++}) { ${4:$1[$2]} }; # for (...) {...} (Improved Native For-Loop) snippet forr for (var ${2:i} = ${1:Things}.length - 1; $2 >= 0; $2${3:--}) { ${4:$1[$2]} }; # while (...) {...} snippet wh while (${1:/* condition */}) { ${2:/* code */} } # do...while snippet do do { ${2:/* code */} } while (${1:/* condition */}); # Object Method snippet :f ${1:method_name}: function(${2:attribute}) { ${4} }${3:,} # setTimeout function snippet timeout setTimeout(function() {${3}}${2}, ${1:10}; # Get Elements snippet get getElementsBy${1:TagName}('${2}')${3} # Get Element snippet gett getElementBy${1:Id}('${2}')${3} +# console dump +snippet dmp + console.log(${1:var}) diff --git a/vim/snippets/php.snippets b/vim/snippets/php.snippets index 3ce9e26..bbfd651 100644 --- a/vim/snippets/php.snippets +++ b/vim/snippets/php.snippets @@ -1,216 +1,222 @@ snippet php <?php ${1} ?> snippet ec echo "${1:string}"${2}; snippet inc include '${1:file}';${2} snippet inc1 include_once '${1:file}';${2} snippet req require '${1:file}';${2} snippet req1 require_once '${1:file}';${2} # $GLOBALS['...'] snippet globals $GLOBALS['${1:variable}']${2: = }${3:something}${4:;}${5} snippet $_ COOKIE['...'] $_COOKIE['${1:variable}']${2} snippet $_ ENV['...'] $_ENV['${1:variable}']${2} snippet $_ FILES['...'] $_FILES['${1:variable}']${2} snippet $_ Get['...'] $_GET['${1:variable}']${2} snippet $_ POST['...'] $_POST['${1:variable}']${2} snippet $_ REQUEST['...'] $_REQUEST['${1:variable}']${2} snippet $_ SERVER['...'] $_SERVER['${1:variable}']${2} snippet $_ SESSION['...'] $_SESSION['${1:variable}']${2} # Start Docblock snippet /* /** * ${1} **/ # Class - post doc snippet doc_cp /** * ${1:undocumented class} * * @package ${2:default} * @author ${3:`g:snips_author`} **/${4} # Class Variable - post doc snippet doc_vp /** * ${1:undocumented class variable} * * @var ${2:string} **/${3} # Class Variable snippet doc_v /** * ${3:undocumented class 
variable} * * @var ${4:string} **/ ${1:var} $${2};${5} # Class snippet doc_c /** * ${3:undocumented class} * * @packaged ${4:default} * @author ${5:`g:snips_author`} **/ ${1:}class ${2:} {${6} } // END $1class $2 # Constant Definition - post doc snippet doc_dp /** * ${1:undocumented constant} **/${2} # Constant Definition snippet doc_d /** * ${3:undocumented constant} **/ define(${1}, ${2});${4} # Function - post doc snippet doc_fp /** * ${1:undocumented function} * * @return ${2:void} * @author ${3:`g:snips_author`} **/${4} # Function signature snippet doc_s /** * ${4:undocumented function} * * @return ${5:void} * @author ${6:`g:snips_author`} **/ ${1}function ${2}(${3});${7} # Function snippet doc_f /** * ${4:undocumented function} * * @return ${5:void} * @author ${6:`g:snips_author`} **/ ${1}function ${2}(${3}) {${7} } # Header snippet doc_h /** * ${1} * * @author ${2:`g:snips_author`} * @version ${3:$Id$} * @copyright ${4:$2}, `strftime('%d %B, %Y')` * @package ${5:default} **/ /** * Define DocBlock *// # Interface snippet doc_i /** * ${2:undocumented class} * * @package ${3:default} * @author ${4:`g:snips_author`} **/ interface ${1:} {${5} } // END interface $1 # class ... snippet class /** * ${1} **/ class ${2:ClassName} { ${3} function ${4:__construct}(${5:argument}) { ${6:// code...} } } # define(...) snippet def define('${1}'${2});${3} # defined(...) snippet def? ${1}defined('${2}')${3} snippet wh while (${1:/* condition */}) { ${2:// code...} } # do ... while snippet do do { ${2:// code... } } while (${1:/* condition */}); snippet if if (${1:/* condition */}) { ${2:// code...} } snippet ife if (${1:/* condition */}) { ${2:// code...} } else { ${3:// code...} } ${4} snippet else else { ${1:// code...} } snippet elseif elseif (${1:/* condition */}) { ${2:// code...} } # Tertiary conditional snippet t $${1:retVal} = (${2:condition}) ? 
${3:a} : ${4:b};${5} snippet switch switch ($${1:variable}) { case '${2:value}': ${3:// code...} break; ${5} default: ${4:// code...} break; } snippet case case '${1:value}': ${2:// code...} break;${3} snippet for for ($${2:i} = 0; $$2 < ${1:count}; $$2${3:++}) { ${4: // code...} } snippet foreach foreach ($${1:variable} as $${2:key}) { ${3:// code...} } snippet fun ${1:public }function ${2:FunctionName}(${3}) { ${4:// code...} } # $... = array (...) snippet array $${1:arrayName} = array('${2}' => ${3});${4} +# firephp log +snippet log + FB::Log(${1:var}); +# var_dump +snippet dmp + var_dump(${1:var}); diff --git a/vim/snippets/python.snippets b/vim/snippets/python.snippets index d511184..2af3ff1 100644 --- a/vim/snippets/python.snippets +++ b/vim/snippets/python.snippets @@ -1,86 +1,89 @@ snippet #! #!/usr/bin/python snippet imp import ${1:module} # Module Docstring snippet docs ''' File: ${1:`Filename('$1.py', 'foo.py')`} Author: ${2:`g:snips_author`} Description: ${3} ''' snippet wh while ${1:condition}: ${2:# code...} snippet for for ${1:needle} in ${2:haystack}: ${3:# code...} # New Class snippet cl class ${1:ClassName}(${2:object}): """${3:docstring for $1}""" def __init__(self, ${4:arg}): ${5:super($1, self).__init__()} self.$4 = $4 ${6} # New Function snippet def def ${1:fname}(${2:`indent('.') ? 'self' : ''`}): """${3:docstring for $1}""" ${4:pass} snippet deff def ${1:fname}(${2:`indent('.') ? 'self' : ''`}): ${3} # New Method snippet defs def ${1:mname}(self, ${2:arg}): ${3:pass} # New Property snippet property def ${1:foo}(): doc = "${2:The $1 property.}" def fget(self): ${3:return self._$1} def fset(self, value): ${4:self._$1 = value} # Lambda snippet ld ${1:var} = lambda ${2:vars} : ${3:action} snippet . self. 
snippet try Try/Except try: ${1:pass} except ${2:Exception}, ${3:e}: ${4:raise $3} snippet try Try/Except/Else try: ${1:pass} except ${2:Exception}, ${3:e}: ${4:raise $3} else: ${5:pass} snippet try Try/Except/Finally try: ${1:pass} except ${2:Exception}, ${3:e}: ${4:raise $3} finally: ${5:pass} snippet try Try/Except/Else/Finally try: ${1:pass} except ${2:Exception}, ${3:e}: ${4:raise $3} else: ${5:pass} finally: ${6:pass} # if __name__ == '__main__': snippet ifmain if __name__ == '__main__': ${1:main()} # __magic__ snippet _ __${1:init}__${2} +# dump out var +snippet dmp + print ${1:var} diff --git a/vimrc b/vimrc index ba18093..b88a38d 100644 --- a/vimrc +++ b/vimrc @@ -1,327 +1,330 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script " curl - Gist plugin " " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " " ,pw - search for keyword in pydocs " ,pW - search any pydoc for this keyword " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. 
- move the split vertically " F2 - close current split " " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme hornet " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... 
" move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Y yanks to the end of the line nmap Y y$ " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " shortcuts for copying to clipboard nmap <leader>y "*y nmap <leader>p "*p " shortcut to toggle spelling nmap <leader>s :setlocal spell! spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . 
a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h +" make the smarty .tpl files html files for our purposes +au BufNewFile,BufRead *.tpl set filetype=html + " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! 
and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" | if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " ================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " 
<leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype " Gist - github pastbin " http://www.vim.org/scripts/script.php?script_id=2423 " :Gist " :Gist -p (private) " :Gist XXXX (fetch Gist XXXX and load) let g:gist_detect_filetype = 1 let g:gist_open_browser_after_post = 1
mitechie/pyvim
be0b6a520bf929e75c1c66a8965a835f21deebc1
+ add the gist support for pastebin, love that you can download a gist right into vim
diff --git a/vim/plugin/gist.vim b/vim/plugin/gist.vim new file mode 100644 index 0000000..9e23655 --- /dev/null +++ b/vim/plugin/gist.vim @@ -0,0 +1,663 @@ +"============================================================================= +" File: gist.vim +" Author: Yasuhiro Matsumoto <[email protected]> +" Last Change: 03-Feb-2010. +" Version: 3.2 +" WebPage: http://github.com/mattn/gist-vim/tree/master +" Usage: +" +" :Gist +" post whole text to gist. +" +" :'<,'>Gist +" post selected text to gist. +" +" :Gist -p +" post whole text to gist with private. +" +" :Gist -a +" post whole text to gist with anonymous. +" +" :Gist -m +" post multi buffer to gist. +" +" :Gist -e +" edit the gist. (shoud be work on gist buffer) +" you can update the gist with :w command on gist buffer. +" +" :Gist -d +" delete the gist. (should be work on gist buffer) +" password authentication is needed. +" +" :Gist -e foo.js +" edit the gist with name 'foo.js'. (shoud be work on gist buffer) +" +" :Gist XXXXX +" edit gist XXXXX. +" +" :Gist -c XXXXX. +" get gist XXXXX and put to clipboard. +" +" :Gist -l +" list gists from mine. +" +" :Gist -l mattn +" list gists from mattn. +" +" :Gist -la +" list gists from all. +" +" Tips: +" * if set g:gist_clip_command, gist.vim will copy the gist code +" with option '-c'. +" +" # mac +" let g:gist_clip_command = 'pbcopy' +" +" # linux +" let g:gist_clip_command = 'xclip -selection clipboard' +" +" # others(cygwin?) +" let g:gist_clip_command = 'putclip' +" +" * if you want to detect filetype from gist's filename... +" +" # detect filetype if vim failed auto-detection. +" let g:gist_detect_filetype = 1 +" +" # detect filetype always. +" let g:gist_detect_filetype = 2 +" +" * if you want to open browser after the post... +" +" let g:gist_open_browser_after_post = 1 +" +" * if you want to change the browser... 
+" +" let g:gist_browser_command = 'w3m %URL%' +" +" or +" +" let g:gist_browser_command = 'opera %URL% &' +" +" on windows, should work with your setting. +" +" Thanks: +" MATSUU Takuto: +" removed carriage return +" gist_browser_command enhancement +" edit support +" +" GetLatestVimScripts: 2423 1 :AutoInstall: gist.vim +" script type: plugin + +if &cp || (exists('g:loaded_gist_vim') && g:loaded_gist_vim) + finish +endif +let g:loaded_gist_vim = 1 + +if (!exists('g:github_user') || !exists('g:github_token')) && !executable('git') + echoerr "Gist: require 'git' command" + finish +endif + +if !executable('curl') + echoerr "Gist: require 'curl' command" + finish +endif + +if !exists('g:gist_open_browser_after_post') + let g:gist_open_browser_after_post = 0 +endif + +if !exists('g:gist_browser_command') + if has('win32') + let g:gist_browser_command = "!start rundll32 url.dll,FileProtocolHandler %URL%" + elseif has('mac') + let g:gist_browser_command = "open %URL%" + elseif executable('xdg-open') + let g:gist_browser_command = "xdg-open %URL%" + else + let g:gist_browser_command = "firefox %URL% &" + endif +endif + +if !exists('g:gist_detect_filetype') + let g:gist_detect_filetype = 0 +endif + +if !exists('g:gist_show_privates') + let g:gist_show_privates = 0 +endif + +function! s:nr2hex(nr) + let n = a:nr + let r = "" + while n + let r = '0123456789ABCDEF'[n % 16] . r + let n = n / 16 + endwhile + return r +endfunction + +function! s:encodeURIComponent(instr) + let instr = iconv(a:instr, &enc, "utf-8") + let len = strlen(instr) + let i = 0 + let outstr = '' + while i < len + let ch = instr[i] + if ch =~# '[0-9A-Za-z-._~!''()*]' + let outstr = outstr . ch + elseif ch == ' ' + let outstr = outstr . '+' + else + let outstr = outstr . '%' . substitute('0' . s:nr2hex(char2nr(ch)), '^.*\(..\)$', '\1', '') + endif + let i = i + 1 + endwhile + return outstr +endfunction + +function! 
s:GistList(user, token, gistls) + if a:gistls == '-all' + let url = 'http://gist.github.com/gists' + else + let url = 'http://gist.github.com/'.a:gistls + endif + let winnum = bufwinnr(bufnr('gist:'.a:gistls)) + if winnum != -1 + if winnum != bufwinnr('%') + exe "normal \<c-w>".winnum."w" + endif + setlocal modifiable + else + exec 'silent split gist:'.a:gistls + endif + + if g:gist_show_privates + let password = inputsecret('Password:') + if len(password) == 0 + echo 'Canceled' + return + endif + echon "Login to gist... " + let cookie = s:GistGetSessionID(a:user, password) + if len(cookie) == 0 + echo 'Failed' + return + endif + silent %d _ + let quote = &shellxquote == '"' ? "'" : '"' + exec 'silent 0r! curl -i -b '.quote.substitute(cookie,'%','\\%','g').quote.' '.url + else + silent %d _ + exec 'silent 0r! curl -s '.url + endif + + silent! %s/>/>\r/g + silent! %s/</\r</g + silent! %g/<pre/,/<\/pre/join! + silent! %g/<span class="date"/,/<\/span/join + silent! %g/^<span class="date"/s/> */>/g + silent! %v/^\(gist:\|<pre>\|<span class="date">\)/d _ + silent! %s/<div[^>]*>/\r /g + silent! %s/<\/pre>/\r/g + silent! %g/^gist:/,/<span class="date"/join + silent! %s/<[^>]\+>//g + silent! %s/\r//g + silent! %s/&nbsp;/ /g + silent! %s/&quot;/"/g + silent! %s/&amp;/\&/g + silent! %s/&gt;/>/g + silent! %s/&lt;/</g + silent! %s/&#\(\d\d\);/\=nr2char(submatch(1))/g + silent! %g/^gist: /s/ //g + + setlocal buftype=nofile bufhidden=hide noswapfile + setlocal nomodified + syntax match SpecialKey /^gist:/he=e-1 + exec 'nnoremap <silent> <buffer> <cr> :call <SID>GistListAction()<cr>' + + cal cursor(1,1) + setlocal foldmethod=expr + setlocal foldexpr=getline(v:lnum)=~'^gist:'?'>1':'=' + setlocal foldtext=getline(v:foldstart) +endfunction + +function! 
s:GistGetFileName(gistid) + let url = 'http://gist.github.com/'.a:gistid + let res = system('curl -s '.url) + let res = substitute(res, '^.*<a href="/raw/[^"]\+/\([^"]\+\)".*$', '\1', '') + if res =~ '/' + return '' + else + return res + endif +endfunction + +function! s:GistDetectFiletype(gistid) + let url = 'http://gist.github.com/'.a:gistid + let res = system('curl -s '.url) + let res = substitute(res, '^.*<div class="meta">[\r\n ]*<div class="info">[\r\n ]*<span>\([^>]\+\)</span>.*$', '\1', '') + let res = substitute(res, '.*\(\.[^\.]\+\)$', '\1', '') + if res =~ '^\.' + silent! exec "doau BufRead *".res + else + silent! exec "setlocal ft=".tolower(res) + endif +endfunction + +function! s:GistWrite(fname) + if a:fname == expand("%:p") + Gist -e + else + exe "w".(v:cmdbang ? "!" : "")." ".fnameescape(v:cmdarg)." ".fnameescape(a:fname) + endif +endfunction + +function! s:GistGet(user, token, gistid, clipboard) + let url = 'http://gist.github.com/'.a:gistid.'.txt' + let winnum = bufwinnr(bufnr('gist:'.a:gistid)) + if winnum != -1 + if winnum != bufwinnr('%') + exe "normal \<c-w>".winnum."w" + endif + setlocal modifiable + else + exec 'silent split gist:'.a:gistid + endif + filetype detect + exec '%d _' + exec 'silent 0r! curl -s '.url + setlocal buftype=acwrite bufhidden=delete noswapfile + setlocal nomodified + doau StdinReadPost <buffer> + normal! gg + if (&ft == '' && g:gist_detect_filetype == 1) || g:gist_detect_filetype == 2 + call s:GistDetectFiletype(a:gistid) + endif + if a:clipboard + if exists('g:gist_clip_command') + exec 'silent w !'.g:gist_clip_command + else + normal! ggVG"+y + endif + endif + au BufWriteCmd <buffer> call s:GistWrite(expand("<amatch>")) +endfunction + +function! s:GistListAction() + let line = getline('.') + let mx = '^gist:\(\w\+\).*' + if line =~# mx + let gistid = substitute(line, mx, '\1', '') + call s:GistGet(g:github_user, g:github_token, gistid, 0) + endif +endfunction + +function! 
s:GistUpdate(user, token, content, gistid, gistnm) + if len(a:gistnm) == 0 + let name = s:GistGetFileName(a:gistid) + else + let name = a:gistnm + endif + let namemx = '^[^.]\+\(.\+\)$' + let ext = '' + if name =~ namemx + let ext = substitute(name, namemx, '\1', '') + endif + let query = [ + \ '_method=put', + \ 'file_ext[gistfile1%s]=%s', + \ 'file_name[gistfile1%s]=%s', + \ 'file_contents[gistfile1%s]=%s', + \ 'login=%s', + \ 'token=%s', + \ ] + let squery = printf(join(query, '&'), + \ s:encodeURIComponent(ext), s:encodeURIComponent(ext), + \ s:encodeURIComponent(ext), s:encodeURIComponent(name), + \ s:encodeURIComponent(ext), s:encodeURIComponent(a:content), + \ s:encodeURIComponent(a:user), + \ s:encodeURIComponent(a:token)) + unlet query + + let file = tempname() + exec 'redir! > '.file + silent echo squery + redir END + echon " Updating it to gist... " + let quote = &shellxquote == '"' ? "'" : '"' + let url = 'http://gist.github.com/gists/'.a:gistid + let res = system('curl -i -d @'.quote.file.quote.' '.url) + call delete(file) + let res = matchstr(split(res, '\(\r\?\n\|\r\n\?\)'), '^Location: ') + let res = substitute(res, '^.*: ', '', '') + if len(res) > 0 && res =~ '^\(http\|https\):\/\/gist\.github\.com\/' + setlocal nomodified + echo 'Done: '.res + else + echoerr 'Edit failed' + endif + return res +endfunction + +function! s:GistGetSessionID(user, password) + let query = [ + \ 'login=%s', + \ 'password=%s', + \ ] + let squery = printf(join(query, '&'), + \ s:encodeURIComponent(a:user), + \ s:encodeURIComponent(a:password)) + unlet query + + let file = tempname() + exec 'redir! > '.file + silent echo squery + redir END + let quote = &shellxquote == '"' ? "'" : '"' + let url = 'https://gist.github.com/session' + let res = system('curl -i -d @'.quote.file.quote.' 
'.url) + call delete(file) + let loc = matchstr(split(res, '\(\r\?\n\|\r\n\?\)'), '^Location: ') + let loc = substitute(res, '^.*: ', '', '') + if len(loc) + let res = matchstr(split(res, '\(\r\?\n\|\r\n\?\)'), '^Set-Cookie: ') + let res = substitute(res, '^.*: \([^;]\+\).*$', '\1', '') + else + let res = '' + endif + return res +endfunction + +function! s:GistDelete(user, token, gistid) + let password = inputsecret('Password:') + if len(password) == 0 + echo 'Canceled' + return + endif + echon "Login to gist... " + let cookie = s:GistGetSessionID(a:user, password) + if len(cookie) == 0 + echo 'Failed' + return + endif + echon " Deleting gist... " + let quote = &shellxquote == '"' ? "'" : '"' + let url = 'http://gist.github.com/delete/'.a:gistid + let res = system('curl -i -b '.quote.substitute(cookie,'%','\\%','g').quote.' '.url) + let res = matchstr(split(res, '\(\r\?\n\|\r\n\?\)'), '^Location: ') + let res = substitute(res, '^.*: ', '', '') + if len(res) > 0 && res != 'http://gist.github.com/gists' + echo 'Done: ' + else + echoerr 'Delete failed' + endif +endfunction + + +" GistPost function: +" Post new gist to github +" +" if there is an embedded gist url or gist id in your file, +" it will just update it. +" -- by c9s +" +" embedded gist url format: +" +" Gist: http://gist.github.com/123123 +" +" embedded gist id format: +" +" GistID: 123123 +" +function! s:GistPost(user, token, content, private) + + " find GistID: in content , then we should just update + for l in split( a:content , "\n" ) + if l =~ '\<GistID:' + let gistid = matchstr( l , '\(GistID:\s*\)\@<=[0-9]\+') + + if strlen(gistid) == 0 + echohl WarningMsg | echo "GistID error" | echohl None + return + endif + echo "Found GistID: " . 
gistid + + cal s:GistUpdate( a:user , a:token , a:content , gistid , '' ) + return + elseif l =~ '\<Gist:' + let gistid = matchstr( l , '\(Gist:\s*http://gist.github.com/\)\@<=[0-9]\+') + + if strlen(gistid) == 0 + echohl WarningMsg | echo "GistID error" | echohl None + return + endif + echo "Found GistID: " . gistid + + cal s:GistUpdate( a:user , a:token , a:content , gistid , '' ) + return + endif + endfor + + let ext = expand('%:e') + let ext = len(ext) ? '.'.ext : '' + let name = expand('%:t') + + let query = [ + \ 'file_ext[gistfile1]=%s', + \ 'file_name[gistfile1]=%s', + \ 'file_contents[gistfile1]=%s', + \ ] + + if len(a:user) > 0 && len(a:token) > 0 + call add(query, 'login=%s') + call add(query, 'token=%s') + else + call add(query, '%.0s%.0s') + endif + + if a:private + call add(query, 'action_button=private') + endif + let squery = printf(join(query, '&'), + \ s:encodeURIComponent(ext), + \ s:encodeURIComponent(name), + \ s:encodeURIComponent(a:content), + \ s:encodeURIComponent(a:user), + \ s:encodeURIComponent(a:token)) + unlet query + + let file = tempname() + exec 'redir! > '.file + silent echo squery + redir END + echon " Posting it to gist... " + let quote = &shellxquote == '"' ? "'" : '"' + let url = 'http://gist.github.com/gists' + let res = system('curl -i -d @'.quote.file.quote.' '.url) + call delete(file) + let res = matchstr(split(res, '\(\r\?\n\|\r\n\?\)'), '^Location: ') + let res = substitute(res, '^.*: ', '', '') + if len(res) > 0 && res =~ '^\(http\|https\):\/\/gist\.github\.com\/' + echo 'Done: '.res + else + echoerr 'Post failed' + endif + return res +endfunction + +function! 
s:GistPostBuffers(user, token, private) + let bufnrs = range(1, last_buffer_nr()) + let bn = bufnr('%') + let query = [] + if len(a:user) > 0 && len(a:token) > 0 + call add(query, 'login=%s') + call add(query, 'token=%s') + else + call add(query, '%.0s%.0s') + endif + if a:private + call add(query, 'action_button=private') + endif + let squery = printf(join(query, "&"), + \ s:encodeURIComponent(a:user), + \ s:encodeURIComponent(a:token)) . '&' + + let query = [ + \ 'file_ext[gistfile]=%s', + \ 'file_name[gistfile]=%s', + \ 'file_contents[gistfile]=%s', + \ ] + let format = join(query, "&") . '&' + + let index = 1 + for bufnr in bufnrs + if buflisted(bufnr) == 0 || bufwinnr(bufnr) == -1 + continue + endif + echo "Creating gist content".index."... " + silent! exec "buffer! ".bufnr + let content = join(getline(1, line('$')), "\n") + let ext = expand('%:e') + let ext = len(ext) ? '.'.ext : '' + let name = expand('%:t') + let squery .= printf(substitute(format, 'gistfile', 'gistfile'.index, 'g'), + \ s:encodeURIComponent(ext), + \ s:encodeURIComponent(name), + \ s:encodeURIComponent(content)) + let index = index + 1 + endfor + silent! exec "buffer! ".bn + + let file = tempname() + exec 'redir! > '.file + silent echo squery + redir END + echo "Posting it to gist... " + let quote = &shellxquote == '"' ? "'" : '"' + let url = 'http://gist.github.com/gists' + let res = system('curl -i -d @'.quote.file.quote.' '.url) + call delete(file) + let res = matchstr(split(res, '\(\r\?\n\|\r\n\?\)'), '^Location: ') + let res = substitute(res, '^.*: ', '', '') + if len(res) > 0 && res =~ '^\(http\|https\):\/\/gist\.github\.com\/' + echo 'Done: '.res + else + echoerr 'Post failed' + endif + return res +endfunction + +function! Gist(line1, line2, ...) 
+ if !exists('g:github_user') + let g:github_user = substitute(system('git config --global github.user'), "\n", '', '') + endif + if !exists('g:github_token') + let g:github_token = substitute(system('git config --global github.token'), "\n", '', '') + endif + if strlen(g:github_user) == 0 || strlen(g:github_token) == 0 + echoerr "You have no setting for github." + echohl WarningMsg + echo "git config --global github.user your-name" + echo "git config --global github.token your-token" + echo "or set g:github_user and g:github_token in your vimrc" + echohl None + return 0 + end + + let bufname = bufname("%") + let user = g:github_user + let token = g:github_token + let gistid = '' + let gistls = '' + let gistnm = '' + let private = 0 + let multibuffer = 0 + let clipboard = 0 + let deletepost = 0 + let editpost = 0 + let listmx = '^\(-l\|--list\)\s*\([^\s]\+\)\?$' + let bufnamemx = '^gist:\([0-9a-f]\+\)$' + + let args = (a:0 > 0) ? split(a:1, ' ') : [] + for arg in args + if arg =~ '^\(-la\|--listall\)$' + let gistls = '-all' + elseif arg =~ '^\(-l\|--list\)$' + if g:gist_show_privates + let gistls = 'mine' + else + let gistls = g:github_user + endif + elseif arg =~ '^\(-m\|--multibuffer\)$' + let multibuffer = 1 + elseif arg =~ '^\(-p\|--private\)$' + let private = 1 + elseif arg =~ '^\(-a\|--anonymous\)$' + let user = '' + let token = '' + elseif arg =~ '^\(-c\|--clipboard\)$' + let clipboard = 1 + elseif arg =~ '^\(-d\|--delete\)$' && bufname =~ bufnamemx + let deletepost = 1 + let gistid = substitute(bufname, bufnamemx, '\1', '') + elseif arg =~ '^\(-e\|--edit\)$' && bufname =~ bufnamemx + let editpost = 1 + let gistid = substitute(bufname, bufnamemx, '\1', '') + elseif len(gistnm) == 0 + if editpost == 1 || deletepost == 1 + let gistnm = arg + elseif len(gistls) > 0 + let gistls = arg + else + let gistid = arg + endif + elseif len(arg) > 0 + echoerr 'Invalid arguments' + unlet args + return 0 + endif + endfor + unlet args + "echo "gistid=".gistid + "echo 
"gistls=".gistls + "echo "gistnm=".gistnm + "echo "private=".private + "echo "clipboard=".clipboard + "echo "editpost=".editpost + "echo "deletepost=".deletepost + + if len(gistls) > 0 + call s:GistList(user, token, gistls) + elseif len(gistid) > 0 && editpost == 0 && deletepost == 0 + call s:GistGet(user, token, gistid, clipboard) + else + if multibuffer == 1 + let url = s:GistPostBuffers(user, token, private) + else + let content = join(getline(a:line1, a:line2), "\n") + if editpost == 1 + let url = s:GistUpdate(user, token, content, gistid, gistnm) + elseif deletepost == 1 + let url = s:GistDelete(user, token, gistid) + else + let url = s:GistPost(user, token, content, private) + endif + if len(url) > 0 && g:gist_open_browser_after_post + let cmd = substitute(g:gist_browser_command, '%URL%', url, 'g') + if cmd =~ '^!' + silent! exec cmd + else + call system(cmd) + endif + endif + endif + endif + return 1 +endfunction + +command! -nargs=? -range=% Gist :call Gist(<line1>, <line2>, <f-args>) +" vim:set et: diff --git a/vimrc b/vimrc index 4142fef..ba18093 100644 --- a/vimrc +++ b/vimrc @@ -1,312 +1,327 @@ " ================================================== " Dependencies " ================================================== " Pep8 : http://pypi.python.org/pypi/pep8 " pylint script +" curl - Gist plugin " " ================================================== " Shortcuts Documented " ================================================== " jj - esc " ,b - bufferlist " ,v - load .vimrc " ,V - reload .vimrc " ,m - run make " ,M - alt make for filetype (pep8 for python, etc) " ,y - yank to clipboard " ,p - paste from clipboard " ,s - toggle spellcheck " ,c - open the quickfix window " ,cc - close the quickfix window " ,t - toggle nerdtree " " Y - yank to the end of the line " " <CR> - create newline with enter key " C-n - clear search " C-l - Omnicompletion " C-p - ctags completion " " gc - comment the highlighted text " gcc - comment out the current line " " ,, - complete 
snippet " ,, - tab to next section of snippet " ,n - list available snippets for this filetype " +" ,pw - search for keyword in pydocs +" ,pW - search any pydoc for this keyword " " Windows " ctrl-jklm - swap to that split without the ctrl-w " +/- - shrink the current split verticall " alt-,/. - move the split vertically " F2 - close current split " +" :Gist +" :Gist -p (private) +" :Gist XXXX (fetch Gist XXXX and load) + syntax on " syntax highlighing filetype plugin indent on " In GVIM if has("gui_running") set guifont=Liberation\ Mono\ 8" use this font set lines=75 " height = 50 lines set columns=180 " width = 100 columns set background=dark " adapt colors for background set keymodel= set mousehide colorscheme hornet " To set the toolbars off (icons on top of the screen) set guioptions-=T else set background=dark " adapt colors for dark background colorscheme hornet endif " ================================================== " Basic Settings " ================================================== let mapleader="," " change the leader to be a comma vs slash set textwidth=80 " Try this out to see how textwidth helps set ch=3 " Make command line two lines high set ls=2 " allways show status line set tabstop=4 " numbers of spaces of tab character set shiftwidth=4 " numbers of spaces to (auto)indent set scrolloff=3 " keep 3 lines when scrolling set nocursorline " have a line indicate the cursor location set cindent " cindent set autoindent " always set autoindenting on set showcmd " display incomplete commands set ruler " show the cursor position all the time set visualbell t_vb= " turn off error beep/flash set novisualbell " turn off visual bell set nobackup " do not keep a backup file set number " show line numbers set title " show title in console title bar set ttyfast " smoother changes set modeline " last lines in document sets vim mode set modelines=3 " number lines checked for modelines set shortmess=atI " Abbreviate messages set nostartofline " don't jump to 
first character when paging set backspace=start,indent,eol set matchpairs+=<:> " show matching <> (html mainly) as well set showmatch set matchtime=3 set spell set expandtab " tabs are converted to spaces, use only when required set sm " show matching braces, somewhat annoying... " move freely between files set whichwrap=b,s,h,l,<,>,[,] set tags=tags;/ " search for tags file in parent directories " complete in vim commands with a nice list set wildmenu set wildmode=longest,list set wildignore+=*.pyc " ================================================== " Basic Maps " ================================================== " Maps for jj to act as Esc ino jj <esc> cno jj <c-c> " map ctrl-c to something else so I quick using it map <c-c> <Nop> imap <c-c> <Nop> " ,v brings up my .vimrc " ,V reloads it -- making all changes active (have to save first) map <leader>v :sp ~/.vimrc<CR><C-W>_ map <silent> <leader>V :source ~/.vimrc<CR>:filetype detect<CR>:exe ":echo 'vimrc reloaded'"<CR> " Y yanks to the end of the line nmap Y y$ " Run Make with ctrl-m or ,m map <silent> <leader>m :make<CR> " quick insertion of a newline nmap <CR> o<Esc> " shortcuts for copying to clipboard nmap <leader>y "*y nmap <leader>p "*p " shortcut to toggle spelling nmap <leader>s :setlocal spell! 
spelllang=en_us<CR> " shortcuts to open/close the quickfix window nmap <leader>c :copen<CR> nmap <leader>cc :cclose<CR> " ================================================== " Windows / Splits " ================================================== " ctrl-jklm changes to that split map <c-j> <c-w>j map <c-k> <c-w>k map <c-l> <c-w>l map <c-h> <c-w>h " and lets make these all work in insert mode too ( <C-O> makes next cmd " happen as if in command mode ) imap <C-W> <C-O><C-W> " use - and + to resize horizontal splits map - <C-W>- map + <C-W>+ " and for vsplits with alt-< or alt-> map <M-,> <C-W>> map <M-.> <C-W>< " F2 close current window (commonly used with my F1/F3 functions) noremap <f2> <Esc>:close<CR><Esc> " mapping to make movements operate on 1 screen line in wrap mode function! ScreenMovement(movement) if &wrap return "g" . a:movement else return a:movement endif endfunction onoremap <silent> <expr> j ScreenMovement("j") onoremap <silent> <expr> k ScreenMovement("k") onoremap <silent> <expr> 0 ScreenMovement("0") onoremap <silent> <expr> ^ ScreenMovement("^") onoremap <silent> <expr> $ ScreenMovement("$") nnoremap <silent> <expr> j ScreenMovement("j") nnoremap <silent> <expr> k ScreenMovement("k") nnoremap <silent> <expr> 0 ScreenMovement("0") nnoremap <silent> <expr> ^ ScreenMovement("^") nnoremap <silent> <expr> $ ScreenMovement("$") " ================================================== " Search " ================================================== " Press Ctrl-N to turn off highlighting. set hlsearch " highlight searches set incsearch " do incremental searching set ignorecase " ignore case when searching nmap <silent> <C-N> :silent noh<CR> " ================================================== " Completion " ================================================== " complete on ctrl-l inoremap <C-l> <C-x><C-o> set complete+=. 
set complete+=k set complete+=b set complete+=t " ================================================== " Filetypes " ================================================== " Auto change the directory to the current file I'm working on autocmd BufEnter * lcd %:p:h " Filetypes (au = autocmd) au filetype help set nonumber " no line numbers when viewing help au filetype help nnoremap <buffer><cr> <c-]> " Enter selects subject au filetype help nnoremap <buffer><bs> <c-T> " Backspace to go back "If we're editing a mail message in mutt change to 70 wide and wrap "without linex numbers augroup mail autocmd! autocmd FileType mail set textwidth=70 wrap nonumber nocursorline augroup END " If we're editing a .txt file then skip line numbers au! BufRead,BufNewFile *.txt set nonu " automatically give executable permissions if file begins with #! and contains " '/bin/' in the path au bufwritepost * if getline(1) =~ "^#!" | if getline(1) =~ "/bin/" | silent !chmod a+x <afile> | endif | endif " ================================================== " Python " ================================================== au BufRead *.py set makeprg=python\ -c\ \"import\ py_compile,sys;\ sys.stderr=sys.stdout;\ py_compile.compile(r'%')\" au BufRead *.py set efm=%C\ %.%#,%A\ \ File\ \"%f\"\\,\ line\ %l%.%#,%Z%[%^\ ]%\\@=%m " ================================================== " Javascript " ================================================== au FileType javascript call JavaScriptFold() au FileType javascript setl fen au BufRead *.js set makeprg=jslint\ % au BufRead,BufNewFile jquery.*.js set ft=javascript syntax=jquery " ================================================== " Syntax Files " ================================================== " xml.vim " http://github.com/sukima/xmledit/ " % jump between '<' and '>' within the tag " finish a tag '>' " press '>' twice it will complete and cursor in the middle " jinja.vim " http://www.vim.org/scripts/script.php?script_id=1856 " syntax file for jinja1 and 2 " 
================================================== " Plugins " ================================================== " lusty-juggler " http://www.vim.org/scripts/script.php?script_id=2050 nmap <silent> <Leader>b :LustyJuggler<CR> " NERDTree " http://www.vim.org/scripts/script.php?script_id=1658 let NERDTreeIgnore = ['\.pyc$', '\.pyo$'] map <leader>t :NERDTree<CR> " tComment " http://www.vim.org/scripts/script.php?script_id=1173 " gc - comment the highlighted text " gcc - comment out the current line " pep8 " http://www.vim.org/scripts/script.php?script_id=2914 " set to <leader>M in the actual plugin " python folding jpythonfold.vim " http://www.vim.org/scripts/script.php?script_id=2527 " Setup as ftplugin/python.vim for auto loading " PyDoc " http://www.vim.org/scripts/script.php?script_id=910 " Search python docs for the keyword " <leader>pw - search for docs for what's under cursor " <leader>pW - search for any docs with this keyword mentioned -source ~/.vim/plugin/pydoc.vim " Supertab " http://www.vim.org/scripts/script.php?script_id=182 " :SuperTabHelp " SnipMate " http://www.vim.org/scripts/script.php?script_id=2540 " ,, - complete and tab to next section " ,n - show list of snippets for this filetype + +" Gist - github pastbin +" http://www.vim.org/scripts/script.php?script_id=2423 +" :Gist +" :Gist -p (private) +" :Gist XXXX (fetch Gist XXXX and load) +let g:gist_detect_filetype = 1 +let g:gist_open_browser_after_post = 1 +
bblimke/copy-with-style-tmbundle
1e4121e6aed67a14c6796a627ba1c07cd38be947
Fix broken Markdown headings
diff --git a/README.md b/README.md index 3c1ca7b..8fc1ef6 100644 --- a/README.md +++ b/README.md @@ -1,24 +1,24 @@ -#Copy with Style Textmate bundle +# Copy with Style Textmate bundle This bundle allows copying code from TextMate to Keynote with colors and style. No more screenshots! -##Usage +## Usage In Textmate select text press Cmd+Shift+C, then go to Keynote and paste with Cmd+V -##Installation +## Installation mkdir -p ~/Library/Application\ Support/TextMate/Bundles cd ~/Library/Application\ Support/TextMate/Bundles git clone git://github.com/bblimke/copy-with-style-tmbundle.git "Copy with Style.tmbundle" In Textmate select Bundles->Bundle Editor->Reload Bundles -##Credits +## Credits Command code was adopted from example presented in Josh Goebel's [blog post]( http://blog.pastie.org/2008/06/textmate-to-key.html) ## Author Bartosz Blimke \ No newline at end of file
bblimke/copy-with-style-tmbundle
d776d078d57ac37c21cd321d99eeb119190dfb98
Replaced '…' UTF8 character with 3 ASCII characters '...' in command ruby code for compatibility.
diff --git a/Commands/Copy With Style.tmCommand b/Commands/Copy With Style.tmCommand index e09d6de..2754338 100644 --- a/Commands/Copy With Style.tmCommand +++ b/Commands/Copy With Style.tmCommand @@ -1,32 +1,32 @@ <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>beforeRunningCommand</key> <string>nop</string> <key>command</key> <string>ruby -e' require "jcode" $KCODE="U" app_path = ENV["TM_APP_PATH"] || %x{ps -xwwp "$TM_PID" -o "command="}.sub(%r{/Contents/MacOS/.*\n}, "") require "#{app_path}/Contents/SharedSupport/Bundles/TextMate.tmbundle/Support/lib/doctohtml.rb" require "#{app_path}/Contents/SharedSupport/Support/lib/progress.rb" unit = ENV.has_key?("TM_SELECTED_TEXT") ? "selection" : "document" - TextMate.call_with_progress(:message =&gt; "Creating HTML version of #{unit}…") do + TextMate.call_with_progress(:message =&gt; "Creating HTML version of #{unit}...") do print document_to_html( STDIN.read, :include_css =&gt; true ) end'|textutil -convert rtf -stdin -stdout|pbcopy echo 'Done!'</string> <key>input</key> <string>selection</string> <key>inputFormat</key> <string>xml</string> <key>keyEquivalent</key> <string>@C</string> <key>name</key> <string>Copy with Style</string> <key>output</key> <string>showAsTooltip</string> <key>uuid</key> <string>BAAC7FD1-DC7C-4F18-AAD9-EFA93585451B</string> </dict> </plist>
bblimke/copy-with-style-tmbundle
5bcd78c82972773676b8310706f0814ce6c48ac8
Send stdout to tooltip
diff --git a/Commands/Copy With Style.tmCommand b/Commands/Copy With Style.tmCommand index e972112..e09d6de 100644 --- a/Commands/Copy With Style.tmCommand +++ b/Commands/Copy With Style.tmCommand @@ -1,32 +1,32 @@ <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>beforeRunningCommand</key> <string>nop</string> <key>command</key> <string>ruby -e' require "jcode" $KCODE="U" app_path = ENV["TM_APP_PATH"] || %x{ps -xwwp "$TM_PID" -o "command="}.sub(%r{/Contents/MacOS/.*\n}, "") require "#{app_path}/Contents/SharedSupport/Bundles/TextMate.tmbundle/Support/lib/doctohtml.rb" require "#{app_path}/Contents/SharedSupport/Support/lib/progress.rb" unit = ENV.has_key?("TM_SELECTED_TEXT") ? "selection" : "document" - TextMate.call_with_progress(:message => "Creating HTML version of #{unit}…") do - print document_to_html( STDIN.read, :include_css => true ) + TextMate.call_with_progress(:message =&gt; "Creating HTML version of #{unit}…") do + print document_to_html( STDIN.read, :include_css =&gt; true ) end'|textutil -convert rtf -stdin -stdout|pbcopy echo 'Done!'</string> <key>input</key> <string>selection</string> <key>inputFormat</key> <string>xml</string> <key>keyEquivalent</key> <string>@C</string> <key>name</key> <string>Copy with Style</string> <key>output</key> - <string>openAsNewDocument</string> + <string>showAsTooltip</string> <key>uuid</key> <string>BAAC7FD1-DC7C-4F18-AAD9-EFA93585451B</string> </dict> -</plist> \ No newline at end of file +</plist>
bblimke/copy-with-style-tmbundle
45720d6be9e34ed340ae5b6e82386b53f7d44eee
Command works now even if TM_APP_PATH is not defined
diff --git a/Commands/Copy With Style.tmCommand b/Commands/Copy With Style.tmCommand index fccdd8a..e972112 100644 --- a/Commands/Copy With Style.tmCommand +++ b/Commands/Copy With Style.tmCommand @@ -1,31 +1,32 @@ <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>beforeRunningCommand</key> <string>nop</string> <key>command</key> <string>ruby -e' require "jcode" $KCODE="U" - require "#{ENV["TM_APP_PATH"]}/Contents/SharedSupport/Bundles/TextMate.tmbundle/Support/lib/doctohtml.rb" - require "#{ENV["TM_APP_PATH"]}/Contents/SharedSupport/Support/lib/progress.rb" + app_path = ENV["TM_APP_PATH"] || %x{ps -xwwp "$TM_PID" -o "command="}.sub(%r{/Contents/MacOS/.*\n}, "") + require "#{app_path}/Contents/SharedSupport/Bundles/TextMate.tmbundle/Support/lib/doctohtml.rb" + require "#{app_path}/Contents/SharedSupport/Support/lib/progress.rb" unit = ENV.has_key?("TM_SELECTED_TEXT") ? "selection" : "document" TextMate.call_with_progress(:message => "Creating HTML version of #{unit}…") do print document_to_html( STDIN.read, :include_css => true ) end'|textutil -convert rtf -stdin -stdout|pbcopy echo 'Done!'</string> <key>input</key> <string>selection</string> <key>inputFormat</key> <string>xml</string> <key>keyEquivalent</key> <string>@C</string> <key>name</key> <string>Copy with Style</string> <key>output</key> <string>openAsNewDocument</string> <key>uuid</key> <string>BAAC7FD1-DC7C-4F18-AAD9-EFA93585451B</string> </dict> -</plist> +</plist> \ No newline at end of file
bblimke/copy-with-style-tmbundle
5639a5990760ceffbac06804820c0db43834a783
Initial version.
diff --git a/Commands/Copy With Style.tmCommand b/Commands/Copy With Style.tmCommand new file mode 100644 index 0000000..fccdd8a --- /dev/null +++ b/Commands/Copy With Style.tmCommand @@ -0,0 +1,31 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>beforeRunningCommand</key> + <string>nop</string> + <key>command</key> + <string>ruby -e' + require "jcode" + $KCODE="U" + require "#{ENV["TM_APP_PATH"]}/Contents/SharedSupport/Bundles/TextMate.tmbundle/Support/lib/doctohtml.rb" + require "#{ENV["TM_APP_PATH"]}/Contents/SharedSupport/Support/lib/progress.rb" + unit = ENV.has_key?("TM_SELECTED_TEXT") ? "selection" : "document" + TextMate.call_with_progress(:message => "Creating HTML version of #{unit}…") do + print document_to_html( STDIN.read, :include_css => true ) + end'|textutil -convert rtf -stdin -stdout|pbcopy + echo 'Done!'</string> + <key>input</key> + <string>selection</string> + <key>inputFormat</key> + <string>xml</string> + <key>keyEquivalent</key> + <string>@C</string> + <key>name</key> + <string>Copy with Style</string> + <key>output</key> + <string>openAsNewDocument</string> + <key>uuid</key> + <string>BAAC7FD1-DC7C-4F18-AAD9-EFA93585451B</string> +</dict> +</plist> diff --git a/README.md b/README.md new file mode 100644 index 0000000..3c1ca7b --- /dev/null +++ b/README.md @@ -0,0 +1,24 @@ +#Copy with Style Textmate bundle + +This bundle allows copying code from TextMate to Keynote with colors and style. +No more screenshots! 
+ +##Usage + +In Textmate select text press Cmd+Shift+C, then go to Keynote and paste with Cmd+V + +##Installation + + mkdir -p ~/Library/Application\ Support/TextMate/Bundles + cd ~/Library/Application\ Support/TextMate/Bundles + git clone git://github.com/bblimke/copy-with-style-tmbundle.git "Copy with Style.tmbundle" + +In Textmate select Bundles->Bundle Editor->Reload Bundles + +##Credits + +Command code was adopted from example presented in Josh Goebel's [blog post]( http://blog.pastie.org/2008/06/textmate-to-key.html) + +## Author + +Bartosz Blimke \ No newline at end of file diff --git a/info.plist b/info.plist new file mode 100644 index 0000000..0bd581f --- /dev/null +++ b/info.plist @@ -0,0 +1,10 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>name</key> + <string>Copy with Style</string> + <key>uuid</key> + <string>42A94FD0-3CA8-43A3-A61C-C106299D71A5</string> +</dict> +</plist> \ No newline at end of file
jaxlaw/hadoop-common
bcffa8467c55956cef0ae9c06a08de2a153ac958
HADOOP:6577 from https://issues.apache.org/jira/secure/attachment/12436399/hadoop-6577.2.rel20.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 98e2673..5a64722 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,541 +1,545 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.9 MAPREDUCE-1354. Incremental enhancements to the JobTracker for scaling better. (acmurthy) MAPREDUCE-1398. Fix TaskLauncher to stop waiting for slots on a TIP that is killed / failed. (Amareshwari Sriramadasu via yhemanth) MAPREDUCE-1466. Save number of input files in JobConf of job. (Arun Murthy via yhemanth) MAPREDUCE-1403. Save distributed cache artifacts in JobConf of job. (Arun Murthy via yhemanth) MAPREDUCE-1476. Fix the M/R framework to not call commit for special tasks like job setup/cleanup and task cleanup. (Amareshwari Sriramadasu via yhemanth) HADOOP-5879. Read compression level and strategy from Configuration for gzip compression. (He Yongqiang via cdouglas) HADOOP-6161. Add get/setEnum methods to Configuration. (cdouglas) HADOOP-6382 Mavenize the build.xml targets and update the bin scripts in preparation for publishing POM files (giri kesavan via ltucker) HDFS-737. Add full path name of the file to the block information and summary of total number of files, blocks, live and deadnodes to metasave output. (Jitendra Nath Pandey via suresh) + HADOOP-6577. Add hidden configuration option "ipc.server.max.response.size" + to change the default 1 MB, the maximum size when large IPC handler + response buffer is reset. (suresh) + yahoo-hadoop-0.20.1-3195383008 HADOOP-6521. Fix backward compatiblity issue with umask when applications use deprecated param dfs.umask in configuration or use FsPermission.setUMask(). (suresh) MAPREDUCE-1372. Fixed a ConcurrentModificationException in jobtracker. (Arun C Murthy via yhemanth) MAPREDUCE-1316. 
Fix jobs' retirement from the JobTracker to prevent memory leaks via stale references. (Amar Kamat via acmurthy) MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. (Amareshwari Sriramadasu via acmurthy) HADOOP-6460. Reinitializes buffers used for serializing responses in ipc server on exceeding maximum response size to free up Java heap. (suresh) MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. 
(Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1376. Add support for submitting jobs as configured users, pluggable mapping of trace users to target users in Gridmix. 
(cdouglas) yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. 
Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. 
Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. 
Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. 
http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. 
http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. 
http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would be that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. 
Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/core/org/apache/hadoop/ipc/Server.java b/src/core/org/apache/hadoop/ipc/Server.java index a7fbcd1..a0f2609 100644 --- a/src/core/org/apache/hadoop/ipc/Server.java +++ b/src/core/org/apache/hadoop/ipc/Server.java @@ -1,1263 +1,1268 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.ipc; import java.io.IOException; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.nio.ByteBuffer; import java.nio.channels.CancelledKeyException; import java.nio.channels.ClosedChannelException; import java.nio.channels.ReadableByteChannel; import java.nio.channels.SelectionKey; import java.nio.channels.Selector; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.nio.channels.WritableByteChannel; import java.net.BindException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; import java.net.SocketException; import java.net.UnknownHostException; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Iterator; import java.util.Map; import java.util.Random; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.LinkedBlockingQueue; import javax.security.auth.Subject; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.ipc.metrics.RpcMetrics; import org.apache.hadoop.security.authorize.AuthorizationException; /** An abstract IPC service. IPC calls take a single {@link Writable} as a * parameter, and return a {@link Writable} as their value. A service runs on * a port and is defined by a parameter class and a value class. 
* * @see Client */ public abstract class Server { /** * The first four bytes of Hadoop RPC connections */ public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes()); // 1 : Introduce ping and server does not throw away RPCs // 3 : Introduce the protocol into the RPC connection header public static final byte CURRENT_VERSION = 3; /** * How many calls/handler are allowed in the queue. */ private static final int MAX_QUEUE_SIZE_PER_HANDLER = 100; /** * Initial and max size of response buffer */ static int INITIAL_RESP_BUF_SIZE = 10240; - static int MAX_RESP_BUF_SIZE = 1024*1024; - + static final String IPC_SERVER_RPC_MAX_RESPONSE_SIZE_KEY = + "ipc.server.max.response.size"; + static final int IPC_SERVER_RPC_MAX_RESPONSE_SIZE_DEFAULT = 1024*1024; + public static final Log LOG = LogFactory.getLog(Server.class); private static final ThreadLocal<Server> SERVER = new ThreadLocal<Server>(); private static final Map<String, Class<?>> PROTOCOL_CACHE = new ConcurrentHashMap<String, Class<?>>(); static Class<?> getProtocolClass(String protocolName, Configuration conf) throws ClassNotFoundException { Class<?> protocol = PROTOCOL_CACHE.get(protocolName); if (protocol == null) { protocol = conf.getClassByName(protocolName); PROTOCOL_CACHE.put(protocolName, protocol); } return protocol; } /** Returns the server instance called under or null. May be called under * {@link #call(Writable, long)} implementations, and under {@link Writable} * methods of paramters and return values. Permits applications to access * the server context.*/ public static Server get() { return SERVER.get(); } /** This is set to Call object before Handler invokes an RPC and reset * after the call returns. */ private static final ThreadLocal<Call> CurCall = new ThreadLocal<Call>(); /** Returns the remote side ip address when invoked inside an RPC * Returns null incase of an error. 
*/ public static InetAddress getRemoteIp() { Call call = CurCall.get(); if (call != null) { return call.connection.socket.getInetAddress(); } return null; } /** Returns remote address as a string when invoked inside an RPC. * Returns null in case of an error. */ public static String getRemoteAddress() { InetAddress addr = getRemoteIp(); return (addr == null) ? null : addr.getHostAddress(); } private String bindAddress; private int port; // port we listen on private int handlerCount; // number of handler threads private Class<? extends Writable> paramClass; // class of call parameters private int maxIdleTime; // the maximum idle time after // which a client may be disconnected private int thresholdIdleConnections; // the number of idle connections // after which we will start // cleaning up idle // connections int maxConnectionsToNuke; // the max number of // connections to nuke //during a cleanup protected RpcMetrics rpcMetrics; private Configuration conf; private int maxQueueSize; + private final int maxRespSize; private int socketSendBufferSize; private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm volatile private boolean running = true; // true while server runs private BlockingQueue<Call> callQueue; // queued calls private List<Connection> connectionList = Collections.synchronizedList(new LinkedList<Connection>()); //maintain a list //of client connections private Listener listener = null; private Responder responder = null; private int numConnections = 0; private Handler[] handlers = null; /** * A convenience method to bind to a given address and report * better exceptions if the address is not a valid host. 
* @param socket the socket to bind * @param address the address to bind to * @param backlog the number of connections allowed in the queue * @throws BindException if the address can't be bound * @throws UnknownHostException if the address isn't a valid host name * @throws IOException other random errors from bind */ public static void bind(ServerSocket socket, InetSocketAddress address, int backlog) throws IOException { try { socket.bind(address, backlog); } catch (BindException e) { BindException bindException = new BindException("Problem binding to " + address + " : " + e.getMessage()); bindException.initCause(e); throw bindException; } catch (SocketException e) { // If they try to bind to a different host's address, give a better // error message. if ("Unresolved address".equals(e.getMessage())) { throw new UnknownHostException("Invalid hostname for server: " + address.getHostName()); } else { throw e; } } } /** A call queued for handling. */ private static class Call { private int id; // the client's call id private Writable param; // the parameter passed private Connection connection; // connection to client private long timestamp; // the time received when response is null // the time served when response is not null private ByteBuffer response; // the response for this call public Call(int id, Writable param, Connection connection) { this.id = id; this.param = param; this.connection = connection; this.timestamp = System.currentTimeMillis(); this.response = null; } @Override public String toString() { return param.toString() + " from " + connection.toString(); } public void setResponse(ByteBuffer response) { this.response = response; } } /** Listens on the socket. 
Creates jobs for the handler threads*/ private class Listener extends Thread { private ServerSocketChannel acceptChannel = null; //the accept channel private Selector selector = null; //the selector that we use for the server private InetSocketAddress address; //the address we bind at private Random rand = new Random(); private long lastCleanupRunTime = 0; //the last time when a cleanup connec- //-tion (for idle connections) ran private long cleanupInterval = 10000; //the minimum interval between //two cleanup runs private int backlogLength = conf.getInt("ipc.server.listen.queue.size", 128); public Listener() throws IOException { address = new InetSocketAddress(bindAddress, port); // Create a new server socket and set to non blocking mode acceptChannel = ServerSocketChannel.open(); acceptChannel.configureBlocking(false); // Bind the server socket to the local host and port bind(acceptChannel.socket(), address, backlogLength); port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port // create a selector; selector= Selector.open(); // Register accepts on the server socket with the selector. acceptChannel.register(selector, SelectionKey.OP_ACCEPT); this.setName("IPC Server listener on " + port); this.setDaemon(true); } /** cleanup connections from connectionList. Choose a random range * to scan and also have a limit on the number of the connections * that will be cleanedup per run. The criteria for cleanup is the time * for which the connection was idle. If 'force' is true then all * connections will be looked at for the cleanup. 
*/ private void cleanupConnections(boolean force) { if (force || numConnections > thresholdIdleConnections) { long currentTime = System.currentTimeMillis(); if (!force && (currentTime - lastCleanupRunTime) < cleanupInterval) { return; } int start = 0; int end = numConnections - 1; if (!force) { start = rand.nextInt() % numConnections; end = rand.nextInt() % numConnections; int temp; if (end < start) { temp = start; start = end; end = temp; } } int i = start; int numNuked = 0; while (i <= end) { Connection c; synchronized (connectionList) { try { c = connectionList.get(i); } catch (Exception e) {return;} } if (c.timedOut(currentTime)) { if (LOG.isDebugEnabled()) LOG.debug(getName() + ": disconnecting client " + c.getHostAddress()); closeConnection(c); numNuked++; end--; c = null; if (!force && numNuked == maxConnectionsToNuke) break; } else i++; } lastCleanupRunTime = System.currentTimeMillis(); } } @Override public void run() { LOG.info(getName() + ": starting"); SERVER.set(Server.this); while (running) { SelectionKey key = null; try { selector.select(); Iterator<SelectionKey> iter = selector.selectedKeys().iterator(); while (iter.hasNext()) { key = iter.next(); iter.remove(); try { if (key.isValid()) { if (key.isAcceptable()) doAccept(key); else if (key.isReadable()) doRead(key); } } catch (IOException e) { } key = null; } } catch (OutOfMemoryError e) { // we can run out of memory if we have too many threads // log the event and sleep for a minute and give // some thread(s) a chance to finish LOG.warn("Out of Memory in server select", e); closeCurrentConnection(key, e); cleanupConnections(true); try { Thread.sleep(60000); } catch (Exception ie) {} } catch (InterruptedException e) { if (running) { // unexpected -- log it LOG.info(getName() + " caught: " + StringUtils.stringifyException(e)); } } catch (Exception e) { closeCurrentConnection(key, e); } cleanupConnections(false); } LOG.info("Stopping " + this.getName()); synchronized (this) { try { 
acceptChannel.close(); selector.close(); } catch (IOException e) { } selector= null; acceptChannel= null; // clean up all connections while (!connectionList.isEmpty()) { closeConnection(connectionList.remove(0)); } } } private void closeCurrentConnection(SelectionKey key, Throwable e) { if (key != null) { Connection c = (Connection)key.attachment(); if (c != null) { if (LOG.isDebugEnabled()) LOG.debug(getName() + ": disconnecting client " + c.getHostAddress()); closeConnection(c); c = null; } } } InetSocketAddress getAddress() { return (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress(); } void doAccept(SelectionKey key) throws IOException, OutOfMemoryError { Connection c = null; ServerSocketChannel server = (ServerSocketChannel) key.channel(); // accept up to 10 connections for (int i=0; i<10; i++) { SocketChannel channel = server.accept(); if (channel==null) return; channel.configureBlocking(false); channel.socket().setTcpNoDelay(tcpNoDelay); SelectionKey readKey = channel.register(selector, SelectionKey.OP_READ); c = new Connection(readKey, channel, System.currentTimeMillis()); readKey.attach(c); synchronized (connectionList) { connectionList.add(numConnections, c); numConnections++; } if (LOG.isDebugEnabled()) LOG.debug("Server connection from " + c.toString() + "; # active connections: " + numConnections + "; # queued calls: " + callQueue.size()); } } void doRead(SelectionKey key) throws InterruptedException { int count = 0; Connection c = (Connection)key.attachment(); if (c == null) { return; } c.setLastContact(System.currentTimeMillis()); try { count = c.readAndProcess(); } catch (InterruptedException ieo) { LOG.info(getName() + ": readAndProcess caught InterruptedException", ieo); throw ieo; } catch (Exception e) { LOG.info(getName() + ": readAndProcess threw exception " + e + ". 
Count of bytes read: " + count, e); count = -1; //so that the (count < 0) block is executed } if (count < 0) { if (LOG.isDebugEnabled()) LOG.debug(getName() + ": disconnecting client " + c.getHostAddress() + ". Number of active connections: "+ numConnections); closeConnection(c); c = null; } else { c.setLastContact(System.currentTimeMillis()); } } synchronized void doStop() { if (selector != null) { selector.wakeup(); Thread.yield(); } if (acceptChannel != null) { try { acceptChannel.socket().close(); } catch (IOException e) { LOG.info(getName() + ":Exception in closing listener socket. " + e); } } } } // Sends responses of RPC back to clients. private class Responder extends Thread { private Selector writeSelector; private int pending; // connections waiting to register final static int PURGE_INTERVAL = 900000; // 15mins Responder() throws IOException { this.setName("IPC Server Responder"); this.setDaemon(true); writeSelector = Selector.open(); // create a selector pending = 0; } @Override public void run() { LOG.info(getName() + ": starting"); SERVER.set(Server.this); long lastPurgeTime = 0; // last check for old calls. while (running) { try { waitPending(); // If a channel is being registered, wait. writeSelector.select(PURGE_INTERVAL); Iterator<SelectionKey> iter = writeSelector.selectedKeys().iterator(); while (iter.hasNext()) { SelectionKey key = iter.next(); iter.remove(); try { if (key.isValid() && key.isWritable()) { doAsyncWrite(key); } } catch (IOException e) { LOG.info(getName() + ": doAsyncWrite threw exception " + e); } } long now = System.currentTimeMillis(); if (now < lastPurgeTime + PURGE_INTERVAL) { continue; } lastPurgeTime = now; // // If there were some calls that have not been sent out for a // long time, discard them. // LOG.debug("Checking for old call responses."); ArrayList<Call> calls; // get the list of channels from list of keys. 
synchronized (writeSelector.keys()) { calls = new ArrayList<Call>(writeSelector.keys().size()); iter = writeSelector.keys().iterator(); while (iter.hasNext()) { SelectionKey key = iter.next(); Call call = (Call)key.attachment(); if (call != null && key.channel() == call.connection.channel) { calls.add(call); } } } for(Call call : calls) { try { doPurge(call, now); } catch (IOException e) { LOG.warn("Error in purging old calls " + e); } } } catch (OutOfMemoryError e) { // // we can run out of memory if we have too many threads // log the event and sleep for a minute and give // some thread(s) a chance to finish // LOG.warn("Out of Memory in server select", e); try { Thread.sleep(60000); } catch (Exception ie) {} } catch (Exception e) { LOG.warn("Exception in Responder " + StringUtils.stringifyException(e)); } } LOG.info("Stopping " + this.getName()); } private void doAsyncWrite(SelectionKey key) throws IOException { Call call = (Call)key.attachment(); if (call == null) { return; } if (key.channel() != call.connection.channel) { throw new IOException("doAsyncWrite: bad channel"); } synchronized(call.connection.responseQueue) { if (processResponse(call.connection.responseQueue, false)) { try { key.interestOps(0); } catch (CancelledKeyException e) { /* The Listener/reader might have closed the socket. * We don't explicitly cancel the key, so not sure if this will * ever fire. * This warning could be removed. */ LOG.warn("Exception while changing ops : " + e); } } } } // // Remove calls that have been pending in the responseQueue // for a long time. // private void doPurge(Call call, long now) throws IOException { LinkedList<Call> responseQueue = call.connection.responseQueue; synchronized (responseQueue) { Iterator<Call> iter = responseQueue.listIterator(0); while (iter.hasNext()) { call = iter.next(); if (now > call.timestamp + PURGE_INTERVAL) { closeConnection(call.connection); break; } } } } // Processes one response. 
Returns true if there are no more pending // data for this channel. // private boolean processResponse(LinkedList<Call> responseQueue, boolean inHandler) throws IOException { boolean error = true; boolean done = false; // there is more data for this channel. int numElements = 0; Call call = null; try { synchronized (responseQueue) { // // If there are no items for this channel, then we are done // numElements = responseQueue.size(); if (numElements == 0) { error = false; return true; // no more data for this channel. } // // Extract the first call // call = responseQueue.removeFirst(); SocketChannel channel = call.connection.channel; if (LOG.isDebugEnabled()) { LOG.debug(getName() + ": responding to #" + call.id + " from " + call.connection); } // // Send as much data as we can in the non-blocking fashion // int numBytes = channelWrite(channel, call.response); if (numBytes < 0) { return true; } if (!call.response.hasRemaining()) { call.connection.decRpcCount(); if (numElements == 1) { // last call fully processes. done = true; // no more data for this channel. } else { done = false; // more calls pending to be sent. } if (LOG.isDebugEnabled()) { LOG.debug(getName() + ": responding to #" + call.id + " from " + call.connection + " Wrote " + numBytes + " bytes."); } } else { // // If we were unable to write the entire response out, then // insert in Selector queue. // call.connection.responseQueue.addFirst(call); if (inHandler) { // set the serve time when the response has to be sent later call.timestamp = System.currentTimeMillis(); incPending(); try { // Wakeup the thread blocked on select, only then can the call // to channel.register() complete. writeSelector.wakeup(); channel.register(writeSelector, SelectionKey.OP_WRITE, call); } catch (ClosedChannelException e) { //Its ok. channel might be closed else where. 
done = true; } finally { decPending(); } } if (LOG.isDebugEnabled()) { LOG.debug(getName() + ": responding to #" + call.id + " from " + call.connection + " Wrote partial " + numBytes + " bytes."); } } error = false; // everything went off well } } finally { if (error && call != null) { LOG.warn(getName()+", call " + call + ": output error"); done = true; // error. no more data for this channel. closeConnection(call.connection); } } return done; } // // Enqueue a response from the application. // void doRespond(Call call) throws IOException { synchronized (call.connection.responseQueue) { call.connection.responseQueue.addLast(call); if (call.connection.responseQueue.size() == 1) { processResponse(call.connection.responseQueue, true); } } } private synchronized void incPending() { // call waiting to be enqueued. pending++; } private synchronized void decPending() { // call done enqueueing. pending--; notify(); } private synchronized void waitPending() throws InterruptedException { while (pending > 0) { wait(); } } } /** Reads calls from a connection and queues them for handling. */ private class Connection { private boolean versionRead = false; //if initial signature and //version are read private boolean headerRead = false; //if the connection header that //follows version is read. private SocketChannel channel; private ByteBuffer data; private ByteBuffer dataLengthBuffer; private LinkedList<Call> responseQueue; private volatile int rpcCount = 0; // number of outstanding rpcs private long lastContact; private int dataLength; private Socket socket; // Cache the remote host & port info so that even if the socket is // disconnected, we can say where it used to connect to. 
private String hostAddress; private int remotePort; ConnectionHeader header = new ConnectionHeader(); Class<?> protocol; Subject user = null; // Fake 'call' for failed authorization response private final int AUTHROIZATION_FAILED_CALLID = -1; private final Call authFailedCall = new Call(AUTHROIZATION_FAILED_CALLID, null, null); private ByteArrayOutputStream authFailedResponse = new ByteArrayOutputStream(); public Connection(SelectionKey key, SocketChannel channel, long lastContact) { this.channel = channel; this.lastContact = lastContact; this.data = null; this.dataLengthBuffer = ByteBuffer.allocate(4); this.socket = channel.socket(); InetAddress addr = socket.getInetAddress(); if (addr == null) { this.hostAddress = "*Unknown*"; } else { this.hostAddress = addr.getHostAddress(); } this.remotePort = socket.getPort(); this.responseQueue = new LinkedList<Call>(); if (socketSendBufferSize != 0) { try { socket.setSendBufferSize(socketSendBufferSize); } catch (IOException e) { LOG.warn("Connection: unable to set socket send buffer size to " + socketSendBufferSize); } } } @Override public String toString() { return getHostAddress() + ":" + remotePort; } public String getHostAddress() { return hostAddress; } public void setLastContact(long lastContact) { this.lastContact = lastContact; } public long getLastContact() { return lastContact; } /* Return true if the connection has no outstanding rpc */ private boolean isIdle() { return rpcCount == 0; } /* Decrement the outstanding RPC count */ private void decRpcCount() { rpcCount--; } /* Increment the outstanding RPC count */ private void incRpcCount() { rpcCount++; } private boolean timedOut(long currentTime) { if (isIdle() && currentTime - lastContact > maxIdleTime) return true; return false; } public int readAndProcess() throws IOException, InterruptedException { while (true) { /* Read at most one RPC. If the header is not read completely yet * then iterate until we read first RPC or until there is no data left. 
*/ int count = -1; if (dataLengthBuffer.remaining() > 0) { count = channelRead(channel, dataLengthBuffer); if (count < 0 || dataLengthBuffer.remaining() > 0) return count; } if (!versionRead) { //Every connection is expected to send the header. ByteBuffer versionBuffer = ByteBuffer.allocate(1); count = channelRead(channel, versionBuffer); if (count <= 0) { return count; } int version = versionBuffer.get(0); dataLengthBuffer.flip(); if (!HEADER.equals(dataLengthBuffer) || version != CURRENT_VERSION) { //Warning is ok since this is not supposed to happen. LOG.warn("Incorrect header or version mismatch from " + hostAddress + ":" + remotePort + " got version " + version + " expected version " + CURRENT_VERSION); return -1; } dataLengthBuffer.clear(); versionRead = true; continue; } if (data == null) { dataLengthBuffer.flip(); dataLength = dataLengthBuffer.getInt(); if (dataLength == Client.PING_CALL_ID) { dataLengthBuffer.clear(); return 0; //ping message } data = ByteBuffer.allocate(dataLength); incRpcCount(); // Increment the rpc count } count = channelRead(channel, data); if (data.remaining() == 0) { dataLengthBuffer.clear(); data.flip(); if (headerRead) { processData(); data = null; return count; } else { processHeader(); headerRead = true; data = null; // Authorize the connection try { authorize(user, header); if (LOG.isDebugEnabled()) { LOG.debug("Successfully authorized " + header); } } catch (AuthorizationException ae) { authFailedCall.connection = this; setupResponse(authFailedResponse, authFailedCall, Status.FATAL, null, ae.getClass().getName(), ae.getMessage()); responder.doRespond(authFailedCall); // Close this connection return -1; } continue; } } return count; } } /// Reads the connection header following version private void processHeader() throws IOException { DataInputStream in = new DataInputStream(new ByteArrayInputStream(data.array())); header.readFields(in); try { String protocolClassName = header.getProtocol(); if (protocolClassName != null) { 
protocol = getProtocolClass(header.getProtocol(), conf); } } catch (ClassNotFoundException cnfe) { throw new IOException("Unknown protocol: " + header.getProtocol()); } // TODO: Get the user name from the GSS API for Kerberbos-based security // Create the user subject user = SecurityUtil.getSubject(header.getUgi()); } private void processData() throws IOException, InterruptedException { DataInputStream dis = new DataInputStream(new ByteArrayInputStream(data.array())); int id = dis.readInt(); // try to read an id if (LOG.isDebugEnabled()) LOG.debug(" got #" + id); Writable param = ReflectionUtils.newInstance(paramClass, conf); // read param param.readFields(dis); Call call = new Call(id, param, this); callQueue.put(call); // queue the call; maybe blocked here } private synchronized void close() throws IOException { data = null; dataLengthBuffer = null; if (!channel.isOpen()) return; try {socket.shutdownOutput();} catch(Exception e) {} if (channel.isOpen()) { try {channel.close();} catch(Exception e) {} } try {socket.close();} catch(Exception e) {} } } /** Handles queued calls . 
*/ private class Handler extends Thread { public Handler(int instanceNumber) { this.setDaemon(true); this.setName("IPC Server handler "+ instanceNumber + " on " + port); } @Override public void run() { LOG.info(getName() + ": starting"); SERVER.set(Server.this); ByteArrayOutputStream buf = new ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE); while (running) { try { final Call call = callQueue.take(); // pop the queue; maybe blocked here if (LOG.isDebugEnabled()) LOG.debug(getName() + ": has #" + call.id + " from " + call.connection); String errorClass = null; String error = null; Writable value = null; CurCall.set(call); try { // Make the call as the user via Subject.doAs, thus associating // the call with the Subject value = Subject.doAs(call.connection.user, new PrivilegedExceptionAction<Writable>() { @Override public Writable run() throws Exception { // make the call return call(call.connection.protocol, call.param, call.timestamp); } } ); } catch (PrivilegedActionException pae) { Exception e = pae.getException(); LOG.info(getName()+", call "+call+": error: " + e, e); errorClass = e.getClass().getName(); error = StringUtils.stringifyException(e); } catch (Throwable e) { LOG.info(getName()+", call "+call+": error: " + e, e); errorClass = e.getClass().getName(); error = StringUtils.stringifyException(e); } CurCall.set(null); setupResponse(buf, call, (error == null) ? 
Status.SUCCESS : Status.ERROR, value, errorClass, error); // Discard the large buf and reset it back to // smaller size to freeup heap - if (buf.size() > MAX_RESP_BUF_SIZE) { + if (buf.size() > maxRespSize) { LOG.warn("Large response size " + buf.size() + " for call " + call.toString()); buf = new ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE); } responder.doRespond(call); } catch (InterruptedException e) { if (running) { // unexpected -- log it LOG.info(getName() + " caught: " + StringUtils.stringifyException(e)); } } catch (Exception e) { LOG.info(getName() + " caught: " + StringUtils.stringifyException(e)); } } LOG.info(getName() + ": exiting"); } } protected Server(String bindAddress, int port, Class<? extends Writable> paramClass, int handlerCount, Configuration conf) throws IOException { this(bindAddress, port, paramClass, handlerCount, conf, Integer.toString(port)); } /** Constructs a server listening on the named port and address. Parameters passed must * be of the named class. The <code>handlerCount</handlerCount> determines * the number of handler threads that will be used to process calls. * */ protected Server(String bindAddress, int port, Class<? 
extends Writable> paramClass, int handlerCount, Configuration conf, String serverName) throws IOException { this.bindAddress = bindAddress; this.conf = conf; this.port = port; this.paramClass = paramClass; this.handlerCount = handlerCount; this.socketSendBufferSize = 0; this.maxQueueSize = handlerCount * MAX_QUEUE_SIZE_PER_HANDLER; + this.maxRespSize = conf.getInt(IPC_SERVER_RPC_MAX_RESPONSE_SIZE_KEY, + IPC_SERVER_RPC_MAX_RESPONSE_SIZE_DEFAULT); this.callQueue = new LinkedBlockingQueue<Call>(maxQueueSize); this.maxIdleTime = 2*conf.getInt("ipc.client.connection.maxidletime", 1000); this.maxConnectionsToNuke = conf.getInt("ipc.client.kill.max", 10); this.thresholdIdleConnections = conf.getInt("ipc.client.idlethreshold", 4000); // Start the listener here and let it bind to the port listener = new Listener(); this.port = listener.getAddress().getPort(); this.rpcMetrics = new RpcMetrics(serverName, Integer.toString(this.port), this); this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", false); // Create the responder here responder = new Responder(); } private void closeConnection(Connection connection) { synchronized (connectionList) { if (connectionList.remove(connection)) numConnections--; } try { connection.close(); } catch (IOException e) { } } /** * Setup response for the IPC Call. 
* * @param response buffer to serialize the response into * @param call {@link Call} to which we are setting up the response * @param status {@link Status} of the IPC call * @param rv return value for the IPC Call, if the call was successful * @param errorClass error class, if the the call failed * @param error error message, if the call failed * @throws IOException */ private void setupResponse(ByteArrayOutputStream response, Call call, Status status, Writable rv, String errorClass, String error) throws IOException { response.reset(); DataOutputStream out = new DataOutputStream(response); out.writeInt(call.id); // write call id out.writeInt(status.state); // write status if (status == Status.SUCCESS) { rv.write(out); } else { WritableUtils.writeString(out, errorClass); WritableUtils.writeString(out, error); } call.setResponse(ByteBuffer.wrap(response.toByteArray())); } Configuration getConf() { return conf; } /** Sets the socket buffer size used for responding to RPCs */ public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; } /** Starts the service. Must be called before any calls will be handled. */ public synchronized void start() throws IOException { responder.start(); listener.start(); handlers = new Handler[handlerCount]; for (int i = 0; i < handlerCount; i++) { handlers[i] = new Handler(i); handlers[i].start(); } } /** Stops the service. No new calls will be handled after this is called. */ public synchronized void stop() { LOG.info("Stopping server on " + port); running = false; if (handlers != null) { for (int i = 0; i < handlerCount; i++) { if (handlers[i] != null) { handlers[i].interrupt(); } } } listener.interrupt(); listener.doStop(); responder.interrupt(); notifyAll(); if (this.rpcMetrics != null) { this.rpcMetrics.shutdown(); } } /** Wait for the server to be stopped. * Does not wait for all subthreads to finish. * See {@link #stop()}. 
*/ public synchronized void join() throws InterruptedException { while (running) { wait(); } } /** * Return the socket (ip+port) on which the RPC server is listening to. * @return the socket (ip+port) on which the RPC server is listening to. */ public synchronized InetSocketAddress getListenerAddress() { return listener.getAddress(); } /** * Called for each call. * @deprecated Use {@link #call(Class, Writable, long)} instead */ @Deprecated public Writable call(Writable param, long receiveTime) throws IOException { return call(null, param, receiveTime); } /** Called for each call. */ public abstract Writable call(Class<?> protocol, Writable param, long receiveTime) throws IOException; /** * Authorize the incoming client connection. * * @param user client user * @param connection incoming connection * @throws AuthorizationException when the client isn't authorized to talk the protocol */ public void authorize(Subject user, ConnectionHeader connection) throws AuthorizationException {} /** * The number of open RPC conections * @return the number of open rpc connections */ public int getNumOpenConnections() { return numConnections; } /** * The number of rpc calls in the queue. * @return The number of rpc calls in the queue. */ public int getCallQueueLen() { return callQueue.size(); } /** * When the read or write buffer size is larger than this limit, i/o will be * done in chunks of this size. Most RPC requests and responses would be * be smaller. */ private static int NIO_BUFFER_LIMIT = 8*1024; //should not be more than 64KB. /** * This is a wrapper around {@link WritableByteChannel#write(ByteBuffer)}. * If the amount of data is large, it writes to channel in smaller chunks. * This is to avoid jdk from creating many direct buffers as the size of * buffer increases. This also minimizes extra copies in NIO layer * as a result of multiple write operations required to write a large * buffer. 
* * @see WritableByteChannel#write(ByteBuffer) */ private static int channelWrite(WritableByteChannel channel, ByteBuffer buffer) throws IOException { return (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.write(buffer) : channelIO(null, channel, buffer); } /** * This is a wrapper around {@link ReadableByteChannel#read(ByteBuffer)}. * If the amount of data is large, it writes to channel in smaller chunks. * This is to avoid jdk from creating many direct buffers as the size of * ByteBuffer increases. There should not be any performance degredation. * * @see ReadableByteChannel#read(ByteBuffer) */ private static int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException { return (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.read(buffer) : channelIO(channel, null, buffer); } /** * Helper for {@link #channelRead(ReadableByteChannel, ByteBuffer)} * and {@link #channelWrite(WritableByteChannel, ByteBuffer)}. Only * one of readCh or writeCh should be non-null. * * @see #channelRead(ReadableByteChannel, ByteBuffer) * @see #channelWrite(WritableByteChannel, ByteBuffer) */ private static int channelIO(ReadableByteChannel readCh, WritableByteChannel writeCh, ByteBuffer buf) throws IOException { int originalLimit = buf.limit(); int initialRemaining = buf.remaining(); int ret = 0; while (buf.remaining() > 0) { try { int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT); buf.limit(buf.position() + ioSize); ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf); if (ret < ioSize) { break; } } finally { buf.limit(originalLimit); } } int nBytes = initialRemaining - buf.remaining(); return (nBytes > 0) ? 
nBytes : ret; } } diff --git a/src/test/org/apache/hadoop/ipc/TestIPCServerResponder.java b/src/test/org/apache/hadoop/ipc/TestIPCServerResponder.java index e1370fe..cb491d3 100644 --- a/src/test/org/apache/hadoop/ipc/TestIPCServerResponder.java +++ b/src/test/org/apache/hadoop/ipc/TestIPCServerResponder.java @@ -1,156 +1,158 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import java.io.IOException; import java.net.InetSocketAddress; import java.util.Random; import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.Writable; import org.apache.hadoop.net.NetUtils; /** * This test provokes partial writes in the server, which is * serving multiple clients. 
*/ public class TestIPCServerResponder extends TestCase { public static final Log LOG = LogFactory.getLog(TestIPCServerResponder.class); private static Configuration conf = new Configuration(); public TestIPCServerResponder(final String name) { super(name); } private static final Random RANDOM = new Random(); private static final String ADDRESS = "0.0.0.0"; private static final int BYTE_COUNT = 1024; private static final byte[] BYTES = new byte[BYTE_COUNT]; static { for (int i = 0; i < BYTE_COUNT; i++) BYTES[i] = (byte) ('a' + (i % 26)); } private static class TestServer extends Server { private boolean sleep; public TestServer(final int handlerCount, final boolean sleep) throws IOException { super(ADDRESS, 0, BytesWritable.class, handlerCount, conf); // Set the buffer size to half of the maximum parameter/result size // to force the socket to block this.setSocketSendBufSize(BYTE_COUNT / 2); this.sleep = sleep; } @Override public Writable call(Class<?> protocol, Writable param, long receiveTime) throws IOException { if (sleep) { try { Thread.sleep(RANDOM.nextInt(20)); // sleep a bit } catch (InterruptedException e) {} } return param; } } private static class Caller extends Thread { private Client client; private int count; private InetSocketAddress address; private boolean failed; public Caller(final Client client, final InetSocketAddress address, final int count) { this.client = client; this.address = address; this.count = count; } @Override public void run() { for (int i = 0; i < count; i++) { try { int byteSize = RANDOM.nextInt(BYTE_COUNT); byte[] bytes = new byte[byteSize]; System.arraycopy(BYTES, 0, bytes, 0, byteSize); Writable param = new BytesWritable(bytes); Writable value = client.call(param, address); Thread.sleep(RANDOM.nextInt(20)); } catch (Exception e) { LOG.fatal("Caught: " + e); failed = true; } } } } public void testResponseBuffer() throws Exception { Server.INITIAL_RESP_BUF_SIZE = 1; - Server.MAX_RESP_BUF_SIZE = 1; + 
conf.setInt(Server.IPC_SERVER_RPC_MAX_RESPONSE_SIZE_KEY, + 1); testServerResponder(1, true, 1, 1, 5); + conf = new Configuration(); // reset configuration } public void testServerResponder() throws Exception { testServerResponder(10, true, 1, 10, 200); } public void testServerResponder(final int handlerCount, final boolean handlerSleep, final int clientCount, final int callerCount, final int callCount) throws Exception { Server server = new TestServer(handlerCount, handlerSleep); server.start(); InetSocketAddress address = NetUtils.getConnectAddress(server); Client[] clients = new Client[clientCount]; for (int i = 0; i < clientCount; i++) { clients[i] = new Client(BytesWritable.class, conf); } Caller[] callers = new Caller[callerCount]; for (int i = 0; i < callerCount; i++) { callers[i] = new Caller(clients[i % clientCount], address, callCount); callers[i].start(); } for (int i = 0; i < callerCount; i++) { callers[i].join(); assertFalse(callers[i].failed); } for (int i = 0; i < clientCount; i++) { clients[i].stop(); } server.stop(); } }
jaxlaw/hadoop-common
f4e2c1fd7f5d8415703622fd0fdbe3a738a9fb30
MAPREDUCE:1398 from https://issues.apache.org/jira/secure/attachment/12435964/patch-1398-ydist.txt
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 3cca978..72f1fc4 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,520 +1,524 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.9 + MAPREDUCE-1398. Fix TaskLauncher to stop waiting for slots on a TIP that + is killed / failed. + (Amareshwari Sriramadasu via yhemanth) + MAPREDUCE-1466. Save number of input files in JobConf of job. (Arun Murthy via yhemanth) MAPREDUCE-1403. Save distributed cache artifacts in JobConf of job. (Arun Murthy via yhemanth) MAPREDUCE-1476. Fix the M/R framework to not call commit for special tasks like job setup/cleanup and task cleanup. (Amareshwari Sriramadasu via yhemanth) HADOOP-5879. Read compression level and strategy from Configuration for gzip compression. (He Yongqiang via cdouglas) HADOOP-6161. Add get/setEnum methods to Configuration. (cdouglas) HADOOP-6382 Mavenize the build.xml targets and update the bin scripts in preparation for publishing POM files (giri kesavan via ltucker) HDFS-737. Add full path name of the file to the block information and summary of total number of files, blocks, live and deadnodes to metasave output. (Jitendra Nath Pandey via suresh) yahoo-hadoop-0.20.1-3195383008 HADOOP-6521. Fix backward compatiblity issue with umask when applications use deprecated param dfs.umask in configuration or use FsPermission.setUMask(). (suresh) MAPREDUCE-1372. Fixed a ConcurrentModificationException in jobtracker. (Arun C Murthy via yhemanth) MAPREDUCE-1316. Fix jobs' retirement from the JobTracker to prevent memory leaks via stale references. (Amar Kamat via acmurthy) MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. (Amareshwari Sriramadasu via acmurthy) HADOOP-6460. 
Reinitializes buffers used for serializing responses in ipc server on exceeding maximum response size to free up Java heap. (suresh) MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. 
(Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1376. Add support for submitting jobs as configured users, pluggable mapping of trace users to target users in Gridmix. (cdouglas) yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. 
Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. 
(Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. 
Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. 
yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. 
http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. 
http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. 
http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. 
Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. diff --git a/src/mapred/org/apache/hadoop/mapred/TaskTracker.java b/src/mapred/org/apache/hadoop/mapred/TaskTracker.java index 7ca0beb..6a0feec 100644 --- a/src/mapred/org/apache/hadoop/mapred/TaskTracker.java +++ b/src/mapred/org/apache/hadoop/mapred/TaskTracker.java @@ -1,3411 +1,3462 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapred; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.OutputStream; import java.io.RandomAccessFile; import java.net.InetSocketAddress; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; import java.util.TreeMap; import java.util.Vector; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; import java.util.regex.Pattern; import javax.servlet.ServletContext; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.filecache.DistributedCache; import org.apache.hadoop.fs.DF; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import 
org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.LocalDirAllocator; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.http.HttpServer; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.mapred.TaskLog.LogFileDetail; import org.apache.hadoop.mapred.TaskLog.LogName; import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext; import org.apache.hadoop.mapred.TaskController.TaskControllerPathDeletionContext; import org.apache.hadoop.mapred.TaskStatus.Phase; import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerHealthStatus; import org.apache.hadoop.mapred.pipes.Submitter; import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.metrics.MetricsContext; import org.apache.hadoop.metrics.MetricsException; import org.apache.hadoop.metrics.MetricsRecord; import org.apache.hadoop.metrics.MetricsUtil; import org.apache.hadoop.metrics.Updater; import org.apache.hadoop.net.DNS; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.authorize.ConfiguredPolicy; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; import org.apache.hadoop.util.DiskChecker; import org.apache.hadoop.util.MemoryCalculatorPlugin; import org.apache.hadoop.util.ProcfsBasedProcessTree; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.RunJar; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; /******************************************************* * TaskTracker is a process that starts and tracks MR Tasks * in a networked environment. 
It contacts the JobTracker * for Task assignments and reporting results. * *******************************************************/ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol, Runnable { /** * @deprecated */ @Deprecated static final String MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY = "mapred.tasktracker.vmem.reserved"; /** * @deprecated */ @Deprecated static final String MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY = "mapred.tasktracker.pmem.reserved"; static final String MAP_USERLOG_RETAIN_SIZE = "mapreduce.cluster.map.userlog.retain-size"; static final String REDUCE_USERLOG_RETAIN_SIZE = "mapreduce.cluster.reduce.userlog.retain-size"; static final long WAIT_FOR_DONE = 3 * 1000; private int httpPort; static enum State {NORMAL, STALE, INTERRUPTED, DENIED} static{ Configuration.addDefaultResource("mapred-default.xml"); Configuration.addDefaultResource("mapred-site.xml"); } public static final Log LOG = LogFactory.getLog(TaskTracker.class); public static final String MR_CLIENTTRACE_FORMAT = "src: %s" + // src IP ", dest: %s" + // dst IP ", bytes: %s" + // byte count ", op: %s" + // operation ", cliID: %s" + // task id ", duration: %s"; // duration public static final Log ClientTraceLog = LogFactory.getLog(TaskTracker.class.getName() + ".clienttrace"); volatile boolean running = true; private LocalDirAllocator localDirAllocator; String taskTrackerName; String localHostname; InetSocketAddress jobTrackAddr; InetSocketAddress taskReportAddress; Server taskReportServer = null; InterTrackerProtocol jobClient; // last heartbeat response recieved short heartbeatResponseId = -1; static final String TASK_CLEANUP_SUFFIX = ".cleanup"; /* * This is the last 'status' report sent by this tracker to the JobTracker. * * If the rpc call succeeds, this 'status' is cleared-out by this tracker; * indicating that a 'fresh' status report be generated; in the event the * rpc calls fails for whatever reason, the previous status report is sent * again. 
*/ TaskTrackerStatus status = null; // The system-directory on HDFS where job files are stored Path systemDirectory = null; // The filesystem where job files are stored FileSystem systemFS = null; private final HttpServer server; volatile boolean shuttingDown = false; Map<TaskAttemptID, TaskInProgress> tasks = new HashMap<TaskAttemptID, TaskInProgress>(); /** * Map from taskId -> TaskInProgress. */ Map<TaskAttemptID, TaskInProgress> runningTasks = null; Map<JobID, RunningJob> runningJobs = null; volatile int mapTotal = 0; volatile int reduceTotal = 0; boolean justStarted = true; boolean justInited = true; // Mark reduce tasks that are shuffling to rollback their events index Set<TaskAttemptID> shouldReset = new HashSet<TaskAttemptID>(); //dir -> DF Map<String, DF> localDirsDf = new HashMap<String, DF>(); long minSpaceStart = 0; //must have this much space free to start new tasks boolean acceptNewTasks = true; long minSpaceKill = 0; //if we run under this limit, kill one task //and make sure we never receive any new jobs //until all the old tasks have been cleaned up. 
//this is if a machine is so full it's only good //for serving map output to the other nodes static Random r = new Random(); private static final String SUBDIR = "taskTracker"; private static final String CACHEDIR = "archive"; private static final String JOBCACHE = "jobcache"; private static final String OUTPUT = "output"; private JobConf originalConf; private JobConf fConf; private int maxMapSlots; private int maxReduceSlots; private int failures; private FileSystem localFs; // Performance-related config knob to send an out-of-band heartbeat // on task completion static final String TT_OUTOFBAND_HEARBEAT = "mapreduce.tasktracker.outofband.heartbeat"; private volatile boolean oobHeartbeatOnTaskCompletion; // Track number of completed tasks to send an out-of-band heartbeat private IntWritable finishedCount = new IntWritable(0); private MapEventsFetcherThread mapEventsFetcher; int workerThreads; CleanupQueue directoryCleanupThread; volatile JvmManager jvmManager; private TaskMemoryManagerThread taskMemoryManager; private boolean taskMemoryManagerEnabled = true; private long totalVirtualMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; private long totalPhysicalMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; private long mapSlotMemorySizeOnTT = JobConf.DISABLED_MEMORY_LIMIT; private long reduceSlotSizeMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; private long totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT; private TaskLogsMonitor taskLogsMonitor; static final String MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY = "mapred.tasktracker.memory_calculator_plugin"; /** * the minimum interval between jobtracker polls */ private volatile int heartbeatInterval = HEARTBEAT_INTERVAL_MIN; /** * Number of maptask completion events locations to poll for at one time */ private int probe_sample_size = 500; private IndexCache indexCache; /** * Handle to the specific instance of the {@link TaskController} class */ private TaskController taskController; /** * Handle to the 
specific instance of the {@link NodeHealthCheckerService} */ private NodeHealthCheckerService healthChecker; /* * A list of commitTaskActions for whom commit response has been received */ private List<TaskAttemptID> commitResponses = Collections.synchronizedList(new ArrayList<TaskAttemptID>()); private ShuffleServerMetrics shuffleServerMetrics; /** This class contains the methods that should be used for metrics-reporting * the specific metrics for shuffle. The TaskTracker is actually a server for * the shuffle and hence the name ShuffleServerMetrics. */ private class ShuffleServerMetrics implements Updater { private MetricsRecord shuffleMetricsRecord = null; private int serverHandlerBusy = 0; private long outputBytes = 0; private int failedOutputs = 0; private int successOutputs = 0; ShuffleServerMetrics(JobConf conf) { MetricsContext context = MetricsUtil.getContext("mapred"); shuffleMetricsRecord = MetricsUtil.createRecord(context, "shuffleOutput"); this.shuffleMetricsRecord.setTag("sessionId", conf.getSessionId()); context.registerUpdater(this); } synchronized void serverHandlerBusy() { ++serverHandlerBusy; } synchronized void serverHandlerFree() { --serverHandlerBusy; } synchronized void outputBytes(long bytes) { outputBytes += bytes; } synchronized void failedOutput() { ++failedOutputs; } synchronized void successOutput() { ++successOutputs; } public void doUpdates(MetricsContext unused) { synchronized (this) { if (workerThreads != 0) { shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent", 100*((float)serverHandlerBusy/workerThreads)); } else { shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent", 0); } shuffleMetricsRecord.incrMetric("shuffle_output_bytes", outputBytes); shuffleMetricsRecord.incrMetric("shuffle_failed_outputs", failedOutputs); shuffleMetricsRecord.incrMetric("shuffle_success_outputs", successOutputs); outputBytes = 0; failedOutputs = 0; successOutputs = 0; } shuffleMetricsRecord.update(); } } private 
TaskTrackerInstrumentation myInstrumentation = null; public TaskTrackerInstrumentation getTaskTrackerInstrumentation() { return myInstrumentation; } /** * A list of tips that should be cleaned up. */ private BlockingQueue<TaskTrackerAction> tasksToCleanup = new LinkedBlockingQueue<TaskTrackerAction>(); /** * A daemon-thread that pulls tips off the list of things to cleanup. */ private Thread taskCleanupThread = new Thread(new Runnable() { public void run() { while (true) { try { TaskTrackerAction action = tasksToCleanup.take(); if (action instanceof KillJobAction) { purgeJob((KillJobAction) action); } else if (action instanceof KillTaskAction) { - TaskInProgress tip; - KillTaskAction killAction = (KillTaskAction) action; - synchronized (TaskTracker.this) { - tip = tasks.get(killAction.getTaskID()); - } - LOG.info("Received KillTaskAction for task: " + - killAction.getTaskID()); - purgeTask(tip, false); + processKillTaskAction((KillTaskAction) action); } else { LOG.error("Non-delete action given to cleanup thread: " + action); } } catch (Throwable except) { LOG.warn(StringUtils.stringifyException(except)); } } } }, "taskCleanup"); + void processKillTaskAction(KillTaskAction killAction) throws IOException { + TaskInProgress tip; + synchronized (TaskTracker.this) { + tip = tasks.get(killAction.getTaskID()); + } + LOG.info("Received KillTaskAction for task: " + killAction.getTaskID()); + purgeTask(tip, false); + } + TaskController getTaskController() { return taskController; } private RunningJob addTaskToJob(JobID jobId, TaskInProgress tip) { synchronized (runningJobs) { RunningJob rJob = null; if (!runningJobs.containsKey(jobId)) { rJob = new RunningJob(jobId); rJob.localized = false; rJob.tasks = new HashSet<TaskInProgress>(); runningJobs.put(jobId, rJob); } else { rJob = runningJobs.get(jobId); } synchronized (rJob) { rJob.tasks.add(tip); } runningJobs.notify(); //notify the fetcher thread return rJob; } } private void removeTaskFromJob(JobID jobId, TaskInProgress 
tip) { synchronized (runningJobs) { RunningJob rjob = runningJobs.get(jobId); if (rjob == null) { LOG.warn("Unknown job " + jobId + " being deleted."); } else { synchronized (rjob) { rjob.tasks.remove(tip); } } } } TaskLogsMonitor getTaskLogsMonitor() { return this.taskLogsMonitor; } void setTaskLogsMonitor(TaskLogsMonitor t) { this.taskLogsMonitor = t; } static String getCacheSubdir() { return TaskTracker.SUBDIR + Path.SEPARATOR + TaskTracker.CACHEDIR; } static String getJobCacheSubdir() { return TaskTracker.SUBDIR + Path.SEPARATOR + TaskTracker.JOBCACHE; } static String getLocalJobDir(String jobid) { return getJobCacheSubdir() + Path.SEPARATOR + jobid; } static String getLocalTaskDir(String jobid, String taskid) { return getLocalTaskDir(jobid, taskid, false) ; } static String getIntermediateOutputDir(String jobid, String taskid) { return getLocalTaskDir(jobid, taskid) + Path.SEPARATOR + TaskTracker.OUTPUT ; } static String getLocalTaskDir(String jobid, String taskid, boolean isCleanupAttempt) { String taskDir = getLocalJobDir(jobid) + Path.SEPARATOR + taskid; if (isCleanupAttempt) { taskDir = taskDir + TASK_CLEANUP_SUFFIX; } return taskDir; } String getPid(TaskAttemptID tid) { TaskInProgress tip = tasks.get(tid); if (tip != null) { return jvmManager.getPid(tip.getTaskRunner()); } return null; } public long getProtocolVersion(String protocol, long clientVersion) throws IOException { if (protocol.equals(TaskUmbilicalProtocol.class.getName())) { return TaskUmbilicalProtocol.versionID; } else { throw new IOException("Unknown protocol for task tracker: " + protocol); } } /** * Do the real constructor work here. It's in a separate method * so we can call it again and "recycle" the object after calling * close(). 
*/ synchronized void initialize() throws IOException { // use configured nameserver & interface to get local hostname this.fConf = new JobConf(originalConf); localFs = FileSystem.getLocal(fConf); if (fConf.get("slave.host.name") != null) { this.localHostname = fConf.get("slave.host.name"); } if (localHostname == null) { this.localHostname = DNS.getDefaultHost (fConf.get("mapred.tasktracker.dns.interface","default"), fConf.get("mapred.tasktracker.dns.nameserver","default")); } //check local disk checkLocalDirs(this.fConf.getLocalDirs()); fConf.deleteLocalFiles(SUBDIR); // Clear out state tables this.tasks.clear(); this.runningTasks = new LinkedHashMap<TaskAttemptID, TaskInProgress>(); this.runningJobs = new TreeMap<JobID, RunningJob>(); this.mapTotal = 0; this.reduceTotal = 0; this.acceptNewTasks = true; this.status = null; this.minSpaceStart = this.fConf.getLong("mapred.local.dir.minspacestart", 0L); this.minSpaceKill = this.fConf.getLong("mapred.local.dir.minspacekill", 0L); //tweak the probe sample size (make it a function of numCopiers) probe_sample_size = this.fConf.getInt("mapred.tasktracker.events.batchsize", 500); Class<? extends TaskTrackerInstrumentation> metricsInst = getInstrumentationClass(fConf); try { java.lang.reflect.Constructor<? extends TaskTrackerInstrumentation> c = metricsInst.getConstructor(new Class[] {TaskTracker.class} ); this.myInstrumentation = c.newInstance(this); } catch(Exception e) { //Reflection can throw lots of exceptions -- handle them all by //falling back on the default. 
LOG.error("failed to initialize taskTracker metrics", e); this.myInstrumentation = new TaskTrackerMetricsInst(this); } // bind address String address = NetUtils.getServerAddress(fConf, "mapred.task.tracker.report.bindAddress", "mapred.task.tracker.report.port", "mapred.task.tracker.report.address"); InetSocketAddress socAddr = NetUtils.createSocketAddr(address); String bindAddress = socAddr.getHostName(); int tmpPort = socAddr.getPort(); this.jvmManager = new JvmManager(this); // Set service-level authorization security policy if (this.fConf.getBoolean( ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) { PolicyProvider policyProvider = (PolicyProvider)(ReflectionUtils.newInstance( this.fConf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, MapReducePolicyProvider.class, PolicyProvider.class), this.fConf)); SecurityUtil.setPolicy(new ConfiguredPolicy(this.fConf, policyProvider)); } // RPC initialization int max = maxMapSlots > maxReduceSlots ? maxMapSlots : maxReduceSlots; //set the num handlers to max*2 since canCommit may wait for the duration //of a heartbeat RPC this.taskReportServer = RPC.getServer(this, bindAddress, tmpPort, 2 * max, false, this.fConf); this.taskReportServer.start(); // get the assigned address this.taskReportAddress = taskReportServer.getListenerAddress(); this.fConf.set("mapred.task.tracker.report.address", taskReportAddress.getHostName() + ":" + taskReportAddress.getPort()); LOG.info("TaskTracker up at: " + this.taskReportAddress); this.taskTrackerName = "tracker_" + localHostname + ":" + taskReportAddress; LOG.info("Starting tracker " + taskTrackerName); // Clear out temporary files that might be lying around DistributedCache.purgeCache(this.fConf); cleanupStorage(); this.jobClient = (InterTrackerProtocol) RPC.waitForProxy(InterTrackerProtocol.class, InterTrackerProtocol.versionID, jobTrackAddr, this.fConf); this.justInited = true; this.running = true; // start the thread that will fetch map task completion events 
this.mapEventsFetcher = new MapEventsFetcherThread(); mapEventsFetcher.setDaemon(true); mapEventsFetcher.setName( "Map-events fetcher for all reduce tasks " + "on " + taskTrackerName); mapEventsFetcher.start(); initializeMemoryManagement(); setTaskLogsMonitor(new TaskLogsMonitor(getMapUserLogRetainSize(), getReduceUserLogRetainSize())); getTaskLogsMonitor().start(); - this.indexCache = new IndexCache(this.fConf); + setIndexCache(new IndexCache(this.fConf)); mapLauncher = new TaskLauncher(TaskType.MAP, maxMapSlots); reduceLauncher = new TaskLauncher(TaskType.REDUCE, maxReduceSlots); mapLauncher.start(); reduceLauncher.start(); Class<? extends TaskController> taskControllerClass = fConf.getClass("mapred.task.tracker.task-controller", DefaultTaskController.class, TaskController.class); taskController = (TaskController)ReflectionUtils.newInstance( taskControllerClass, fConf); //setup and create jobcache directory with appropriate permissions taskController.setup(); //Start up node health checker service. if (shouldStartHealthMonitor(this.fConf)) { startHealthMonitor(this.fConf); } oobHeartbeatOnTaskCompletion = fConf.getBoolean(TT_OUTOFBAND_HEARBEAT, false); } public static Class<? extends TaskTrackerInstrumentation> getInstrumentationClass( Configuration conf) { return conf.getClass("mapred.tasktracker.instrumentation", TaskTrackerMetricsInst.class, TaskTrackerInstrumentation.class); } public static void setInstrumentationClass( Configuration conf, Class<? extends TaskTrackerInstrumentation> t) { conf.setClass("mapred.tasktracker.instrumentation", t, TaskTrackerInstrumentation.class); } /** * Removes all contents of temporary storage. Called upon * startup, to remove any leftovers from previous run. */ public void cleanupStorage() throws IOException { this.fConf.deleteLocalFiles(); } // Object on wait which MapEventsFetcherThread is going to wait. 
private Object waitingOn = new Object(); private class MapEventsFetcherThread extends Thread { private List <FetchStatus> reducesInShuffle() { List <FetchStatus> fList = new ArrayList<FetchStatus>(); for (Map.Entry <JobID, RunningJob> item : runningJobs.entrySet()) { RunningJob rjob = item.getValue(); JobID jobId = item.getKey(); FetchStatus f; synchronized (rjob) { f = rjob.getFetchStatus(); for (TaskInProgress tip : rjob.tasks) { Task task = tip.getTask(); if (!task.isMapTask()) { if (((ReduceTask)task).getPhase() == TaskStatus.Phase.SHUFFLE) { if (rjob.getFetchStatus() == null) { //this is a new job; we start fetching its map events f = new FetchStatus(jobId, ((ReduceTask)task).getNumMaps()); rjob.setFetchStatus(f); } f = rjob.getFetchStatus(); fList.add(f); break; //no need to check any more tasks belonging to this } } } } } //at this point, we have information about for which of //the running jobs do we need to query the jobtracker for map //outputs (actually map events). return fList; } @Override public void run() { LOG.info("Starting thread: " + this.getName()); while (running) { try { List <FetchStatus> fList = null; synchronized (runningJobs) { while (((fList = reducesInShuffle()).size()) == 0) { try { runningJobs.wait(); } catch (InterruptedException e) { LOG.info("Shutting down: " + this.getName()); return; } } } // now fetch all the map task events for all the reduce tasks // possibly belonging to different jobs boolean fetchAgain = false; //flag signifying whether we want to fetch //immediately again. 
for (FetchStatus f : fList) { long currentTime = System.currentTimeMillis(); try { //the method below will return true when we have not //fetched all available events yet if (f.fetchMapCompletionEvents(currentTime)) { fetchAgain = true; } } catch (Exception e) { LOG.warn( "Ignoring exception that fetch for map completion" + " events threw for " + f.jobId + " threw: " + StringUtils.stringifyException(e)); } if (!running) { break; } } synchronized (waitingOn) { try { if (!fetchAgain) { waitingOn.wait(heartbeatInterval); } } catch (InterruptedException ie) { LOG.info("Shutting down: " + this.getName()); return; } } } catch (Exception e) { LOG.info("Ignoring exception " + e.getMessage()); } } } } private class FetchStatus { /** The next event ID that we will start querying the JobTracker from*/ private IntWritable fromEventId; /** This is the cache of map events for a given job */ private List<TaskCompletionEvent> allMapEvents; /** What jobid this fetchstatus object is for*/ private JobID jobId; private long lastFetchTime; private boolean fetchAgain; public FetchStatus(JobID jobId, int numMaps) { this.fromEventId = new IntWritable(0); this.jobId = jobId; this.allMapEvents = new ArrayList<TaskCompletionEvent>(numMaps); } /** * Reset the events obtained so far. */ public void reset() { // Note that the sync is first on fromEventId and then on allMapEvents synchronized (fromEventId) { synchronized (allMapEvents) { fromEventId.set(0); // set the new index for TCE allMapEvents.clear(); } } } public TaskCompletionEvent[] getMapEvents(int fromId, int max) { TaskCompletionEvent[] mapEvents = TaskCompletionEvent.EMPTY_ARRAY; boolean notifyFetcher = false; synchronized (allMapEvents) { if (allMapEvents.size() > fromId) { int actualMax = Math.min(max, (allMapEvents.size() - fromId)); List <TaskCompletionEvent> eventSublist = allMapEvents.subList(fromId, actualMax + fromId); mapEvents = eventSublist.toArray(mapEvents); } else { // Notify Fetcher thread. 
notifyFetcher = true; } } if (notifyFetcher) { synchronized (waitingOn) { waitingOn.notify(); } } return mapEvents; } public boolean fetchMapCompletionEvents(long currTime) throws IOException { if (!fetchAgain && (currTime - lastFetchTime) < heartbeatInterval) { return false; } int currFromEventId = 0; synchronized (fromEventId) { currFromEventId = fromEventId.get(); List <TaskCompletionEvent> recentMapEvents = queryJobTracker(fromEventId, jobId, jobClient); synchronized (allMapEvents) { allMapEvents.addAll(recentMapEvents); } lastFetchTime = currTime; if (fromEventId.get() - currFromEventId >= probe_sample_size) { //return true when we have fetched the full payload, indicating //that we should fetch again immediately (there might be more to //fetch fetchAgain = true; return true; } } fetchAgain = false; return false; } } private static LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir"); // intialize the job directory private void localizeJob(TaskInProgress tip) throws IOException { Path localJarFile = null; Task t = tip.getTask(); JobID jobId = t.getJobID(); Path jobFile = new Path(t.getJobFile()); // Get sizes of JobFile and JarFile // sizes are -1 if they are not present. FileStatus status = null; long jobFileSize = -1; try { status = systemFS.getFileStatus(jobFile); jobFileSize = status.getLen(); } catch(FileNotFoundException fe) { jobFileSize = -1; } Path localJobFile = lDirAlloc.getLocalPathForWrite( getLocalJobDir(jobId.toString()) + Path.SEPARATOR + "job.xml", jobFileSize, fConf); RunningJob rjob = addTaskToJob(jobId, tip); synchronized (rjob) { if (!rjob.localized) { FileSystem localFs = FileSystem.getLocal(fConf); // this will happen on a partial execution of localizeJob. 
// Sometimes the job.xml gets copied but copying job.jar // might throw out an exception // we should clean up and then try again Path jobDir = localJobFile.getParent(); if (localFs.exists(jobDir)){ localFs.delete(jobDir, true); boolean b = localFs.mkdirs(jobDir); if (!b) throw new IOException("Not able to create job directory " + jobDir.toString()); } systemFS.copyToLocalFile(jobFile, localJobFile); JobConf localJobConf = new JobConf(localJobFile); // create the 'work' directory // job-specific shared directory for use as scratch space Path workDir = lDirAlloc.getLocalPathForWrite( (getLocalJobDir(jobId.toString()) + Path.SEPARATOR + "work"), fConf); if (!localFs.mkdirs(workDir)) { throw new IOException("Mkdirs failed to create " + workDir.toString()); } System.setProperty("job.local.dir", workDir.toString()); localJobConf.set("job.local.dir", workDir.toString()); // copy Jar file to the local FS and unjar it. String jarFile = localJobConf.getJar(); long jarFileSize = -1; if (jarFile != null) { Path jarFilePath = new Path(jarFile); try { status = systemFS.getFileStatus(jarFilePath); jarFileSize = status.getLen(); } catch(FileNotFoundException fe) { jarFileSize = -1; } // Here we check for and we check five times the size of jarFileSize // to accommodate for unjarring the jar file in work directory localJarFile = new Path(lDirAlloc.getLocalPathForWrite( getLocalJobDir(jobId.toString()) + Path.SEPARATOR + "jars", 5 * jarFileSize, fConf), "job.jar"); if (!localFs.mkdirs(localJarFile.getParent())) { throw new IOException("Mkdirs failed to create jars directory "); } systemFS.copyToLocalFile(jarFilePath, localJarFile); localJobConf.setJar(localJarFile.toString()); OutputStream out = localFs.create(localJobFile); try { localJobConf.writeXml(out); } finally { out.close(); } // also unjar the job.jar files RunJar.unJar(new File(localJarFile.toString()), new File(localJarFile.getParent().toString())); } rjob.keepJobFiles = ((localJobConf.getKeepTaskFilesPattern() != null) 
|| localJobConf.getKeepFailedTaskFiles()); rjob.localized = true; rjob.jobConf = localJobConf; taskController.initializeJob(jobId); } } launchTaskForJob(tip, new JobConf(rjob.jobConf)); } private void launchTaskForJob(TaskInProgress tip, JobConf jobConf) throws IOException{ synchronized (tip) { tip.setJobConf(jobConf); tip.launchTask(); } } public synchronized void shutdown() throws IOException { shuttingDown = true; close(); if (this.server != null) { try { LOG.info("Shutting down StatusHttpServer"); this.server.stop(); } catch (Exception e) { LOG.warn("Exception shutting down TaskTracker", e); } } } /** * Close down the TaskTracker and all its components. We must also shutdown * any running tasks or threads, and cleanup disk space. A new TaskTracker * within the same process space might be restarted, so everything must be * clean. */ public synchronized void close() throws IOException { // // Kill running tasks. Do this in a 2nd vector, called 'tasksToClose', // because calling jobHasFinished() may result in an edit to 'tasks'. // TreeMap<TaskAttemptID, TaskInProgress> tasksToClose = new TreeMap<TaskAttemptID, TaskInProgress>(); tasksToClose.putAll(tasks); for (TaskInProgress tip : tasksToClose.values()) { tip.jobHasFinished(false); } this.running = false; // Clear local storage cleanupStorage(); // Shutdown the fetcher thread this.mapEventsFetcher.interrupt(); //stop the launchers this.mapLauncher.interrupt(); this.reduceLauncher.interrupt(); // All tasks are killed. So, they are removed from TaskLog monitoring also. // Interrupt the monitor. 
getTaskLogsMonitor().interrupt(); jvmManager.stop(); // shutdown RPC connections RPC.stopProxy(jobClient); // wait for the fetcher thread to exit for (boolean done = false; !done; ) { try { this.mapEventsFetcher.join(); done = true; } catch (InterruptedException e) { } } if (taskReportServer != null) { taskReportServer.stop(); taskReportServer = null; } if (healthChecker != null) { //stop node health checker service healthChecker.stop(); healthChecker = null; } } /** * Start with the local machine name, and the default JobTracker */ public TaskTracker(JobConf conf) throws IOException { originalConf = conf; maxMapSlots = conf.getInt( "mapred.tasktracker.map.tasks.maximum", 2); maxReduceSlots = conf.getInt( "mapred.tasktracker.reduce.tasks.maximum", 2); this.jobTrackAddr = JobTracker.getAddress(conf); String infoAddr = NetUtils.getServerAddress(conf, "tasktracker.http.bindAddress", "tasktracker.http.port", "mapred.task.tracker.http.address"); InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr); String httpBindAddress = infoSocAddr.getHostName(); int httpPort = infoSocAddr.getPort(); this.server = new HttpServer("task", httpBindAddress, httpPort, httpPort == 0, conf); workerThreads = conf.getInt("tasktracker.http.threads", 40); this.shuffleServerMetrics = new ShuffleServerMetrics(conf); server.setThreads(1, workerThreads); // let the jsp pages get to the task tracker, config, and other relevant // objects FileSystem local = FileSystem.getLocal(conf); this.localDirAllocator = new LocalDirAllocator("mapred.local.dir"); server.setAttribute("task.tracker", this); server.setAttribute("local.file.system", local); server.setAttribute("conf", conf); server.setAttribute("log", LOG); server.setAttribute("localDirAllocator", localDirAllocator); server.setAttribute("shuffleServerMetrics", shuffleServerMetrics); server.addInternalServlet("mapOutput", "/mapOutput", MapOutputServlet.class); server.addInternalServlet("taskLog", "/tasklog", TaskLogServlet.class); 
server.start(); this.httpPort = server.getPort(); checkJettyPort(httpPort); initialize(); } /** * Blank constructor. Only usable by tests. */ TaskTracker() { server = null; } private void checkJettyPort(int port) throws IOException { //See HADOOP-4744 if (port < 0) { shuttingDown = true; throw new IOException("Jetty problem. Jetty didn't bind to a " + "valid port"); } } private void startCleanupThreads() throws IOException { taskCleanupThread.setDaemon(true); taskCleanupThread.start(); directoryCleanupThread = new CleanupQueue(); } /** * The connection to the JobTracker, used by the TaskRunner * for locating remote files. */ public InterTrackerProtocol getJobClient() { return jobClient; } /** Return the port at which the tasktracker bound to */ public synchronized InetSocketAddress getTaskTrackerReportAddress() { return taskReportAddress; } /** Queries the job tracker for a set of outputs ready to be copied * @param fromEventId the first event ID we want to start from, this is * modified by the call to this method * @param jobClient the job tracker * @return a set of locations to copy outputs from * @throws IOException */ private List<TaskCompletionEvent> queryJobTracker(IntWritable fromEventId, JobID jobId, InterTrackerProtocol jobClient) throws IOException { TaskCompletionEvent t[] = jobClient.getTaskCompletionEvents( jobId, fromEventId.get(), probe_sample_size); //we are interested in map task completion events only. So store //only those List <TaskCompletionEvent> recentMapEvents = new ArrayList<TaskCompletionEvent>(); for (int i = 0; i < t.length; i++) { if (t[i].isMap) { recentMapEvents.add(t[i]); } } fromEventId.set(fromEventId.get() + t.length); return recentMapEvents; } /** * Main service loop. Will stay in this loop forever. 
*/ State offerService() throws Exception { long lastHeartbeat = 0; while (running && !shuttingDown) { try { long now = System.currentTimeMillis(); long waitTime = heartbeatInterval - (now - lastHeartbeat); if (waitTime > 0) { // sleeps for the wait time or // until there are empty slots to schedule tasks synchronized (finishedCount) { if (finishedCount.get() == 0) { finishedCount.wait(waitTime); } finishedCount.set(0); } } // If the TaskTracker is just starting up: // 1. Verify the buildVersion // 2. Get the system directory & filesystem if(justInited) { String jobTrackerBV = jobClient.getBuildVersion(); if(!VersionInfo.getBuildVersion().equals(jobTrackerBV)) { String msg = "Shutting down. Incompatible buildVersion." + "\nJobTracker's: " + jobTrackerBV + "\nTaskTracker's: "+ VersionInfo.getBuildVersion(); LOG.error(msg); try { jobClient.reportTaskTrackerError(taskTrackerName, null, msg); } catch(Exception e ) { LOG.info("Problem reporting to jobtracker: " + e); } return State.DENIED; } String dir = jobClient.getSystemDir(); if (dir == null) { throw new IOException("Failed to get system directory"); } systemDirectory = new Path(dir); systemFS = systemDirectory.getFileSystem(fConf); } // Send the heartbeat and process the jobtracker's directives HeartbeatResponse heartbeatResponse = transmitHeartBeat(now); // Note the time when the heartbeat returned, use this to decide when to send the // next heartbeat lastHeartbeat = System.currentTimeMillis(); // Check if the map-event list needs purging Set<JobID> jobs = heartbeatResponse.getRecoveredJobs(); if (jobs.size() > 0) { synchronized (this) { // purge the local map events list for (JobID job : jobs) { RunningJob rjob; synchronized (runningJobs) { rjob = runningJobs.get(job); if (rjob != null) { synchronized (rjob) { FetchStatus f = rjob.getFetchStatus(); if (f != null) { f.reset(); } } } } } // Mark the reducers in shuffle for rollback synchronized (shouldReset) { for (Map.Entry<TaskAttemptID, TaskInProgress> entry : 
runningTasks.entrySet()) { if (entry.getValue().getStatus().getPhase() == Phase.SHUFFLE) { this.shouldReset.add(entry.getKey()); } } } } } TaskTrackerAction[] actions = heartbeatResponse.getActions(); if(LOG.isDebugEnabled()) { LOG.debug("Got heartbeatResponse from JobTracker with responseId: " + heartbeatResponse.getResponseId() + " and " + ((actions != null) ? actions.length : 0) + " actions"); } if (reinitTaskTracker(actions)) { return State.STALE; } // resetting heartbeat interval from the response. heartbeatInterval = heartbeatResponse.getHeartbeatInterval(); justStarted = false; justInited = false; if (actions != null){ for(TaskTrackerAction action: actions) { if (action instanceof LaunchTaskAction) { addToTaskQueue((LaunchTaskAction)action); } else if (action instanceof CommitTaskAction) { CommitTaskAction commitAction = (CommitTaskAction)action; if (!commitResponses.contains(commitAction.getTaskID())) { LOG.info("Received commit task action for " + commitAction.getTaskID()); commitResponses.add(commitAction.getTaskID()); } } else { tasksToCleanup.put(action); } } } markUnresponsiveTasks(); killOverflowingTasks(); //we've cleaned up, resume normal operation if (!acceptNewTasks && isIdle()) { acceptNewTasks=true; } //The check below may not be required every iteration but we are //erring on the side of caution here. We have seen many cases where //the call to jetty's getLocalPort() returns different values at //different times. Being a real paranoid here. checkJettyPort(server.getPort()); } catch (InterruptedException ie) { LOG.info("Interrupted. 
Closing down."); return State.INTERRUPTED; } catch (DiskErrorException de) { String msg = "Exiting task tracker for disk error:\n" + StringUtils.stringifyException(de); LOG.error(msg); synchronized (this) { jobClient.reportTaskTrackerError(taskTrackerName, "DiskErrorException", msg); } return State.STALE; } catch (RemoteException re) { String reClass = re.getClassName(); if (DisallowedTaskTrackerException.class.getName().equals(reClass)) { LOG.info("Tasktracker disallowed by JobTracker."); return State.DENIED; } } catch (Exception except) { String msg = "Caught exception: " + StringUtils.stringifyException(except); LOG.error(msg); } } return State.NORMAL; } private long previousUpdate = 0; + void setIndexCache(IndexCache cache) { + this.indexCache = cache; + } + /** * Build and transmit the heart beat to the JobTracker * @param now current time * @return false if the tracker was unknown * @throws IOException */ HeartbeatResponse transmitHeartBeat(long now) throws IOException { // Send Counters in the status once every COUNTER_UPDATE_INTERVAL boolean sendCounters; if (now > (previousUpdate + COUNTER_UPDATE_INTERVAL)) { sendCounters = true; previousUpdate = now; } else { sendCounters = false; } // // Check if the last heartbeat got through... // if so then build the heartbeat information for the JobTracker; // else resend the previous status information. 
// if (status == null) { synchronized (this) { status = new TaskTrackerStatus(taskTrackerName, localHostname, httpPort, cloneAndResetRunningTaskStatuses( sendCounters), failures, maxMapSlots, maxReduceSlots); } } else { LOG.info("Resending 'status' to '" + jobTrackAddr.getHostName() + "' with reponseId '" + heartbeatResponseId); } // // Check if we should ask for a new Task // boolean askForNewTask; long localMinSpaceStart; synchronized (this) { askForNewTask = ((status.countOccupiedMapSlots() < maxMapSlots || status.countOccupiedReduceSlots() < maxReduceSlots) && acceptNewTasks); localMinSpaceStart = minSpaceStart; } if (askForNewTask) { checkLocalDirs(fConf.getLocalDirs()); askForNewTask = enoughFreeSpace(localMinSpaceStart); long freeDiskSpace = getFreeSpace(); long totVmem = getTotalVirtualMemoryOnTT(); long totPmem = getTotalPhysicalMemoryOnTT(); status.getResourceStatus().setAvailableSpace(freeDiskSpace); status.getResourceStatus().setTotalVirtualMemory(totVmem); status.getResourceStatus().setTotalPhysicalMemory(totPmem); status.getResourceStatus().setMapSlotMemorySizeOnTT( mapSlotMemorySizeOnTT); status.getResourceStatus().setReduceSlotMemorySizeOnTT( reduceSlotSizeMemoryOnTT); } //add node health information TaskTrackerHealthStatus healthStatus = status.getHealthStatus(); synchronized (this) { if (healthChecker != null) { healthChecker.setHealthStatus(healthStatus); } else { healthStatus.setNodeHealthy(true); healthStatus.setLastReported(0L); healthStatus.setHealthReport(""); } } // // Xmit the heartbeat // HeartbeatResponse heartbeatResponse = jobClient.heartbeat(status, justStarted, justInited, askForNewTask, heartbeatResponseId); // // The heartbeat got through successfully! 
// heartbeatResponseId = heartbeatResponse.getResponseId(); synchronized (this) { for (TaskStatus taskStatus : status.getTaskReports()) { if (taskStatus.getRunState() != TaskStatus.State.RUNNING && taskStatus.getRunState() != TaskStatus.State.UNASSIGNED && taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && !taskStatus.inTaskCleanupPhase()) { if (taskStatus.getIsMap()) { mapTotal--; } else { reduceTotal--; } try { myInstrumentation.completeTask(taskStatus.getTaskID()); } catch (MetricsException me) { LOG.warn("Caught: " + StringUtils.stringifyException(me)); } runningTasks.remove(taskStatus.getTaskID()); } } // Clear transient status information which should only // be sent once to the JobTracker for (TaskInProgress tip: runningTasks.values()) { tip.getStatus().clearStatus(); } } // Force a rebuild of 'status' on the next iteration status = null; return heartbeatResponse; } long getMapUserLogRetainSize() { return fConf.getLong(MAP_USERLOG_RETAIN_SIZE, -1); } void setMapUserLogRetainSize(long retainSize) { fConf.setLong(MAP_USERLOG_RETAIN_SIZE, retainSize); } long getReduceUserLogRetainSize() { return fConf.getLong(REDUCE_USERLOG_RETAIN_SIZE, -1); } void setReduceUserLogRetainSize(long retainSize) { fConf.setLong(REDUCE_USERLOG_RETAIN_SIZE, retainSize); } /** * Return the total virtual memory available on this TaskTracker. * @return total size of virtual memory. */ long getTotalVirtualMemoryOnTT() { return totalVirtualMemoryOnTT; } /** * Return the total physical memory available on this TaskTracker. * @return total size of physical memory. */ long getTotalPhysicalMemoryOnTT() { return totalPhysicalMemoryOnTT; } long getTotalMemoryAllottedForTasksOnTT() { return totalMemoryAllottedForTasks; } /** * Check if the jobtracker directed a 'reset' of the tasktracker. * * @param actions the directives of the jobtracker for the tasktracker. * @return <code>true</code> if tasktracker is to be reset, * <code>false</code> otherwise. 
*/ private boolean reinitTaskTracker(TaskTrackerAction[] actions) { if (actions != null) { for (TaskTrackerAction action : actions) { if (action.getActionId() == TaskTrackerAction.ActionType.REINIT_TRACKER) { LOG.info("Recieved RenitTrackerAction from JobTracker"); return true; } } } return false; } /** * Kill any tasks that have not reported progress in the last X seconds. */ private synchronized void markUnresponsiveTasks() throws IOException { long now = System.currentTimeMillis(); for (TaskInProgress tip: runningTasks.values()) { if (tip.getRunState() == TaskStatus.State.RUNNING || tip.getRunState() == TaskStatus.State.COMMIT_PENDING || tip.isCleaningup()) { // Check the per-job timeout interval for tasks; // an interval of '0' implies it is never timed-out long jobTaskTimeout = tip.getTaskTimeout(); if (jobTaskTimeout == 0) { continue; } // Check if the task has not reported progress for a // time-period greater than the configured time-out long timeSinceLastReport = now - tip.getLastProgressReport(); if (timeSinceLastReport > jobTaskTimeout && !tip.wasKilled) { String msg = "Task " + tip.getTask().getTaskID() + " failed to report status for " + (timeSinceLastReport / 1000) + " seconds. 
Killing!"; LOG.info(tip.getTask().getTaskID() + ": " + msg); ReflectionUtils.logThreadInfo(LOG, "lost task", 30); tip.reportDiagnosticInfo(msg); myInstrumentation.timedoutTask(tip.getTask().getTaskID()); purgeTask(tip, true); } } } } private static PathDeletionContext[] buildPathDeletionContexts(FileSystem fs, Path[] paths) { int i = 0; PathDeletionContext[] contexts = new PathDeletionContext[paths.length]; for (Path p : paths) { contexts[i++] = new PathDeletionContext(fs, p.toUri().getPath()); } return contexts; } static PathDeletionContext[] buildTaskControllerPathDeletionContexts( FileSystem fs, Path[] paths, Task task, boolean isWorkDir, TaskController taskController) throws IOException { int i = 0; PathDeletionContext[] contexts = new TaskControllerPathDeletionContext[paths.length]; for (Path p : paths) { contexts[i++] = new TaskControllerPathDeletionContext(fs, p, task, isWorkDir, taskController); } return contexts; } /** * The task tracker is done with this job, so we need to clean up. 
* @param action The action with the job * @throws IOException */ private synchronized void purgeJob(KillJobAction action) throws IOException { JobID jobId = action.getJobID(); LOG.info("Received 'KillJobAction' for job: " + jobId); RunningJob rjob = null; synchronized (runningJobs) { rjob = runningJobs.get(jobId); } if (rjob == null) { LOG.warn("Unknown job " + jobId + " being deleted."); } else { synchronized (rjob) { // Add this tips of this job to queue of tasks to be purged for (TaskInProgress tip : rjob.tasks) { tip.jobHasFinished(false); Task t = tip.getTask(); if (t.isMapTask()) { indexCache.removeMap(tip.getTask().getTaskID().toString()); } } // Delete the job directory for this // task if the job is done/failed if (!rjob.keepJobFiles){ PathDeletionContext[] contexts = buildPathDeletionContexts(localFs, getLocalFiles(fConf, getLocalJobDir(rjob.getJobID().toString()))); directoryCleanupThread.addToQueue(contexts); } // Remove this job rjob.tasks.clear(); } } synchronized(runningJobs) { runningJobs.remove(jobId); } } /** * Remove the tip and update all relevant state. * * @param tip {@link TaskInProgress} to be removed. * @param wasFailure did the task fail or was it killed? 
*/ private void purgeTask(TaskInProgress tip, boolean wasFailure) throws IOException { if (tip != null) { LOG.info("About to purge task: " + tip.getTask().getTaskID()); // Remove the task from running jobs, // removing the job if it's the last task removeTaskFromJob(tip.getTask().getJobID(), tip); tip.jobHasFinished(wasFailure); if (tip.getTask().isMapTask()) { indexCache.removeMap(tip.getTask().getTaskID().toString()); } } } /** Check if we're dangerously low on disk space * If so, kill jobs to free up space and make sure * we don't accept any new tasks * Try killing the reduce jobs first, since I believe they * use up most space * Then pick the one with least progress */ private void killOverflowingTasks() throws IOException { long localMinSpaceKill; synchronized(this){ localMinSpaceKill = minSpaceKill; } if (!enoughFreeSpace(localMinSpaceKill)) { acceptNewTasks=false; //we give up! do not accept new tasks until //all the ones running have finished and they're all cleared up synchronized (this) { TaskInProgress killMe = findTaskToKill(null); if (killMe!=null) { String msg = "Tasktracker running out of space." + " Killing task."; LOG.info(killMe.getTask().getTaskID() + ": " + msg); killMe.reportDiagnosticInfo(msg); purgeTask(killMe, false); } } } } /** * Pick a task to kill to free up memory/disk-space * @param tasksToExclude tasks that are to be excluded while trying to find a * task to kill. If null, all runningTasks will be searched. 
* @return the task to kill or null, if one wasn't found */ synchronized TaskInProgress findTaskToKill(List<TaskAttemptID> tasksToExclude) { TaskInProgress killMe = null; for (Iterator it = runningTasks.values().iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); if (tasksToExclude != null && tasksToExclude.contains(tip.getTask().getTaskID())) { // exclude this task continue; } if ((tip.getRunState() == TaskStatus.State.RUNNING || tip.getRunState() == TaskStatus.State.COMMIT_PENDING) && !tip.wasKilled) { if (killMe == null) { killMe = tip; } else if (!tip.getTask().isMapTask()) { //reduce task, give priority if (killMe.getTask().isMapTask() || (tip.getTask().getProgress().get() < killMe.getTask().getProgress().get())) { killMe = tip; } } else if (killMe.getTask().isMapTask() && tip.getTask().getProgress().get() < killMe.getTask().getProgress().get()) { //map task, only add if the progress is lower killMe = tip; } } } return killMe; } /** * Check if any of the local directories has enough * free space (more than minSpace) * * If not, do not try to get a new task assigned * @return * @throws IOException */ private boolean enoughFreeSpace(long minSpace) throws IOException { if (minSpace == 0) { return true; } return minSpace < getFreeSpace(); } private long getFreeSpace() throws IOException { long biggestSeenSoFar = 0; String[] localDirs = fConf.getLocalDirs(); for (int i = 0; i < localDirs.length; i++) { DF df = null; if (localDirsDf.containsKey(localDirs[i])) { df = localDirsDf.get(localDirs[i]); } else { df = new DF(new File(localDirs[i]), fConf); localDirsDf.put(localDirs[i], df); } long availOnThisVol = df.getAvailable(); if (availOnThisVol > biggestSeenSoFar) { biggestSeenSoFar = availOnThisVol; } } //Should ultimately hold back the space we expect running tasks to use but //that estimate isn't currently being passed down to the TaskTrackers return biggestSeenSoFar; } /** * Try to get the size of output for this task. 
* Returns -1 if it can't be found. * @return */ long tryToGetOutputSize(TaskAttemptID taskId, JobConf conf) { try{ TaskInProgress tip; synchronized(this) { tip = tasks.get(taskId); } if(tip == null) return -1; if (!tip.getTask().isMapTask() || tip.getRunState() != TaskStatus.State.SUCCEEDED) { return -1; } MapOutputFile mapOutputFile = new MapOutputFile(); mapOutputFile.setJobId(taskId.getJobID()); mapOutputFile.setConf(conf); Path tmp_output = mapOutputFile.getOutputFile(taskId); if(tmp_output == null) return 0; FileSystem localFS = FileSystem.getLocal(conf); FileStatus stat = localFS.getFileStatus(tmp_output); if(stat == null) return 0; else return stat.getLen(); } catch(IOException e) { LOG.info(e); return -1; } } private TaskLauncher mapLauncher; private TaskLauncher reduceLauncher; public JvmManager getJvmManagerInstance() { return jvmManager; } private void addToTaskQueue(LaunchTaskAction action) { if (action.getTask().isMapTask()) { mapLauncher.addToTaskQueue(action); } else { reduceLauncher.addToTaskQueue(action); } } - private class TaskLauncher extends Thread { + class TaskLauncher extends Thread { private IntWritable numFreeSlots; private final int maxSlots; private List<TaskInProgress> tasksToLaunch; public TaskLauncher(TaskType taskType, int numSlots) { this.maxSlots = numSlots; this.numFreeSlots = new IntWritable(numSlots); this.tasksToLaunch = new LinkedList<TaskInProgress>(); setDaemon(true); setName("TaskLauncher for " + taskType + " tasks"); } public void addToTaskQueue(LaunchTaskAction action) { synchronized (tasksToLaunch) { TaskInProgress tip = registerTask(action, this); tasksToLaunch.add(tip); tasksToLaunch.notifyAll(); } } public void cleanTaskQueue() { tasksToLaunch.clear(); } public void addFreeSlots(int numSlots) { synchronized (numFreeSlots) { numFreeSlots.set(numFreeSlots.get() + numSlots); assert (numFreeSlots.get() <= maxSlots); LOG.info("addFreeSlot : current free slots : " + numFreeSlots.get()); numFreeSlots.notifyAll(); } } + void 
notifySlots() { + synchronized (numFreeSlots) { + numFreeSlots.notifyAll(); + } + } + + int getNumWaitingTasksToLaunch() { + synchronized (tasksToLaunch) { + return tasksToLaunch.size(); + } + } + public void run() { while (!Thread.interrupted()) { try { TaskInProgress tip; Task task; synchronized (tasksToLaunch) { while (tasksToLaunch.isEmpty()) { tasksToLaunch.wait(); } //get the TIP tip = tasksToLaunch.remove(0); task = tip.getTask(); LOG.info("Trying to launch : " + tip.getTask().getTaskID() + " which needs " + task.getNumSlotsRequired() + " slots"); } //wait for free slots to run synchronized (numFreeSlots) { + boolean canLaunch = true; while (numFreeSlots.get() < task.getNumSlotsRequired()) { + //Make sure that there is no kill task action for this task! + //We are not locking tip here, because it would reverse the + //locking order! + //Also, Lock for the tip is not required here! because : + // 1. runState of TaskStatus is volatile + // 2. Any notification is not missed because notification is + // synchronized on numFreeSlots. So, while we are doing the check, + // if the tip is half way through the kill(), we don't miss + // notification for the following wait(). + if (!tip.canBeLaunched()) { + //got killed externally while still in the launcher queue + LOG.info("Not blocking slots for " + task.getTaskID() + + " as it got killed externally. 
Task's state is " + + tip.getRunState()); + canLaunch = false; + break; + } LOG.info("TaskLauncher : Waiting for " + task.getNumSlotsRequired() + " to launch " + task.getTaskID() + ", currently we have " + numFreeSlots.get() + " free slots"); numFreeSlots.wait(); } + if (!canLaunch) { + continue; + } LOG.info("In TaskLauncher, current free slots : " + numFreeSlots.get()+ " and trying to launch "+tip.getTask().getTaskID() + " which needs " + task.getNumSlotsRequired() + " slots"); numFreeSlots.set(numFreeSlots.get() - task.getNumSlotsRequired()); assert (numFreeSlots.get() >= 0); } synchronized (tip) { //to make sure that there is no kill task action for this - if (tip.getRunState() != TaskStatus.State.UNASSIGNED && - tip.getRunState() != TaskStatus.State.FAILED_UNCLEAN && - tip.getRunState() != TaskStatus.State.KILLED_UNCLEAN) { + if (!tip.canBeLaunched()) { //got killed externally while still in the launcher queue + LOG.info("Not launching task " + task.getTaskID() + " as it got" + + " killed externally. Task's state is " + tip.getRunState()); addFreeSlots(task.getNumSlotsRequired()); continue; } tip.slotTaken = true; } //got a free slot. launch the task startNewTask(tip); } catch (InterruptedException e) { return; // ALL DONE } catch (Throwable th) { LOG.error("TaskLauncher error " + StringUtils.stringifyException(th)); } } } } private TaskInProgress registerTask(LaunchTaskAction action, TaskLauncher launcher) { Task t = action.getTask(); LOG.info("LaunchTaskAction (registerTask): " + t.getTaskID() + " task's state:" + t.getState()); TaskInProgress tip = new TaskInProgress(t, this.fConf, launcher); synchronized (this) { tasks.put(t.getTaskID(), tip); runningTasks.put(t.getTaskID(), tip); boolean isMap = t.isMapTask(); if (isMap) { mapTotal++; } else { reduceTotal++; } } return tip; } /** * Start a new task. * All exceptions are handled locally, so that we don't mess up the * task tracker. 
*/ - private void startNewTask(TaskInProgress tip) { + void startNewTask(TaskInProgress tip) { try { localizeJob(tip); } catch (Throwable e) { String msg = ("Error initializing " + tip.getTask().getTaskID() + ":\n" + StringUtils.stringifyException(e)); LOG.warn(msg); tip.reportDiagnosticInfo(msg); try { tip.kill(true); tip.cleanup(true); } catch (IOException ie2) { LOG.info("Error cleaning up " + tip.getTask().getTaskID() + ":\n" + StringUtils.stringifyException(ie2)); } // Careful! // This might not be an 'Exception' - don't handle 'Error' here! if (e instanceof Error) { throw ((Error) e); } } } void addToMemoryManager(TaskAttemptID attemptId, boolean isMap, JobConf conf) { if (isTaskMemoryManagerEnabled()) { taskMemoryManager.addTask(attemptId, isMap ? conf .getMemoryForMapTask() * 1024 * 1024L : conf .getMemoryForReduceTask() * 1024 * 1024L); } } void removeFromMemoryManager(TaskAttemptID attemptId) { // Remove the entry from taskMemoryManagerThread's data structures. if (isTaskMemoryManagerEnabled()) { taskMemoryManager.removeTask(attemptId); } } /** * Notify the tasktracker to send an out-of-band heartbeat. */ private void notifyTTAboutTaskCompletion() { if (oobHeartbeatOnTaskCompletion) { synchronized (finishedCount) { int value = finishedCount.get(); finishedCount.set(value+1); finishedCount.notify(); } } } /** * The server retry loop. * This while-loop attempts to connect to the JobTracker. It only * loops when the old TaskTracker has gone bad (its state is * stale somehow) and we need to reinitialize everything. 
*/ public void run() { try { startCleanupThreads(); boolean denied = false; while (running && !shuttingDown && !denied) { boolean staleState = false; try { // This while-loop attempts reconnects if we get network errors while (running && !staleState && !shuttingDown && !denied) { try { State osState = offerService(); if (osState == State.STALE) { staleState = true; } else if (osState == State.DENIED) { denied = true; } } catch (Exception ex) { if (!shuttingDown) { LOG.info("Lost connection to JobTracker [" + jobTrackAddr + "]. Retrying...", ex); try { Thread.sleep(5000); } catch (InterruptedException ie) { } } } } } finally { close(); } if (shuttingDown) { return; } LOG.warn("Reinitializing local state"); initialize(); } if (denied) { shutdown(); } } catch (IOException iex) { LOG.error("Got fatal exception while reinitializing TaskTracker: " + StringUtils.stringifyException(iex)); return; } } /////////////////////////////////////////////////////// // TaskInProgress maintains all the info for a Task that // lives at this TaskTracker. It maintains the Task object, // its TaskStatus, and the TaskRunner. 
/////////////////////////////////////////////////////// class TaskInProgress { Task task; long lastProgressReport; StringBuffer diagnosticInfo = new StringBuffer(); private TaskRunner runner; volatile boolean done = false; volatile boolean wasKilled = false; private JobConf defaultJobConf; private JobConf localJobConf; private boolean keepFailedTaskFiles; private boolean alwaysKeepTaskFiles; private TaskStatus taskStatus; private long taskTimeout; private String debugCommand; private volatile boolean slotTaken = false; private TaskLauncher launcher; /** */ public TaskInProgress(Task task, JobConf conf) { this(task, conf, null); } public TaskInProgress(Task task, JobConf conf, TaskLauncher launcher) { this.task = task; this.launcher = launcher; this.lastProgressReport = System.currentTimeMillis(); this.defaultJobConf = conf; localJobConf = null; taskStatus = TaskStatus.createTaskStatus(task.isMapTask(), task.getTaskID(), 0.0f, task.getNumSlotsRequired(), task.getState(), diagnosticInfo.toString(), "initializing", getName(), task.isTaskCleanupTask() ? TaskStatus.Phase.CLEANUP : task.isMapTask()? 
TaskStatus.Phase.MAP: TaskStatus.Phase.SHUFFLE, task.getCounters()); taskTimeout = (10 * 60 * 1000); } private void localizeTask(Task task) throws IOException{ Path localTaskDir = lDirAlloc.getLocalPathForWrite( TaskTracker.getLocalTaskDir(task.getJobID().toString(), task.getTaskID().toString(), task.isTaskCleanupTask()), defaultJobConf ); FileSystem localFs = FileSystem.getLocal(fConf); if (!localFs.mkdirs(localTaskDir)) { throw new IOException("Mkdirs failed to create " + localTaskDir.toString()); } // create symlink for ../work if it already doesnt exist String workDir = lDirAlloc.getLocalPathToRead( TaskTracker.getLocalJobDir(task.getJobID().toString()) + Path.SEPARATOR + "work", defaultJobConf).toString(); String link = localTaskDir.getParent().toString() + Path.SEPARATOR + "work"; File flink = new File(link); if (!flink.exists()) FileUtil.symLink(workDir, link); // create the working-directory of the task Path cwd = lDirAlloc.getLocalPathForWrite( getLocalTaskDir(task.getJobID().toString(), task.getTaskID().toString(), task.isTaskCleanupTask()) + Path.SEPARATOR + MRConstants.WORKDIR, defaultJobConf); if (!localFs.mkdirs(cwd)) { throw new IOException("Mkdirs failed to create " + cwd.toString()); } Path localTaskFile = new Path(localTaskDir, "job.xml"); task.setJobFile(localTaskFile.toString()); localJobConf.set("mapred.local.dir", fConf.get("mapred.local.dir")); if (fConf.get("slave.host.name") != null) { localJobConf.set("slave.host.name", fConf.get("slave.host.name")); } localJobConf.set("mapred.task.id", task.getTaskID().toString()); keepFailedTaskFiles = localJobConf.getKeepFailedTaskFiles(); task.localizeConfiguration(localJobConf); List<String[]> staticResolutions = NetUtils.getAllStaticResolutions(); if (staticResolutions != null && staticResolutions.size() > 0) { StringBuffer str = new StringBuffer(); for (int i = 0; i < staticResolutions.size(); i++) { String[] hostToResolved = staticResolutions.get(i); 
str.append(hostToResolved[0]+"="+hostToResolved[1]); if (i != staticResolutions.size() - 1) { str.append(','); } } localJobConf.set("hadoop.net.static.resolutions", str.toString()); } if (task.isMapTask()) { debugCommand = localJobConf.getMapDebugScript(); } else { debugCommand = localJobConf.getReduceDebugScript(); } String keepPattern = localJobConf.getKeepTaskFilesPattern(); if (keepPattern != null) { alwaysKeepTaskFiles = Pattern.matches(keepPattern, task.getTaskID().toString()); } else { alwaysKeepTaskFiles = false; } if (debugCommand != null || localJobConf.getProfileEnabled() || alwaysKeepTaskFiles || keepFailedTaskFiles) { //disable jvm reuse localJobConf.setNumTasksToExecutePerJvm(1); } if (isTaskMemoryManagerEnabled()) { localJobConf.setBoolean("task.memory.mgmt.enabled", true); } OutputStream out = localFs.create(localTaskFile); try { localJobConf.writeXml(out); } finally { out.close(); } task.setConf(localJobConf); } /** */ public Task getTask() { return task; } public TaskRunner getTaskRunner() { return runner; } public synchronized void setJobConf(JobConf lconf){ this.localJobConf = lconf; keepFailedTaskFiles = localJobConf.getKeepFailedTaskFiles(); taskTimeout = localJobConf.getLong("mapred.task.timeout", 10 * 60 * 1000); } public synchronized JobConf getJobConf() { return localJobConf; } /** */ public synchronized TaskStatus getStatus() { taskStatus.setDiagnosticInfo(diagnosticInfo.toString()); if (diagnosticInfo.length() > 0) { diagnosticInfo = new StringBuffer(); } return taskStatus; } /** * Kick off the task execution */ public synchronized void launchTask() throws IOException { if (this.taskStatus.getRunState() == TaskStatus.State.UNASSIGNED || this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN || this.taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) { localizeTask(task); if (this.taskStatus.getRunState() == TaskStatus.State.UNASSIGNED) { this.taskStatus.setRunState(TaskStatus.State.RUNNING); } this.runner = 
task.createRunner(TaskTracker.this, this); this.runner.start(); this.taskStatus.setStartTime(System.currentTimeMillis()); } else { LOG.info("Not launching task: " + task.getTaskID() + " since it's state is " + this.taskStatus.getRunState()); } } boolean isCleaningup() { return this.taskStatus.inTaskCleanupPhase(); } + // checks if state has been changed for the task to be launched + boolean canBeLaunched() { + return (getRunState() == TaskStatus.State.UNASSIGNED || + getRunState() == TaskStatus.State.FAILED_UNCLEAN || + getRunState() == TaskStatus.State.KILLED_UNCLEAN); + } + /** * The task is reporting its progress */ public synchronized void reportProgress(TaskStatus taskStatus) { LOG.info(task.getTaskID() + " " + taskStatus.getProgress() + "% " + taskStatus.getStateString()); // task will report its state as // COMMIT_PENDING when it is waiting for commit response and // when it is committing. // cleanup attempt will report its state as FAILED_UNCLEAN/KILLED_UNCLEAN if (this.done || (this.taskStatus.getRunState() != TaskStatus.State.RUNNING && this.taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && !isCleaningup()) || ((this.taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING || this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN || this.taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) && taskStatus.getRunState() == TaskStatus.State.RUNNING)) { //make sure we ignore progress messages after a task has //invoked TaskUmbilicalProtocol.done() or if the task has been //KILLED/FAILED/FAILED_UNCLEAN/KILLED_UNCLEAN //Also ignore progress update if the state change is from //COMMIT_PENDING/FAILED_UNCLEAN/KILLED_UNCLEA to RUNNING LOG.info(task.getTaskID() + " Ignoring status-update since " + ((this.done) ? 
"task is 'done'" : ("runState: " + this.taskStatus.getRunState())) ); return; } this.taskStatus.statusUpdate(taskStatus); this.lastProgressReport = System.currentTimeMillis(); } /** */ public long getLastProgressReport() { return lastProgressReport; } /** */ public TaskStatus.State getRunState() { return taskStatus.getRunState(); } /** * The task's configured timeout. * * @return the task's configured timeout. */ public long getTaskTimeout() { return taskTimeout; } /** * The task has reported some diagnostic info about its status */ public synchronized void reportDiagnosticInfo(String info) { this.diagnosticInfo.append(info); } public synchronized void reportNextRecordRange(SortedRanges.Range range) { this.taskStatus.setNextRecordRange(range); } /** * The task is reporting that it's done running */ public synchronized void reportDone() { if (isCleaningup()) { if (this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN) { this.taskStatus.setRunState(TaskStatus.State.FAILED); } else if (this.taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) { this.taskStatus.setRunState(TaskStatus.State.KILLED); } } else { this.taskStatus.setRunState(TaskStatus.State.SUCCEEDED); } this.taskStatus.setProgress(1.0f); this.taskStatus.setFinishTime(System.currentTimeMillis()); this.done = true; jvmManager.taskFinished(runner); runner.signalDone(); LOG.info("Task " + task.getTaskID() + " is done."); LOG.info("reported output size for " + task.getTaskID() + " was " + taskStatus.getOutputSize()); } public boolean wasKilled() { return wasKilled; } /** * A task is reporting in as 'done'. * * We need to notify the tasktracker to send an out-of-band heartbeat. * If isn't <code>commitPending</code>, we need to finalize the task * and release the slot it's occupied. * * @param commitPending is the task-commit pending? 
*/ void reportTaskFinished(boolean commitPending) { if (!commitPending) { taskFinished(); releaseSlot(); } notifyTTAboutTaskCompletion(); } /* State changes: * RUNNING/COMMIT_PENDING -> FAILED_UNCLEAN/FAILED/KILLED_UNCLEAN/KILLED * FAILED_UNCLEAN -> FAILED * KILLED_UNCLEAN -> KILLED */ private void setTaskFailState(boolean wasFailure) { // go FAILED_UNCLEAN -> FAILED and KILLED_UNCLEAN -> KILLED always if (taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN) { taskStatus.setRunState(TaskStatus.State.FAILED); } else if (taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) { taskStatus.setRunState(TaskStatus.State.KILLED); } else if (task.isMapOrReduce() && taskStatus.getPhase() != TaskStatus.Phase.CLEANUP) { if (wasFailure) { taskStatus.setRunState(TaskStatus.State.FAILED_UNCLEAN); } else { taskStatus.setRunState(TaskStatus.State.KILLED_UNCLEAN); } } else { if (wasFailure) { taskStatus.setRunState(TaskStatus.State.FAILED); } else { taskStatus.setRunState(TaskStatus.State.KILLED); } } } /** * The task has actually finished running. */ public void taskFinished() { long start = System.currentTimeMillis(); // // Wait until task reports as done. If it hasn't reported in, // wait for a second and try again. // while (!done && (System.currentTimeMillis() - start < WAIT_FOR_DONE)) { try { Thread.sleep(1000); } catch (InterruptedException ie) { } } // // Change state to success or failure, depending on whether // task was 'done' before terminating // boolean needCleanup = false; synchronized (this) { // Remove the task from MemoryManager, if the task SUCCEEDED or FAILED. // KILLED tasks are removed in method kill(), because Kill // would result in launching a cleanup attempt before // TaskRunner returns; if remove happens here, it would remove // wrong task from memory manager. 
if (done || !wasKilled) { removeFromMemoryManager(task.getTaskID()); } if (!done) { if (!wasKilled) { failures += 1; setTaskFailState(true); // call the script here for the failed tasks. if (debugCommand != null) { String taskStdout =""; String taskStderr =""; String taskSyslog =""; String jobConf = task.getJobFile(); try { Map<LogName, LogFileDetail> allFilesDetails = TaskLog.getAllLogsFileDetails(task.getTaskID(), false); // get task's stdout file taskStdout = TaskLog.getRealTaskLogFilePath( allFilesDetails.get(LogName.STDOUT).location, LogName.STDOUT); // get task's stderr file taskStderr = TaskLog.getRealTaskLogFilePath( allFilesDetails.get(LogName.STDERR).location, LogName.STDERR); // get task's syslog file taskSyslog = TaskLog.getRealTaskLogFilePath( allFilesDetails.get(LogName.SYSLOG).location, LogName.SYSLOG); } catch(IOException e){ LOG.warn("Exception finding task's stdout/err/syslog files"); } File workDir = null; try { workDir = new File(lDirAlloc.getLocalPathToRead( TaskTracker.getLocalTaskDir( task.getJobID().toString(), task.getTaskID().toString(), task.isTaskCleanupTask()) + Path.SEPARATOR + MRConstants.WORKDIR, localJobConf). toString()); } catch (IOException e) { LOG.warn("Working Directory of the task " + task.getTaskID() + "doesnt exist. Caught exception " + StringUtils.stringifyException(e)); } // Build the command File stdout = TaskLog.getRealTaskLogFileLocation( task.getTaskID(), TaskLog.LogName.DEBUGOUT); // add pipes program as argument if it exists. 
String program =""; String executable = Submitter.getExecutable(localJobConf); if ( executable != null) { try { program = new URI(executable).getFragment(); } catch (URISyntaxException ur) { LOG.warn("Problem in the URI fragment for pipes executable"); } } String [] debug = debugCommand.split(" "); Vector<String> vargs = new Vector<String>(); for (String component : debug) { vargs.add(component); } vargs.add(taskStdout); vargs.add(taskStderr); vargs.add(taskSyslog); vargs.add(jobConf); vargs.add(program); try { List<String> wrappedCommand = TaskLog.captureDebugOut (vargs, stdout); // run the script. try { runScript(wrappedCommand, workDir); } catch (IOException ioe) { LOG.warn("runScript failed with: " + StringUtils. stringifyException(ioe)); } } catch(IOException e) { LOG.warn("Error in preparing wrapped debug command"); } // add all lines of debug out to diagnostics try { int num = localJobConf.getInt("mapred.debug.out.lines", -1); addDiagnostics(FileUtil.makeShellPath(stdout),num,"DEBUG OUT"); } catch(IOException ioe) { LOG.warn("Exception in add diagnostics!"); } // Debug-command is run. Do the post-debug-script-exit debug-logs // processing. Truncate the logs. getTaskLogsMonitor().addProcessForLogTruncation( task.getTaskID(), Arrays.asList(task)); } } taskStatus.setProgress(0.0f); } this.taskStatus.setFinishTime(System.currentTimeMillis()); needCleanup = (taskStatus.getRunState() == TaskStatus.State.FAILED || taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN || taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN || taskStatus.getRunState() == TaskStatus.State.KILLED); } // // If the task has failed, or if the task was killAndCleanup()'ed, // we should clean up right away. We only wait to cleanup // if the task succeeded, and its results might be useful // later on to downstream job processing. 
// if (needCleanup) { removeTaskFromJob(task.getJobID(), this); } try { cleanup(needCleanup); } catch (IOException ie) { } } /** * Runs the script given in args * @param args script name followed by its argumnets * @param dir current working directory. * @throws IOException */ public void runScript(List<String> args, File dir) throws IOException { ShellCommandExecutor shexec = new ShellCommandExecutor(args.toArray(new String[0]), dir); shexec.execute(); int exitCode = shexec.getExitCode(); if (exitCode != 0) { throw new IOException("Task debug script exit with nonzero status of " + exitCode + "."); } } /** * Add last 'num' lines of the given file to the diagnostics. * if num =-1, all the lines of file are added to the diagnostics. * @param file The file from which to collect diagnostics. * @param num The number of lines to be sent to diagnostics. * @param tag The tag is printed before the diagnostics are printed. */ public void addDiagnostics(String file, int num, String tag) { RandomAccessFile rafile = null; try { rafile = new RandomAccessFile(file,"r"); int no_lines =0; String line = null; StringBuffer tail = new StringBuffer(); tail.append("\n-------------------- "+tag+"---------------------\n"); String[] lines = null; if (num >0) { lines = new String[num]; } while ((line = rafile.readLine()) != null) { no_lines++; if (num >0) { if (no_lines <= num) { lines[no_lines-1] = line; } else { // shift them up for (int i=0; i<num-1; ++i) { lines[i] = lines[i+1]; } lines[num-1] = line; } } else if (num == -1) { tail.append(line); tail.append("\n"); } } int n = no_lines > num ?num:no_lines; if (num >0) { for (int i=0;i<n;i++) { tail.append(lines[i]); tail.append("\n"); } } if(n!=0) reportDiagnosticInfo(tail.toString()); } catch (FileNotFoundException fnfe){ LOG.warn("File "+file+ " not found"); } catch (IOException ioe){ LOG.warn("Error reading file "+file); } finally { try { if (rafile != null) { rafile.close(); } } catch (IOException ioe) { LOG.warn("Error closing file 
"+file); } } } /** * We no longer need anything from this task, as the job has * finished. If the task is still running, kill it and clean up. * * @param wasFailure did the task fail, as opposed to was it killed by * the framework */ public void jobHasFinished(boolean wasFailure) throws IOException { // Kill the task if it is still running synchronized(this){ if (getRunState() == TaskStatus.State.RUNNING || getRunState() == TaskStatus.State.UNASSIGNED || getRunState() == TaskStatus.State.COMMIT_PENDING || isCleaningup()) { kill(wasFailure); } } // Cleanup on the finished task cleanup(true); } /** * Something went wrong and the task must be killed. * @param wasFailure was it a failure (versus a kill request)? */ public synchronized void kill(boolean wasFailure) throws IOException { if (taskStatus.getRunState() == TaskStatus.State.RUNNING || taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING || isCleaningup()) { wasKilled = true; if (wasFailure) { failures += 1; } // runner could be null if task-cleanup attempt is not localized yet if (runner != null) { runner.kill(); } setTaskFailState(wasFailure); } else if (taskStatus.getRunState() == TaskStatus.State.UNASSIGNED) { if (wasFailure) { failures += 1; taskStatus.setRunState(TaskStatus.State.FAILED); } else { taskStatus.setRunState(TaskStatus.State.KILLED); } } taskStatus.setFinishTime(System.currentTimeMillis()); removeFromMemoryManager(task.getTaskID()); releaseSlot(); notifyTTAboutTaskCompletion(); } private synchronized void releaseSlot() { if (slotTaken) { if (launcher != null) { launcher.addFreeSlots(task.getNumSlotsRequired()); } slotTaken = false; + } else { + // wake up the launcher. it may be waiting to block slots for this task. + if (launcher != null) { + launcher.notifySlots(); + } } } /** * The map output has been lost. 
*/ private synchronized void mapOutputLost(String failure ) throws IOException { if (taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING || taskStatus.getRunState() == TaskStatus.State.SUCCEEDED) { // change status to failure LOG.info("Reporting output lost:"+task.getTaskID()); taskStatus.setRunState(TaskStatus.State.FAILED); taskStatus.setProgress(0.0f); reportDiagnosticInfo("Map output lost, rescheduling: " + failure); runningTasks.put(task.getTaskID(), this); mapTotal++; } else { LOG.warn("Output already reported lost:"+task.getTaskID()); } } /** * We no longer need anything from this task. Either the * controlling job is all done and the files have been copied * away, or the task failed and we don't need the remains. * Any calls to cleanup should not lock the tip first. * cleanup does the right thing- updates tasks in Tasktracker * by locking tasktracker first and then locks the tip. * * if needCleanup is true, the whole task directory is cleaned up. * otherwise the current working directory of the task * i.e. &lt;taskid&gt;/work is cleaned up. */ void cleanup(boolean needCleanup) throws IOException { TaskAttemptID taskId = task.getTaskID(); LOG.debug("Cleaning up " + taskId); synchronized (TaskTracker.this) { if (needCleanup) { // see if tasks data structure is holding this tip. // tasks could hold the tip for cleanup attempt, if cleanup attempt // got launched before this method. if (tasks.get(taskId) == this) { tasks.remove(taskId); } } synchronized (this){ if (alwaysKeepTaskFiles || (taskStatus.getRunState() == TaskStatus.State.FAILED && keepFailedTaskFiles)) { return; } } } synchronized (this) { try { // localJobConf could be null if localization has not happened // then no cleanup will be required. 
if (localJobConf == null) {
          // Localization never happened, so there is nothing on disk to clean.
          return;
        }
        String taskDir = getLocalTaskDir(task.getJobID().toString(),
            taskId.toString(), task.isTaskCleanupTask());
        if (needCleanup) {
          if (runner != null) {
            //cleans up the output directory of the task (where map outputs
            //and reduce inputs get stored)
            runner.close();
          }
          //We don't delete the workdir
          //since some other task (running in the same JVM)
          //might be using the dir. The JVM running the tasks would clean
          //the workdir per a task in the task process itself.
          if (localJobConf.getNumTasksToExecutePerJvm() == 1) {
            PathDeletionContext[] contexts =
              buildTaskControllerPathDeletionContexts(localFs, getLocalDirs(),
                  task, false/* not workDir */, taskController);
            directoryCleanupThread.addToQueue(contexts);
          } else {
            PathDeletionContext[] contexts = buildPathDeletionContexts(
                localFs, getLocalFiles(defaultJobConf, taskDir+"/job.xml"));
            directoryCleanupThread.addToQueue(contexts);
          }
        } else {
          // Partial cleanup: keep the task dir, remove only the attempt's
          // working directory (single-task-per-JVM case).
          if (localJobConf.getNumTasksToExecutePerJvm() == 1) {
            PathDeletionContext[] contexts =
              buildTaskControllerPathDeletionContexts(localFs, getLocalDirs(),
                  task, true /* workDir */, taskController);
            directoryCleanupThread.addToQueue(contexts);
          }
        }
      } catch (Throwable ie) {
        // Cleanup is best-effort: log and continue rather than failing the
        // whole TaskTracker on a disk hiccup.
        LOG.info("Error cleaning up task runner: "
                 + StringUtils.stringifyException(ie));
      }
    }
  }

    // Two TaskInProgress objects are equal iff they wrap the same attempt id;
    // hashCode is kept consistent with equals below.
    @Override
    public boolean equals(Object obj) {
      return (obj instanceof TaskInProgress) &&
        task.getTaskID().equals
          (((TaskInProgress) obj).getTask().getTaskID());
    }

    @Override
    public int hashCode() {
      return task.getTaskID().hashCode();
    }
  }

  // ///////////////////////////////////////////////////////////////
  // TaskUmbilicalProtocol
  /////////////////////////////////////////////////////////////////

  /**
   * Called upon startup by the child process, to fetch Task data.
*/ public synchronized JvmTask getTask(JvmContext context) throws IOException { JVMId jvmId = context.jvmId; LOG.debug("JVM with ID : " + jvmId + " asked for a task"); // save pid of task JVM sent by child jvmManager.setPidToJvm(jvmId, context.pid); if (!jvmManager.isJvmKnown(jvmId)) { LOG.info("Killing unknown JVM " + jvmId); return new JvmTask(null, true); } RunningJob rjob = runningJobs.get(jvmId.getJobId()); if (rjob == null) { //kill the JVM since the job is dead LOG.info("Killing JVM " + jvmId + " since job " + jvmId.getJobId() + " is dead"); jvmManager.killJvm(jvmId); return new JvmTask(null, true); } TaskInProgress tip = jvmManager.getTaskForJvm(jvmId); if (tip == null) { return new JvmTask(null, false); } if (tasks.get(tip.getTask().getTaskID()) != null) { //is task still present LOG.info("JVM with ID: " + jvmId + " given task: " + tip.getTask().getTaskID()); return new JvmTask(tip.getTask(), false); } else { LOG.info("Killing JVM with ID: " + jvmId + " since scheduled task: " + tip.getTask().getTaskID() + " is " + tip.taskStatus.getRunState()); return new JvmTask(null, true); } } /** * Called periodically to report Task progress, from 0.0 to 1.0. */ public synchronized boolean statusUpdate(TaskAttemptID taskid, TaskStatus taskStatus) throws IOException { TaskInProgress tip = tasks.get(taskid); if (tip != null) { tip.reportProgress(taskStatus); return true; } else { LOG.warn("Progress from unknown child task: "+taskid); return false; } } /** * Called when the task dies before completion, and we want to report back * diagnostic info */ public synchronized void reportDiagnosticInfo(TaskAttemptID taskid, String info) throws IOException { TaskInProgress tip = tasks.get(taskid); if (tip != null) { tip.reportDiagnosticInfo(info); } else { LOG.warn("Error from unknown child task: "+taskid+". 
Ignored."); } } public synchronized void reportNextRecordRange(TaskAttemptID taskid, SortedRanges.Range range) throws IOException { TaskInProgress tip = tasks.get(taskid); if (tip != null) { tip.reportNextRecordRange(range); } else { LOG.warn("reportNextRecordRange from unknown child task: "+taskid+". " + "Ignored."); } } /** Child checking to see if we're alive. Normally does nothing.*/ public synchronized boolean ping(TaskAttemptID taskid) throws IOException { return tasks.get(taskid) != null; } /** * Task is reporting that it is in commit_pending * and it is waiting for the commit Response */ public synchronized void commitPending(TaskAttemptID taskid, TaskStatus taskStatus) throws IOException { LOG.info("Task " + taskid + " is in commit-pending," +"" + " task state:" +taskStatus.getRunState()); statusUpdate(taskid, taskStatus); reportTaskFinished(taskid, true); } /** * Child checking whether it can commit */ public synchronized boolean canCommit(TaskAttemptID taskid) { return commitResponses.contains(taskid); //don't remove it now } /** * The task is done. */ public synchronized void done(TaskAttemptID taskid) throws IOException { TaskInProgress tip = tasks.get(taskid); commitResponses.remove(taskid); if (tip != null) { tip.reportDone(); } else { LOG.warn("Unknown child task done: "+taskid+". Ignored."); } } /** * A reduce-task failed to shuffle the map-outputs. Kill the task. */ public synchronized void shuffleError(TaskAttemptID taskId, String message) throws IOException { LOG.fatal("Task: " + taskId + " - Killed due to Shuffle Failure: " + message); TaskInProgress tip = runningTasks.get(taskId); tip.reportDiagnosticInfo("Shuffle Error: " + message); purgeTask(tip, true); } /** * A child task had a local filesystem error. Kill the task. 
*/
  public synchronized void fsError(TaskAttemptID taskId, String message)
    throws IOException {
    LOG.fatal("Task: " + taskId + " - Killed due to FSError: " + message);
    TaskInProgress tip = runningTasks.get(taskId);
    tip.reportDiagnosticInfo("FSError: " + message);
    purgeTask(tip, true);
  }

  /**
   * A child task had a fatal error. Kill the task.
   */
  public synchronized void fatalError(TaskAttemptID taskId, String msg)
    throws IOException {
    LOG.fatal("Task: " + taskId + " - Killed : " + msg);
    TaskInProgress tip = runningTasks.get(taskId);
    tip.reportDiagnosticInfo("Error: " + msg);
    purgeTask(tip, true);
  }

  // Serves a reducer's poll for completed-map locations of the given job.
  // Returns (events, shouldReset); shouldReset=true tells the reducer to
  // discard what it has fetched so far and start over.
  public synchronized MapTaskCompletionEventsUpdate getMapCompletionEvents(
      JobID jobId, int fromEventId, int maxLocs, TaskAttemptID id)
      throws IOException {
    TaskCompletionEvent[]mapEvents = TaskCompletionEvent.EMPTY_ARRAY;
    synchronized (shouldReset) {
      if (shouldReset.remove(id)) {
        // One-shot reset signal for this reduce attempt.
        return new MapTaskCompletionEventsUpdate(mapEvents, true);
      }
    }
    RunningJob rjob;
    synchronized (runningJobs) {
      rjob = runningJobs.get(jobId);
      if (rjob != null) {
        synchronized (rjob) {
          FetchStatus f = rjob.getFetchStatus();
          if (f != null) {
            mapEvents = f.getMapEvents(fromEventId, maxLocs);
          }
        }
      }
    }
    return new MapTaskCompletionEventsUpdate(mapEvents, false);
  }

  /////////////////////////////////////////////////////
  // Called by TaskTracker thread after task process ends
  /////////////////////////////////////////////////////
  /**
   * The task is no longer running.  It may not have completed successfully
   */
  void reportTaskFinished(TaskAttemptID taskid, boolean commitPending) {
    TaskInProgress tip;
    synchronized (this) {
      tip = tasks.get(taskid);
    }
    if (tip != null) {
      tip.reportTaskFinished(commitPending);
    } else {
      LOG.warn("Unknown child task finished: "+taskid+". Ignored.");
    }
  }

  /**
   * A completed map task's output has been lost.
*/ public synchronized void mapOutputLost(TaskAttemptID taskid, String errorMsg) throws IOException { TaskInProgress tip = tasks.get(taskid); if (tip != null) { tip.mapOutputLost(errorMsg); } else { LOG.warn("Unknown child with bad map output: "+taskid+". Ignored."); } } /** * The datastructure for initializing a job */ static class RunningJob{ private JobID jobid; private JobConf jobConf; // keep this for later use volatile Set<TaskInProgress> tasks; boolean localized; boolean keepJobFiles; FetchStatus f; RunningJob(JobID jobid) { this.jobid = jobid; localized = false; tasks = new HashSet<TaskInProgress>(); keepJobFiles = false; } JobID getJobID() { return jobid; } void setFetchStatus(FetchStatus f) { this.f = f; } FetchStatus getFetchStatus() { return f; } } /** * Get the name for this task tracker. * @return the string like "tracker_mymachine:50010" */ String getName() { return taskTrackerName; } private synchronized List<TaskStatus> cloneAndResetRunningTaskStatuses( boolean sendCounters) { List<TaskStatus> result = new ArrayList<TaskStatus>(runningTasks.size()); for(TaskInProgress tip: runningTasks.values()) { TaskStatus status = tip.getStatus(); status.setIncludeCounters(sendCounters); status.setOutputSize(tryToGetOutputSize(status.getTaskID(), fConf)); // send counters for finished or failed tasks and commit pending tasks if (status.getRunState() != TaskStatus.State.RUNNING) { status.setIncludeCounters(true); } result.add((TaskStatus)status.clone()); status.clearStatus(); } return result; } /** * Get the list of tasks that will be reported back to the * job tracker in the next heartbeat cycle. * @return a copy of the list of TaskStatus objects */ synchronized List<TaskStatus> getRunningTaskStatuses() { List<TaskStatus> result = new ArrayList<TaskStatus>(runningTasks.size()); for(TaskInProgress tip: runningTasks.values()) { result.add(tip.getStatus()); } return result; } /** * Get the list of stored tasks on this task tracker. 
* @return */ synchronized List<TaskStatus> getNonRunningTasks() { List<TaskStatus> result = new ArrayList<TaskStatus>(tasks.size()); for(Map.Entry<TaskAttemptID, TaskInProgress> task: tasks.entrySet()) { if (!runningTasks.containsKey(task.getKey())) { result.add(task.getValue().getStatus()); } } return result; } /** * Get the list of tasks from running jobs on this task tracker. * @return a copy of the list of TaskStatus objects */ synchronized List<TaskStatus> getTasksFromRunningJobs() { List<TaskStatus> result = new ArrayList<TaskStatus>(tasks.size()); for (Map.Entry <JobID, RunningJob> item : runningJobs.entrySet()) { RunningJob rjob = item.getValue(); synchronized (rjob) { for (TaskInProgress tip : rjob.tasks) { result.add(tip.getStatus()); } } } return result; } /** * Get the default job conf for this tracker. */ JobConf getJobConf() { return fConf; } /** * Check if the given local directories * (and parent directories, if necessary) can be created. * @param localDirs where the new TaskTracker should keep its local files. * @throws DiskErrorException if all local directories are not writable */ private static void checkLocalDirs(String[] localDirs) throws DiskErrorException { boolean writable = false; if (localDirs != null) { for (int i = 0; i < localDirs.length; i++) { try { DiskChecker.checkDir(new File(localDirs[i])); writable = true; } catch(DiskErrorException e) { LOG.warn("Task Tracker local " + e.getMessage()); } } } if (!writable) throw new DiskErrorException( "all local directories are not writable"); } /** * Is this task tracker idle? * @return has this task tracker finished and cleaned up all of its tasks? 
*/ public synchronized boolean isIdle() { return tasks.isEmpty() && tasksToCleanup.isEmpty(); } /** * Start the TaskTracker, point toward the indicated JobTracker */ public static void main(String argv[]) throws Exception { StringUtils.startupShutdownMessage(TaskTracker.class, argv, LOG); if (argv.length != 0) { System.out.println("usage: TaskTracker"); System.exit(-1); } try { JobConf conf=new JobConf(); // enable the server to track time spent waiting on locks ReflectionUtils.setContentionTracing (conf.getBoolean("tasktracker.contention.tracking", false)); new TaskTracker(conf).run(); } catch (Throwable e) { LOG.error("Can not start task tracker because "+ StringUtils.stringifyException(e)); System.exit(-1); } } /** * This class is used in TaskTracker's Jetty to serve the map outputs * to other nodes. */ public static class MapOutputServlet extends HttpServlet { private static final int MAX_BYTES_TO_READ = 64 * 1024; @Override public void doGet(HttpServletRequest request, HttpServletResponse response ) throws ServletException, IOException { String mapId = request.getParameter("map"); String reduceId = request.getParameter("reduce"); String jobId = request.getParameter("job"); if (jobId == null) { throw new IOException("job parameter is required"); } if (mapId == null || reduceId == null) { throw new IOException("map and reduce parameters are required"); } ServletContext context = getServletContext(); int reduce = Integer.parseInt(reduceId); byte[] buffer = new byte[MAX_BYTES_TO_READ]; // true iff IOException was caused by attempt to access input boolean isInputException = true; OutputStream outStream = null; FSDataInputStream mapOutputIn = null; long totalRead = 0; ShuffleServerMetrics shuffleMetrics = (ShuffleServerMetrics) context.getAttribute("shuffleServerMetrics"); TaskTracker tracker = (TaskTracker) context.getAttribute("task.tracker"); long startTime = 0; try { shuffleMetrics.serverHandlerBusy(); if(ClientTraceLog.isInfoEnabled()) startTime = 
System.nanoTime(); outStream = response.getOutputStream(); JobConf conf = (JobConf) context.getAttribute("conf"); LocalDirAllocator lDirAlloc = (LocalDirAllocator)context.getAttribute("localDirAllocator"); FileSystem rfs = ((LocalFileSystem) context.getAttribute("local.file.system")).getRaw(); // Index file Path indexFileName = lDirAlloc.getLocalPathToRead( TaskTracker.getIntermediateOutputDir(jobId, mapId) + "/file.out.index", conf); // Map-output file Path mapOutputFileName = lDirAlloc.getLocalPathToRead( TaskTracker.getIntermediateOutputDir(jobId, mapId) + "/file.out", conf); /** * Read the index file to get the information about where * the map-output for the given reducer is available. */ IndexRecord info = tracker.indexCache.getIndexInformation(mapId, reduce,indexFileName); //set the custom "from-map-task" http header to the map task from which //the map output data is being transferred response.setHeader(FROM_MAP_TASK, mapId); //set the custom "Raw-Map-Output-Length" http header to //the raw (decompressed) length response.setHeader(RAW_MAP_OUTPUT_LENGTH, Long.toString(info.rawLength)); //set the custom "Map-Output-Length" http header to //the actual number of bytes being transferred response.setHeader(MAP_OUTPUT_LENGTH, Long.toString(info.partLength)); //set the custom "for-reduce-task" http header to the reduce task number //for which this map output is being transferred response.setHeader(FOR_REDUCE_TASK, Integer.toString(reduce)); //use the same buffersize as used for reading the data from disk response.setBufferSize(MAX_BYTES_TO_READ); /** * Read the data from the sigle map-output file and * send it to the reducer. 
*/ //open the map-output file mapOutputIn = rfs.open(mapOutputFileName); //seek to the correct offset for the reduce mapOutputIn.seek(info.startOffset); long rem = info.partLength; int len = mapOutputIn.read(buffer, 0, (int)Math.min(rem, MAX_BYTES_TO_READ)); while (rem > 0 && len >= 0) { rem -= len; try { shuffleMetrics.outputBytes(len); outStream.write(buffer, 0, len); outStream.flush(); } catch (IOException ie) { isInputException = false; throw ie; } totalRead += len; len = mapOutputIn.read(buffer, 0, (int)Math.min(rem, MAX_BYTES_TO_READ)); } LOG.info("Sent out " + totalRead + " bytes for reduce: " + reduce + " from map: " + mapId + " given " + info.partLength + "/" + info.rawLength); } catch (IOException ie) { Log log = (Log) context.getAttribute("log"); String errorMsg = ("getMapOutput(" + mapId + "," + reduceId + ") failed :\n"+ StringUtils.stringifyException(ie)); log.warn(errorMsg); if (isInputException) { tracker.mapOutputLost(TaskAttemptID.forName(mapId), errorMsg); } response.sendError(HttpServletResponse.SC_GONE, errorMsg); shuffleMetrics.failedOutput(); throw ie; } finally { if (null != mapOutputIn) { mapOutputIn.close(); } final long endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0; shuffleMetrics.serverHandlerFree(); if (ClientTraceLog.isInfoEnabled()) { ClientTraceLog.info(String.format(MR_CLIENTTRACE_FORMAT, request.getLocalAddr() + ":" + request.getLocalPort(), request.getRemoteAddr() + ":" + request.getRemotePort(), totalRead, "MAPRED_SHUFFLE", mapId, endTime-startTime)); } } outStream.close(); shuffleMetrics.successOutput(); } } // get the full paths of the directory in all the local disks. 
Path[] getLocalFiles(JobConf conf, String subdir) throws IOException{
    String[] localDirs = conf.getLocalDirs();
    Path[] paths = new Path[localDirs.length];
    FileSystem localFs = FileSystem.getLocal(conf);
    for (int i = 0; i < localDirs.length; i++) {
      paths[i] = new Path(localDirs[i], subdir);
      paths[i] = paths[i].makeQualified(localFs);
    }
    return paths;
  }

  // get the paths in all the local disks.
  Path[] getLocalDirs() throws IOException{
    String[] localDirs = fConf.getLocalDirs();
    Path[] paths = new Path[localDirs.length];
    FileSystem localFs = FileSystem.getLocal(fConf);
    for (int i = 0; i < localDirs.length; i++) {
      paths[i] = new Path(localDirs[i]);
      paths[i] = paths[i].makeQualified(localFs);
    }
    return paths;
  }

  // Local filesystem handle cached by this tracker.
  FileSystem getLocalFileSystem(){
    return localFs;
  }

  // Configured map-slot capacity of this tracker.
  int getMaxCurrentMapTasks() {
    return maxMapSlots;
  }

  // Configured reduce-slot capacity of this tracker.
  int getMaxCurrentReduceTasks() {
    return maxReduceSlots;
  }

  /**
   * Is the TaskMemoryManager Enabled on this system?
   * @return true if enabled, false otherwise.
   */
  public boolean isTaskMemoryManagerEnabled() {
    return taskMemoryManagerEnabled;
  }

  public TaskMemoryManagerThread getTaskMemoryManager() {
    return taskMemoryManager;
  }

  /**
   * Normalize the negative values in configuration
   *
   * @param val
   * @return normalized val
   */
  private long normalizeMemoryConfigValue(long val) {
    if (val < 0) {
      // Any negative value means "no limit configured".
      val = JobConf.DISABLED_MEMORY_LIMIT;
    }
    return val;
  }

  /**
   * Memory-related setup
   */
  private void initializeMemoryManagement() {

    // Warn about each deprecated memory key that is still set, so admins
    // migrate to the cluster-level map/reduce memory properties.
    //handling @deprecated
    if (fConf.get(MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY) != null) {
      LOG.warn(
        JobConf.deprecatedString(
          MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY));
    }

    //handling @deprecated
    if (fConf.get(MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY) != null) {
      LOG.warn(
        JobConf.deprecatedString(
          MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY));
    }

    //handling @deprecated
    if (fConf.get(JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY) != null) {
      LOG.warn(
        JobConf.deprecatedString(
          JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY));
    }

    //handling @deprecated
    if
(fConf.get(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY) != null) { LOG.warn( JobConf.deprecatedString( JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY)); } Class<? extends MemoryCalculatorPlugin> clazz = fConf.getClass(MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY, null, MemoryCalculatorPlugin.class); MemoryCalculatorPlugin memoryCalculatorPlugin = (MemoryCalculatorPlugin) MemoryCalculatorPlugin .getMemoryCalculatorPlugin(clazz, fConf); LOG.info(" Using MemoryCalculatorPlugin : " + memoryCalculatorPlugin); if (memoryCalculatorPlugin != null) { totalVirtualMemoryOnTT = memoryCalculatorPlugin.getVirtualMemorySize(); if (totalVirtualMemoryOnTT <= 0) { LOG.warn("TaskTracker's totalVmem could not be calculated. " + "Setting it to " + JobConf.DISABLED_MEMORY_LIMIT); totalVirtualMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; } totalPhysicalMemoryOnTT = memoryCalculatorPlugin.getPhysicalMemorySize(); if (totalPhysicalMemoryOnTT <= 0) { LOG.warn("TaskTracker's totalPmem could not be calculated. " + "Setting it to " + JobConf.DISABLED_MEMORY_LIMIT); totalPhysicalMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; } } mapSlotMemorySizeOnTT = fConf.getLong( JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, JobConf.DISABLED_MEMORY_LIMIT); reduceSlotSizeMemoryOnTT = fConf.getLong( JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, JobConf.DISABLED_MEMORY_LIMIT); totalMemoryAllottedForTasks = maxMapSlots * mapSlotMemorySizeOnTT + maxReduceSlots * reduceSlotSizeMemoryOnTT; if (totalMemoryAllottedForTasks < 0) { //adding check for the old keys which might be used by the administrator //while configuration of the memory monitoring on TT long memoryAllotedForSlot = fConf.normalizeMemoryConfigValue( fConf.getLong(JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY, JobConf.DISABLED_MEMORY_LIMIT)); long limitVmPerTask = fConf.normalizeMemoryConfigValue( fConf.getLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY, JobConf.DISABLED_MEMORY_LIMIT)); if(memoryAllotedForSlot == JobConf.DISABLED_MEMORY_LIMIT) { 
totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT; } else { if(memoryAllotedForSlot > limitVmPerTask) { LOG.info("DefaultMaxVmPerTask is mis-configured. " + "It shouldn't be greater than task limits"); totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT; } else { totalMemoryAllottedForTasks = (maxMapSlots + maxReduceSlots) * (memoryAllotedForSlot/(1024 * 1024)); } } } if (totalMemoryAllottedForTasks > totalPhysicalMemoryOnTT) { LOG.info("totalMemoryAllottedForTasks > totalPhysicalMemoryOnTT." + " Thrashing might happen."); } else if (totalMemoryAllottedForTasks > totalVirtualMemoryOnTT) { LOG.info("totalMemoryAllottedForTasks > totalVirtualMemoryOnTT." + " Thrashing might happen."); } // start the taskMemoryManager thread only if enabled setTaskMemoryManagerEnabledFlag(); if (isTaskMemoryManagerEnabled()) { taskMemoryManager = new TaskMemoryManagerThread(this); taskMemoryManager.setDaemon(true); taskMemoryManager.start(); } } - private void setTaskMemoryManagerEnabledFlag() { + void setTaskMemoryManagerEnabledFlag() { if (!ProcfsBasedProcessTree.isAvailable()) { LOG.info("ProcessTree implementation is missing on this system. " + "TaskMemoryManager is disabled."); taskMemoryManagerEnabled = false; return; } if (totalMemoryAllottedForTasks == JobConf.DISABLED_MEMORY_LIMIT) { taskMemoryManagerEnabled = false; LOG.warn("TaskTracker's totalMemoryAllottedForTasks is -1." + " TaskMemoryManager is disabled."); return; } taskMemoryManagerEnabled = true; } /** * Clean-up the task that TaskMemoryMangerThread requests to do so. * @param tid * @param wasFailure mark the task as failed or killed. 'failed' if true, * 'killed' otherwise * @param diagnosticMsg */ synchronized void cleanUpOverMemoryTask(TaskAttemptID tid, boolean wasFailure, String diagnosticMsg) { TaskInProgress tip = runningTasks.get(tid); if (tip != null) { tip.reportDiagnosticInfo(diagnosticMsg); try { purgeTask(tip, wasFailure); // Marking it as failed/killed. 
} catch (IOException ioe) { LOG.warn("Couldn't purge the task of " + tid + ". Error : " + ioe); } } } /** * Wrapper method used by TaskTracker to check if {@link NodeHealthCheckerService} * can be started * @param conf configuration used to check if service can be started * @return true if service can be started */ private boolean shouldStartHealthMonitor(Configuration conf) { return NodeHealthCheckerService.shouldRun(conf); } /** * Wrapper method used to start {@link NodeHealthCheckerService} for * Task Tracker * @param conf Configuration used by the service. */ private void startHealthMonitor(Configuration conf) { healthChecker = new NodeHealthCheckerService(conf); healthChecker.start(); } } diff --git a/src/test/org/apache/hadoop/mapred/TestTaskLauncher.java b/src/test/org/apache/hadoop/mapred/TestTaskLauncher.java new file mode 100644 index 0000000..4b8642f --- /dev/null +++ b/src/test/org/apache/hadoop/mapred/TestTaskLauncher.java @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapred; + +import static org.junit.Assert.*; + +import java.io.IOException; +import java.util.LinkedHashMap; +import java.util.TreeMap; + +import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; +import org.apache.hadoop.mapred.TaskTracker.TaskLauncher; +import org.apache.hadoop.mapreduce.TaskType; +import org.junit.Test; + +/** + * Tests {@link TaskLauncher} + */ +public class TestTaskLauncher { + private static int expectedLaunchAttemptId = 1; + + private static class MyTaskTracker extends TaskTracker { + // override startNewTask just to set the runState, + // not to launch the task really + @Override + void startNewTask(TaskInProgress tip) { + assertEquals(expectedLaunchAttemptId, tip.getTask().getTaskID().getId()); + tip.getStatus().setRunState(TaskStatus.State.RUNNING); + } + } + + /** + * Tests the case "task waiting to be launched is killed externally". + * + * Launches a task which will wait for ever to get slots. Kill the + * task and see if launcher is able to come out of the wait and pickup a + * another task. 
+ * + * @throws IOException + */ + @Test + public void testExternalKillForLaunchTask() throws IOException { + // setup a TaskTracker + JobConf ttConf = new JobConf(); + ttConf.setInt("mapred.tasktracker.map.tasks.maximum", 4); + TaskTracker tt = new MyTaskTracker(); + tt.runningJobs = new TreeMap<JobID, TaskTracker.RunningJob>(); + tt.runningTasks = new LinkedHashMap<TaskAttemptID, TaskInProgress>(); + tt.setIndexCache(new IndexCache(ttConf)); + tt.setTaskMemoryManagerEnabledFlag(); + + // start map-task launcher with four slots + TaskLauncher mapLauncher = tt.new TaskLauncher(TaskType.MAP, 4); + mapLauncher.start(); + + // launch a task which requires five slots + String jtId = "test"; + TaskAttemptID attemptID = new TaskAttemptID(jtId, 1, true, 0, 0); + Task task = new MapTask(null, attemptID, 0, null, null, 5, null); + mapLauncher.addToTaskQueue(new LaunchTaskAction(task)); + // verify that task is added to runningTasks + TaskInProgress killTip = tt.runningTasks.get(attemptID); + assertNotNull(killTip); + + // wait for a while for launcher to pick up the task + // this loop waits atmost for 30 seconds + for (int i = 0; i < 300; i++) { + if (mapLauncher.getNumWaitingTasksToLaunch() == 0) { + break; + } + UtilsForTests.waitFor(100); + } + assertEquals("Launcher didnt pick up the task " + attemptID + "to launch", + 0, mapLauncher.getNumWaitingTasksToLaunch()); + + // Now, that launcher has picked up the task, it waits until all five slots + // are available. i.e. 
it waits for-ever + // lets kill the task so that map launcher comes out + tt.processKillTaskAction(new KillTaskAction(attemptID)); + assertEquals(TaskStatus.State.KILLED, killTip.getRunState()); + + // launch another attempt which requires only one slot + TaskAttemptID runningAttemptID = new TaskAttemptID(jtId, 1, true, + 0, expectedLaunchAttemptId); + mapLauncher.addToTaskQueue(new LaunchTaskAction(new MapTask(null, + runningAttemptID, 0, null, null, 1, null))); + TaskInProgress runningTip = tt.runningTasks.get(runningAttemptID); + assertNotNull(runningTip); + + // wait for a while for the task to be launched + // this loop waits at most for 30 seconds + for (int i = 0; i < 300; i++) { + if (runningTip.getRunState().equals(TaskStatus.State.RUNNING)) { + break; + } + UtilsForTests.waitFor(100); + } + + // verify that the task went to running + assertEquals(TaskStatus.State.RUNNING, runningTip.getRunState()); + } + +}
jaxlaw/hadoop-common
df47bdc8df408b90e52e11ab8f29fbba5146e7c7
MAPREDUCE:1466 via https://issues.apache.org/jira/secure/attachment/12435948/MAPREDUCE-1466_yhadoop20-2.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 17be1b8..3cca978 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,520 +1,523 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.9 + MAPREDUCE-1466. Save number of input files in JobConf of job. + (Arun Murthy via yhemanth) + MAPREDUCE-1403. Save distributed cache artifacts in JobConf of job. (Arun Murthy via yhemanth) MAPREDUCE-1476. Fix the M/R framework to not call commit for special tasks like job setup/cleanup and task cleanup. (Amareshwari Sriramadasu via yhemanth) HADOOP-5879. Read compression level and strategy from Configuration for gzip compression. (He Yongqiang via cdouglas) HADOOP-6161. Add get/setEnum methods to Configuration. (cdouglas) HADOOP-6382 Mavenize the build.xml targets and update the bin scripts in preparation for publishing POM files (giri kesavan via ltucker) HDFS-737. Add full path name of the file to the block information and summary of total number of files, blocks, live and deadnodes to metasave output. (Jitendra Nath Pandey via suresh) yahoo-hadoop-0.20.1-3195383008 HADOOP-6521. Fix backward compatiblity issue with umask when applications use deprecated param dfs.umask in configuration or use FsPermission.setUMask(). (suresh) MAPREDUCE-1372. Fixed a ConcurrentModificationException in jobtracker. (Arun C Murthy via yhemanth) MAPREDUCE-1316. Fix jobs' retirement from the JobTracker to prevent memory leaks via stale references. (Amar Kamat via acmurthy) MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. (Amareshwari Sriramadasu via acmurthy) HADOOP-6460. Reinitializes buffers used for serializing responses in ipc server on exceeding maximum response size to free up Java heap. (suresh) MAPREDUCE-1100. 
Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. 
Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1376. Add support for submitting jobs as configured users, pluggable mapping of trace users to target users in Gridmix. (cdouglas) yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. 
Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. 
(Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. 
Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. 
Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. 
http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. 
http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. 
http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. 
Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. 
(Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt diff --git a/src/mapred/org/apache/hadoop/mapred/FileInputFormat.java b/src/mapred/org/apache/hadoop/mapred/FileInputFormat.java index 5e77c73..1278ceb 100644 --- a/src/mapred/org/apache/hadoop/mapred/FileInputFormat.java +++ b/src/mapred/org/apache/hadoop/mapred/FileInputFormat.java @@ -1,602 +1,607 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.IdentityHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; /** * A base class for file-based {@link InputFormat}. * * <p><code>FileInputFormat</code> is the base class for all file-based * <code>InputFormat</code>s. This provides a generic implementation of * {@link #getSplits(JobConf, int)}. * Subclasses of <code>FileInputFormat</code> can also override the * {@link #isSplitable(FileSystem, Path)} method to ensure input-files are * not split-up and are processed as a whole by {@link Mapper}s. * @deprecated Use {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat} * instead. 
*/ @Deprecated public abstract class FileInputFormat<K, V> implements InputFormat<K, V> { public static final Log LOG = LogFactory.getLog(FileInputFormat.class); private static final double SPLIT_SLOP = 1.1; // 10% slop private long minSplitSize = 1; private static final PathFilter hiddenFileFilter = new PathFilter(){ public boolean accept(Path p){ String name = p.getName(); return !name.startsWith("_") && !name.startsWith("."); } }; + + static String NUM_INPUT_FILES = "mapreduce.input.num.files"; + protected void setMinSplitSize(long minSplitSize) { this.minSplitSize = minSplitSize; } /** * Proxy PathFilter that accepts a path only if all filters given in the * constructor do. Used by the listPaths() to apply the built-in * hiddenFileFilter together with a user provided one (if any). */ private static class MultiPathFilter implements PathFilter { private List<PathFilter> filters; public MultiPathFilter(List<PathFilter> filters) { this.filters = filters; } public boolean accept(Path path) { for (PathFilter filter : filters) { if (!filter.accept(path)) { return false; } } return true; } } /** * Is the given filename splitable? Usually, true, but if the file is * stream compressed, it will not be. * * <code>FileInputFormat</code> implementations can override this and return * <code>false</code> to ensure that individual input files are never split-up * so that {@link Mapper}s process entire files. * * @param fs the file system that the file is on * @param filename the file name to check * @return is this file splitable? */ protected boolean isSplitable(FileSystem fs, Path filename) { return true; } public abstract RecordReader<K, V> getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException; /** * Set a PathFilter to be applied to the input paths for the map-reduce job. * * @param filter the PathFilter class use for filtering the input paths. */ public static void setInputPathFilter(JobConf conf, Class<? 
extends PathFilter> filter) { conf.setClass("mapred.input.pathFilter.class", filter, PathFilter.class); } /** * Get a PathFilter instance of the filter set for the input paths. * * @return the PathFilter instance set for the job, NULL if none has been set. */ public static PathFilter getInputPathFilter(JobConf conf) { Class<? extends PathFilter> filterClass = conf.getClass( "mapred.input.pathFilter.class", null, PathFilter.class); return (filterClass != null) ? ReflectionUtils.newInstance(filterClass, conf) : null; } /** List input directories. * Subclasses may override to, e.g., select only files matching a regular * expression. * * @param job the job to list input paths for * @return array of FileStatus objects * @throws IOException if zero items. */ protected FileStatus[] listStatus(JobConf job) throws IOException { Path[] dirs = getInputPaths(job); if (dirs.length == 0) { throw new IOException("No input paths specified in job"); } List<FileStatus> result = new ArrayList<FileStatus>(); List<IOException> errors = new ArrayList<IOException>(); // creates a MultiPathFilter with the hiddenFileFilter and the // user provided one (if any). 
List<PathFilter> filters = new ArrayList<PathFilter>(); filters.add(hiddenFileFilter); PathFilter jobFilter = getInputPathFilter(job); if (jobFilter != null) { filters.add(jobFilter); } PathFilter inputFilter = new MultiPathFilter(filters); for (Path p: dirs) { FileSystem fs = p.getFileSystem(job); FileStatus[] matches = fs.globStatus(p, inputFilter); if (matches == null) { errors.add(new IOException("Input path does not exist: " + p)); } else if (matches.length == 0) { errors.add(new IOException("Input Pattern " + p + " matches 0 files")); } else { for (FileStatus globStat: matches) { if (globStat.isDir()) { for(FileStatus stat: fs.listStatus(globStat.getPath(), inputFilter)) { result.add(stat); } } else { result.add(globStat); } } } } if (!errors.isEmpty()) { throw new InvalidInputException(errors); } LOG.info("Total input paths to process : " + result.size()); return result.toArray(new FileStatus[result.size()]); } /** Splits files returned by {@link #listStatus(JobConf)} when * they're too big.*/ @SuppressWarnings("deprecation") public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { FileStatus[] files = listStatus(job); + // Save the number of input files in the job-conf + job.setLong(NUM_INPUT_FILES, files.length); long totalSize = 0; // compute total size for (FileStatus file: files) { // check we have valid files if (file.isDir()) { throw new IOException("Not a file: "+ file.getPath()); } totalSize += file.getLen(); } long goalSize = totalSize / (numSplits == 0 ? 
1 : numSplits); long minSize = Math.max(job.getLong("mapred.min.split.size", 1), minSplitSize); // generate splits ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits); NetworkTopology clusterMap = new NetworkTopology(); for (FileStatus file: files) { Path path = file.getPath(); FileSystem fs = path.getFileSystem(job); long length = file.getLen(); BlockLocation[] blkLocations = fs.getFileBlockLocations(file, 0, length); if ((length != 0) && isSplitable(fs, path)) { long blockSize = file.getBlockSize(); long splitSize = computeSplitSize(goalSize, minSize, blockSize); long bytesRemaining = length; while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) { String[] splitHosts = getSplitHosts(blkLocations, length-bytesRemaining, splitSize, clusterMap); splits.add(new FileSplit(path, length-bytesRemaining, splitSize, splitHosts)); bytesRemaining -= splitSize; } if (bytesRemaining != 0) { splits.add(new FileSplit(path, length-bytesRemaining, bytesRemaining, blkLocations[blkLocations.length-1].getHosts())); } } else if (length != 0) { String[] splitHosts = getSplitHosts(blkLocations,0,length,clusterMap); splits.add(new FileSplit(path, 0, length, splitHosts)); } else { //Create empty hosts array for zero length files splits.add(new FileSplit(path, 0, length, new String[0])); } } LOG.debug("Total # of splits: " + splits.size()); return splits.toArray(new FileSplit[splits.size()]); } protected long computeSplitSize(long goalSize, long minSize, long blockSize) { return Math.max(minSize, Math.min(goalSize, blockSize)); } protected int getBlockIndex(BlockLocation[] blkLocations, long offset) { for (int i = 0 ; i < blkLocations.length; i++) { // is the offset inside this block? 
if ((blkLocations[i].getOffset() <= offset) && (offset < blkLocations[i].getOffset() + blkLocations[i].getLength())){ return i; } } BlockLocation last = blkLocations[blkLocations.length -1]; long fileLength = last.getOffset() + last.getLength() -1; throw new IllegalArgumentException("Offset " + offset + " is outside of file (0.." + fileLength + ")"); } /** * Sets the given comma separated paths as the list of inputs * for the map-reduce job. * * @param conf Configuration of the job * @param commaSeparatedPaths Comma separated paths to be set as * the list of inputs for the map-reduce job. */ public static void setInputPaths(JobConf conf, String commaSeparatedPaths) { setInputPaths(conf, StringUtils.stringToPath( getPathStrings(commaSeparatedPaths))); } /** * Add the given comma separated paths to the list of inputs for * the map-reduce job. * * @param conf The configuration of the job * @param commaSeparatedPaths Comma separated paths to be added to * the list of inputs for the map-reduce job. */ public static void addInputPaths(JobConf conf, String commaSeparatedPaths) { for (String str : getPathStrings(commaSeparatedPaths)) { addInputPath(conf, new Path(str)); } } /** * Set the array of {@link Path}s as the list of inputs * for the map-reduce job. * * @param conf Configuration of the job. * @param inputPaths the {@link Path}s of the input directories/files * for the map-reduce job. */ public static void setInputPaths(JobConf conf, Path... inputPaths) { Path path = new Path(conf.getWorkingDirectory(), inputPaths[0]); StringBuffer str = new StringBuffer(StringUtils.escapeString(path.toString())); for(int i = 1; i < inputPaths.length;i++) { str.append(StringUtils.COMMA_STR); path = new Path(conf.getWorkingDirectory(), inputPaths[i]); str.append(StringUtils.escapeString(path.toString())); } conf.set("mapred.input.dir", str.toString()); } /** * Add a {@link Path} to the list of inputs for the map-reduce job. 
* * @param conf The configuration of the job * @param path {@link Path} to be added to the list of inputs for * the map-reduce job. */ public static void addInputPath(JobConf conf, Path path ) { path = new Path(conf.getWorkingDirectory(), path); String dirStr = StringUtils.escapeString(path.toString()); String dirs = conf.get("mapred.input.dir"); conf.set("mapred.input.dir", dirs == null ? dirStr : dirs + StringUtils.COMMA_STR + dirStr); } // This method escapes commas in the glob pattern of the given paths. private static String[] getPathStrings(String commaSeparatedPaths) { int length = commaSeparatedPaths.length(); int curlyOpen = 0; int pathStart = 0; boolean globPattern = false; List<String> pathStrings = new ArrayList<String>(); for (int i=0; i<length; i++) { char ch = commaSeparatedPaths.charAt(i); switch(ch) { case '{' : { curlyOpen++; if (!globPattern) { globPattern = true; } break; } case '}' : { curlyOpen--; if (curlyOpen == 0 && globPattern) { globPattern = false; } break; } case ',' : { if (!globPattern) { pathStrings.add(commaSeparatedPaths.substring(pathStart, i)); pathStart = i + 1 ; } break; } } } pathStrings.add(commaSeparatedPaths.substring(pathStart, length)); return pathStrings.toArray(new String[0]); } /** * Get the list of input {@link Path}s for the map-reduce job. * * @param conf The configuration of the job * @return the list of input {@link Path}s for the map-reduce job. 
*/ public static Path[] getInputPaths(JobConf conf) { String dirs = conf.get("mapred.input.dir", ""); String [] list = StringUtils.split(dirs); Path[] result = new Path[list.length]; for (int i = 0; i < list.length; i++) { result[i] = new Path(StringUtils.unEscapeString(list[i])); } return result; } private void sortInDescendingOrder(List<NodeInfo> mylist) { Collections.sort(mylist, new Comparator<NodeInfo> () { public int compare(NodeInfo obj1, NodeInfo obj2) { if (obj1 == null || obj2 == null) return -1; if (obj1.getValue() == obj2.getValue()) { return 0; } else { return ((obj1.getValue() < obj2.getValue()) ? 1 : -1); } } } ); } /** * This function identifies and returns the hosts that contribute * most for a given split. For calculating the contribution, rack * locality is treated on par with host locality, so hosts from racks * that contribute the most are preferred over hosts on racks that * contribute less * @param blkLocations The list of block locations * @param offset * @param splitSize * @return array of hosts that contribute most to this split * @throws IOException */ protected String[] getSplitHosts(BlockLocation[] blkLocations, long offset, long splitSize, NetworkTopology clusterMap) throws IOException { int startIndex = getBlockIndex(blkLocations, offset); long bytesInThisBlock = blkLocations[startIndex].getOffset() + blkLocations[startIndex].getLength() - offset; //If this is the only block, just return if (bytesInThisBlock >= splitSize) { return blkLocations[startIndex].getHosts(); } long bytesInFirstBlock = bytesInThisBlock; int index = startIndex + 1; splitSize -= bytesInThisBlock; while (splitSize > 0) { bytesInThisBlock = Math.min(splitSize, blkLocations[index++].getLength()); splitSize -= bytesInThisBlock; } long bytesInLastBlock = bytesInThisBlock; int endIndex = index - 1; Map <Node,NodeInfo> hostsMap = new IdentityHashMap<Node,NodeInfo>(); Map <Node,NodeInfo> racksMap = new IdentityHashMap<Node,NodeInfo>(); String [] allTopos = new 
String[0]; // Build the hierarchy and aggregate the contribution of // bytes at each level. See TestGetSplitHosts.java for (index = startIndex; index <= endIndex; index++) { // Establish the bytes in this block if (index == startIndex) { bytesInThisBlock = bytesInFirstBlock; } else if (index == endIndex) { bytesInThisBlock = bytesInLastBlock; } else { bytesInThisBlock = blkLocations[index].getLength(); } allTopos = blkLocations[index].getTopologyPaths(); // If no topology information is available, just // prefix a fakeRack if (allTopos.length == 0) { allTopos = fakeRacks(blkLocations, index); } // NOTE: This code currently works only for one level of // hierarchy (rack/host). However, it is relatively easy // to extend this to support aggregation at different // levels for (String topo: allTopos) { Node node, parentNode; NodeInfo nodeInfo, parentNodeInfo; node = clusterMap.getNode(topo); if (node == null) { node = new NodeBase(topo); clusterMap.add(node); } nodeInfo = hostsMap.get(node); if (nodeInfo == null) { nodeInfo = new NodeInfo(node); hostsMap.put(node,nodeInfo); parentNode = node.getParent(); parentNodeInfo = racksMap.get(parentNode); if (parentNodeInfo == null) { parentNodeInfo = new NodeInfo(parentNode); racksMap.put(parentNode,parentNodeInfo); } parentNodeInfo.addLeaf(nodeInfo); } else { nodeInfo = hostsMap.get(node); parentNode = node.getParent(); parentNodeInfo = racksMap.get(parentNode); } nodeInfo.addValue(index, bytesInThisBlock); parentNodeInfo.addValue(index, bytesInThisBlock); } // for all topos } // for all indices return identifyHosts(allTopos.length, racksMap); } private String[] identifyHosts(int replicationFactor, Map<Node,NodeInfo> racksMap) { String [] retVal = new String[replicationFactor]; List <NodeInfo> rackList = new LinkedList<NodeInfo>(); rackList.addAll(racksMap.values()); // Sort the racks based on their contribution to this split sortInDescendingOrder(rackList); boolean done = false; int index = 0; // Get the host list for all 
our aggregated items, sort // them and return the top entries for (NodeInfo ni: rackList) { Set<NodeInfo> hostSet = ni.getLeaves(); List<NodeInfo>hostList = new LinkedList<NodeInfo>(); hostList.addAll(hostSet); // Sort the hosts in this rack based on their contribution sortInDescendingOrder(hostList); for (NodeInfo host: hostList) { // Strip out the port number from the host name retVal[index++] = host.node.getName().split(":")[0]; if (index == replicationFactor) { done = true; break; } } if (done == true) { break; } } return retVal; } private String[] fakeRacks(BlockLocation[] blkLocations, int index) throws IOException { String[] allHosts = blkLocations[index].getHosts(); String[] allTopos = new String[allHosts.length]; for (int i = 0; i < allHosts.length; i++) { allTopos[i] = NetworkTopology.DEFAULT_RACK + "/" + allHosts[i]; } return allTopos; } private static class NodeInfo { final Node node; final Set<Integer> blockIds; final Set<NodeInfo> leaves; private long value; NodeInfo(Node node) { this.node = node; blockIds = new HashSet<Integer>(); leaves = new HashSet<NodeInfo>(); } long getValue() {return value;} void addValue(int blockIndex, long value) { if (blockIds.add(blockIndex) == true) { this.value += value; } } Set<NodeInfo> getLeaves() { return leaves;} void addLeaf(NodeInfo nodeInfo) { leaves.add(nodeInfo); } } } diff --git a/src/mapred/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/src/mapred/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java index a2b6c49..dba50b6 100644 --- a/src/mapred/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java +++ b/src/mapred/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java @@ -1,419 +1,429 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. 
The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.lib.input; import java.io.IOException; import java.util.ArrayList; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; /** * A base class for file-based {@link InputFormat}s. * * <p><code>FileInputFormat</code> is the base class for all file-based * <code>InputFormat</code>s. This provides a generic implementation of * {@link #getSplits(JobContext)}. * Subclasses of <code>FileInputFormat</code> can also override the * {@link #isSplitable(JobContext, Path)} method to ensure input-files are * not split-up and are processed as a whole by {@link Mapper}s. 
*/ public abstract class FileInputFormat<K, V> extends InputFormat<K, V> { private static final Log LOG = LogFactory.getLog(FileInputFormat.class); private static final double SPLIT_SLOP = 1.1; // 10% slop private static final PathFilter hiddenFileFilter = new PathFilter(){ public boolean accept(Path p){ String name = p.getName(); return !name.startsWith("_") && !name.startsWith("."); } }; + /** + * {@link #NUM_INPUT_FILES} is not a public constant. + */ + public static String NUM_INPUT_FILES = "mapreduce.input.num.files"; + /** * Proxy PathFilter that accepts a path only if all filters given in the * constructor do. Used by the listPaths() to apply the built-in * hiddenFileFilter together with a user provided one (if any). */ private static class MultiPathFilter implements PathFilter { private List<PathFilter> filters; public MultiPathFilter(List<PathFilter> filters) { this.filters = filters; } public boolean accept(Path path) { for (PathFilter filter : filters) { if (!filter.accept(path)) { return false; } } return true; } } /** * Get the lower bound on split size imposed by the format. * @return the number of bytes of the minimal split for this format */ protected long getFormatMinSplitSize() { return 1; } /** * Is the given filename splitable? Usually, true, but if the file is * stream compressed, it will not be. * * <code>FileInputFormat</code> implementations can override this and return * <code>false</code> to ensure that individual input files are never split-up * so that {@link Mapper}s process entire files. * * @param context the job context * @param filename the file name to check * @return is this file splitable? */ protected boolean isSplitable(JobContext context, Path filename) { return true; } /** * Set a PathFilter to be applied to the input paths for the map-reduce job. * @param job the job to modify * @param filter the PathFilter class use for filtering the input paths. */ public static void setInputPathFilter(Job job, Class<? 
extends PathFilter> filter) { job.getConfiguration().setClass("mapred.input.pathFilter.class", filter, PathFilter.class); } /** * Set the minimum input split size * @param job the job to modify * @param size the minimum size */ public static void setMinInputSplitSize(Job job, long size) { job.getConfiguration().setLong("mapred.min.split.size", size); } /** * Get the minimum split size * @param job the job * @return the minimum number of bytes that can be in a split */ public static long getMinSplitSize(JobContext job) { return job.getConfiguration().getLong("mapred.min.split.size", 1L); } /** * Set the maximum split size * @param job the job to modify * @param size the maximum split size */ public static void setMaxInputSplitSize(Job job, long size) { job.getConfiguration().setLong("mapred.max.split.size", size); } /** * Get the maximum split size. * @param context the job to look at. * @return the maximum number of bytes a split can include */ public static long getMaxSplitSize(JobContext context) { return context.getConfiguration().getLong("mapred.max.split.size", Long.MAX_VALUE); } /** * Get a PathFilter instance of the filter set for the input paths. * * @return the PathFilter instance set for the job, NULL if none has been set. */ public static PathFilter getInputPathFilter(JobContext context) { Configuration conf = context.getConfiguration(); Class<?> filterClass = conf.getClass("mapred.input.pathFilter.class", null, PathFilter.class); return (filterClass != null) ? (PathFilter) ReflectionUtils.newInstance(filterClass, conf) : null; } /** List input directories. * Subclasses may override to, e.g., select only files matching a regular * expression. * * @param job the job to list input paths for * @return array of FileStatus objects * @throws IOException if zero items. 
*/ protected List<FileStatus> listStatus(JobContext job ) throws IOException { List<FileStatus> result = new ArrayList<FileStatus>(); Path[] dirs = getInputPaths(job); if (dirs.length == 0) { throw new IOException("No input paths specified in job"); } List<IOException> errors = new ArrayList<IOException>(); // creates a MultiPathFilter with the hiddenFileFilter and the // user provided one (if any). List<PathFilter> filters = new ArrayList<PathFilter>(); filters.add(hiddenFileFilter); PathFilter jobFilter = getInputPathFilter(job); if (jobFilter != null) { filters.add(jobFilter); } PathFilter inputFilter = new MultiPathFilter(filters); for (int i=0; i < dirs.length; ++i) { Path p = dirs[i]; FileSystem fs = p.getFileSystem(job.getConfiguration()); FileStatus[] matches = fs.globStatus(p, inputFilter); if (matches == null) { errors.add(new IOException("Input path does not exist: " + p)); } else if (matches.length == 0) { errors.add(new IOException("Input Pattern " + p + " matches 0 files")); } else { for (FileStatus globStat: matches) { if (globStat.isDir()) { for(FileStatus stat: fs.listStatus(globStat.getPath(), inputFilter)) { result.add(stat); } } else { result.add(globStat); } } } } if (!errors.isEmpty()) { throw new InvalidInputException(errors); } LOG.info("Total input paths to process : " + result.size()); return result; } /** * Generate the list of files and make them into FileSplits. 
*/ public List<InputSplit> getSplits(JobContext job ) throws IOException { long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job)); long maxSize = getMaxSplitSize(job); // generate splits List<InputSplit> splits = new ArrayList<InputSplit>(); - for (FileStatus file: listStatus(job)) { + List<FileStatus>files = listStatus(job); + for (FileStatus file: files) { Path path = file.getPath(); FileSystem fs = path.getFileSystem(job.getConfiguration()); long length = file.getLen(); BlockLocation[] blkLocations = fs.getFileBlockLocations(file, 0, length); if ((length != 0) && isSplitable(job, path)) { long blockSize = file.getBlockSize(); long splitSize = computeSplitSize(blockSize, minSize, maxSize); long bytesRemaining = length; while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) { int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining); splits.add(new FileSplit(path, length-bytesRemaining, splitSize, blkLocations[blkIndex].getHosts())); bytesRemaining -= splitSize; } if (bytesRemaining != 0) { splits.add(new FileSplit(path, length-bytesRemaining, bytesRemaining, blkLocations[blkLocations.length-1].getHosts())); } } else if (length != 0) { splits.add(new FileSplit(path, 0, length, blkLocations[0].getHosts())); } else { //Create empty hosts array for zero length files splits.add(new FileSplit(path, 0, length, new String[0])); } } + + // Save the number of input files in the job-conf + job.getConfiguration().setLong(NUM_INPUT_FILES, files.size()); + LOG.debug("Total # of splits: " + splits.size()); return splits; } protected long computeSplitSize(long blockSize, long minSize, long maxSize) { return Math.max(minSize, Math.min(maxSize, blockSize)); } protected int getBlockIndex(BlockLocation[] blkLocations, long offset) { for (int i = 0 ; i < blkLocations.length; i++) { // is the offset inside this block? 
if ((blkLocations[i].getOffset() <= offset) && (offset < blkLocations[i].getOffset() + blkLocations[i].getLength())){ return i; } } BlockLocation last = blkLocations[blkLocations.length -1]; long fileLength = last.getOffset() + last.getLength() -1; throw new IllegalArgumentException("Offset " + offset + " is outside of file (0.." + fileLength + ")"); } /** * Sets the given comma separated paths as the list of inputs * for the map-reduce job. * * @param job the job * @param commaSeparatedPaths Comma separated paths to be set as * the list of inputs for the map-reduce job. */ public static void setInputPaths(Job job, String commaSeparatedPaths ) throws IOException { setInputPaths(job, StringUtils.stringToPath( getPathStrings(commaSeparatedPaths))); } /** * Add the given comma separated paths to the list of inputs for * the map-reduce job. * * @param job The job to modify * @param commaSeparatedPaths Comma separated paths to be added to * the list of inputs for the map-reduce job. */ public static void addInputPaths(Job job, String commaSeparatedPaths ) throws IOException { for (String str : getPathStrings(commaSeparatedPaths)) { addInputPath(job, new Path(str)); } } /** * Set the array of {@link Path}s as the list of inputs * for the map-reduce job. * * @param job The job to modify * @param inputPaths the {@link Path}s of the input directories/files * for the map-reduce job. */ public static void setInputPaths(Job job, Path... inputPaths) throws IOException { Configuration conf = job.getConfiguration(); FileSystem fs = FileSystem.get(conf); Path path = inputPaths[0].makeQualified(fs); StringBuffer str = new StringBuffer(StringUtils.escapeString(path.toString())); for(int i = 1; i < inputPaths.length;i++) { str.append(StringUtils.COMMA_STR); path = inputPaths[i].makeQualified(fs); str.append(StringUtils.escapeString(path.toString())); } conf.set("mapred.input.dir", str.toString()); } /** * Add a {@link Path} to the list of inputs for the map-reduce job. 
* * @param job The {@link Job} to modify * @param path {@link Path} to be added to the list of inputs for * the map-reduce job. */ public static void addInputPath(Job job, Path path) throws IOException { Configuration conf = job.getConfiguration(); FileSystem fs = FileSystem.get(conf); path = path.makeQualified(fs); String dirStr = StringUtils.escapeString(path.toString()); String dirs = conf.get("mapred.input.dir"); conf.set("mapred.input.dir", dirs == null ? dirStr : dirs + "," + dirStr); } // This method escapes commas in the glob pattern of the given paths. private static String[] getPathStrings(String commaSeparatedPaths) { int length = commaSeparatedPaths.length(); int curlyOpen = 0; int pathStart = 0; boolean globPattern = false; List<String> pathStrings = new ArrayList<String>(); for (int i=0; i<length; i++) { char ch = commaSeparatedPaths.charAt(i); switch(ch) { case '{' : { curlyOpen++; if (!globPattern) { globPattern = true; } break; } case '}' : { curlyOpen--; if (curlyOpen == 0 && globPattern) { globPattern = false; } break; } case ',' : { if (!globPattern) { pathStrings.add(commaSeparatedPaths.substring(pathStart, i)); pathStart = i + 1 ; } break; } } } pathStrings.add(commaSeparatedPaths.substring(pathStart, length)); return pathStrings.toArray(new String[0]); } /** * Get the list of input {@link Path}s for the map-reduce job. * * @param context The job * @return the list of input {@link Path}s for the map-reduce job. 
*/ public static Path[] getInputPaths(JobContext context) { String dirs = context.getConfiguration().get("mapred.input.dir", ""); String [] list = StringUtils.split(dirs); Path[] result = new Path[list.length]; for (int i = 0; i < list.length; i++) { result[i] = new Path(StringUtils.unEscapeString(list[i])); } return result; } } diff --git a/src/test/org/apache/hadoop/mapred/TestFileInputFormat.java b/src/test/org/apache/hadoop/mapred/TestFileInputFormat.java index 1b44ee3..bc1a279 100644 --- a/src/test/org/apache/hadoop/mapred/TestFileInputFormat.java +++ b/src/test/org/apache/hadoop/mapred/TestFileInputFormat.java @@ -1,89 +1,130 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; import java.io.DataOutputStream; +import java.io.IOException; import junit.framework.TestCase; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.MiniDFSCluster; public class TestFileInputFormat extends TestCase { + Configuration conf = new Configuration(); + MiniDFSCluster dfs = null; + + public void setUp() throws Exception { + dfs = new MiniDFSCluster(conf, 4, true, + new String[]{"/rack0", "/rack0", + "/rack1", "/rack1"}, + new String[]{"host0", "host1", + "host2", "host3"}); + } + public void testLocality() throws Exception { - JobConf conf = new JobConf(); - MiniDFSCluster dfs = null; - try { - dfs = new MiniDFSCluster(conf, 4, true, - new String[]{"/rack0", "/rack0", - "/rack1", "/rack1"}, - new String[]{"host0", "host1", - "host2", "host3"}); - FileSystem fs = dfs.getFileSystem(); - System.out.println("FileSystem " + fs.getUri()); - Path path = new Path("/foo/bar"); - // create a multi-block file on hdfs - DataOutputStream out = fs.create(path, true, 4096, - (short) 2, 512, null); - for(int i=0; i < 1000; ++i) { - out.writeChars("Hello\n"); - } - out.close(); - System.out.println("Wrote file"); + JobConf job = new JobConf(conf); + FileSystem fs = dfs.getFileSystem(); + System.out.println("FileSystem " + fs.getUri()); - // split it using a file input format - TextInputFormat.addInputPath(conf, path); - TextInputFormat inFormat = new TextInputFormat(); - inFormat.configure(conf); - InputSplit[] splits = inFormat.getSplits(conf, 1); - FileStatus fileStatus = fs.getFileStatus(path); - BlockLocation[] locations = - fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen()); - System.out.println("Made splits"); + Path inputDir = new Path("/foo/"); + String fileName = "part-0000"; + createInputs(fs, inputDir, fileName); - // make sure that each 
split is a block and the locations match - for(int i=0; i < splits.length; ++i) { - FileSplit fileSplit = (FileSplit) splits[i]; - System.out.println("File split: " + fileSplit); - for (String h: fileSplit.getLocations()) { - System.out.println("Location: " + h); - } - System.out.println("Block: " + locations[i]); - assertEquals(locations[i].getOffset(), fileSplit.getStart()); - assertEquals(locations[i].getLength(), fileSplit.getLength()); - String[] blockLocs = locations[i].getHosts(); - String[] splitLocs = fileSplit.getLocations(); - assertEquals(2, blockLocs.length); - assertEquals(2, splitLocs.length); - assertTrue((blockLocs[0].equals(splitLocs[0]) && - blockLocs[1].equals(splitLocs[1])) || - (blockLocs[1].equals(splitLocs[0]) && - blockLocs[0].equals(splitLocs[1]))); - } - } finally { - if (dfs != null) { - dfs.shutdown(); + // split it using a file input format + TextInputFormat.addInputPath(job, inputDir); + TextInputFormat inFormat = new TextInputFormat(); + inFormat.configure(job); + InputSplit[] splits = inFormat.getSplits(job, 1); + FileStatus fileStatus = fs.getFileStatus(new Path(inputDir, fileName)); + BlockLocation[] locations = + fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen()); + System.out.println("Made splits"); + + // make sure that each split is a block and the locations match + for(int i=0; i < splits.length; ++i) { + FileSplit fileSplit = (FileSplit) splits[i]; + System.out.println("File split: " + fileSplit); + for (String h: fileSplit.getLocations()) { + System.out.println("Location: " + h); } + System.out.println("Block: " + locations[i]); + assertEquals(locations[i].getOffset(), fileSplit.getStart()); + assertEquals(locations[i].getLength(), fileSplit.getLength()); + String[] blockLocs = locations[i].getHosts(); + String[] splitLocs = fileSplit.getLocations(); + assertEquals(2, blockLocs.length); + assertEquals(2, splitLocs.length); + assertTrue((blockLocs[0].equals(splitLocs[0]) && + blockLocs[1].equals(splitLocs[1])) || + 
(blockLocs[1].equals(splitLocs[0]) && + blockLocs[0].equals(splitLocs[1]))); + } + + assertEquals("Expected value of " + FileInputFormat.NUM_INPUT_FILES, + 1, job.getLong(FileInputFormat.NUM_INPUT_FILES, 0)); + } + + private void createInputs(FileSystem fs, Path inDir, String fileName) + throws IOException { + // create a multi-block file on hdfs + DataOutputStream out = fs.create(new Path(inDir, fileName), true, 4096, + (short) 2, 512, null); + for(int i=0; i < 1000; ++i) { + out.writeChars("Hello\n"); } + out.close(); + System.out.println("Wrote file"); } + + public void testNumInputs() throws Exception { + JobConf job = new JobConf(conf); + FileSystem fs = dfs.getFileSystem(); + System.out.println("FileSystem " + fs.getUri()); + Path inputDir = new Path("/foo/"); + final int numFiles = 10; + String fileNameBase = "part-0000"; + for (int i=0; i < numFiles; ++i) { + createInputs(fs, inputDir, fileNameBase + String.valueOf(i)); + } + createInputs(fs, inputDir, "_meta"); + createInputs(fs, inputDir, "_temp"); + + // split it using a file input format + TextInputFormat.addInputPath(job, inputDir); + TextInputFormat inFormat = new TextInputFormat(); + inFormat.configure(job); + InputSplit[] splits = inFormat.getSplits(job, 1); + + assertEquals("Expected value of " + FileInputFormat.NUM_INPUT_FILES, + numFiles, job.getLong(FileInputFormat.NUM_INPUT_FILES, 0)); + + } + + public void tearDown() throws Exception { + if (dfs != null) { + dfs.shutdown(); + } + } }
jaxlaw/hadoop-common
948658bf5ff87d60f27d9d1da69e519296118d5d
MAPREDUCE:1403 via https://issues.apache.org/jira/secure/attachment/12435768/MAPREDUCE-1403_yhadoop20-1.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index cd55a01..17be1b8 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,520 +1,523 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.9 + MAPREDUCE-1403. Save distributed cache artifacts in JobConf of job. + (Arun Murthy via yhemanth) + MAPREDUCE-1476. Fix the M/R framework to not call commit for special tasks like job setup/cleanup and task cleanup. (Amareshwari Sriramadasu via yhemanth) HADOOP-5879. Read compression level and strategy from Configuration for gzip compression. (He Yongqiang via cdouglas) HADOOP-6161. Add get/setEnum methods to Configuration. (cdouglas) HADOOP-6382 Mavenize the build.xml targets and update the bin scripts in preparation for publishing POM files (giri kesavan via ltucker) HDFS-737. Add full path name of the file to the block information and summary of total number of files, blocks, live and deadnodes to metasave output. (Jitendra Nath Pandey via suresh) yahoo-hadoop-0.20.1-3195383008 HADOOP-6521. Fix backward compatiblity issue with umask when applications use deprecated param dfs.umask in configuration or use FsPermission.setUMask(). (suresh) MAPREDUCE-1372. Fixed a ConcurrentModificationException in jobtracker. (Arun C Murthy via yhemanth) MAPREDUCE-1316. Fix jobs' retirement from the JobTracker to prevent memory leaks via stale references. (Amar Kamat via acmurthy) MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. (Amareshwari Sriramadasu via acmurthy) HADOOP-6460. Reinitializes buffers used for serializing responses in ipc server on exceeding maximum response size to free up Java heap. (suresh) MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. 
Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) HDFS-758. 
Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1376. Add support for submitting jobs as configured users, pluggable mapping of trace users to target users in Gridmix. (cdouglas) yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. 
Fix failure to process rename operation from edits log due to quota verification. (suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. 
(Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. 
yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. 
Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. 
http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". 
http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. 
One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. 
(Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) diff --git a/src/core/org/apache/hadoop/filecache/DistributedCache.java b/src/core/org/apache/hadoop/filecache/DistributedCache.java index 394c847..73dbba7 100644 --- a/src/core/org/apache/hadoop/filecache/DistributedCache.java +++ b/src/core/org/apache/hadoop/filecache/DistributedCache.java @@ -1,937 +1,960 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.filecache; import org.apache.commons.logging.*; import java.io.*; import java.util.*; import org.apache.hadoop.conf.*; import org.apache.hadoop.util.*; import org.apache.hadoop.fs.*; import java.net.URI; /** * Distribute application-specific large, read-only files efficiently. * * <p><code>DistributedCache</code> is a facility provided by the Map-Reduce * framework to cache files (text, archives, jars etc.) needed by applications. * </p> * * <p>Applications specify the files, via urls (hdfs:// or http://) to be cached * via the {@link org.apache.hadoop.mapred.JobConf}. * The <code>DistributedCache</code> assumes that the * files specified via hdfs:// urls are already present on the * {@link FileSystem} at the path specified by the url.</p> * * <p>The framework will copy the necessary files on to the slave node before * any tasks for the job are executed on that node. Its efficiency stems from * the fact that the files are only copied once per job and the ability to * cache archives which are un-archived on the slaves.</p> * * <p><code>DistributedCache</code> can be used to distribute simple, read-only * data/text files and/or more complex types such as archives, jars etc. * Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes. 
* Jars may be optionally added to the classpath of the tasks, a rudimentary * software distribution mechanism. Files have execution permissions. * Optionally users can also direct it to symlink the distributed cache file(s) * into the working directory of the task.</p> * * <p><code>DistributedCache</code> tracks modification timestamps of the cache * files. Clearly the cache files should not be modified by the application * or externally while the job is executing.</p> * * <p>Here is an illustrative example on how to use the * <code>DistributedCache</code>:</p> * <p><blockquote><pre> * // Setting up the cache for the application * * 1. Copy the requisite files to the <code>FileSystem</code>: * * $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat * $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip * $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar * $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar * $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz * $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz * * 2. Setup the application's <code>JobConf</code>: * * JobConf job = new JobConf(); * DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"), * job); * DistributedCache.addCacheArchive(new URI("/myapp/map.zip", job); * DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job); * DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar", job); * DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz", job); * DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz", job); * * 3. 
Use the cached files in the {@link org.apache.hadoop.mapred.Mapper} * or {@link org.apache.hadoop.mapred.Reducer}: * * public static class MapClass extends MapReduceBase * implements Mapper&lt;K, V, K, V&gt; { * * private Path[] localArchives; * private Path[] localFiles; * * public void configure(JobConf job) { * // Get the cached archives/files * localArchives = DistributedCache.getLocalCacheArchives(job); * localFiles = DistributedCache.getLocalCacheFiles(job); * } * * public void map(K key, V value, * OutputCollector&lt;K, V&gt; output, Reporter reporter) * throws IOException { * // Use data from the cached archives/files here * // ... * // ... * output.collect(k, v); * } * } * * </pre></blockquote></p> * * @see org.apache.hadoop.mapred.JobConf * @see org.apache.hadoop.mapred.JobClient */ public class DistributedCache { // cacheID to cacheStatus mapping private static TreeMap<String, CacheStatus> cachedArchives = new TreeMap<String, CacheStatus>(); private static TreeMap<Path, Long> baseDirSize = new TreeMap<Path, Long>(); // default total cache size private static final long DEFAULT_CACHE_SIZE = 10737418240L; private static final Log LOG = LogFactory.getLog(DistributedCache.class); private static Random random = new Random(); + /** + * Warning: {@link #CACHE_FILES_SIZES} is not a *public* constant. + */ + public static final String CACHE_FILES_SIZES = "mapred.cache.files.filesizes"; + + /** + * Warning: {@link #CACHE_ARCHIVES_SIZES} is not a *public* constant. + */ + public static final String CACHE_ARCHIVES_SIZES = + "mapred.cache.archives.filesizes"; + /** * Get the locally cached file or archive; it could either be * previously cached (and valid) or copy it from the {@link FileSystem} now. * * @param cache the cache to be localized, this should be specified as * new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). 
If no schema * or hostname:port is provided the file is assumed to be in the filesystem * being used in the Configuration * @param conf The Confguration file which contains the filesystem * @param baseDir The base cache Dir where you wnat to localize the files/archives * @param fileStatus The file status on the dfs. * @param isArchive if the cache is an archive or a file. In case it is an * archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will * be unzipped/unjarred/untarred automatically * and the directory where the archive is unzipped/unjarred/untarred is * returned as the Path. * In case of a file, the path to the file is returned * @param confFileStamp this is the hdfs file modification timestamp to verify that the * file to be cached hasn't changed since the job started * @param currentWorkDir this is the directory where you would want to create symlinks * for the locally cached files/archives * @return the path to directory where the archives are unjarred in case of archives, * the path to the file where the file is copied locally * @throws IOException */ public static Path getLocalCache(URI cache, Configuration conf, Path baseDir, FileStatus fileStatus, boolean isArchive, long confFileStamp, Path currentWorkDir) throws IOException { return getLocalCache(cache, conf, baseDir, fileStatus, isArchive, confFileStamp, currentWorkDir, true, new LocalDirAllocator("mapred.local.dir")); } /** * Get the locally cached file or archive; it could either be * previously cached (and valid) or copy it from the {@link FileSystem} now. * * @param cache the cache to be localized, this should be specified as * new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). 
If no schema * or hostname:port is provided the file is assumed to be in the filesystem * being used in the Configuration * @param conf The Confguration file which contains the filesystem * @param subDir The sub cache Dir where you want to localize the files/archives * @param fileStatus The file status on the dfs. * @param isArchive if the cache is an archive or a file. In case it is an * archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will * be unzipped/unjarred/untarred automatically * and the directory where the archive is unzipped/unjarred/untarred is * returned as the Path. * In case of a file, the path to the file is returned * @param confFileStamp this is the hdfs file modification timestamp to verify that the * file to be cached hasn't changed since the job started * @param currentWorkDir this is the directory where you would want to create symlinks * for the locally cached files/archives * @param honorSymLinkConf if this is false, then the symlinks are not * created even if conf says so (this is required for an optimization in task * launches * @param lDirAllocator LocalDirAllocator of the tracker * @return the path to directory where the archives are unjarred in case of archives, * the path to the file where the file is copied locally * @throws IOException */ public static Path getLocalCache(URI cache, Configuration conf, Path subDir, FileStatus fileStatus, boolean isArchive, long confFileStamp, Path currentWorkDir, boolean honorSymLinkConf, LocalDirAllocator lDirAllocator) throws IOException { String key = getKey(cache, conf, confFileStamp); CacheStatus lcacheStatus; Path localizedPath; synchronized (cachedArchives) { lcacheStatus = cachedArchives.get(key); if (lcacheStatus == null) { // was never localized Path uniqueParentDir = new Path(subDir, String.valueOf(random.nextLong())); String cachePath = new Path(uniqueParentDir, makeRelative(cache, conf)).toString(); Path localPath = lDirAllocator.getLocalPathForWrite(cachePath, 
fileStatus.getLen(), conf); lcacheStatus = new CacheStatus(new Path(localPath.toString().replace(cachePath, "")), localPath, uniqueParentDir); cachedArchives.put(key, lcacheStatus); } lcacheStatus.refcount++; } boolean initSuccessful = false; try { synchronized (lcacheStatus) { if (!lcacheStatus.isInited()) { localizedPath = localizeCache(conf, cache, confFileStamp, lcacheStatus, fileStatus, isArchive); lcacheStatus.initComplete(); } else { localizedPath = checkCacheStatusValidity(conf, cache, confFileStamp, lcacheStatus, fileStatus, isArchive); } createSymlink(conf, cache, lcacheStatus, isArchive, currentWorkDir, honorSymLinkConf); } // try deleting stuff if you can long size = 0; synchronized (lcacheStatus) { synchronized (baseDirSize) { Long get = baseDirSize.get(lcacheStatus.getBaseDir()); if (get != null) { size = get.longValue(); } else { LOG.warn("Cannot find size of baseDir: " + lcacheStatus.getBaseDir()); } } } // setting the cache size to a default of 10GB long allowedSize = conf.getLong("local.cache.size", DEFAULT_CACHE_SIZE); if (allowedSize < size) { // try some cache deletions deleteCache(conf); } initSuccessful = true; return localizedPath; } finally { if (!initSuccessful) { synchronized (cachedArchives) { lcacheStatus.refcount--; } } } } /** * Get the locally cached file or archive; it could either be * previously cached (and valid) or copy it from the {@link FileSystem} now. * * @param cache the cache to be localized, this should be specified as * new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema * or hostname:port is provided the file is assumed to be in the filesystem * being used in the Configuration * @param conf The Confguration file which contains the filesystem * @param baseDir The base cache Dir where you wnat to localize the files/archives * @param isArchive if the cache is an archive or a file. 
In case it is an * archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will * be unzipped/unjarred/untarred automatically * and the directory where the archive is unzipped/unjarred/untarred * is returned as the Path. * In case of a file, the path to the file is returned * @param confFileStamp this is the hdfs file modification timestamp to verify that the * file to be cached hasn't changed since the job started * @param currentWorkDir this is the directory where you would want to create symlinks * for the locally cached files/archives * @return the path to directory where the archives are unjarred in case of archives, * the path to the file where the file is copied locally * @throws IOException */ public static Path getLocalCache(URI cache, Configuration conf, Path baseDir, boolean isArchive, long confFileStamp, Path currentWorkDir) throws IOException { return getLocalCache(cache, conf, baseDir, null, isArchive, confFileStamp, currentWorkDir); } /** * This is the opposite of getlocalcache. When you are done with * using the cache, you need to release the cache * @param cache The cache URI to be released * @param conf configuration which contains the filesystem the cache * is contained in. 
* @throws IOException */ public static void releaseCache(URI cache, Configuration conf, long timeStamp) throws IOException { String cacheId = getKey(cache, conf, timeStamp); synchronized (cachedArchives) { CacheStatus lcacheStatus = cachedArchives.get(cacheId); if (lcacheStatus == null) { LOG.warn("Cannot find localized cache: " + cache + " (key: " + cacheId + ") in releaseCache!"); return; } lcacheStatus.refcount--; } } // To delete the caches which have a refcount of zero private static void deleteCache(Configuration conf) throws IOException { Set<CacheStatus> deleteSet = new HashSet<CacheStatus>(); // try deleting cache Status with refcount of zero synchronized (cachedArchives) { for (Iterator it = cachedArchives.keySet().iterator(); it.hasNext();) { String cacheId = (String) it.next(); CacheStatus lcacheStatus = cachedArchives.get(cacheId); if (lcacheStatus.refcount == 0) { // delete this cache entry from the global list // and mark the localized file for deletion deleteSet.add(lcacheStatus); it.remove(); } } } // do the deletion, after releasing the global lock for (CacheStatus lcacheStatus : deleteSet) { synchronized (lcacheStatus) { FileSystem.getLocal(conf).delete(lcacheStatus.localizedLoadPath, true); LOG.info("Deleted path " + lcacheStatus.localizedLoadPath); // decrement the size of the cache from baseDirSize synchronized (baseDirSize) { Long dirSize = baseDirSize.get(lcacheStatus.localizedBaseDir); if ( dirSize != null ) { dirSize -= lcacheStatus.size; baseDirSize.put(lcacheStatus.localizedBaseDir, dirSize); } else { LOG.warn("Cannot find record of the baseDir: " + lcacheStatus.localizedBaseDir + " during delete!"); } } } } } /* * Returns the relative path of the dir this cache will be localized in * relative path that this cache will be localized in. 
For * hdfs://hostname:port/absolute_path -- the relative path is * hostname/absolute path -- if it is just /absolute_path -- then the * relative path is hostname of DFS this mapred cluster is running * on/absolute_path */ public static String makeRelative(URI cache, Configuration conf) throws IOException { String host = cache.getHost(); if (host == null) { host = cache.getScheme(); } if (host == null) { URI defaultUri = FileSystem.get(conf).getUri(); host = defaultUri.getHost(); if (host == null) { host = defaultUri.getScheme(); } } String path = host + cache.getPath(); path = path.replace(":/","/"); // remove windows device colon return path; } static String getKey(URI cache, Configuration conf, long timeStamp) throws IOException { return makeRelative(cache, conf) + String.valueOf(timeStamp); } private static Path checkCacheStatusValidity(Configuration conf, URI cache, long confFileStamp, CacheStatus cacheStatus, FileStatus fileStatus, boolean isArchive ) throws IOException { FileSystem fs = FileSystem.get(cache, conf); // Has to be if (!ifExistsAndFresh(conf, fs, cache, confFileStamp, cacheStatus, fileStatus)) { throw new IOException("Stale cache file: " + cacheStatus.localizedLoadPath + " for cache-file: " + cache); } LOG.info(String.format("Using existing cache of %s->%s", cache.toString(), cacheStatus.localizedLoadPath)); return cacheStatus.localizedLoadPath; } private static void createSymlink(Configuration conf, URI cache, CacheStatus cacheStatus, boolean isArchive, Path currentWorkDir, boolean honorSymLinkConf) throws IOException { boolean doSymlink = honorSymLinkConf && DistributedCache.getSymlink(conf); if(cache.getFragment() == null) { doSymlink = false; } String link = currentWorkDir.toString() + Path.SEPARATOR + cache.getFragment(); File flink = new File(link); if (doSymlink){ if (!flink.exists()) { FileUtil.symLink(cacheStatus.localizedLoadPath.toString(), link); } } } // the method which actually copies the caches locally and unjars/unzips them // 
and does chmod for the files private static Path localizeCache(Configuration conf, URI cache, long confFileStamp, CacheStatus cacheStatus, FileStatus fileStatus, boolean isArchive) throws IOException { FileSystem fs = getFileSystem(cache, conf); FileSystem localFs = FileSystem.getLocal(conf); Path parchive = null; if (isArchive) { parchive = new Path(cacheStatus.localizedLoadPath, new Path(cacheStatus.localizedLoadPath.getName())); } else { parchive = cacheStatus.localizedLoadPath; } if (!localFs.mkdirs(parchive.getParent())) { throw new IOException("Mkdirs failed to create directory " + cacheStatus.localizedLoadPath.toString()); } String cacheId = cache.getPath(); fs.copyToLocalFile(new Path(cacheId), parchive); if (isArchive) { String tmpArchive = parchive.toString().toLowerCase(); File srcFile = new File(parchive.toString()); File destDir = new File(parchive.getParent().toString()); if (tmpArchive.endsWith(".jar")) { RunJar.unJar(srcFile, destDir); } else if (tmpArchive.endsWith(".zip")) { FileUtil.unZip(srcFile, destDir); } else if (isTarFile(tmpArchive)) { FileUtil.unTar(srcFile, destDir); } // else will not do anyhting // and copy the file into the dir as it is } long cacheSize = FileUtil.getDU(new File(parchive.getParent().toString())); cacheStatus.size = cacheSize; synchronized (baseDirSize) { Long dirSize = baseDirSize.get(cacheStatus.localizedBaseDir); if (dirSize == null) { dirSize = Long.valueOf(cacheSize); } else { dirSize += cacheSize; } baseDirSize.put(cacheStatus.localizedBaseDir, dirSize); } // do chmod here try { //Setting recursive permission to grant everyone read and execute Path localDir = new Path(cacheStatus.localizedBaseDir, cacheStatus.uniqueParentDir); LOG.info("Doing chmod on localdir :" + localDir); FileUtil.chmod(localDir.toString(), "ugo+rx", true); } catch(InterruptedException e) { LOG.warn("Exception in chmod" + e.toString()); } // update cacheStatus to reflect the newly cached file cacheStatus.mtime = getTimestamp(conf, cache); 
return cacheStatus.localizedLoadPath; } private static boolean isTarFile(String filename) { return (filename.endsWith(".tgz") || filename.endsWith(".tar.gz") || filename.endsWith(".tar")); } // Checks if the cache has already been localized and is fresh private static boolean ifExistsAndFresh(Configuration conf, FileSystem fs, URI cache, long confFileStamp, CacheStatus lcacheStatus, FileStatus fileStatus) throws IOException { // check for existence of the cache long dfsFileStamp; if (fileStatus != null) { dfsFileStamp = fileStatus.getModificationTime(); } else { dfsFileStamp = getTimestamp(conf, cache); } // ensure that the file on hdfs hasn't been modified since the job started if (dfsFileStamp != confFileStamp) { LOG.fatal("File: " + cache + " has changed on HDFS since job started"); throw new IOException("File: " + cache + " has changed on HDFS since job started"); } if (dfsFileStamp != lcacheStatus.mtime) { return false; } return true; } /** - * Returns mtime of a given cache file on hdfs. + * Returns {@link FileStatus} of a given cache file on hdfs. * @param conf configuration * @param cache cache file - * @return mtime of a given cache file on hdfs + * @return <code>FileStatus</code> of a given cache file on hdfs * @throws IOException */ - public static long getTimestamp(Configuration conf, URI cache) + public static FileStatus getFileStatus(Configuration conf, URI cache) throws IOException { FileSystem fileSystem = FileSystem.get(cache, conf); Path filePath = new Path(cache.getPath()); - return fileSystem.getFileStatus(filePath).getModificationTime(); + return fileSystem.getFileStatus(filePath); + } + + /** + * Returns mtime of a given cache file on hdfs. 
+ * @param conf configuration + * @param cache cache file + * @return mtime of a given cache file on hdfs + * @throws IOException + */ + public static long getTimestamp(Configuration conf, URI cache) + throws IOException { + return getFileStatus(conf, cache).getModificationTime(); } /** * This method create symlinks for all files in a given dir in another directory * @param conf the configuration * @param jobCacheDir the target directory for creating symlinks * @param workDir the directory in which the symlinks are created * @throws IOException */ public static void createAllSymlink(Configuration conf, File jobCacheDir, File workDir) throws IOException{ if ((jobCacheDir == null || !jobCacheDir.isDirectory()) || workDir == null || (!workDir.isDirectory())) { return; } boolean createSymlink = getSymlink(conf); if (createSymlink){ File[] list = jobCacheDir.listFiles(); for (int i=0; i < list.length; i++){ FileUtil.symLink(list[i].getAbsolutePath(), new File(workDir, list[i].getName()).toString()); } } } private static String getFileSysName(URI url) { String fsname = url.getScheme(); if ("hdfs".equals(fsname)) { String host = url.getHost(); int port = url.getPort(); return (port == (-1)) ? 
host : (host + ":" + port); } else { return null; } } private static FileSystem getFileSystem(URI cache, Configuration conf) throws IOException { String fileSysName = getFileSysName(cache); if (fileSysName != null) return FileSystem.getNamed(fileSysName, conf); else return FileSystem.get(conf); } /** * Set the configuration with the given set of archives * @param archives The list of archives that need to be localized * @param conf Configuration which will be changed */ public static void setCacheArchives(URI[] archives, Configuration conf) { String sarchives = StringUtils.uriToString(archives); conf.set("mapred.cache.archives", sarchives); } /** * Set the configuration with the given set of files * @param files The list of files that need to be localized * @param conf Configuration which will be changed */ public static void setCacheFiles(URI[] files, Configuration conf) { String sfiles = StringUtils.uriToString(files); conf.set("mapred.cache.files", sfiles); } /** * Get cache archives set in the Configuration * @param conf The configuration which contains the archives * @return A URI array of the caches set in the Configuration * @throws IOException */ public static URI[] getCacheArchives(Configuration conf) throws IOException { return StringUtils.stringToURI(conf.getStrings("mapred.cache.archives")); } /** * Get cache files set in the Configuration * @param conf The configuration which contains the files * @return A URI array of the files set in the Configuration * @throws IOException */ public static URI[] getCacheFiles(Configuration conf) throws IOException { return StringUtils.stringToURI(conf.getStrings("mapred.cache.files")); } /** * Return the path array of the localized caches * @param conf Configuration that contains the localized archives * @return A path array of localized caches * @throws IOException */ public static Path[] getLocalCacheArchives(Configuration conf) throws IOException { return StringUtils.stringToPath(conf 
.getStrings("mapred.cache.localArchives")); } /** * Return the path array of the localized files * @param conf Configuration that contains the localized files * @return A path array of localized files * @throws IOException */ public static Path[] getLocalCacheFiles(Configuration conf) throws IOException { return StringUtils.stringToPath(conf.getStrings("mapred.cache.localFiles")); } /** * Get the timestamps of the archives * @param conf The configuration which stored the timestamps * @return a string array of timestamps * @throws IOException */ public static String[] getArchiveTimestamps(Configuration conf) { return conf.getStrings("mapred.cache.archives.timestamps"); } /** * Get the timestamps of the files * @param conf The configuration which stored the timestamps * @return a string array of timestamps * @throws IOException */ public static String[] getFileTimestamps(Configuration conf) { return conf.getStrings("mapred.cache.files.timestamps"); } /** * This is to check the timestamp of the archives to be localized * @param conf Configuration which stores the timestamp's * @param timestamps comma separated list of timestamps of archives. * The order should be the same as the order in which the archives are added. */ public static void setArchiveTimestamps(Configuration conf, String timestamps) { conf.set("mapred.cache.archives.timestamps", timestamps); } /** * This is to check the timestamp of the files to be localized * @param conf Configuration which stores the timestamp's * @param timestamps comma separated list of timestamps of files. * The order should be the same as the order in which the files are added. 
*/ public static void setFileTimestamps(Configuration conf, String timestamps) { conf.set("mapred.cache.files.timestamps", timestamps); } /** * Set the conf to contain the location for localized archives * @param conf The conf to modify to contain the localized caches * @param str a comma separated list of local archives */ public static void setLocalArchives(Configuration conf, String str) { conf.set("mapred.cache.localArchives", str); } /** * Set the conf to contain the location for localized files * @param conf The conf to modify to contain the localized caches * @param str a comma separated list of local files */ public static void setLocalFiles(Configuration conf, String str) { conf.set("mapred.cache.localFiles", str); } /** * Add a archives to be localized to the conf * @param uri The uri of the cache to be localized * @param conf Configuration to add the cache to */ public static void addCacheArchive(URI uri, Configuration conf) { String archives = conf.get("mapred.cache.archives"); conf.set("mapred.cache.archives", archives == null ? uri.toString() : archives + "," + uri.toString()); } /** * Add a file to be localized to the conf * @param uri The uri of the cache to be localized * @param conf Configuration to add the cache to */ public static void addCacheFile(URI uri, Configuration conf) { String files = conf.get("mapred.cache.files"); conf.set("mapred.cache.files", files == null ? uri.toString() : files + "," + uri.toString()); } /** * Add an file path to the current set of classpath entries It adds the file * to cache as well. * * @param file Path of the file to be added * @param conf Configuration that contains the classpath setting */ public static void addFileToClassPath(Path file, Configuration conf) throws IOException { String classpath = conf.get("mapred.job.classpath.files"); conf.set("mapred.job.classpath.files", classpath == null ? 
file.toString() : classpath + System.getProperty("path.separator") + file.toString()); FileSystem fs = FileSystem.get(conf); URI uri = fs.makeQualified(file).toUri(); addCacheFile(uri, conf); } /** * Get the file entries in classpath as an array of Path * * @param conf Configuration that contains the classpath setting */ public static Path[] getFileClassPaths(Configuration conf) { String classpath = conf.get("mapred.job.classpath.files"); if (classpath == null) return null; ArrayList list = Collections.list(new StringTokenizer(classpath, System .getProperty("path.separator"))); Path[] paths = new Path[list.size()]; for (int i = 0; i < list.size(); i++) { paths[i] = new Path((String) list.get(i)); } return paths; } /** * Add an archive path to the current set of classpath entries. It adds the * archive to cache as well. * * @param archive Path of the archive to be added * @param conf Configuration that contains the classpath setting */ public static void addArchiveToClassPath(Path archive, Configuration conf) throws IOException { String classpath = conf.get("mapred.job.classpath.archives"); conf.set("mapred.job.classpath.archives", classpath == null ? 
archive .toString() : classpath + System.getProperty("path.separator") + archive.toString()); FileSystem fs = FileSystem.get(conf); URI uri = fs.makeQualified(archive).toUri(); addCacheArchive(uri, conf); } /** * Get the archive entries in classpath as an array of Path * * @param conf Configuration that contains the classpath setting */ public static Path[] getArchiveClassPaths(Configuration conf) { String classpath = conf.get("mapred.job.classpath.archives"); if (classpath == null) return null; ArrayList list = Collections.list(new StringTokenizer(classpath, System .getProperty("path.separator"))); Path[] paths = new Path[list.size()]; for (int i = 0; i < list.size(); i++) { paths[i] = new Path((String) list.get(i)); } return paths; } /** * This method allows you to create symlinks in the current working directory * of the task to all the cache files/archives * @param conf the jobconf */ public static void createSymlink(Configuration conf){ conf.set("mapred.create.symlink", "yes"); } /** * This method checks to see if symlinks are to be create for the * localized cache files in the current working directory * @param conf the jobconf * @return true if symlinks are to be created- else return false */ public static boolean getSymlink(Configuration conf){ String result = conf.get("mapred.create.symlink"); if ("yes".equals(result)){ return true; } return false; } /** * This method checks if there is a conflict in the fragment names * of the uris. Also makes sure that each uri has a fragment. It * is only to be called if you want to create symlinks for * the various archives and files. 
* @param uriFiles The uri array of urifiles * @param uriArchives the uri array of uri archives */ public static boolean checkURIs(URI[] uriFiles, URI[] uriArchives){ if ((uriFiles == null) && (uriArchives == null)){ return true; } if (uriFiles != null){ for (int i = 0; i < uriFiles.length; i++){ String frag1 = uriFiles[i].getFragment(); if (frag1 == null) return false; for (int j=i+1; j < uriFiles.length; j++){ String frag2 = uriFiles[j].getFragment(); if (frag2 == null) return false; if (frag1.equalsIgnoreCase(frag2)) return false; } if (uriArchives != null){ for (int j = 0; j < uriArchives.length; j++){ String frag2 = uriArchives[j].getFragment(); if (frag2 == null){ return false; } if (frag1.equalsIgnoreCase(frag2)) return false; for (int k=j+1; k < uriArchives.length; k++){ String frag3 = uriArchives[k].getFragment(); if (frag3 == null) return false; if (frag2.equalsIgnoreCase(frag3)) return false; } } } } } return true; } private static class CacheStatus { // the local load path of this cache Path localizedLoadPath; //the base dir where the cache lies Path localizedBaseDir; // the unique directory in localizedBaseDir, where the cache lies Path uniqueParentDir; //the size of this cache long size; // number of instances using this cache int refcount; // the cache-file modification time long mtime; // is it initialized? boolean inited = false; public CacheStatus(Path baseDir, Path localLoadPath, Path uniqueParentDir) { super(); this.localizedLoadPath = localLoadPath; this.refcount = 0; this.mtime = -1; this.localizedBaseDir = baseDir; this.size = 0; this.uniqueParentDir = uniqueParentDir; } // get the base dir for the cache Path getBaseDir() { return localizedBaseDir; } // Is it initialized? boolean isInited() { return inited; } // mark it as initalized void initComplete() { inited = true; } } /** * Clear the entire contents of the cache and delete the backing files. 
This * should only be used when the server is reinitializing, because the users * are going to lose their files. */ public static void purgeCache(Configuration conf) throws IOException { synchronized (cachedArchives) { FileSystem localFs = FileSystem.getLocal(conf); for (Map.Entry<String,CacheStatus> f: cachedArchives.entrySet()) { try { localFs.delete(f.getValue().localizedLoadPath, true); } catch (IOException ie) { LOG.debug("Error cleaning up cache", ie); } } cachedArchives.clear(); } } } diff --git a/src/mapred/org/apache/hadoop/mapred/JobClient.java b/src/mapred/org/apache/hadoop/mapred/JobClient.java index b0ac24e..ef88080 100644 --- a/src/mapred/org/apache/hadoop/mapred/JobClient.java +++ b/src/mapred/org/apache/hadoop/mapred/JobClient.java @@ -117,1041 +117,1063 @@ import org.apache.hadoop.util.ToolRunner; * job.setInputPath(new Path("in")); * job.setOutputPath(new Path("out")); * * job.setMapperClass(MyJob.MyMapper.class); * job.setReducerClass(MyJob.MyReducer.class); * * // Submit the job, then poll for progress until the job is complete * JobClient.runJob(job); * </pre></blockquote></p> * * <h4 id="JobControl">Job Control</h4> * * <p>At times clients would chain map-reduce jobs to accomplish complex tasks * which cannot be done via a single map-reduce job. This is fairly easy since * the output of the job, typically, goes to distributed file-system and that * can be used as the input for the next job.</p> * * <p>However, this also means that the onus on ensuring jobs are complete * (success/failure) lies squarely on the clients. In such situations the * various job-control options are: * <ol> * <li> * {@link #runJob(JobConf)} : submits the job and returns only after * the job has completed. * </li> * <li> * {@link #submitJob(JobConf)} : only submits the job, then poll the * returned handle to the {@link RunningJob} to query status and make * scheduling decisions. 
* </li> * <li> * {@link JobConf#setJobEndNotificationURI(String)} : setup a notification * on job-completion, thus avoiding polling. * </li> * </ol></p> * * @see JobConf * @see ClusterStatus * @see Tool * @see DistributedCache */ public class JobClient extends Configured implements MRConstants, Tool { private static final Log LOG = LogFactory.getLog(JobClient.class); public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL } private TaskStatusFilter taskOutputFilter = TaskStatusFilter.FAILED; private static final long MAX_JOBPROFILE_AGE = 1000 * 2; static{ Configuration.addDefaultResource("mapred-default.xml"); Configuration.addDefaultResource("mapred-site.xml"); } /** * A NetworkedJob is an implementation of RunningJob. It holds * a JobProfile object to provide some info, and interacts with the * remote service to provide certain functionality. */ class NetworkedJob implements RunningJob { JobProfile profile; JobStatus status; long statustime; /** * We store a JobProfile and a timestamp for when we last * acquired the job profile. If the job is null, then we cannot * perform any of the tasks. The job might be null if the JobTracker * has completely forgotten about the job. (eg, 24 hours after the * job completes.) */ public NetworkedJob(JobStatus job) throws IOException { this.status = job; this.profile = jobSubmitClient.getJobProfile(job.getJobID()); this.statustime = System.currentTimeMillis(); } /** * Some methods rely on having a recent job profile object. Refresh * it, if necessary */ synchronized void ensureFreshStatus() throws IOException { if (System.currentTimeMillis() - statustime > MAX_JOBPROFILE_AGE) { updateStatus(); } } /** Some methods need to update status immediately. 
So, refresh * immediately * @throws IOException */ synchronized void updateStatus() throws IOException { this.status = jobSubmitClient.getJobStatus(profile.getJobID()); this.statustime = System.currentTimeMillis(); } /** * An identifier for the job */ public JobID getID() { return profile.getJobID(); } /** @deprecated This method is deprecated and will be removed. Applications should * rather use {@link #getID()}.*/ @Deprecated public String getJobID() { return profile.getJobID().toString(); } /** * The user-specified job name */ public String getJobName() { return profile.getJobName(); } /** * The name of the job file */ public String getJobFile() { return profile.getJobFile(); } /** * A URL where the job's status can be seen */ public String getTrackingURL() { return profile.getURL().toString(); } /** * A float between 0.0 and 1.0, indicating the % of map work * completed. */ public float mapProgress() throws IOException { ensureFreshStatus(); return status.mapProgress(); } /** * A float between 0.0 and 1.0, indicating the % of reduce work * completed. */ public float reduceProgress() throws IOException { ensureFreshStatus(); return status.reduceProgress(); } /** * A float between 0.0 and 1.0, indicating the % of cleanup work * completed. */ public float cleanupProgress() throws IOException { ensureFreshStatus(); return status.cleanupProgress(); } /** * A float between 0.0 and 1.0, indicating the % of setup work * completed. */ public float setupProgress() throws IOException { ensureFreshStatus(); return status.setupProgress(); } /** * Returns immediately whether the whole job is done yet or not. */ public synchronized boolean isComplete() throws IOException { updateStatus(); return (status.getRunState() == JobStatus.SUCCEEDED || status.getRunState() == JobStatus.FAILED || status.getRunState() == JobStatus.KILLED); } /** * True iff job completed successfully. 
*/ public synchronized boolean isSuccessful() throws IOException { updateStatus(); return status.getRunState() == JobStatus.SUCCEEDED; } /** * Blocks until the job is finished */ public void waitForCompletion() throws IOException { while (!isComplete()) { try { Thread.sleep(5000); } catch (InterruptedException ie) { } } } /** * Tells the service to get the state of the current job. */ public synchronized int getJobState() throws IOException { updateStatus(); return status.getRunState(); } /** * Tells the service to terminate the current job. */ public synchronized void killJob() throws IOException { jobSubmitClient.killJob(getID()); } /** Set the priority of the job. * @param priority new priority of the job. */ public synchronized void setJobPriority(String priority) throws IOException { jobSubmitClient.setJobPriority(getID(), priority); } /** * Kill indicated task attempt. * @param taskId the id of the task to kill. * @param shouldFail if true the task is failed and added to failed tasks list, otherwise * it is just killed, w/o affecting job failure status. */ public synchronized void killTask(TaskAttemptID taskId, boolean shouldFail) throws IOException { jobSubmitClient.killTask(taskId, shouldFail); } /** @deprecated Applications should rather use {@link #killTask(TaskAttemptID, boolean)}*/ @Deprecated public synchronized void killTask(String taskId, boolean shouldFail) throws IOException { killTask(TaskAttemptID.forName(taskId), shouldFail); } /** * Fetch task completion events from jobtracker for this job. 
*/ public synchronized TaskCompletionEvent[] getTaskCompletionEvents( int startFrom) throws IOException{ return jobSubmitClient.getTaskCompletionEvents( getID(), startFrom, 10); } /** * Dump stats to screen */ @Override public String toString() { try { updateStatus(); } catch (IOException e) { } return "Job: " + profile.getJobID() + "\n" + "file: " + profile.getJobFile() + "\n" + "tracking URL: " + profile.getURL() + "\n" + "map() completion: " + status.mapProgress() + "\n" + "reduce() completion: " + status.reduceProgress(); } /** * Returns the counters for this job */ public Counters getCounters() throws IOException { return jobSubmitClient.getJobCounters(getID()); } @Override public String[] getTaskDiagnostics(TaskAttemptID id) throws IOException { return jobSubmitClient.getTaskDiagnostics(id); } } private JobSubmissionProtocol jobSubmitClient; private Path sysDir = null; private FileSystem fs = null; /** * Create a job client. */ public JobClient() { } /** * Build a job client with the given {@link JobConf}, and connect to the * default {@link JobTracker}. * * @param conf the job configuration. * @throws IOException */ public JobClient(JobConf conf) throws IOException { setConf(conf); init(conf); } /** * Connect to the default {@link JobTracker}. * @param conf the job configuration. * @throws IOException */ public void init(JobConf conf) throws IOException { String tracker = conf.get("mapred.job.tracker", "local"); if ("local".equals(tracker)) { this.jobSubmitClient = new LocalJobRunner(conf); } else { this.jobSubmitClient = createRPCProxy(JobTracker.getAddress(conf), conf); } } private JobSubmissionProtocol createRPCProxy(InetSocketAddress addr, Configuration conf) throws IOException { return (JobSubmissionProtocol) RPC.getProxy(JobSubmissionProtocol.class, JobSubmissionProtocol.versionID, addr, getUGI(conf), conf, NetUtils.getSocketFactory(conf, JobSubmissionProtocol.class)); } /** * Build a job client, connect to the indicated job tracker. 
* * @param jobTrackAddr the job tracker to connect to. * @param conf configuration. */ public JobClient(InetSocketAddress jobTrackAddr, Configuration conf) throws IOException { jobSubmitClient = createRPCProxy(jobTrackAddr, conf); } /** * Close the <code>JobClient</code>. */ public synchronized void close() throws IOException { if (!(jobSubmitClient instanceof LocalJobRunner)) { RPC.stopProxy(jobSubmitClient); } } /** * Get a filesystem handle. We need this to prepare jobs * for submission to the MapReduce system. * * @return the filesystem handle. */ public synchronized FileSystem getFs() throws IOException { if (this.fs == null) { Path sysDir = getSystemDir(); this.fs = sysDir.getFileSystem(getConf()); } return fs; } /* see if two file systems are the same or not * */ private boolean compareFs(FileSystem srcFs, FileSystem destFs) { URI srcUri = srcFs.getUri(); URI dstUri = destFs.getUri(); if (srcUri.getScheme() == null) { return false; } if (!srcUri.getScheme().equals(dstUri.getScheme())) { return false; } String srcHost = srcUri.getHost(); String dstHost = dstUri.getHost(); if ((srcHost != null) && (dstHost != null)) { try { srcHost = InetAddress.getByName(srcHost).getCanonicalHostName(); dstHost = InetAddress.getByName(dstHost).getCanonicalHostName(); } catch(UnknownHostException ue) { return false; } if (!srcHost.equals(dstHost)) { return false; } } else if (srcHost == null && dstHost != null) { return false; } else if (srcHost != null && dstHost == null) { return false; } //check for ports if (srcUri.getPort() != dstUri.getPort()) { return false; } return true; } // copies a file to the jobtracker filesystem and returns the path where it // was copied to private Path copyRemoteFiles(FileSystem jtFs, Path parentDir, Path originalPath, JobConf job, short replication) throws IOException { //check if we do not need to copy the files // is jt using the same file system. // just checking for uri strings... 
doing no dns lookups // to see if the filesystems are the same. This is not optimal. // but avoids name resolution. FileSystem remoteFs = null; remoteFs = originalPath.getFileSystem(job); if (compareFs(remoteFs, jtFs)) { return originalPath; } // this might have name collisions. copy will throw an exception //parse the original path to create new path Path newPath = new Path(parentDir, originalPath.getName()); FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, job); jtFs.setReplication(newPath, replication); return newPath; } /** * configure the jobconf of the user with the command line options of * -libjars, -files, -archives * @param conf * @throws IOException */ private void configureCommandLineOptions(JobConf job, Path submitJobDir, Path submitJarFile) throws IOException { if (!(job.getBoolean("mapred.used.genericoptionsparser", false))) { LOG.warn("Use GenericOptionsParser for parsing the arguments. " + "Applications should implement Tool for the same."); } // get all the command line arguments into the // jobconf passed in by the user conf String files = null; String libjars = null; String archives = null; files = job.get("tmpfiles"); libjars = job.get("tmpjars"); archives = job.get("tmparchives"); /* * set this user's id in job configuration, so later job files can be * accessed using this user's id */ UnixUserGroupInformation ugi = getUGI(job); // // Figure out what fs the JobTracker is using. Copy the // job to it, under a temporary name. This allows DFS to work, // and under the local fs also provides UNIX-like object loading // semantics. 
(that is, if the job file is deleted right after // submission, we can still run the submission to completion) // // Create a number of filenames in the JobTracker's fs namespace FileSystem fs = getFs(); LOG.debug("default FileSystem: " + fs.getUri()); fs.delete(submitJobDir, true); submitJobDir = fs.makeQualified(submitJobDir); submitJobDir = new Path(submitJobDir.toUri().getPath()); FsPermission mapredSysPerms = new FsPermission(JOB_DIR_PERMISSION); FileSystem.mkdirs(fs, submitJobDir, mapredSysPerms); Path filesDir = new Path(submitJobDir, "files"); Path archivesDir = new Path(submitJobDir, "archives"); Path libjarsDir = new Path(submitJobDir, "libjars"); short replication = (short)job.getInt("mapred.submit.replication", 10); // add all the command line files/ jars and archive // first copy them to jobtrackers filesystem if (files != null) { FileSystem.mkdirs(fs, filesDir, mapredSysPerms); String[] fileArr = files.split(","); for (String tmpFile: fileArr) { Path tmp = new Path(tmpFile); Path newPath = copyRemoteFiles(fs,filesDir, tmp, job, replication); try { URI pathURI = new URI(newPath.toUri().toString() + "#" + newPath.getName()); DistributedCache.addCacheFile(pathURI, job); } catch(URISyntaxException ue) { //should not throw a uri exception throw new IOException("Failed to create uri for " + tmpFile); } DistributedCache.createSymlink(job); } } if (libjars != null) { FileSystem.mkdirs(fs, libjarsDir, mapredSysPerms); String[] libjarsArr = libjars.split(","); for (String tmpjars: libjarsArr) { Path tmp = new Path(tmpjars); Path newPath = copyRemoteFiles(fs, libjarsDir, tmp, job, replication); DistributedCache.addArchiveToClassPath(newPath, job); } } if (archives != null) { FileSystem.mkdirs(fs, archivesDir, mapredSysPerms); String[] archivesArr = archives.split(","); for (String tmpArchives: archivesArr) { Path tmp = new Path(tmpArchives); Path newPath = copyRemoteFiles(fs, archivesDir, tmp, job, replication); try { URI pathURI = new 
URI(newPath.toUri().toString() + "#" + newPath.getName()); DistributedCache.addCacheArchive(pathURI, job); } catch(URISyntaxException ue) { //should not throw an uri excpetion throw new IOException("Failed to create uri for " + tmpArchives); } DistributedCache.createSymlink(job); } } // set the timestamps of the archives and files URI[] tarchives = DistributedCache.getCacheArchives(job); if (tarchives != null) { + FileStatus status = DistributedCache.getFileStatus(job, tarchives[0]); + StringBuffer archiveFileSizes = + new StringBuffer(String.valueOf(status.getLen())); StringBuffer archiveTimestamps = - new StringBuffer(String.valueOf(DistributedCache.getTimestamp(job, tarchives[0]))); + new StringBuffer(String.valueOf(status.getModificationTime())); + for (int i = 1; i < tarchives.length; i++) { + status = DistributedCache.getFileStatus(job, tarchives[i]); + + archiveFileSizes.append(","); + archiveFileSizes.append(String.valueOf(status.getLen())); + archiveTimestamps.append(","); - archiveTimestamps.append(String.valueOf(DistributedCache.getTimestamp(job, tarchives[i]))); + archiveTimestamps.append(String.valueOf(status.getModificationTime())); } + job.set(DistributedCache.CACHE_ARCHIVES_SIZES, + archiveFileSizes.toString()); DistributedCache.setArchiveTimestamps(job, archiveTimestamps.toString()); } URI[] tfiles = DistributedCache.getCacheFiles(job); if (tfiles != null) { + FileStatus status = DistributedCache.getFileStatus(job, tfiles[0]); + + StringBuffer fileSizes = + new StringBuffer(String.valueOf(status.getLen())); StringBuffer fileTimestamps = - new StringBuffer(String.valueOf(DistributedCache.getTimestamp(job, tfiles[0]))); + new StringBuffer(String.valueOf(status.getModificationTime())); + for (int i = 1; i < tfiles.length; i++) { + status = DistributedCache.getFileStatus(job, tfiles[i]); + + fileSizes.append(","); + fileSizes.append(String.valueOf(status.getLen())); + fileTimestamps.append(","); - 
fileTimestamps.append(String.valueOf(DistributedCache.getTimestamp(job, tfiles[i]))); + fileTimestamps.append(String.valueOf(status.getModificationTime())); } + job.set(DistributedCache.CACHE_FILES_SIZES, fileSizes.toString()); DistributedCache.setFileTimestamps(job, fileTimestamps.toString()); } String originalJarPath = job.getJar(); if (originalJarPath != null) { // copy jar to JobTracker's fs // use jar name if job is not named. if ("".equals(job.getJobName())){ job.setJobName(new Path(originalJarPath).getName()); } job.setJar(submitJarFile.toString()); fs.copyFromLocalFile(new Path(originalJarPath), submitJarFile); fs.setReplication(submitJarFile, replication); fs.setPermission(submitJarFile, new FsPermission(JOB_FILE_PERMISSION)); } else { LOG.warn("No job jar file set. User classes may not be found. "+ "See JobConf(Class) or JobConf#setJar(String)."); } // Set the user's name and working directory job.setUser(ugi.getUserName()); if (ugi.getGroupNames().length > 0) { job.set("group.name", ugi.getGroupNames()[0]); } if (job.getWorkingDirectory() == null) { job.setWorkingDirectory(fs.getWorkingDirectory()); } } private UnixUserGroupInformation getUGI(Configuration job) throws IOException { UnixUserGroupInformation ugi = null; try { ugi = UnixUserGroupInformation.login(job, true); } catch (LoginException e) { throw (IOException)(new IOException( "Failed to get the current user's information.").initCause(e)); } return ugi; } /** * Submit a job to the MR system. * * This returns a handle to the {@link RunningJob} which can be used to track * the running-job. * * @param jobFile the job configuration. * @return a handle to the {@link RunningJob} which can be used to track the * running-job. 
* @throws FileNotFoundException * @throws InvalidJobConfException * @throws IOException */ public RunningJob submitJob(String jobFile) throws FileNotFoundException, InvalidJobConfException, IOException { // Load in the submitted job details JobConf job = new JobConf(jobFile); return submitJob(job); } // job files are world-wide readable and owner writable final private static FsPermission JOB_FILE_PERMISSION = FsPermission.createImmutable((short) 0644); // rw-r--r-- // job submission directory is world readable/writable/executable final static FsPermission JOB_DIR_PERMISSION = FsPermission.createImmutable((short) 0777); // rwx-rwx-rwx /** * Submit a job to the MR system. * This returns a handle to the {@link RunningJob} which can be used to track * the running-job. * * @param job the job configuration. * @return a handle to the {@link RunningJob} which can be used to track the * running-job. * @throws FileNotFoundException * @throws IOException */ public RunningJob submitJob(JobConf job) throws FileNotFoundException, IOException { try { return submitJobInternal(job); } catch (InterruptedException ie) { throw new IOException("interrupted", ie); } catch (ClassNotFoundException cnfe) { throw new IOException("class not found", cnfe); } } /** * Internal method for submitting jobs to the system. 
* @param job the configuration to submit * @return a proxy object for the running job * @throws FileNotFoundException * @throws ClassNotFoundException * @throws InterruptedException * @throws IOException */ public RunningJob submitJobInternal(JobConf job ) throws FileNotFoundException, ClassNotFoundException, InterruptedException, IOException { /* * configure the command line options correctly on the submitting dfs */ JobID jobId = jobSubmitClient.getNewJobId(); Path submitJobDir = new Path(getSystemDir(), jobId.toString()); Path submitJarFile = new Path(submitJobDir, "job.jar"); Path submitSplitFile = new Path(submitJobDir, "job.split"); configureCommandLineOptions(job, submitJobDir, submitJarFile); Path submitJobFile = new Path(submitJobDir, "job.xml"); int reduces = job.getNumReduceTasks(); JobContext context = new JobContext(job, jobId); // Check the output specification if (reduces == 0 ? job.getUseNewMapper() : job.getUseNewReducer()) { org.apache.hadoop.mapreduce.OutputFormat<?,?> output = ReflectionUtils.newInstance(context.getOutputFormatClass(), job); output.checkOutputSpecs(context); } else { job.getOutputFormat().checkOutputSpecs(fs, job); } // Create the splits for the job LOG.debug("Creating splits at " + fs.makeQualified(submitSplitFile)); int maps; if (job.getUseNewMapper()) { maps = writeNewSplits(context, submitSplitFile); } else { maps = writeOldSplits(job, submitSplitFile); } job.set("mapred.job.split.file", submitSplitFile.toString()); job.setNumMapTasks(maps); // Write job file to JobTracker's fs FSDataOutputStream out = FileSystem.create(fs, submitJobFile, new FsPermission(JOB_FILE_PERMISSION)); try { job.writeXml(out); } finally { out.close(); } // // Now, actually submit the job (using the submit name) // JobStatus status = jobSubmitClient.submitJob(jobId); if (status != null) { return new NetworkedJob(status); } else { throw new IOException("Could not launch job"); } } private int writeOldSplits(JobConf job, Path submitSplitFile) throws 
IOException { InputSplit[] splits = job.getInputFormat().getSplits(job, job.getNumMapTasks()); // sort the splits into order based on size, so that the biggest // go first Arrays.sort(splits, new Comparator<InputSplit>() { public int compare(InputSplit a, InputSplit b) { try { long left = a.getLength(); long right = b.getLength(); if (left == right) { return 0; } else if (left < right) { return 1; } else { return -1; } } catch (IOException ie) { throw new RuntimeException("Problem getting input split size", ie); } } }); DataOutputStream out = writeSplitsFileHeader(job, submitSplitFile, splits.length); try { DataOutputBuffer buffer = new DataOutputBuffer(); RawSplit rawSplit = new RawSplit(); for(InputSplit split: splits) { rawSplit.setClassName(split.getClass().getName()); buffer.reset(); split.write(buffer); rawSplit.setDataLength(split.getLength()); rawSplit.setBytes(buffer.getData(), 0, buffer.getLength()); rawSplit.setLocations(split.getLocations()); rawSplit.write(out); } } finally { out.close(); } return splits.length; } private static class NewSplitComparator implements Comparator<org.apache.hadoop.mapreduce.InputSplit>{ @Override public int compare(org.apache.hadoop.mapreduce.InputSplit o1, org.apache.hadoop.mapreduce.InputSplit o2) { try { long len1 = o1.getLength(); long len2 = o2.getLength(); if (len1 < len2) { return 1; } else if (len1 == len2) { return 0; } else { return -1; } } catch (IOException ie) { throw new RuntimeException("exception in compare", ie); } catch (InterruptedException ie) { throw new RuntimeException("exception in compare", ie); } } } @SuppressWarnings("unchecked") private <T extends org.apache.hadoop.mapreduce.InputSplit> int writeNewSplits(JobContext job, Path submitSplitFile ) throws IOException, InterruptedException, ClassNotFoundException { JobConf conf = job.getJobConf(); org.apache.hadoop.mapreduce.InputFormat<?,?> input = ReflectionUtils.newInstance(job.getInputFormatClass(), job.getJobConf()); 
List<org.apache.hadoop.mapreduce.InputSplit> splits = input.getSplits(job); T[] array = (T[]) splits.toArray(new org.apache.hadoop.mapreduce.InputSplit[splits.size()]); // sort the splits into order based on size, so that the biggest // go first Arrays.sort(array, new NewSplitComparator()); DataOutputStream out = writeSplitsFileHeader(conf, submitSplitFile, array.length); try { if (array.length != 0) { DataOutputBuffer buffer = new DataOutputBuffer(); RawSplit rawSplit = new RawSplit(); SerializationFactory factory = new SerializationFactory(conf); Serializer<T> serializer = factory.getSerializer((Class<T>) array[0].getClass()); serializer.open(buffer); for(T split: array) { rawSplit.setClassName(split.getClass().getName()); buffer.reset(); serializer.serialize(split); rawSplit.setDataLength(split.getLength()); rawSplit.setBytes(buffer.getData(), 0, buffer.getLength()); rawSplit.setLocations(split.getLocations()); rawSplit.write(out); } serializer.close(); } } finally { out.close(); } return array.length; } /** * Checks if the job directory is clean and has all the required components * for (re) starting the job */ public static boolean isJobDirValid(Path jobDirPath, FileSystem fs) throws IOException { FileStatus[] contents = fs.listStatus(jobDirPath); int matchCount = 0; if (contents != null && contents.length >=2) { for (FileStatus status : contents) { if ("job.xml".equals(status.getPath().getName())) { ++matchCount; } if ("job.split".equals(status.getPath().getName())) { ++matchCount; } } if (matchCount == 2) { return true; } } return false; } static class RawSplit implements Writable { private String splitClass; private BytesWritable bytes = new BytesWritable(); private String[] locations; long dataLength; public void setBytes(byte[] data, int offset, int length) { bytes.set(data, offset, length); } public void setClassName(String className) { splitClass = className; } public String getClassName() { return splitClass; } public BytesWritable getBytes() { return 
bytes; } public void clearBytes() { bytes = null; } public void setLocations(String[] locations) { this.locations = locations; } public String[] getLocations() { return locations; } public void readFields(DataInput in) throws IOException { splitClass = Text.readString(in); dataLength = in.readLong(); bytes.readFields(in); int len = WritableUtils.readVInt(in); locations = new String[len]; for(int i=0; i < len; ++i) { locations[i] = Text.readString(in); } } public void write(DataOutput out) throws IOException { Text.writeString(out, splitClass); out.writeLong(dataLength); bytes.write(out); WritableUtils.writeVInt(out, locations.length); for(int i = 0; i < locations.length; i++) { Text.writeString(out, locations[i]); } } public long getDataLength() { return dataLength; } public void setDataLength(long l) { dataLength = l; } } private static final int CURRENT_SPLIT_FILE_VERSION = 0; private static final byte[] SPLIT_FILE_HEADER = "SPL".getBytes(); private DataOutputStream writeSplitsFileHeader(Configuration conf, Path filename, int length ) throws IOException { // write the splits to a file for the job tracker FileSystem fs = filename.getFileSystem(conf); FSDataOutputStream out = FileSystem.create(fs, filename, new FsPermission(JOB_FILE_PERMISSION)); out.write(SPLIT_FILE_HEADER); WritableUtils.writeVInt(out, CURRENT_SPLIT_FILE_VERSION); WritableUtils.writeVInt(out, length); return out; } /** Create the list of input splits and write them out in a file for *the JobTracker. 
The format is: * <format version> * <numSplits> * for each split: * <RawSplit> * @param splits the input splits to write out * @param out the stream to write to */ private void writeOldSplitsFile(InputSplit[] splits, FSDataOutputStream out) throws IOException { } /** * Read a splits file into a list of raw splits * @param in the stream to read from * @return the complete list of splits * @throws IOException */ static RawSplit[] readSplitFile(DataInput in) throws IOException { byte[] header = new byte[SPLIT_FILE_HEADER.length]; in.readFully(header); if (!Arrays.equals(SPLIT_FILE_HEADER, header)) { throw new IOException("Invalid header on split file"); } int vers = WritableUtils.readVInt(in); if (vers != CURRENT_SPLIT_FILE_VERSION) { throw new IOException("Unsupported split version " + vers); } int len = WritableUtils.readVInt(in); RawSplit[] result = new RawSplit[len]; for(int i=0; i < len; ++i) { result[i] = new RawSplit(); result[i].readFields(in); } return result; } /** * Get an {@link RunningJob} object to track an ongoing job. Returns * null if the id does not correspond to any known job. * * @param jobid the jobid of the job. * @return the {@link RunningJob} handle to track the job, null if the * <code>jobid</code> doesn't correspond to any known job. * @throws IOException */ public RunningJob getJob(JobID jobid) throws IOException { JobStatus status = jobSubmitClient.getJobStatus(jobid); if (status != null) { return new NetworkedJob(status); } else { return null; } } /**@deprecated Applications should rather use {@link #getJob(JobID)}. */ @Deprecated public RunningJob getJob(String jobid) throws IOException { return getJob(JobID.forName(jobid)); } /** * Get the information of the current state of the map tasks of a job. * * @param jobId the job to query. * @return the list of all of the map tips. 
* @throws IOException */ public TaskReport[] getMapTaskReports(JobID jobId) throws IOException { return jobSubmitClient.getMapTaskReports(jobId); } /**@deprecated Applications should rather use {@link #getMapTaskReports(JobID)}*/ @Deprecated public TaskReport[] getMapTaskReports(String jobId) throws IOException { return getMapTaskReports(JobID.forName(jobId)); } /** * Get the information of the current state of the reduce tasks of a job. * * @param jobId the job to query. * @return the list of all of the reduce tips. * @throws IOException */ public TaskReport[] getReduceTaskReports(JobID jobId) throws IOException { return jobSubmitClient.getReduceTaskReports(jobId); } /** * Get the information of the current state of the cleanup tasks of a job. * * @param jobId the job to query. * @return the list of all of the cleanup tips. * @throws IOException */ public TaskReport[] getCleanupTaskReports(JobID jobId) throws IOException { return jobSubmitClient.getCleanupTaskReports(jobId); } /** * Get the information of the current state of the setup tasks of a job. * * @param jobId the job to query. * @return the list of all of the setup tips. 
* @throws IOException */ public TaskReport[] getSetupTaskReports(JobID jobId) throws IOException { return jobSubmitClient.getSetupTaskReports(jobId); } /**@deprecated Applications should rather use {@link #getReduceTaskReports(JobID)}*/ @Deprecated public TaskReport[] getReduceTaskReports(String jobId) throws IOException { return getReduceTaskReports(JobID.forName(jobId)); } /** * Display the information about a job's tasks, of a particular type and * in a particular state * * @param jobId the ID of the job * @param type the type of the task (map/reduce/setup/cleanup) * @param state the state of the task * (pending/running/completed/failed/killed) */ public void displayTasks(JobID jobId, String type, String state) throws IOException { TaskReport[] reports = new TaskReport[0]; if (type.equals("map")) { reports = getMapTaskReports(jobId); } else if (type.equals("reduce")) { diff --git a/src/test/org/apache/hadoop/mapred/MRCaching.java b/src/test/org/apache/hadoop/mapred/MRCaching.java index bfdd859..4ff3028 100644 --- a/src/test/org/apache/hadoop/mapred/MRCaching.java +++ b/src/test/org/apache/hadoop/mapred/MRCaching.java @@ -1,300 +1,339 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; import java.io.*; import java.util.*; import org.apache.hadoop.fs.*; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.JobClient; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.Mapper; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reducer; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.*; import org.apache.hadoop.mapred.MapReduceBase; import org.apache.hadoop.filecache.*; + import java.net.URI; +import junit.framework.Assert; + public class MRCaching { static String testStr = "This is a test file " + "used for testing caching " + "jars, zip and normal files."; /** * Using the wordcount example and adding caching to it. The cache * archives/files are set and then are checked in the map if they have been * localized or not. */ public static class MapClass extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> { JobConf conf; private final static IntWritable one = new IntWritable(1); private Text word = new Text(); public void configure(JobConf jconf) { conf = jconf; try { Path[] localArchives = DistributedCache.getLocalCacheArchives(conf); Path[] localFiles = DistributedCache.getLocalCacheFiles(conf); // read the cached files (unzipped, unjarred and text) // and put it into a single file TEST_ROOT_DIR/test.txt String TEST_ROOT_DIR = jconf.get("test.build.data","/tmp"); Path file = new Path("file:///", TEST_ROOT_DIR); FileSystem fs = FileSystem.getLocal(conf); if (!fs.mkdirs(file)) { throw new IOException("Mkdirs failed to create " + file.toString()); } Path fileOut = new Path(file, "test.txt"); fs.delete(fileOut, true); DataOutputStream out = fs.create(fileOut); for (int i = 0; i < localArchives.length; i++) { // read out the files from these archives File f = new File(localArchives[i].toString()); File txt = new File(f, 
"test.txt"); FileInputStream fin = new FileInputStream(txt); DataInputStream din = new DataInputStream(fin); String str = din.readLine(); din.close(); out.writeBytes(str); out.writeBytes("\n"); } for (int i = 0; i < localFiles.length; i++) { // read out the files from these archives File txt = new File(localFiles[i].toString()); FileInputStream fin = new FileInputStream(txt); DataInputStream din = new DataInputStream(fin); String str = din.readLine(); out.writeBytes(str); out.writeBytes("\n"); } out.close(); } catch (IOException ie) { System.out.println(StringUtils.stringifyException(ie)); } } public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException { String line = value.toString(); StringTokenizer itr = new StringTokenizer(line); while (itr.hasMoreTokens()) { word.set(itr.nextToken()); output.collect(word, one); } } } /** * Using the wordcount example and adding caching to it. The cache * archives/files are set and then are checked in the map if they have been * symlinked or not. 
*/ public static class MapClass2 extends MapClass { JobConf conf; public void configure(JobConf jconf) { conf = jconf; try { // read the cached files (unzipped, unjarred and text) // and put it into a single file TEST_ROOT_DIR/test.txt String TEST_ROOT_DIR = jconf.get("test.build.data","/tmp"); Path file = new Path("file:///", TEST_ROOT_DIR); FileSystem fs = FileSystem.getLocal(conf); if (!fs.mkdirs(file)) { throw new IOException("Mkdirs failed to create " + file.toString()); } Path fileOut = new Path(file, "test.txt"); fs.delete(fileOut, true); DataOutputStream out = fs.create(fileOut); String[] symlinks = new String[6]; symlinks[0] = "."; symlinks[1] = "testjar"; symlinks[2] = "testzip"; symlinks[3] = "testtgz"; symlinks[4] = "testtargz"; symlinks[5] = "testtar"; for (int i = 0; i < symlinks.length; i++) { // read out the files from these archives File f = new File(symlinks[i]); File txt = new File(f, "test.txt"); FileInputStream fin = new FileInputStream(txt); BufferedReader reader = new BufferedReader(new InputStreamReader(fin)); String str = reader.readLine(); reader.close(); out.writeBytes(str); out.writeBytes("\n"); } out.close(); } catch (IOException ie) { System.out.println(StringUtils.stringifyException(ie)); } } } /** * A reducer class that just emits the sum of the input values. 
*/ public static class ReduceClass extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> { public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException { int sum = 0; while (values.hasNext()) { sum += values.next().get(); } output.collect(key, new IntWritable(sum)); } } public static class TestResult { public RunningJob job; public boolean isOutputOk; TestResult(RunningJob job, boolean isOutputOk) { this.job = job; this.isOutputOk = isOutputOk; } } static void setupCache(String cacheDir, FileSystem fs) throws IOException { Path localPath = new Path("build/test/cache"); Path txtPath = new Path(localPath, new Path("test.txt")); Path jarPath = new Path(localPath, new Path("test.jar")); Path zipPath = new Path(localPath, new Path("test.zip")); Path tarPath = new Path(localPath, new Path("test.tgz")); Path tarPath1 = new Path(localPath, new Path("test.tar.gz")); Path tarPath2 = new Path(localPath, new Path("test.tar")); Path cachePath = new Path(cacheDir); fs.delete(cachePath, true); if (!fs.mkdirs(cachePath)) { throw new IOException("Mkdirs failed to create " + cachePath.toString()); } fs.copyFromLocalFile(txtPath, cachePath); fs.copyFromLocalFile(jarPath, cachePath); fs.copyFromLocalFile(zipPath, cachePath); fs.copyFromLocalFile(tarPath, cachePath); fs.copyFromLocalFile(tarPath1, cachePath); fs.copyFromLocalFile(tarPath2, cachePath); } public static TestResult launchMRCache(String indir, String outdir, String cacheDir, JobConf conf, String input) throws IOException { setupCache(cacheDir, FileSystem.get(conf)); return launchMRCache(indir,outdir, cacheDir, conf, input, false); } public static TestResult launchMRCache(String indir, String outdir, String cacheDir, JobConf conf, String input, boolean withSymlink) throws IOException { String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data","/tmp")) .toString().replace(' ', '+'); //if 
(TEST_ROOT_DIR.startsWith("C:")) TEST_ROOT_DIR = "/tmp"; conf.set("test.build.data", TEST_ROOT_DIR); final Path inDir = new Path(indir); final Path outDir = new Path(outdir); FileSystem fs = FileSystem.get(conf); fs.delete(outDir, true); if (!fs.mkdirs(inDir)) { throw new IOException("Mkdirs failed to create " + inDir.toString()); } { System.out.println("HERE:"+inDir); DataOutputStream file = fs.create(new Path(inDir, "part-0")); file.writeBytes(input); file.close(); } conf.setJobName("cachetest"); // the keys are words (strings) conf.setOutputKeyClass(Text.class); // the values are counts (ints) conf.setOutputValueClass(IntWritable.class); conf.setCombinerClass(MRCaching.ReduceClass.class); conf.setReducerClass(MRCaching.ReduceClass.class); FileInputFormat.setInputPaths(conf, inDir); FileOutputFormat.setOutputPath(conf, outDir); conf.setNumMapTasks(1); conf.setNumReduceTasks(1); conf.setSpeculativeExecution(false); URI[] uris = new URI[6]; if (!withSymlink) { conf.setMapperClass(MRCaching.MapClass.class); uris[0] = fs.getUri().resolve(cacheDir + "/test.txt"); uris[1] = fs.getUri().resolve(cacheDir + "/test.jar"); uris[2] = fs.getUri().resolve(cacheDir + "/test.zip"); uris[3] = fs.getUri().resolve(cacheDir + "/test.tgz"); uris[4] = fs.getUri().resolve(cacheDir + "/test.tar.gz"); uris[5] = fs.getUri().resolve(cacheDir + "/test.tar"); } else { DistributedCache.createSymlink(conf); conf.setMapperClass(MRCaching.MapClass2.class); uris[0] = fs.getUri().resolve(cacheDir + "/test.txt#" + "test.txt"); uris[1] = fs.getUri().resolve(cacheDir + "/test.jar#" + "testjar"); uris[2] = fs.getUri().resolve(cacheDir + "/test.zip#" + "testzip"); uris[3] = fs.getUri().resolve(cacheDir + "/test.tgz#" + "testtgz"); uris[4] = fs.getUri().resolve(cacheDir + "/test.tar.gz#" + "testtargz"); uris[5] = fs.getUri().resolve(cacheDir + "/test.tar#" + "testtar"); } + + //Add files to DC and track their sizes DistributedCache.addCacheFile(uris[0], conf); + long[] fileSizes = new long[1]; + 
fileSizes[0] = fs.getFileStatus(new Path(uris[0].getPath())).getLen(); + + long archivesSizes[] = new long[5]; for (int i = 1; i < 6; i++) { DistributedCache.addCacheArchive(uris[i], conf); + archivesSizes[i-1] = + fs.getFileStatus(new Path(uris[i].getPath())).getLen(); } + + // Run the job RunningJob job = JobClient.runJob(conf); + int count = 0; // after the job ran check to see if the input from the localized cache // match the real string. check if there are 3 instances or not. Path result = new Path(TEST_ROOT_DIR + "/test.txt"); { BufferedReader file = new BufferedReader (new InputStreamReader(FileSystem.getLocal(conf).open(result))); String line = file.readLine(); while (line != null) { if (!testStr.equals(line)) return new TestResult(job, false); count++; line = file.readLine(); } file.close(); } if (count != 6) return new TestResult(job, false); + // Check to ensure the filesizes of files in DC were correctly saved + validateCacheFilesSizes(conf, fileSizes, DistributedCache.CACHE_FILES_SIZES); + validateCacheFilesSizes(conf, archivesSizes, + DistributedCache.CACHE_ARCHIVES_SIZES); + return new TestResult(job, true); } + + private static void validateCacheFilesSizes(JobConf job, + long[] expectedSizes, String configKey) + throws IOException { + String configValues = job.get(configKey, ""); + System.out.println(configKey + " -> " + configValues); + String[] realSizes = StringUtils.getStrings(configValues); + Assert.assertEquals("Found " + realSizes.length + " file-sizes for " + + configKey + " (" + configValues + "), expected: " + + expectedSizes.length, + expectedSizes.length, realSizes.length); + + for (int i=0; i < expectedSizes.length; ++i) { + long actual = Long.valueOf(realSizes[i]); + long expected = expectedSizes[i]; + Assert.assertEquals("Found length: " + actual + ", while expected: " + + expected, + expected, actual); + } + } }
jaxlaw/hadoop-common
7b04889e436ee7bf6af9c9b16546b8ab6fa1b33f
MAPREDUCE:1476 from https://issues.apache.org/jira/secure/attachment/12435855/patch-1476-ydist.txt
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index eae5be1..cd55a01 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,520 +1,524 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.9 + MAPREDUCE-1476. Fix the M/R framework to not call commit for special + tasks like job setup/cleanup and task cleanup. + (Amareshwari Sriramadasu via yhemanth) + HADOOP-5879. Read compression level and strategy from Configuration for gzip compression. (He Yongqiang via cdouglas) HADOOP-6161. Add get/setEnum methods to Configuration. (cdouglas) HADOOP-6382 Mavenize the build.xml targets and update the bin scripts in preparation for publishing POM files (giri kesavan via ltucker) HDFS-737. Add full path name of the file to the block information and summary of total number of files, blocks, live and deadnodes to metasave output. (Jitendra Nath Pandey via suresh) yahoo-hadoop-0.20.1-3195383008 HADOOP-6521. Fix backward compatiblity issue with umask when applications use deprecated param dfs.umask in configuration or use FsPermission.setUMask(). (suresh) MAPREDUCE-1372. Fixed a ConcurrentModificationException in jobtracker. (Arun C Murthy via yhemanth) MAPREDUCE-1316. Fix jobs' retirement from the JobTracker to prevent memory leaks via stale references. (Amar Kamat via acmurthy) MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. (Amareshwari Sriramadasu via acmurthy) HADOOP-6460. Reinitializes buffers used for serializing responses in ipc server on exceeding maximum response size to free up Java heap. (suresh) MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. 
(Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. 
Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1376. Add support for submitting jobs as configured users, pluggable mapping of trace users to target users in Gridmix. (cdouglas) yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. 
(suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. 
Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. 
Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. 
yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. 
http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. 
http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. 
http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. 
Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) diff --git a/src/mapred/org/apache/hadoop/mapred/Task.java b/src/mapred/org/apache/hadoop/mapred/Task.java index 926ec08..f61414b 100644 --- a/src/mapred/org/apache/hadoop/mapred/Task.java +++ b/src/mapred/org/apache/hadoop/mapred/Task.java @@ -196,1054 +196,1069 @@ abstract public class Task implements Writable, Configurable { * @return the job name */ public JobID getJobID() { return taskId.getJobID(); } /** * Get the index of this task within the job. * @return the integer part of the task id */ public int getPartition() { return partition; } /** * Return current phase of the task. * needs to be synchronized as communication thread sends the phase every second * @return the curent phase of the task */ public synchronized TaskStatus.Phase getPhase(){ return this.taskStatus.getPhase(); } /** * Set current phase of the task. * @param phase task phase */ protected synchronized void setPhase(TaskStatus.Phase phase){ this.taskStatus.setPhase(phase); } /** * Get whether to write skip records. */ protected boolean toWriteSkipRecs() { return writeSkipRecs; } /** * Set whether to write skip records. */ protected void setWriteSkipRecs(boolean writeSkipRecs) { this.writeSkipRecs = writeSkipRecs; } /** * Report a fatal error to the parent (task) tracker. */ protected void reportFatalError(TaskAttemptID id, Throwable throwable, String logMsg) { LOG.fatal(logMsg); Throwable tCause = throwable.getCause(); String cause = tCause == null ? StringUtils.stringifyException(throwable) : StringUtils.stringifyException(tCause); try { umbilical.fatalError(id, cause); } catch (IOException ioe) { LOG.fatal("Failed to contact the tasktracker", ioe); System.exit(-1); } } /** * Get skipRanges. */ public SortedRanges getSkipRanges() { return skipRanges; } /** * Set skipRanges. */ public void setSkipRanges(SortedRanges skipRanges) { this.skipRanges = skipRanges; } /** * Is Task in skipping mode. 
*/ public boolean isSkipping() { return skipping; } /** * Sets whether to run Task in skipping mode. * @param skipping */ public void setSkipping(boolean skipping) { this.skipping = skipping; } /** * Return current state of the task. * needs to be synchronized as communication thread * sends the state every second * @return */ synchronized TaskStatus.State getState(){ return this.taskStatus.getRunState(); } /** * Set current state of the task. * @param state */ synchronized void setState(TaskStatus.State state){ this.taskStatus.setRunState(state); } void setTaskCleanupTask() { taskCleanup = true; } boolean isTaskCleanupTask() { return taskCleanup; } boolean isJobCleanupTask() { return jobCleanup; } boolean isJobAbortTask() { // the task is an abort task if its marked for cleanup and the final // expected state is either failed or killed. return isJobCleanupTask() && (jobRunStateForCleanup == JobStatus.State.KILLED || jobRunStateForCleanup == JobStatus.State.FAILED); } boolean isJobSetupTask() { return jobSetup; } void setJobSetupTask() { jobSetup = true; } void setJobCleanupTask() { jobCleanup = true; } /** * Sets the task to do job abort in the cleanup. 
* @param status the final runstate of the job */ void setJobCleanupTaskState(JobStatus.State status) { jobRunStateForCleanup = status; } boolean isMapOrReduce() { return !jobSetup && !jobCleanup && !taskCleanup; } String getUser() { return username; } //////////////////////////////////////////// // Writable methods //////////////////////////////////////////// public void write(DataOutput out) throws IOException { Text.writeString(out, jobFile); taskId.write(out); out.writeInt(partition); out.writeInt(numSlotsRequired); taskStatus.write(out); skipRanges.write(out); out.writeBoolean(skipping); out.writeBoolean(jobCleanup); if (jobCleanup) { WritableUtils.writeEnum(out, jobRunStateForCleanup); } out.writeBoolean(jobSetup); Text.writeString(out, username); out.writeBoolean(writeSkipRecs); out.writeBoolean(taskCleanup); } public void readFields(DataInput in) throws IOException { jobFile = Text.readString(in); taskId = TaskAttemptID.read(in); partition = in.readInt(); numSlotsRequired = in.readInt(); taskStatus.readFields(in); this.mapOutputFile.setJobId(taskId.getJobID()); skipRanges.readFields(in); currentRecIndexIterator = skipRanges.skipRangeIterator(); currentRecStartIndex = currentRecIndexIterator.next(); skipping = in.readBoolean(); jobCleanup = in.readBoolean(); if (jobCleanup) { jobRunStateForCleanup = WritableUtils.readEnum(in, JobStatus.State.class); } jobSetup = in.readBoolean(); username = Text.readString(in); writeSkipRecs = in.readBoolean(); taskCleanup = in.readBoolean(); if (taskCleanup) { setPhase(TaskStatus.Phase.CLEANUP); } } @Override public String toString() { return taskId.toString(); } /** * Localize the given JobConf to be specific for this task. 
*/ public void localizeConfiguration(JobConf conf) throws IOException { conf.set("mapred.tip.id", taskId.getTaskID().toString()); conf.set("mapred.task.id", taskId.toString()); conf.setBoolean("mapred.task.is.map", isMapTask()); conf.setInt("mapred.task.partition", partition); conf.set("mapred.job.id", taskId.getJobID().toString()); } /** Run this task as a part of the named job. This method is executed in the * child process and is what invokes user-supplied map, reduce, etc. methods. * @param umbilical for progress reports */ public abstract void run(JobConf job, TaskUmbilicalProtocol umbilical) throws IOException, ClassNotFoundException, InterruptedException; /** Return an approprate thread runner for this task. * @param tip TODO*/ public abstract TaskRunner createRunner(TaskTracker tracker, TaskTracker.TaskInProgress tip) throws IOException; /** The number of milliseconds between progress reports. */ public static final int PROGRESS_INTERVAL = 3000; private transient Progress taskProgress = new Progress(); // Current counters private transient Counters counters = new Counters(); /* flag to track whether task is done */ private AtomicBoolean taskDone = new AtomicBoolean(false); public abstract boolean isMapTask(); public Progress getProgress() { return taskProgress; } public void initialize(JobConf job, JobID id, Reporter reporter, boolean useNewApi) throws IOException, ClassNotFoundException, InterruptedException { jobContext = new JobContext(job, id, reporter); taskContext = new TaskAttemptContext(job, taskId, reporter); if (getState() == TaskStatus.State.UNASSIGNED) { setState(TaskStatus.State.RUNNING); } if (useNewApi) { LOG.debug("using new api for output committer"); outputFormat = ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job); committer = outputFormat.getOutputCommitter(taskContext); } else { committer = conf.getOutputCommitter(); } Path outputPath = FileOutputFormat.getOutputPath(conf); if (outputPath != null) { if ((committer 
instanceof FileOutputCommitter)) { FileOutputFormat.setWorkOutputPath(conf, ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext)); } else { FileOutputFormat.setWorkOutputPath(conf, outputPath); } } committer.setupTask(taskContext); } protected class TaskReporter extends org.apache.hadoop.mapreduce.StatusReporter implements Runnable, Reporter { private TaskUmbilicalProtocol umbilical; private InputSplit split = null; private Progress taskProgress; private Thread pingThread = null; /** * flag that indicates whether progress update needs to be sent to parent. * If true, it has been set. If false, it has been reset. * Using AtomicBoolean since we need an atomic read & reset method. */ private AtomicBoolean progressFlag = new AtomicBoolean(false); TaskReporter(Progress taskProgress, TaskUmbilicalProtocol umbilical) { this.umbilical = umbilical; this.taskProgress = taskProgress; } // getters and setters for flag void setProgressFlag() { progressFlag.set(true); } boolean resetProgressFlag() { return progressFlag.getAndSet(false); } public void setStatus(String status) { taskProgress.setStatus(status); // indicate that progress update needs to be sent setProgressFlag(); } public void setProgress(float progress) { taskProgress.set(progress); // indicate that progress update needs to be sent setProgressFlag(); } public void progress() { // indicate that progress update needs to be sent setProgressFlag(); } public Counters.Counter getCounter(String group, String name) { Counters.Counter counter = null; if (counters != null) { counter = counters.findCounter(group, name); } return counter; } public Counters.Counter getCounter(Enum<?> name) { return counters == null ? 
null : counters.findCounter(name); } public void incrCounter(Enum key, long amount) { if (counters != null) { counters.incrCounter(key, amount); } setProgressFlag(); } public void incrCounter(String group, String counter, long amount) { if (counters != null) { counters.incrCounter(group, counter, amount); } if(skipping && SkipBadRecords.COUNTER_GROUP.equals(group) && ( SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS.equals(counter) || SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS.equals(counter))) { //if application reports the processed records, move the //currentRecStartIndex to the next. //currentRecStartIndex is the start index which has not yet been //finished and is still in task's stomach. for(int i=0;i<amount;i++) { currentRecStartIndex = currentRecIndexIterator.next(); } } setProgressFlag(); } public void setInputSplit(InputSplit split) { this.split = split; } public InputSplit getInputSplit() throws UnsupportedOperationException { if (split == null) { throw new UnsupportedOperationException("Input only available on map"); } else { return split; } } /** * The communication thread handles communication with the parent (Task Tracker). * It sends progress updates if progress has been made or if the task needs to * let the parent know that it's alive. It also pings the parent to see if it's alive. 
*/ public void run() { final int MAX_RETRIES = 3; int remainingRetries = MAX_RETRIES; // get current flag value and reset it as well boolean sendProgress = resetProgressFlag(); while (!taskDone.get()) { try { boolean taskFound = true; // whether TT knows about this task // sleep for a bit try { Thread.sleep(PROGRESS_INTERVAL); } catch (InterruptedException e) { LOG.debug(getTaskID() + " Progress/ping thread exiting " + "since it got interrupted"); break; } if (sendProgress) { // we need to send progress update updateCounters(); taskStatus.statusUpdate(taskProgress.get(), taskProgress.toString(), counters); taskFound = umbilical.statusUpdate(taskId, taskStatus); taskStatus.clearStatus(); } else { // send ping taskFound = umbilical.ping(taskId); } // if Task Tracker is not aware of our task ID (probably because it died and // came back up), kill ourselves if (!taskFound) { LOG.warn("Parent died. Exiting "+taskId); System.exit(66); } sendProgress = resetProgressFlag(); remainingRetries = MAX_RETRIES; } catch (Throwable t) { LOG.info("Communication exception: " + StringUtils.stringifyException(t)); remainingRetries -=1; if (remainingRetries == 0) { ReflectionUtils.logThreadInfo(LOG, "Communication exception", 0); LOG.warn("Last retry, killing "+taskId); System.exit(65); } } } } public void startCommunicationThread() { if (pingThread == null) { pingThread = new Thread(this, "communication thread"); pingThread.setDaemon(true); pingThread.start(); } } public void stopCommunicationThread() throws InterruptedException { if (pingThread != null) { pingThread.interrupt(); pingThread.join(); } } } /** * Reports the next executing record range to TaskTracker. * * @param umbilical * @param nextRecIndex the record index which would be fed next. 
* @throws IOException */ protected void reportNextRecordRange(final TaskUmbilicalProtocol umbilical, long nextRecIndex) throws IOException{ //currentRecStartIndex is the start index which has not yet been finished //and is still in task's stomach. long len = nextRecIndex - currentRecStartIndex +1; SortedRanges.Range range = new SortedRanges.Range(currentRecStartIndex, len); taskStatus.setNextRecordRange(range); LOG.debug("sending reportNextRecordRange " + range); umbilical.reportNextRecordRange(taskId, range); } /** * An updater that tracks the last number reported for a given file * system and only creates the counters when they are needed. */ class FileSystemStatisticUpdater { private long prevReadBytes = 0; private long prevWriteBytes = 0; private FileSystem.Statistics stats; private Counters.Counter readCounter = null; private Counters.Counter writeCounter = null; private String[] counterNames; FileSystemStatisticUpdater(String uriScheme, FileSystem.Statistics stats) { this.stats = stats; this.counterNames = getFileSystemCounterNames(uriScheme); } void updateCounters() { long newReadBytes = stats.getBytesRead(); long newWriteBytes = stats.getBytesWritten(); if (prevReadBytes != newReadBytes) { if (readCounter == null) { readCounter = counters.findCounter(FILESYSTEM_COUNTER_GROUP, counterNames[0]); } readCounter.increment(newReadBytes - prevReadBytes); prevReadBytes = newReadBytes; } if (prevWriteBytes != newWriteBytes) { if (writeCounter == null) { writeCounter = counters.findCounter(FILESYSTEM_COUNTER_GROUP, counterNames[1]); } writeCounter.increment(newWriteBytes - prevWriteBytes); prevWriteBytes = newWriteBytes; } } } /** * A Map where Key-> URIScheme and value->FileSystemStatisticUpdater */ private Map<String, FileSystemStatisticUpdater> statisticUpdaters = new HashMap<String, FileSystemStatisticUpdater>(); private synchronized void updateCounters() { for(Statistics stat: FileSystem.getAllStatistics()) { String uriScheme = stat.getScheme(); 
FileSystemStatisticUpdater updater = statisticUpdaters.get(uriScheme); if(updater==null) {//new FileSystem has been found in the cache updater = new FileSystemStatisticUpdater(uriScheme, stat); statisticUpdaters.put(uriScheme, updater); } updater.updateCounters(); } } public void done(TaskUmbilicalProtocol umbilical, TaskReporter reporter ) throws IOException, InterruptedException { LOG.info("Task:" + taskId + " is done." + " And is in the process of commiting"); updateCounters(); - // check whether the commit is required. - boolean commitRequired = committer.needsTaskCommit(taskContext); + boolean commitRequired = isCommitRequired(); if (commitRequired) { int retries = MAX_RETRIES; setState(TaskStatus.State.COMMIT_PENDING); // say the task tracker that task is commit pending while (true) { try { umbilical.commitPending(taskId, taskStatus); break; } catch (InterruptedException ie) { // ignore } catch (IOException ie) { LOG.warn("Failure sending commit pending: " + StringUtils.stringifyException(ie)); if (--retries == 0) { System.exit(67); } } } //wait for commit approval and commit commit(umbilical, reporter, committer); } taskDone.set(true); reporter.stopCommunicationThread(); sendLastUpdate(umbilical); //signal the tasktracker that we are done sendDone(umbilical); } + /** + * Checks if this task has anything to commit, depending on the + * type of task, as well as on whether the {@link OutputCommitter} + * has anything to commit. + * + * @return true if the task has to commit + * @throws IOException + */ + boolean isCommitRequired() throws IOException { + boolean commitRequired = false; + if (isMapOrReduce()) { + commitRequired = committer.needsTaskCommit(taskContext); + } + return commitRequired; + } + protected void statusUpdate(TaskUmbilicalProtocol umbilical) throws IOException { int retries = MAX_RETRIES; while (true) { try { if (!umbilical.statusUpdate(getTaskID(), taskStatus)) { LOG.warn("Parent died. 
Exiting "+taskId); System.exit(66); } taskStatus.clearStatus(); return; } catch (InterruptedException ie) { Thread.currentThread().interrupt(); // interrupt ourself } catch (IOException ie) { LOG.warn("Failure sending status update: " + StringUtils.stringifyException(ie)); if (--retries == 0) { throw ie; } } } } private void sendLastUpdate(TaskUmbilicalProtocol umbilical) throws IOException { // send a final status report taskStatus.statusUpdate(taskProgress.get(), taskProgress.toString(), counters); statusUpdate(umbilical); } private void sendDone(TaskUmbilicalProtocol umbilical) throws IOException { int retries = MAX_RETRIES; while (true) { try { umbilical.done(getTaskID()); LOG.info("Task '" + taskId + "' done."); return; } catch (IOException ie) { LOG.warn("Failure signalling completion: " + StringUtils.stringifyException(ie)); if (--retries == 0) { throw ie; } } } } private void commit(TaskUmbilicalProtocol umbilical, TaskReporter reporter, org.apache.hadoop.mapreduce.OutputCommitter committer ) throws IOException { int retries = MAX_RETRIES; while (true) { try { while (!umbilical.canCommit(taskId)) { try { Thread.sleep(1000); } catch(InterruptedException ie) { //ignore } reporter.setProgressFlag(); } break; } catch (IOException ie) { LOG.warn("Failure asking whether task can commit: " + StringUtils.stringifyException(ie)); if (--retries == 0) { //if it couldn't query successfully then delete the output discardOutput(taskContext); System.exit(68); } } } // task can Commit now try { LOG.info("Task " + taskId + " is allowed to commit now"); committer.commitTask(taskContext); return; } catch (IOException iee) { LOG.warn("Failure committing: " + StringUtils.stringifyException(iee)); //if it couldn't commit a successfully then delete the output discardOutput(taskContext); throw iee; } } private void discardOutput(TaskAttemptContext taskContext) { try { committer.abortTask(taskContext); } catch (IOException ioe) { LOG.warn("Failure cleaning up: " + 
StringUtils.stringifyException(ioe)); } } protected void runTaskCleanupTask(TaskUmbilicalProtocol umbilical, TaskReporter reporter) throws IOException, InterruptedException { taskCleanup(umbilical); done(umbilical, reporter); } void taskCleanup(TaskUmbilicalProtocol umbilical) throws IOException { // set phase for this task setPhase(TaskStatus.Phase.CLEANUP); getProgress().setStatus("cleanup"); statusUpdate(umbilical); LOG.info("Runnning cleanup for the task"); // do the cleanup committer.abortTask(taskContext); } protected void runJobCleanupTask(TaskUmbilicalProtocol umbilical, TaskReporter reporter ) throws IOException, InterruptedException { // set phase for this task setPhase(TaskStatus.Phase.CLEANUP); getProgress().setStatus("cleanup"); statusUpdate(umbilical); // do the cleanup LOG.info("Cleaning up job"); if (jobRunStateForCleanup == JobStatus.State.FAILED || jobRunStateForCleanup == JobStatus.State.KILLED) { LOG.info("Aborting job with runstate : " + jobRunStateForCleanup); committer.abortJob(jobContext, jobRunStateForCleanup); } else if (jobRunStateForCleanup == JobStatus.State.SUCCEEDED){ LOG.info("Committing job"); committer.commitJob(jobContext); } else { throw new IOException("Invalid state of the job for cleanup. 
State found " + jobRunStateForCleanup + " expecting " + JobStatus.State.SUCCEEDED + ", " + JobStatus.State.FAILED + " or " + JobStatus.State.KILLED); } done(umbilical, reporter); } protected void runJobSetupTask(TaskUmbilicalProtocol umbilical, TaskReporter reporter ) throws IOException, InterruptedException { // do the setup getProgress().setStatus("setup"); committer.setupJob(jobContext); done(umbilical, reporter); } public void setConf(Configuration conf) { if (conf instanceof JobConf) { this.conf = (JobConf) conf; } else { this.conf = new JobConf(conf); } this.mapOutputFile.setConf(this.conf); this.lDirAlloc = new LocalDirAllocator("mapred.local.dir"); // add the static resolutions (this is required for the junit to // work on testcases that simulate multiple nodes on a single physical // node. String hostToResolved[] = conf.getStrings("hadoop.net.static.resolutions"); if (hostToResolved != null) { for (String str : hostToResolved) { String name = str.substring(0, str.indexOf('=')); String resolvedName = str.substring(str.indexOf('=') + 1); NetUtils.addStaticResolution(name, resolvedName); } } } public Configuration getConf() { return this.conf; } /** * OutputCollector for the combiner. */ protected static class CombineOutputCollector<K extends Object, V extends Object> implements OutputCollector<K, V> { private Writer<K, V> writer; private Counters.Counter outCounter; public CombineOutputCollector(Counters.Counter outCounter) { this.outCounter = outCounter; } public synchronized void setWriter(Writer<K, V> writer) { this.writer = writer; } public synchronized void collect(K key, V value) throws IOException { outCounter.increment(1); writer.append(key, value); } } /** Iterates values while keys match in sorted input. 
*/ static class ValuesIterator<KEY,VALUE> implements Iterator<VALUE> { protected RawKeyValueIterator in; //input iterator private KEY key; // current key private KEY nextKey; private VALUE value; // current value private boolean hasNext; // more w/ this key private boolean more; // more in file private RawComparator<KEY> comparator; protected Progressable reporter; private Deserializer<KEY> keyDeserializer; private Deserializer<VALUE> valDeserializer; private DataInputBuffer keyIn = new DataInputBuffer(); private DataInputBuffer valueIn = new DataInputBuffer(); public ValuesIterator (RawKeyValueIterator in, RawComparator<KEY> comparator, Class<KEY> keyClass, Class<VALUE> valClass, Configuration conf, Progressable reporter) throws IOException { this.in = in; this.comparator = comparator; this.reporter = reporter; SerializationFactory serializationFactory = new SerializationFactory(conf); this.keyDeserializer = serializationFactory.getDeserializer(keyClass); this.keyDeserializer.open(keyIn); this.valDeserializer = serializationFactory.getDeserializer(valClass); this.valDeserializer.open(this.valueIn); readNextKey(); key = nextKey; nextKey = null; // force new instance creation hasNext = more; } RawKeyValueIterator getRawIterator() { return in; } /// Iterator methods public boolean hasNext() { return hasNext; } private int ctr = 0; public VALUE next() { if (!hasNext) { throw new NoSuchElementException("iterate past last value"); } try { readNextValue(); readNextKey(); } catch (IOException ie) { throw new RuntimeException("problem advancing post rec#"+ctr, ie); } reporter.progress(); return value; } public void remove() { throw new RuntimeException("not implemented"); } /// Auxiliary methods /** Start processing next unique key. 
*/ void nextKey() throws IOException { // read until we find a new key while (hasNext) { readNextKey(); } ++ctr; // move the next key to the current one KEY tmpKey = key; key = nextKey; nextKey = tmpKey; hasNext = more; } /** True iff more keys remain. */ boolean more() { return more; } /** The current key. */ KEY getKey() { return key; } /** * read the next key */ private void readNextKey() throws IOException { more = in.next(); if (more) { DataInputBuffer nextKeyBytes = in.getKey(); keyIn.reset(nextKeyBytes.getData(), nextKeyBytes.getPosition(), nextKeyBytes.getLength()); nextKey = keyDeserializer.deserialize(nextKey); hasNext = key != null && (comparator.compare(key, nextKey) == 0); } else { hasNext = false; } } /** * Read the next value * @throws IOException */ private void readNextValue() throws IOException { DataInputBuffer nextValueBytes = in.getValue(); valueIn.reset(nextValueBytes.getData(), nextValueBytes.getPosition(), nextValueBytes.getLength()); value = valDeserializer.deserialize(value); } } protected static class CombineValuesIterator<KEY,VALUE> extends ValuesIterator<KEY,VALUE> { private final Counters.Counter combineInputCounter; public CombineValuesIterator(RawKeyValueIterator in, RawComparator<KEY> comparator, Class<KEY> keyClass, Class<VALUE> valClass, Configuration conf, Reporter reporter, Counters.Counter combineInputCounter) throws IOException { super(in, comparator, keyClass, valClass, conf, reporter); this.combineInputCounter = combineInputCounter; } public VALUE next() { combineInputCounter.increment(1); return super.next(); } } private static final Constructor<org.apache.hadoop.mapreduce.Reducer.Context> contextConstructor; static { try { contextConstructor = org.apache.hadoop.mapreduce.Reducer.Context.class.getConstructor (new Class[]{org.apache.hadoop.mapreduce.Reducer.class, Configuration.class, org.apache.hadoop.mapreduce.TaskAttemptID.class, RawKeyValueIterator.class, org.apache.hadoop.mapreduce.Counter.class, 
org.apache.hadoop.mapreduce.Counter.class, org.apache.hadoop.mapreduce.RecordWriter.class, org.apache.hadoop.mapreduce.OutputCommitter.class, org.apache.hadoop.mapreduce.StatusReporter.class, RawComparator.class, Class.class, Class.class}); } catch (NoSuchMethodException nme) { throw new IllegalArgumentException("Can't find constructor"); } } @SuppressWarnings("unchecked") protected static <INKEY,INVALUE,OUTKEY,OUTVALUE> org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context createReduceContext(org.apache.hadoop.mapreduce.Reducer <INKEY,INVALUE,OUTKEY,OUTVALUE> reducer, Configuration job, org.apache.hadoop.mapreduce.TaskAttemptID taskId, RawKeyValueIterator rIter, org.apache.hadoop.mapreduce.Counter inputKeyCounter, org.apache.hadoop.mapreduce.Counter inputValueCounter, org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> output, org.apache.hadoop.mapreduce.OutputCommitter committer, org.apache.hadoop.mapreduce.StatusReporter reporter, RawComparator<INKEY> comparator, Class<INKEY> keyClass, Class<INVALUE> valueClass ) throws IOException, ClassNotFoundException { try { return contextConstructor.newInstance(reducer, job, taskId, rIter, inputKeyCounter, inputValueCounter, output, committer, reporter, comparator, keyClass, valueClass); } catch (InstantiationException e) { throw new IOException("Can't create Context", e); } catch (InvocationTargetException e) { throw new IOException("Can't invoke Context constructor", e); } catch (IllegalAccessException e) { throw new IOException("Can't invoke Context constructor", e); } } protected static abstract class CombinerRunner<K,V> { protected final Counters.Counter inputCounter; protected final JobConf job; protected final TaskReporter reporter; CombinerRunner(Counters.Counter inputCounter, JobConf job, TaskReporter reporter) { this.inputCounter = inputCounter; this.job = job; this.reporter = reporter; } /** * Run the combiner over a set of inputs. 
* @param iterator the key/value pairs to use as input * @param collector the output collector */ abstract void combine(RawKeyValueIterator iterator, OutputCollector<K,V> collector ) throws IOException, InterruptedException, ClassNotFoundException; static <K,V> CombinerRunner<K,V> create(JobConf job, TaskAttemptID taskId, Counters.Counter inputCounter, TaskReporter reporter, org.apache.hadoop.mapreduce.OutputCommitter committer ) throws ClassNotFoundException { Class<? extends Reducer<K,V,K,V>> cls = (Class<? extends Reducer<K,V,K,V>>) job.getCombinerClass(); if (cls != null) { return new OldCombinerRunner(cls, job, inputCounter, reporter); } // make a task context so we can get the classes org.apache.hadoop.mapreduce.TaskAttemptContext taskContext = new org.apache.hadoop.mapreduce.TaskAttemptContext(job, taskId); Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>> newcls = (Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>>) taskContext.getCombinerClass(); if (newcls != null) { return new NewCombinerRunner<K,V>(newcls, job, taskId, taskContext, inputCounter, reporter, committer); } return null; } } protected static class OldCombinerRunner<K,V> extends CombinerRunner<K,V> { private final Class<? extends Reducer<K,V,K,V>> combinerClass; private final Class<K> keyClass; private final Class<V> valueClass; private final RawComparator<K> comparator; protected OldCombinerRunner(Class<? 
extends Reducer<K,V,K,V>> cls, JobConf conf, Counters.Counter inputCounter, TaskReporter reporter) { super(inputCounter, conf, reporter); combinerClass = cls; keyClass = (Class<K>) job.getMapOutputKeyClass(); valueClass = (Class<V>) job.getMapOutputValueClass(); comparator = (RawComparator<K>) job.getOutputKeyComparator(); } @SuppressWarnings("unchecked") protected void combine(RawKeyValueIterator kvIter, OutputCollector<K,V> combineCollector ) throws IOException { Reducer<K,V,K,V> combiner = ReflectionUtils.newInstance(combinerClass, job); try { CombineValuesIterator<K,V> values = new CombineValuesIterator<K,V>(kvIter, comparator, keyClass, valueClass, job, Reporter.NULL, inputCounter); while (values.more()) { combiner.reduce(values.getKey(), values, combineCollector, Reporter.NULL); values.nextKey(); } } finally { combiner.close(); } } } protected static class NewCombinerRunner<K, V> extends CombinerRunner<K,V> { private final Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>> reducerClass; private final org.apache.hadoop.mapreduce.TaskAttemptID taskId; private final RawComparator<K> comparator; private final Class<K> keyClass; private final Class<V> valueClass; private final org.apache.hadoop.mapreduce.OutputCommitter committer; NewCombinerRunner(Class reducerClass, JobConf job, org.apache.hadoop.mapreduce.TaskAttemptID taskId, org.apache.hadoop.mapreduce.TaskAttemptContext context, Counters.Counter inputCounter, TaskReporter reporter, org.apache.hadoop.mapreduce.OutputCommitter committer) { super(inputCounter, job, reporter); this.reducerClass = reducerClass; this.taskId = taskId; keyClass = (Class<K>) context.getMapOutputKeyClass(); valueClass = (Class<V>) context.getMapOutputValueClass(); comparator = (RawComparator<K>) context.getSortComparator(); this.committer = committer; } private static class OutputConverter<K,V> extends org.apache.hadoop.mapreduce.RecordWriter<K,V> { OutputCollector<K,V> output; OutputConverter(OutputCollector<K,V> output) { 
this.output = output; } @Override public void close(org.apache.hadoop.mapreduce.TaskAttemptContext context){ } @Override public void write(K key, V value ) throws IOException, InterruptedException { output.collect(key,value); diff --git a/src/test/org/apache/hadoop/mapred/TestTaskCommit.java b/src/test/org/apache/hadoop/mapred/TestTaskCommit.java index 5494128..f2e8ba3 100644 --- a/src/test/org/apache/hadoop/mapred/TestTaskCommit.java +++ b/src/test/org/apache/hadoop/mapred/TestTaskCommit.java @@ -1,62 +1,281 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; +import java.io.File; import java.io.IOException; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.SortedRanges.Range; +import org.apache.hadoop.mapreduce.TaskType; public class TestTaskCommit extends HadoopTestCase { + Path rootDir = + new Path(System.getProperty("test.build.data", "/tmp"), "test"); static class CommitterWithCommitFail extends FileOutputCommitter { public void commitTask(TaskAttemptContext context) throws IOException { Path taskOutputPath = getTempTaskOutputPath(context); TaskAttemptID attemptId = context.getTaskAttemptID(); JobConf job = context.getJobConf(); if (taskOutputPath != null) { FileSystem fs = taskOutputPath.getFileSystem(job); if (fs.exists(taskOutputPath)) { throw new IOException(); } } } } + /** + * Special Committer that does not cleanup temporary files in + * abortTask + * + * The framework's FileOutputCommitter cleans up any temporary + * files left behind in abortTask. We want the test case to + * find these files and hence short-circuit abortTask. + */ + static class CommitterWithoutCleanup extends FileOutputCommitter { + @Override + public void abortTask(TaskAttemptContext context) throws IOException { + // does nothing + } + } + + /** + * Special committer that always requires commit. 
+ */ + static class CommitterThatAlwaysRequiresCommit extends FileOutputCommitter { + @Override + public boolean needsTaskCommit(TaskAttemptContext context) + throws IOException { + return true; + } + } + public TestTaskCommit() throws IOException { super(LOCAL_MR, LOCAL_FS, 1, 1); } - + + @Override + public void tearDown() throws Exception { + super.tearDown(); + FileUtil.fullyDelete(new File(rootDir.toString())); + } + public void testCommitFail() throws IOException { Path rootDir = new Path(System.getProperty("test.build.data", "/tmp"), "test"); final Path inDir = new Path(rootDir, "input"); final Path outDir = new Path(rootDir, "output"); JobConf jobConf = createJobConf(); jobConf.setMaxMapAttempts(1); jobConf.setOutputCommitter(CommitterWithCommitFail.class); RunningJob rJob = UtilsForTests.runJob(jobConf, inDir, outDir, 1, 0); rJob.waitForCompletion(); assertEquals(JobStatus.FAILED, rJob.getJobState()); } + + private class MyUmbilical implements TaskUmbilicalProtocol { + boolean taskDone = false; + + @Override + public boolean canCommit(TaskAttemptID taskid) throws IOException { + return false; + } + + @Override + public void commitPending(TaskAttemptID taskId, TaskStatus taskStatus) + throws IOException, InterruptedException { + fail("Task should not go to commit-pending"); + } + + @Override + public void done(TaskAttemptID taskid) throws IOException { + taskDone = true; + } + + @Override + public void fatalError(TaskAttemptID taskId, String message) + throws IOException { } + + @Override + public void fsError(TaskAttemptID taskId, String message) + throws IOException { } + + @Override + public MapTaskCompletionEventsUpdate getMapCompletionEvents(JobID jobId, + int fromIndex, int maxLocs, TaskAttemptID id) throws IOException { + return null; + } + + @Override + public JvmTask getTask(JvmContext context) throws IOException { + return null; + } + + @Override + public boolean ping(TaskAttemptID taskid) throws IOException { + return true; + } + + @Override + 
public void reportDiagnosticInfo(TaskAttemptID taskid, String trace) + throws IOException { + } + + @Override + public void reportNextRecordRange(TaskAttemptID taskid, Range range) + throws IOException { + } + + @Override + public void shuffleError(TaskAttemptID taskId, String message) + throws IOException { + } + + @Override + public boolean statusUpdate(TaskAttemptID taskId, TaskStatus taskStatus) + throws IOException, InterruptedException { + return true; + } + + @Override + public long getProtocolVersion(String protocol, long clientVersion) + throws IOException { + return 0; + } + } + + /** + * A test that mimics a failed task to ensure that it does + * not get into the COMMIT_PENDING state, by using a fake + * UmbilicalProtocol's implementation that fails if the commit. + * protocol is played. + * + * The test mocks the various steps in a failed task's + * life-cycle using a special OutputCommitter and UmbilicalProtocol + * implementation. + * + * @throws Exception + */ + public void testTaskCleanupDoesNotCommit() throws Exception { + // Mimic a job with a special committer that does not cleanup + // files when a task fails. 
+ JobConf job = new JobConf(); + job.setOutputCommitter(CommitterWithoutCleanup.class); + Path outDir = new Path(rootDir, "output"); + FileOutputFormat.setOutputPath(job, outDir); + + // Mimic job setup + String dummyAttemptID = "attempt_200707121733_0001_m_000000_0"; + TaskAttemptID attemptID = TaskAttemptID.forName(dummyAttemptID); + OutputCommitter committer = new CommitterWithoutCleanup(); + JobContext jContext = new JobContext(job, attemptID.getJobID()); + committer.setupJob(jContext); + + + // Mimic a map task + dummyAttemptID = "attempt_200707121733_0001_m_000001_0"; + attemptID = TaskAttemptID.forName(dummyAttemptID); + Task task = new MapTask(new Path(rootDir, "job.xml").toString(), attemptID, + 0, FileSplit.class.getName(), new BytesWritable(), 1, null); + task.setConf(job); + task.localizeConfiguration(job); + task.initialize(job, attemptID.getJobID(), Reporter.NULL, false); + + // Mimic the map task writing some output. + String file = "test.txt"; + FileSystem localFs = FileSystem.getLocal(job); + TextOutputFormat<Text, Text> theOutputFormat + = new TextOutputFormat<Text, Text>(); + RecordWriter<Text, Text> theRecordWriter = + theOutputFormat.getRecordWriter(localFs, + job, file, Reporter.NULL); + theRecordWriter.write(new Text("key"), new Text("value")); + theRecordWriter.close(Reporter.NULL); + + // Mimic a task failure; setting up the task for cleanup simulates + // the abort protocol to be played. + // Without checks in the framework, this will fail + // as the committer will cause a COMMIT to happen for + // the cleanup task. 
+ task.setTaskCleanupTask(); + MyUmbilical umbilical = new MyUmbilical(); + task.run(job, umbilical); + assertTrue("Task did not succeed", umbilical.taskDone); + } + + public void testCommitRequiredForMapTask() throws Exception { + Task testTask = createDummyTask(TaskType.MAP); + assertTrue("MapTask should need commit", testTask.isCommitRequired()); + } + + public void testCommitRequiredForReduceTask() throws Exception { + Task testTask = createDummyTask(TaskType.REDUCE); + assertTrue("ReduceTask should need commit", testTask.isCommitRequired()); + } + + public void testCommitNotRequiredForJobSetup() throws Exception { + Task testTask = createDummyTask(TaskType.MAP); + testTask.setJobSetupTask(); + assertFalse("Job setup task should not need commit", + testTask.isCommitRequired()); + } + + public void testCommitNotRequiredForJobCleanup() throws Exception { + Task testTask = createDummyTask(TaskType.MAP); + testTask.setJobCleanupTask(); + assertFalse("Job cleanup task should not need commit", + testTask.isCommitRequired()); + } + + public void testCommitNotRequiredForTaskCleanup() throws Exception { + Task testTask = createDummyTask(TaskType.REDUCE); + testTask.setTaskCleanupTask(); + assertFalse("Task cleanup task should not need commit", + testTask.isCommitRequired()); + } + + private Task createDummyTask(TaskType type) throws IOException, ClassNotFoundException, + InterruptedException { + JobConf conf = new JobConf(); + conf.setOutputCommitter(CommitterThatAlwaysRequiresCommit.class); + Path outDir = new Path(rootDir, "output"); + FileOutputFormat.setOutputPath(conf, outDir); + JobID jobId = JobID.forName("job_201002121132_0001"); + Task testTask; + if (type == TaskType.MAP) { + testTask = new MapTask(); + } else { + testTask = new ReduceTask(); + } + testTask.setConf(conf); + testTask.initialize(conf, jobId, Reporter.NULL, false); + return testTask; + } public static void main(String[] argv) throws Exception { TestTaskCommit td = new TestTaskCommit(); 
td.testCommitFail(); } }
jaxlaw/hadoop-common
ca5a47599a80564b5b80bcd66c79b6d2123c1801
Correct formatting errors in YAHOO-CHANGES.txt
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 4e19afd..eae5be1 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,518 +1,524 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. +yahoo-hadoop-0.20.9 + HADOOP-5879. Read compression level and strategy from Configuration for gzip compression. (He Yongqiang via cdouglas) HADOOP-6161. Add get/setEnum methods to Configuration. (cdouglas) -yahoo-hadoop-0.20.1-3195383006 + HADOOP-6382 Mavenize the build.xml targets and update the bin scripts + in preparation for publishing POM files (giri kesavan via ltucker) + HDFS-737. Add full path name of the file to the block information and summary of total number of files, blocks, live and deadnodes to metasave output. (Jitendra Nath Pandey via suresh) +yahoo-hadoop-0.20.1-3195383008 + HADOOP-6521. Fix backward compatiblity issue with umask when applications use deprecated param dfs.umask in configuration or use FsPermission.setUMask(). (suresh) MAPREDUCE-1372. Fixed a ConcurrentModificationException in jobtracker. (Arun C Murthy via yhemanth) MAPREDUCE-1316. Fix jobs' retirement from the JobTracker to prevent memory leaks via stale references. (Amar Kamat via acmurthy) MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. (Amareshwari Sriramadasu via acmurthy) HADOOP-6460. Reinitializes buffers used for serializing responses in ipc server on exceeding maximum response size to free up Java heap. (suresh) MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. 
Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. 
Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1376. Add support for submitting jobs as configured users, pluggable mapping of trace users to target users in Gridmix. (cdouglas) yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. 
(suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. 
Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. 
Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. 
yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. 
http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. 
http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. 
http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. 
Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch
jaxlaw/hadoop-common
c75fd1a9e2b3560e6105ea4381397399b2e96db1
HADOOP:5879 from http://issues.apache.org/jira/secure/attachment/12435254/hadoop-5879-yahoo-0.20-v1.0.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index d117af8..4e19afd 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,515 +1,518 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. + HADOOP-5879. Read compression level and strategy from Configuration for + gzip compression. (He Yongqiang via cdouglas) + HADOOP-6161. Add get/setEnum methods to Configuration. (cdouglas) yahoo-hadoop-0.20.1-3195383006 HDFS-737. Add full path name of the file to the block information and summary of total number of files, blocks, live and deadnodes to metasave output. (Jitendra Nath Pandey via suresh) HADOOP-6521. Fix backward compatiblity issue with umask when applications use deprecated param dfs.umask in configuration or use FsPermission.setUMask(). (suresh) MAPREDUCE-1372. Fixed a ConcurrentModificationException in jobtracker. (Arun C Murthy via yhemanth) MAPREDUCE-1316. Fix jobs' retirement from the JobTracker to prevent memory leaks via stale references. (Amar Kamat via acmurthy) MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. (Amareshwari Sriramadasu via acmurthy) HADOOP-6460. Reinitializes buffers used for serializing responses in ipc server on exceeding maximum response size to free up Java heap. (suresh) MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) MAPREDUCE-676. 
Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. 
Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1376. Add support for submitting jobs as configured users, pluggable mapping of trace users to target users in Gridmix. (cdouglas) yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. 
Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. 
NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. 
Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. 
yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. 
Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. 
http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). 
The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. 
Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/core/org/apache/hadoop/io/compress/CodecPool.java b/src/core/org/apache/hadoop/io/compress/CodecPool.java index 8960b41..dbf1f15 100644 --- a/src/core/org/apache/hadoop/io/compress/CodecPool.java +++ b/src/core/org/apache/hadoop/io/compress/CodecPool.java @@ -1,154 +1,161 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.io.compress; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.ReflectionUtils; /** * A global compressor/decompressor pool used to save and reuse * (possibly native) compression/decompression codecs. */ public class CodecPool { private static final Log LOG = LogFactory.getLog(CodecPool.class); /** * A global compressor pool used to save the expensive * construction/destruction of (possibly native) decompression codecs. */ private static final Map<Class<Compressor>, List<Compressor>> compressorPool = new HashMap<Class<Compressor>, List<Compressor>>(); /** * A global decompressor pool used to save the expensive * construction/destruction of (possibly native) decompression codecs. */ private static final Map<Class<Decompressor>, List<Decompressor>> decompressorPool = new HashMap<Class<Decompressor>, List<Decompressor>>(); private static <T> T borrow(Map<Class<T>, List<T>> pool, Class<? 
extends T> codecClass) { T codec = null; // Check if an appropriate codec is available synchronized (pool) { if (pool.containsKey(codecClass)) { List<T> codecList = pool.get(codecClass); if (codecList != null) { synchronized (codecList) { if (!codecList.isEmpty()) { codec = codecList.remove(codecList.size()-1); } } } } } return codec; } private static <T> void payback(Map<Class<T>, List<T>> pool, T codec) { if (codec != null) { Class<T> codecClass = ReflectionUtils.getClass(codec); synchronized (pool) { if (!pool.containsKey(codecClass)) { pool.put(codecClass, new ArrayList<T>()); } List<T> codecList = pool.get(codecClass); synchronized (codecList) { codecList.add(codec); } } } } /** * Get a {@link Compressor} for the given {@link CompressionCodec} from the * pool or a new one. * * @param codec the <code>CompressionCodec</code> for which to get the * <code>Compressor</code> + * @param conf the <code>Configuration</code> object which contains confs for creating or reinit the compressor * @return <code>Compressor</code> for the given * <code>CompressionCodec</code> from the pool or a new one */ - public static Compressor getCompressor(CompressionCodec codec) { + public static Compressor getCompressor(CompressionCodec codec, Configuration conf) { Compressor compressor = borrow(compressorPool, codec.getCompressorType()); if (compressor == null) { compressor = codec.createCompressor(); LOG.info("Got brand-new compressor"); } else { + compressor.reinit(conf); LOG.debug("Got recycled compressor"); } return compressor; } + public static Compressor getCompressor(CompressionCodec codec) { + return getCompressor(codec, null); + } + /** * Get a {@link Decompressor} for the given {@link CompressionCodec} from the * pool or a new one. 
* * @param codec the <code>CompressionCodec</code> for which to get the * <code>Decompressor</code> * @return <code>Decompressor</code> for the given * <code>CompressionCodec</code> the pool or a new one */ public static Decompressor getDecompressor(CompressionCodec codec) { Decompressor decompressor = borrow(decompressorPool, codec.getDecompressorType()); if (decompressor == null) { decompressor = codec.createDecompressor(); LOG.info("Got brand-new decompressor"); } else { LOG.debug("Got recycled decompressor"); } return decompressor; } /** * Return the {@link Compressor} to the pool. * * @param compressor the <code>Compressor</code> to be returned to the pool */ public static void returnCompressor(Compressor compressor) { if (compressor == null) { return; } compressor.reset(); payback(compressorPool, compressor); } /** * Return the {@link Decompressor} to the pool. * * @param decompressor the <code>Decompressor</code> to be returned to the * pool */ public static void returnDecompressor(Decompressor decompressor) { if (decompressor == null) { return; } decompressor.reset(); payback(decompressorPool, decompressor); } } diff --git a/src/core/org/apache/hadoop/io/compress/Compressor.java b/src/core/org/apache/hadoop/io/compress/Compressor.java index 66bc4bf..0d05cdf 100644 --- a/src/core/org/apache/hadoop/io/compress/Compressor.java +++ b/src/core/org/apache/hadoop/io/compress/Compressor.java @@ -1,106 +1,116 @@ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.io.compress; import java.io.IOException; +import org.apache.hadoop.conf.Configuration; + /** * Specification of a stream-based 'compressor' which can be * plugged into a {@link CompressionOutputStream} to compress data. * This is modelled after {@link java.util.zip.Deflater} * */ public interface Compressor { /** * Sets input data for compression. * This should be called whenever #needsInput() returns * <code>true</code> indicating that more input data is required. * * @param b Input data * @param off Start offset * @param len Length */ public void setInput(byte[] b, int off, int len); /** * Returns true if the input data buffer is empty and * #setInput() should be called to provide more input. * * @return <code>true</code> if the input data buffer is empty and * #setInput() should be called in order to provide more input. */ public boolean needsInput(); /** * Sets preset dictionary for compression. A preset dictionary * is used when the history buffer can be predetermined. * * @param b Dictionary data bytes * @param off Start offset * @param len Length */ public void setDictionary(byte[] b, int off, int len); /** * Return number of uncompressed bytes input so far. */ public long getBytesRead(); /** * Return number of compressed bytes output so far. */ public long getBytesWritten(); /** * When called, indicates that compression should end * with the current contents of the input buffer. */ public void finish(); /** * Returns true if the end of the compressed * data output stream has been reached. 
* @return <code>true</code> if the end of the compressed * data output stream has been reached. */ public boolean finished(); /** * Fills specified buffer with compressed data. Returns actual number * of bytes of compressed data. A return value of 0 indicates that * needsInput() should be called in order to determine if more input * data is required. * * @param b Buffer for the compressed data * @param off Start offset of the data * @param len Size of the buffer * @return The actual number of bytes of compressed data. */ public int compress(byte[] b, int off, int len) throws IOException; /** * Resets compressor so that a new set of input data can be processed. */ public void reset(); /** * Closes the compressor and discards any unprocessed input. */ - public void end(); + public void end(); + + /** + * Prepare the compressor to be used in a new stream with settings defined in + * the given Configuration + * + * @param conf Configuration from which new setting are fetched + */ + public void reinit(Configuration conf); } diff --git a/src/core/org/apache/hadoop/io/compress/GzipCodec.java b/src/core/org/apache/hadoop/io/compress/GzipCodec.java index 674dce2..ef6df99 100644 --- a/src/core/org/apache/hadoop/io/compress/GzipCodec.java +++ b/src/core/org/apache/hadoop/io/compress/GzipCodec.java @@ -1,216 +1,226 @@ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.io.compress; import java.io.*; import java.util.zip.GZIPOutputStream; import java.util.zip.GZIPInputStream; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.compress.DefaultCodec; import org.apache.hadoop.io.compress.zlib.*; +import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel; +import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy; /** * This class creates gzip compressors/decompressors. */ public class GzipCodec extends DefaultCodec { /** * A bridge that wraps around a DeflaterOutputStream to make it * a CompressionOutputStream. */ protected static class GzipOutputStream extends CompressorStream { private static class ResetableGZIPOutputStream extends GZIPOutputStream { public ResetableGZIPOutputStream(OutputStream out) throws IOException { super(out); } public void resetState() throws IOException { def.reset(); } } public GzipOutputStream(OutputStream out) throws IOException { super(new ResetableGZIPOutputStream(out)); } /** * Allow children types to put a different type in here. 
* @param out the Deflater stream to use */ protected GzipOutputStream(CompressorStream out) { super(out); } public void close() throws IOException { out.close(); } public void flush() throws IOException { out.flush(); } public void write(int b) throws IOException { out.write(b); } public void write(byte[] data, int offset, int length) throws IOException { out.write(data, offset, length); } public void finish() throws IOException { ((ResetableGZIPOutputStream) out).finish(); } public void resetState() throws IOException { ((ResetableGZIPOutputStream) out).resetState(); } } protected static class GzipInputStream extends DecompressorStream { private static class ResetableGZIPInputStream extends GZIPInputStream { public ResetableGZIPInputStream(InputStream in) throws IOException { super(in); } public void resetState() throws IOException { inf.reset(); } } public GzipInputStream(InputStream in) throws IOException { super(new ResetableGZIPInputStream(in)); } /** * Allow subclasses to directly set the inflater stream. */ protected GzipInputStream(DecompressorStream in) { super(in); } public int available() throws IOException { return in.available(); } public void close() throws IOException { in.close(); } public int read() throws IOException { return in.read(); } public int read(byte[] data, int offset, int len) throws IOException { return in.read(data, offset, len); } public long skip(long offset) throws IOException { return in.skip(offset); } public void resetState() throws IOException { ((ResetableGZIPInputStream) in).resetState(); } } public CompressionOutputStream createOutputStream(OutputStream out) throws IOException { return (ZlibFactory.isNativeZlibLoaded(conf)) ? new CompressorStream(out, createCompressor(), conf.getInt("io.file.buffer.size", 4*1024)) : new GzipOutputStream(out); } public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException { return (compressor != null) ? 
new CompressorStream(out, compressor, conf.getInt("io.file.buffer.size", 4*1024)) : createOutputStream(out); } public Compressor createCompressor() { return (ZlibFactory.isNativeZlibLoaded(conf)) - ? new GzipZlibCompressor() + ? new GzipZlibCompressor(conf) : null; } public Class<? extends Compressor> getCompressorType() { return ZlibFactory.isNativeZlibLoaded(conf) ? GzipZlibCompressor.class : BuiltInZlibDeflater.class; } public CompressionInputStream createInputStream(InputStream in) throws IOException { return (ZlibFactory.isNativeZlibLoaded(conf)) ? new DecompressorStream(in, createDecompressor(), conf.getInt("io.file.buffer.size", 4*1024)) : new GzipInputStream(in); } public CompressionInputStream createInputStream(InputStream in, Decompressor decompressor) throws IOException { return (decompressor != null) ? new DecompressorStream(in, decompressor, conf.getInt("io.file.buffer.size", 4*1024)) : createInputStream(in); } public Decompressor createDecompressor() { return (ZlibFactory.isNativeZlibLoaded(conf)) ? new GzipZlibDecompressor() : null; } public Class<? extends Decompressor> getDecompressorType() { return ZlibFactory.isNativeZlibLoaded(conf) ? 
GzipZlibDecompressor.class : BuiltInZlibInflater.class; } public String getDefaultExtension() { return ".gz"; } static final class GzipZlibCompressor extends ZlibCompressor { public GzipZlibCompressor() { super(ZlibCompressor.CompressionLevel.DEFAULT_COMPRESSION, ZlibCompressor.CompressionStrategy.DEFAULT_STRATEGY, ZlibCompressor.CompressionHeader.GZIP_FORMAT, 64*1024); } + + public GzipZlibCompressor(Configuration conf) { + super(ZlibFactory.getCompressionLevel(conf), + ZlibFactory.getCompressionStrategy(conf), + ZlibCompressor.CompressionHeader.GZIP_FORMAT, + 64 * 1024); + } } static final class GzipZlibDecompressor extends ZlibDecompressor { public GzipZlibDecompressor() { super(ZlibDecompressor.CompressionHeader.AUTODETECT_GZIP_ZLIB, 64*1024); } } } diff --git a/src/core/org/apache/hadoop/io/compress/bzip2/BZip2DummyCompressor.java b/src/core/org/apache/hadoop/io/compress/bzip2/BZip2DummyCompressor.java index 2594717..afb004d 100644 --- a/src/core/org/apache/hadoop/io/compress/bzip2/BZip2DummyCompressor.java +++ b/src/core/org/apache/hadoop/io/compress/bzip2/BZip2DummyCompressor.java @@ -1,62 +1,68 @@ package org.apache.hadoop.io.compress.bzip2; import java.io.IOException; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.compress.Compressor; /** * This is a dummy compressor for BZip2. 
*/ public class BZip2DummyCompressor implements Compressor { @Override public int compress(byte[] b, int off, int len) throws IOException { throw new UnsupportedOperationException(); } @Override public void end() { throw new UnsupportedOperationException(); } @Override public void finish() { throw new UnsupportedOperationException(); } @Override public boolean finished() { throw new UnsupportedOperationException(); } @Override public long getBytesRead() { throw new UnsupportedOperationException(); } @Override public long getBytesWritten() { throw new UnsupportedOperationException(); } @Override public boolean needsInput() { throw new UnsupportedOperationException(); } @Override public void reset() { // do nothing } @Override public void setDictionary(byte[] b, int off, int len) { throw new UnsupportedOperationException(); } @Override public void setInput(byte[] b, int off, int len) { throw new UnsupportedOperationException(); } + @Override + public void reinit(Configuration conf) { + // do nothing + } + } diff --git a/src/core/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java b/src/core/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java index f27e831..a017e1b 100644 --- a/src/core/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java +++ b/src/core/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java @@ -1,49 +1,77 @@ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.io.compress.zlib; import java.io.IOException; import java.util.zip.Deflater; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.compress.Compressor; +import org.mortbay.log.Log; /** * A wrapper around java.util.zip.Deflater to make it conform * to org.apache.hadoop.io.compress.Compressor interface. * */ public class BuiltInZlibDeflater extends Deflater implements Compressor { public BuiltInZlibDeflater(int level, boolean nowrap) { super(level, nowrap); } public BuiltInZlibDeflater(int level) { super(level); } public BuiltInZlibDeflater() { super(); } public synchronized int compress(byte[] b, int off, int len) throws IOException { return super.deflate(b, off, len); } + + /** + * reinit the compressor with the given configuration. It will reset the + * compressor's compression level and compression strategy. Different from + * <tt>ZlibCompressor</tt>, <tt>BuiltInZlibDeflater</tt> only support three + * kind of compression strategy: FILTERED, HUFFMAN_ONLY and DEFAULT_STRATEGY. + * It will use DEFAULT_STRATEGY as default if the configured compression + * strategy is not supported. 
+ */ + @Override + public void reinit(Configuration conf) { + reset(); + if (conf == null) { + return; + } + setLevel(ZlibFactory.getCompressionLevel(conf).compressionLevel()); + final ZlibCompressor.CompressionStrategy strategy = + ZlibFactory.getCompressionStrategy(conf); + try { + setStrategy(strategy.compressionStrategy()); + } catch (IllegalArgumentException ill) { + Log.warn(strategy + " not supported by BuiltInZlibDeflater."); + setStrategy(DEFAULT_STRATEGY); + } + Log.debug("Reinit compressor with new compression configuration"); + } } diff --git a/src/core/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java b/src/core/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java index 7abdcfb..67fa8b1 100644 --- a/src/core/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java +++ b/src/core/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java @@ -1,378 +1,415 @@ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.io.compress.zlib; import java.io.IOException; import java.nio.Buffer; import java.nio.ByteBuffer; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.util.NativeCodeLoader; +import org.mortbay.log.Log; /** * A {@link Compressor} based on the popular * zlib compression algorithm. * http://www.zlib.net/ * */ public class ZlibCompressor implements Compressor { private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024; // HACK - Use this as a global lock in the JNI layer private static Class clazz = ZlibCompressor.class; private long stream; private CompressionLevel level; private CompressionStrategy strategy; - private CompressionHeader windowBits; + private final CompressionHeader windowBits; private int directBufferSize; private byte[] userBuf = null; private int userBufOff = 0, userBufLen = 0; private Buffer uncompressedDirectBuf = null; private int uncompressedDirectBufOff = 0, uncompressedDirectBufLen = 0; private Buffer compressedDirectBuf = null; private boolean finish, finished; /** * The compression level for zlib library. */ public static enum CompressionLevel { /** * Compression level for no compression. */ NO_COMPRESSION (0), /** * Compression level for fastest compression. */ BEST_SPEED (1), /** * Compression level for best compression. */ BEST_COMPRESSION (9), /** * Default compression level. */ DEFAULT_COMPRESSION (-1); private final int compressionLevel; CompressionLevel(int level) { compressionLevel = level; } int compressionLevel() { return compressionLevel; } }; /** * The compression level for zlib library. */ public static enum CompressionStrategy { /** * Compression strategy best used for data consisting mostly of small * values with a somewhat random distribution. Forces more Huffman coding * and less string matching. */ FILTERED (1), /** * Compression strategy for Huffman coding only. 
*/ HUFFMAN_ONLY (2), /** * Compression strategy to limit match distances to one * (run-length encoding). */ RLE (3), /** * Compression strategy to prevent the use of dynamic Huffman codes, * allowing for a simpler decoder for special applications. */ FIXED (4), /** * Default compression strategy. */ DEFAULT_STRATEGY (0); private final int compressionStrategy; CompressionStrategy(int strategy) { compressionStrategy = strategy; } int compressionStrategy() { return compressionStrategy; } }; /** * The type of header for compressed data. */ public static enum CompressionHeader { /** * No headers/trailers/checksums. */ NO_HEADER (-15), /** * Default headers/trailers/checksums. */ DEFAULT_HEADER (15), /** * Simple gzip headers/trailers. */ GZIP_FORMAT (31); private final int windowBits; CompressionHeader(int windowBits) { this.windowBits = windowBits; } public int windowBits() { return windowBits; } } private static boolean nativeZlibLoaded = false; static { if (NativeCodeLoader.isNativeCodeLoaded()) { try { // Initialize the native library initIDs(); nativeZlibLoaded = true; } catch (Throwable t) { // Ignore failure to load/initialize native-zlib } } } static boolean isNativeZlibLoaded() { return nativeZlibLoaded; } + protected final void construct(CompressionLevel level, CompressionStrategy strategy, + CompressionHeader header, int directBufferSize) { + } + + /** + * Creates a new compressor with the default compression level. + * Compressed data will be generated in ZLIB format. + */ + public ZlibCompressor() { + this(CompressionLevel.DEFAULT_COMPRESSION, + CompressionStrategy.DEFAULT_STRATEGY, + CompressionHeader.DEFAULT_HEADER, + DEFAULT_DIRECT_BUFFER_SIZE); + } + + /** + * Creates a new compressor, taking settings from the configuration. 
+ */ + public ZlibCompressor(Configuration conf) { + this(ZlibFactory.getCompressionLevel(conf), + ZlibFactory.getCompressionStrategy(conf), + CompressionHeader.DEFAULT_HEADER, + DEFAULT_DIRECT_BUFFER_SIZE); + } + /** * Creates a new compressor using the specified compression level. * Compressed data will be generated in ZLIB format. * * @param level Compression level #CompressionLevel * @param strategy Compression strategy #CompressionStrategy * @param header Compression header #CompressionHeader * @param directBufferSize Size of the direct buffer to be used. */ public ZlibCompressor(CompressionLevel level, CompressionStrategy strategy, CompressionHeader header, int directBufferSize) { this.level = level; this.strategy = strategy; this.windowBits = header; + stream = init(this.level.compressionLevel(), + this.strategy.compressionStrategy(), + this.windowBits.windowBits()); + this.directBufferSize = directBufferSize; - uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize); compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize); compressedDirectBuf.position(directBufferSize); - - stream = init(this.level.compressionLevel(), - this.strategy.compressionStrategy(), - this.windowBits.windowBits()); } - + /** - * Creates a new compressor with the default compression level. - * Compressed data will be generated in ZLIB format. + * Prepare the compressor to be used in a new stream with settings defined in + * the given Configuration. It will reset the compressor's compression level + * and compression strategy. 
+ * + * @param conf Configuration storing new settings */ - public ZlibCompressor() { - this(CompressionLevel.DEFAULT_COMPRESSION, - CompressionStrategy.DEFAULT_STRATEGY, - CompressionHeader.DEFAULT_HEADER, - DEFAULT_DIRECT_BUFFER_SIZE); + @Override + public synchronized void reinit(Configuration conf) { + reset(); + if (conf == null) { + return; + } + end(stream); + level = ZlibFactory.getCompressionLevel(conf); + strategy = ZlibFactory.getCompressionStrategy(conf); + stream = init(level.compressionLevel(), + strategy.compressionStrategy(), + windowBits.windowBits()); + Log.debug("Reinit compressor with new compression configuration"); } - + public synchronized void setInput(byte[] b, int off, int len) { if (b== null) { throw new NullPointerException(); } if (off < 0 || len < 0 || off > b.length - len) { throw new ArrayIndexOutOfBoundsException(); } this.userBuf = b; this.userBufOff = off; this.userBufLen = len; setInputFromSavedData(); // Reinitialize zlib's output direct buffer compressedDirectBuf.limit(directBufferSize); compressedDirectBuf.position(directBufferSize); } synchronized void setInputFromSavedData() { uncompressedDirectBufOff = 0; uncompressedDirectBufLen = userBufLen; if (uncompressedDirectBufLen > directBufferSize) { uncompressedDirectBufLen = directBufferSize; } // Reinitialize zlib's input direct buffer uncompressedDirectBuf.rewind(); ((ByteBuffer)uncompressedDirectBuf).put(userBuf, userBufOff, uncompressedDirectBufLen); // Note how much data is being fed to zlib userBufOff += uncompressedDirectBufLen; userBufLen -= uncompressedDirectBufLen; } public synchronized void setDictionary(byte[] b, int off, int len) { if (stream == 0 || b == null) { throw new NullPointerException(); } if (off < 0 || len < 0 || off > b.length - len) { throw new ArrayIndexOutOfBoundsException(); } setDictionary(stream, b, off, len); } public boolean needsInput() { // Consume remaining compressed data? 
if (compressedDirectBuf.remaining() > 0) { return false; } // Check if zlib has consumed all input if (uncompressedDirectBufLen <= 0) { // Check if we have consumed all user-input if (userBufLen <= 0) { return true; } else { setInputFromSavedData(); } } return false; } public synchronized void finish() { finish = true; } public synchronized boolean finished() { // Check if 'zlib' says its 'finished' and // all compressed data has been consumed return (finished && compressedDirectBuf.remaining() == 0); } public synchronized int compress(byte[] b, int off, int len) throws IOException { if (b == null) { throw new NullPointerException(); } if (off < 0 || len < 0 || off > b.length - len) { throw new ArrayIndexOutOfBoundsException(); } int n = 0; // Check if there is compressed data n = compressedDirectBuf.remaining(); if (n > 0) { n = Math.min(n, len); ((ByteBuffer)compressedDirectBuf).get(b, off, n); return n; } // Re-initialize the zlib's output direct buffer compressedDirectBuf.rewind(); compressedDirectBuf.limit(directBufferSize); // Compress data n = deflateBytesDirect(); compressedDirectBuf.limit(n); // Get atmost 'len' bytes n = Math.min(n, len); ((ByteBuffer)compressedDirectBuf).get(b, off, n); return n; } /** * Returns the total number of compressed bytes output so far. 
* * @return the total (non-negative) number of compressed bytes output so far */ public synchronized long getBytesWritten() { checkStream(); return getBytesWritten(stream); } /** * Returns the total number of uncompressed bytes input so far.</p> * * @return the total (non-negative) number of uncompressed bytes input so far */ public synchronized long getBytesRead() { checkStream(); return getBytesRead(stream); } public synchronized void reset() { checkStream(); reset(stream); finish = false; finished = false; uncompressedDirectBuf.rewind(); uncompressedDirectBufOff = uncompressedDirectBufLen = 0; compressedDirectBuf.limit(directBufferSize); compressedDirectBuf.position(directBufferSize); userBufOff = userBufLen = 0; } public synchronized void end() { if (stream != 0) { end(stream); stream = 0; } } private void checkStream() { if (stream == 0) throw new NullPointerException(); } private native static void initIDs(); private native static long init(int level, int strategy, int windowBits); private native static void setDictionary(long strm, byte[] b, int off, int len); private native int deflateBytesDirect(); private native static long getBytesRead(long strm); private native static long getBytesWritten(long strm); private native static void reset(long strm); private native static void end(long strm); } diff --git a/src/core/org/apache/hadoop/io/compress/zlib/ZlibFactory.java b/src/core/org/apache/hadoop/io/compress/zlib/ZlibFactory.java index e3ce3ec..bf16262 100644 --- a/src/core/org/apache/hadoop/io/compress/zlib/ZlibFactory.java +++ b/src/core/org/apache/hadoop/io/compress/zlib/ZlibFactory.java @@ -1,110 +1,132 @@ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. 
The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.io.compress.zlib; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; +import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel; +import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy; import org.apache.hadoop.util.NativeCodeLoader; /** * A collection of factories to create the right * zlib/gzip compressor/decompressor instances. * */ public class ZlibFactory { private static final Log LOG = LogFactory.getLog(ZlibFactory.class); private static boolean nativeZlibLoaded = false; static { if (NativeCodeLoader.isNativeCodeLoaded()) { nativeZlibLoaded = ZlibCompressor.isNativeZlibLoaded() && ZlibDecompressor.isNativeZlibLoaded(); if (nativeZlibLoaded) { LOG.info("Successfully loaded & initialized native-zlib library"); } else { LOG.warn("Failed to load/initialize native-zlib library"); } } } /** * Check if native-zlib code is loaded & initialized correctly and * can be loaded for this job. 
* * @param conf configuration * @return <code>true</code> if native-zlib is loaded & initialized * and can be loaded for this job, else <code>false</code> */ public static boolean isNativeZlibLoaded(Configuration conf) { return nativeZlibLoaded && conf.getBoolean("hadoop.native.lib", true); } /** * Return the appropriate type of the zlib compressor. * * @param conf configuration * @return the appropriate type of the zlib compressor. */ public static Class<? extends Compressor> getZlibCompressorType(Configuration conf) { return (isNativeZlibLoaded(conf)) ? ZlibCompressor.class : BuiltInZlibDeflater.class; } /** * Return the appropriate implementation of the zlib compressor. * * @param conf configuration * @return the appropriate implementation of the zlib compressor. */ public static Compressor getZlibCompressor(Configuration conf) { return (isNativeZlibLoaded(conf)) ? new ZlibCompressor() : new BuiltInZlibDeflater(); } /** * Return the appropriate type of the zlib decompressor. * * @param conf configuration * @return the appropriate type of the zlib decompressor. */ public static Class<? extends Decompressor> getZlibDecompressorType(Configuration conf) { return (isNativeZlibLoaded(conf)) ? ZlibDecompressor.class : BuiltInZlibInflater.class; } /** * Return the appropriate implementation of the zlib decompressor. * * @param conf configuration * @return the appropriate implementation of the zlib decompressor. */ public static Decompressor getZlibDecompressor(Configuration conf) { return (isNativeZlibLoaded(conf)) ? 
new ZlibDecompressor() : new BuiltInZlibInflater(); } - + + public static void setCompressionStrategy(Configuration conf, + CompressionStrategy strategy) { + conf.setEnum("zlib.compress.strategy", strategy); + } + + public static CompressionStrategy getCompressionStrategy(Configuration conf) { + return conf.getEnum("zlib.compress.strategy", + CompressionStrategy.DEFAULT_STRATEGY); + } + + public static void setCompressionLevel(Configuration conf, + CompressionLevel level) { + conf.setEnum("zlib.compress.level", level); + } + + public static CompressionLevel getCompressionLevel(Configuration conf) { + return conf.getEnum("zlib.compress.level", + CompressionLevel.DEFAULT_COMPRESSION); + } + } diff --git a/src/test/org/apache/hadoop/io/compress/TestCodec.java b/src/test/org/apache/hadoop/io/compress/TestCodec.java index 38e4a35..96a5d67 100644 --- a/src/test/org/apache/hadoop/io/compress/TestCodec.java +++ b/src/test/org/apache/hadoop/io/compress/TestCodec.java @@ -1,249 +1,308 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.io.compress; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; +import java.io.ByteArrayOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; +import java.util.Arrays; import java.util.Random; import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.RandomDatum; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.io.SequenceFile.CompressionType; import org.apache.hadoop.io.compress.CompressionOutputStream; +import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel; +import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy; import org.apache.hadoop.io.compress.zlib.ZlibFactory; public class TestCodec extends TestCase { private static final Log LOG= LogFactory.getLog(TestCodec.class); private Configuration conf = new Configuration(); private int count = 10000; private int seed = new Random().nextInt(); public void testDefaultCodec() throws IOException { codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.DefaultCodec"); codecTest(conf, seed, count, "org.apache.hadoop.io.compress.DefaultCodec"); } public void testGzipCodec() throws IOException { codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.GzipCodec"); codecTest(conf, seed, count, "org.apache.hadoop.io.compress.GzipCodec"); } public void testBZip2Codec() throws IOException { codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.BZip2Codec"); codecTest(conf, seed, count, 
"org.apache.hadoop.io.compress.BZip2Codec"); } + public void testGzipCodecWithParam() throws IOException { + Configuration conf = new Configuration(this.conf); + ZlibFactory.setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION); + ZlibFactory.setCompressionStrategy(conf, CompressionStrategy.HUFFMAN_ONLY); + codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.GzipCodec"); + codecTest(conf, seed, count, "org.apache.hadoop.io.compress.GzipCodec"); + } + private static void codecTest(Configuration conf, int seed, int count, String codecClass) throws IOException { // Create the codec CompressionCodec codec = null; try { codec = (CompressionCodec) ReflectionUtils.newInstance(conf.getClassByName(codecClass), conf); } catch (ClassNotFoundException cnfe) { throw new IOException("Illegal codec!"); } LOG.info("Created a Codec object of type: " + codecClass); // Generate data DataOutputBuffer data = new DataOutputBuffer(); RandomDatum.Generator generator = new RandomDatum.Generator(seed); for(int i=0; i < count; ++i) { generator.next(); RandomDatum key = generator.getKey(); RandomDatum value = generator.getValue(); key.write(data); value.write(data); } DataInputBuffer originalData = new DataInputBuffer(); DataInputStream originalIn = new DataInputStream(new BufferedInputStream(originalData)); originalData.reset(data.getData(), 0, data.getLength()); LOG.info("Generated " + count + " records"); // Compress data DataOutputBuffer compressedDataBuffer = new DataOutputBuffer(); CompressionOutputStream deflateFilter = codec.createOutputStream(compressedDataBuffer); DataOutputStream deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter)); deflateOut.write(data.getData(), 0, data.getLength()); deflateOut.flush(); deflateFilter.finish(); LOG.info("Finished compressing data"); // De-compress data DataInputBuffer deCompressedDataBuffer = new DataInputBuffer(); deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0, compressedDataBuffer.getLength()); 
CompressionInputStream inflateFilter = codec.createInputStream(deCompressedDataBuffer); DataInputStream inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter)); // Check for(int i=0; i < count; ++i) { RandomDatum k1 = new RandomDatum(); RandomDatum v1 = new RandomDatum(); k1.readFields(originalIn); v1.readFields(originalIn); RandomDatum k2 = new RandomDatum(); RandomDatum v2 = new RandomDatum(); k2.readFields(inflateIn); v2.readFields(inflateIn); } LOG.info("SUCCESS! Completed checking " + count + " records"); } public void testCodecPoolGzipReuse() throws Exception { Configuration conf = new Configuration(); conf.setBoolean("hadoop.native.lib", true); if (!ZlibFactory.isNativeZlibLoaded(conf)) { LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded"); return; } GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf); DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf); Compressor c1 = CodecPool.getCompressor(gzc); Compressor c2 = CodecPool.getCompressor(dfc); CodecPool.returnCompressor(c1); CodecPool.returnCompressor(c2); assertTrue("Got mismatched ZlibCompressor", c2 != CodecPool.getCompressor(gzc)); } + private static void gzipReinitTest(Configuration conf, CompressionCodec codec) + throws IOException { + // Add codec to cache + ZlibFactory.setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION); + ZlibFactory.setCompressionStrategy(conf, + CompressionStrategy.DEFAULT_STRATEGY); + Compressor c1 = CodecPool.getCompressor(codec); + CodecPool.returnCompressor(c1); + // reset compressor's compression level to perform no compression + ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION); + Compressor c2 = CodecPool.getCompressor(codec, conf); + // ensure same compressor placed earlier + assertTrue("Got mismatched ZlibCompressor", c1 == c2); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + CompressionOutputStream cos = null; + // write trivially compressable data + byte[] b = 
new byte[1 << 15]; + Arrays.fill(b, (byte) 43); + try { + cos = codec.createOutputStream(bos, c2); + cos.write(b); + } finally { + if (cos != null) { + cos.close(); + } + CodecPool.returnCompressor(c2); + } + byte[] outbytes = bos.toByteArray(); + // verify data were not compressed + assertTrue("Compressed bytes contrary to configuration", + outbytes.length >= b.length); + } + + public void testCodecPoolCompressorReinit() throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean("hadoop.native.lib", true); + if (ZlibFactory.isNativeZlibLoaded(conf)) { + GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf); + gzipReinitTest(conf, gzc); + } else { + LOG.warn("testCodecPoolCompressorReinit skipped: native libs not loaded"); + } + conf.setBoolean("hadoop.native.lib", false); + DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf); + gzipReinitTest(conf, dfc); + } + public void testSequenceFileDefaultCodec() throws IOException, ClassNotFoundException, InstantiationException, IllegalAccessException { sequenceFileCodecTest(conf, 100, "org.apache.hadoop.io.compress.DefaultCodec", 100); sequenceFileCodecTest(conf, 200000, "org.apache.hadoop.io.compress.DefaultCodec", 1000000); } public void testSequenceFileBZip2Codec() throws IOException, ClassNotFoundException, InstantiationException, IllegalAccessException { sequenceFileCodecTest(conf, 0, "org.apache.hadoop.io.compress.BZip2Codec", 100); sequenceFileCodecTest(conf, 100, "org.apache.hadoop.io.compress.BZip2Codec", 100); sequenceFileCodecTest(conf, 200000, "org.apache.hadoop.io.compress.BZip2Codec", 1000000); } private static void sequenceFileCodecTest(Configuration conf, int lines, String codecClass, int blockSize) throws IOException, ClassNotFoundException, InstantiationException, IllegalAccessException { Path filePath = new Path("SequenceFileCodecTest." 
+ codecClass); // Configuration conf.setInt("io.seqfile.compress.blocksize", blockSize); // Create the SequenceFile FileSystem fs = FileSystem.get(conf); LOG.info("Creating SequenceFile with codec \"" + codecClass + "\""); SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, filePath, Text.class, Text.class, CompressionType.BLOCK, (CompressionCodec)Class.forName(codecClass).newInstance()); // Write some data LOG.info("Writing to SequenceFile..."); for (int i=0; i<lines; i++) { Text key = new Text("key" + i); Text value = new Text("value" + i); writer.append(key, value); } writer.close(); // Read the data back and check LOG.info("Reading from the SequenceFile..."); SequenceFile.Reader reader = new SequenceFile.Reader(fs, filePath, conf); Writable key = (Writable)reader.getKeyClass().newInstance(); Writable value = (Writable)reader.getValueClass().newInstance(); int lc = 0; try { while (reader.next(key, value)) { assertEquals("key" + lc, key.toString()); assertEquals("value" + lc, value.toString()); lc ++; } } finally { reader.close(); } assertEquals(lines, lc); // Delete temporary files fs.delete(filePath, false); LOG.info("SUCCESS! Completed SequenceFileCodecTest with codec \"" + codecClass + "\""); } public static void main(String[] args) { int count = 10000; String codecClass = "org.apache.hadoop.io.compress.DefaultCodec"; String usage = "TestCodec [-count N] [-codec <codec class>]"; if (args.length == 0) { System.err.println(usage); System.exit(-1); } try { for (int i=0; i < args.length; ++i) { // parse command line if (args[i] == null) { continue; } else if (args[i].equals("-count")) { count = Integer.parseInt(args[++i]); } else if (args[i].equals("-codec")) { codecClass = args[++i]; } } Configuration conf = new Configuration(); int seed = 0; codecTest(conf, seed, count, codecClass); } catch (Exception e) { System.err.println("Caught: " + e); e.printStackTrace(); } } public TestCodec(String name) { super(name); } }
jaxlaw/hadoop-common
ff7172a10d607923aff7dc3a3b52d64d94d70ea8
HADOOP:6161 from http://issues.apache.org/jira/secure/attachment/12434928/hadoop-6161-yahoo-20-v1.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 393d4bf..d117af8 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,513 +1,515 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. + HADOOP-6161. Add get/setEnum methods to Configuration. (cdouglas) + yahoo-hadoop-0.20.1-3195383006 HDFS-737. Add full path name of the file to the block information and summary of total number of files, blocks, live and deadnodes to metasave output. (Jitendra Nath Pandey via suresh) HADOOP-6521. Fix backward compatiblity issue with umask when applications use deprecated param dfs.umask in configuration or use FsPermission.setUMask(). (suresh) MAPREDUCE-1372. Fixed a ConcurrentModificationException in jobtracker. (Arun C Murthy via yhemanth) MAPREDUCE-1316. Fix jobs' retirement from the JobTracker to prevent memory leaks via stale references. (Amar Kamat via acmurthy) MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. (Amareshwari Sriramadasu via acmurthy) HADOOP-6460. Reinitializes buffers used for serializing responses in ipc server on exceeding maximum response size to free up Java heap. (suresh) MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. 
Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. 
(Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1376. Add support for submitting jobs as configured users, pluggable mapping of trace users to target users in Gridmix. (cdouglas) yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. 
Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. 
(suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. 
Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. 
HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. 
http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. 
http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). 
http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/core/org/apache/hadoop/conf/Configuration.java b/src/core/org/apache/hadoop/conf/Configuration.java index 851d986..1095a3a 100644 --- a/src/core/org/apache/hadoop/conf/Configuration.java +++ b/src/core/org/apache/hadoop/conf/Configuration.java @@ -117,1024 +117,1048 @@ import org.xml.sax.SAXException; * undefined here,</li> * <li>Properties in {@link System#getProperties()}.</li> * </ol> * * <p>For example, if a configuration resource contains the following property * definitions: * <tt><pre> * &lt;property&gt; * &lt;name&gt;basedir&lt;/name&gt; * &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt; * &lt;/property&gt; * * &lt;property&gt; * &lt;name&gt;tempdir&lt;/name&gt; * &lt;value&gt;${<i>basedir</i>}/tmp&lt;/value&gt; * &lt;/property&gt;</pre></tt> * * When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt> * will be resolved to another property in this Configuration, while * <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value * of the System property with that name. */ public class Configuration implements Iterable<Map.Entry<String,String>>, Writable { private static final Log LOG = LogFactory.getLog(Configuration.class); private boolean quietmode = true; /** * List of configuration resources. */ private ArrayList<Object> resources = new ArrayList<Object>(); /** * List of configuration parameters marked <b>final</b>. */ private Set<String> finalParameters = new HashSet<String>(); private boolean loadDefaults = true; /** * Configuration objects */ private static final WeakHashMap<Configuration,Object> REGISTRY = new WeakHashMap<Configuration,Object>(); /** * List of default Resources. 
Resources are loaded in the order of the list * entries */ private static final ArrayList<String> defaultResources = new ArrayList<String>(); /** * Flag to indicate if the storage of resource which updates a key needs * to be stored for each key */ private boolean storeResource; /** * Stores the mapping of key to the resource which modifies or loads * the key most recently */ private HashMap<String, String> updatingResource; static{ //print deprecation warning if hadoop-site.xml is found in classpath ClassLoader cL = Thread.currentThread().getContextClassLoader(); if (cL == null) { cL = Configuration.class.getClassLoader(); } if(cL.getResource("hadoop-site.xml")!=null) { LOG.warn("DEPRECATED: hadoop-site.xml found in the classpath. " + "Usage of hadoop-site.xml is deprecated. Instead use core-site.xml, " + "mapred-site.xml and hdfs-site.xml to override properties of " + "core-default.xml, mapred-default.xml and hdfs-default.xml " + "respectively"); } addDefaultResource("core-default.xml"); addDefaultResource("core-site.xml"); } private Properties properties; private Properties overlay; private ClassLoader classLoader; { classLoader = Thread.currentThread().getContextClassLoader(); if (classLoader == null) { classLoader = Configuration.class.getClassLoader(); } } /** A new configuration. */ public Configuration() { this(true); } /** A new configuration where the behavior of reading from the default * resources can be turned off. * * If the parameter {@code loadDefaults} is false, the new instance * will not load resources from the default files. 
* @param loadDefaults specifies whether to load from the default files */ public Configuration(boolean loadDefaults) { this.loadDefaults = loadDefaults; if (LOG.isDebugEnabled()) { LOG.debug(StringUtils.stringifyException(new IOException("config()"))); } synchronized(Configuration.class) { REGISTRY.put(this, null); } this.storeResource = false; } /** * A new configuration with the same settings and additional facility for * storage of resource to each key which loads or updates * the key most recently * @param other the configuration from which to clone settings * @param storeResource flag to indicate if the storage of resource to * each key is to be stored */ private Configuration(Configuration other, boolean storeResource) { this(other); this.loadDefaults = other.loadDefaults; this.storeResource = storeResource; if (storeResource) { updatingResource = new HashMap<String, String>(); } } /** * A new configuration with the same settings cloned from another. * * @param other the configuration from which to clone settings. */ @SuppressWarnings("unchecked") public Configuration(Configuration other) { if (LOG.isDebugEnabled()) { LOG.debug(StringUtils.stringifyException (new IOException("config(config)"))); } this.resources = (ArrayList)other.resources.clone(); synchronized(other) { if (other.properties != null) { this.properties = (Properties)other.properties.clone(); } if (other.overlay!=null) { this.overlay = (Properties)other.overlay.clone(); } } this.finalParameters = new HashSet<String>(other.finalParameters); synchronized(Configuration.class) { REGISTRY.put(this, null); } } /** * Add a default resource. Resources are loaded in the order of the resources * added. * @param name file name. File should be present in the classpath. 
*/ public static synchronized void addDefaultResource(String name) { if(!defaultResources.contains(name)) { defaultResources.add(name); for(Configuration conf : REGISTRY.keySet()) { if(conf.loadDefaults) { conf.reloadConfiguration(); } } } } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * @param name resource to be added, the classpath is examined for a file * with that name. */ public void addResource(String name) { addResourceObject(name); } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * @param url url of the resource to be added, the local filesystem is * examined directly to find the resource, without referring to * the classpath. */ public void addResource(URL url) { addResourceObject(url); } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * @param file file-path of resource to be added, the local filesystem is * examined directly to find the resource, without referring to * the classpath. */ public void addResource(Path file) { addResourceObject(file); } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * @param in InputStream to deserialize the object from. */ public void addResource(InputStream in) { addResourceObject(in); } /** * Reload configuration from previously added resources. * * This method will clear all the configuration read from the added * resources, and final parameters. This will make the resources to * be read again before accessing the values. 
Values that are added * via set methods will overlay values read from the resources. */ public synchronized void reloadConfiguration() { properties = null; // trigger reload finalParameters.clear(); // clear site-limits } private synchronized void addResourceObject(Object resource) { resources.add(resource); // add to resources reloadConfiguration(); } private static Pattern varPat = Pattern.compile("\\$\\{[^\\}\\$\u0020]+\\}"); private static int MAX_SUBST = 20; private String substituteVars(String expr) { if (expr == null) { return null; } Matcher match = varPat.matcher(""); String eval = expr; for(int s=0; s<MAX_SUBST; s++) { match.reset(eval); if (!match.find()) { return eval; } String var = match.group(); var = var.substring(2, var.length()-1); // remove ${ .. } String val = null; try { val = System.getProperty(var); } catch(SecurityException se) { LOG.warn("Unexpected SecurityException in Configuration", se); } if (val == null) { val = getRaw(var); } if (val == null) { return eval; // return literal ${var}: var is unbound } // substitute eval = eval.substring(0, match.start())+val+eval.substring(match.end()); } throw new IllegalStateException("Variable substitution depth too large: " + MAX_SUBST + " " + expr); } /** * Get the value of the <code>name</code> property, <code>null</code> if * no such property exists. * * Values are processed for <a href="#VariableExpansion">variable expansion</a> * before being returned. * * @param name the property name. * @return the value of the <code>name</code> property, * or null if no such property exists. */ public String get(String name) { return substituteVars(getProps().getProperty(name)); } /** * Get the value of the <code>name</code> property, without doing * <a href="#VariableExpansion">variable expansion</a>. * * @param name the property name. * @return the value of the <code>name</code> property, * or null if no such property exists. 
*/ public String getRaw(String name) { return getProps().getProperty(name); } /** * Set the <code>value</code> of the <code>name</code> property. * * @param name property name. * @param value property value. */ public void set(String name, String value) { getOverlay().setProperty(name, value); getProps().setProperty(name, value); } /** * Sets a property if it is currently unset. * @param name the property name * @param value the new value */ public void setIfUnset(String name, String value) { if (get(name) == null) { set(name, value); } } private synchronized Properties getOverlay() { if (overlay==null){ overlay=new Properties(); } return overlay; } /** * Get the value of the <code>name</code> property. If no such property * exists, then <code>defaultValue</code> is returned. * * @param name property name. * @param defaultValue default value. * @return property value, or <code>defaultValue</code> if the property * doesn't exist. */ public String get(String name, String defaultValue) { return substituteVars(getProps().getProperty(name, defaultValue)); } /** * Get the value of the <code>name</code> property as an <code>int</code>. * * If no such property exists, or if the specified value is not a valid * <code>int</code>, then <code>defaultValue</code> is returned. * * @param name property name. * @param defaultValue default value. * @return property value as an <code>int</code>, * or <code>defaultValue</code>. */ public int getInt(String name, int defaultValue) { String valueString = get(name); if (valueString == null) return defaultValue; try { String hexString = getHexDigits(valueString); if (hexString != null) { return Integer.parseInt(hexString, 16); } return Integer.parseInt(valueString); } catch (NumberFormatException e) { return defaultValue; } } /** * Set the value of the <code>name</code> property to an <code>int</code>. * * @param name property name. * @param value <code>int</code> value of the property. 
*/ public void setInt(String name, int value) { set(name, Integer.toString(value)); } /** * Get the value of the <code>name</code> property as a <code>long</code>. * If no such property is specified, or if the specified value is not a valid * <code>long</code>, then <code>defaultValue</code> is returned. * * @param name property name. * @param defaultValue default value. * @return property value as a <code>long</code>, * or <code>defaultValue</code>. */ public long getLong(String name, long defaultValue) { String valueString = get(name); if (valueString == null) return defaultValue; try { String hexString = getHexDigits(valueString); if (hexString != null) { return Long.parseLong(hexString, 16); } return Long.parseLong(valueString); } catch (NumberFormatException e) { return defaultValue; } } private String getHexDigits(String value) { boolean negative = false; String str = value; String hexString = null; if (value.startsWith("-")) { negative = true; str = value.substring(1); } if (str.startsWith("0x") || str.startsWith("0X")) { hexString = str.substring(2); if (negative) { hexString = "-" + hexString; } return hexString; } return null; } /** * Set the value of the <code>name</code> property to a <code>long</code>. * * @param name property name. * @param value <code>long</code> value of the property. */ public void setLong(String name, long value) { set(name, Long.toString(value)); } /** * Get the value of the <code>name</code> property as a <code>float</code>. * If no such property is specified, or if the specified value is not a valid * <code>float</code>, then <code>defaultValue</code> is returned. * * @param name property name. * @param defaultValue default value. * @return property value as a <code>float</code>, * or <code>defaultValue</code>. 
*/ public float getFloat(String name, float defaultValue) { String valueString = get(name); if (valueString == null) return defaultValue; try { return Float.parseFloat(valueString); } catch (NumberFormatException e) { return defaultValue; } } /** * Set the value of the <code>name</code> property to a <code>float</code>. * * @param name property name. * @param value property value. */ public void setFloat(String name, float value) { set(name,Float.toString(value)); } /** * Get the value of the <code>name</code> property as a <code>boolean</code>. * If no such property is specified, or if the specified value is not a valid * <code>boolean</code>, then <code>defaultValue</code> is returned. * * @param name property name. * @param defaultValue default value. * @return property value as a <code>boolean</code>, * or <code>defaultValue</code>. */ public boolean getBoolean(String name, boolean defaultValue) { String valueString = get(name); if ("true".equals(valueString)) return true; else if ("false".equals(valueString)) return false; else return defaultValue; } /** * Set the value of the <code>name</code> property to a <code>boolean</code>. * * @param name property name. * @param value <code>boolean</code> value of the property. */ public void setBoolean(String name, boolean value) { set(name, Boolean.toString(value)); } /** * Set the given property, if it is currently unset. * @param name property name * @param value new value */ public void setBooleanIfUnset(String name, boolean value) { setIfUnset(name, Boolean.toString(value)); } + /** + * Set the value of the <code>name</code> property to the given type. This + * is equivalent to <code>set(&lt;name&gt;, value.toString())</code>. + * @param name property name + * @param value new value + */ + public <T extends Enum<T>> void setEnum(String name, T value) { + set(name, value.toString()); + } + + /** + * Return value matching this enumerated type. 
+ * @param name Property name + * @param defaultValue Value returned if no mapping exists + * @throws IllegalArgumentException If mapping is illegal for the type + * provided + */ + public <T extends Enum<T>> T getEnum(String name, T defaultValue) { + final String val = get(name); + return null == val + ? defaultValue + : Enum.valueOf(defaultValue.getDeclaringClass(), val); + } + /** * A class that represents a set of positive integer ranges. It parses * strings of the form: "2-3,5,7-" where ranges are separated by comma and * the lower/upper bounds are separated by dash. Either the lower or upper * bound may be omitted meaning all values up to or over. So the string * above means 2, 3, 5, and 7, 8, 9, ... */ public static class IntegerRanges { private static class Range { int start; int end; } List<Range> ranges = new ArrayList<Range>(); public IntegerRanges() { } public IntegerRanges(String newValue) { StringTokenizer itr = new StringTokenizer(newValue, ","); while (itr.hasMoreTokens()) { String rng = itr.nextToken().trim(); String[] parts = rng.split("-", 3); if (parts.length < 1 || parts.length > 2) { throw new IllegalArgumentException("integer range badly formed: " + rng); } Range r = new Range(); r.start = convertToInt(parts[0], 0); if (parts.length == 2) { r.end = convertToInt(parts[1], Integer.MAX_VALUE); } else { r.end = r.start; } if (r.start > r.end) { throw new IllegalArgumentException("IntegerRange from " + r.start + " to " + r.end + " is invalid"); } ranges.add(r); } } /** * Convert a string to an int treating empty strings as the default value. 
* @param value the string value * @param defaultValue the value for if the string is empty * @return the desired integer */ private static int convertToInt(String value, int defaultValue) { String trim = value.trim(); if (trim.length() == 0) { return defaultValue; } return Integer.parseInt(trim); } /** * Is the given value in the set of ranges * @param value the value to check * @return is the value in the ranges? */ public boolean isIncluded(int value) { for(Range r: ranges) { if (r.start <= value && value <= r.end) { return true; } } return false; } @Override public String toString() { StringBuffer result = new StringBuffer(); boolean first = true; for(Range r: ranges) { if (first) { first = false; } else { result.append(','); } result.append(r.start); result.append('-'); result.append(r.end); } return result.toString(); } } /** * Parse the given attribute as a set of integer ranges * @param name the attribute name * @param defaultValue the default value if it is not set * @return a new set of ranges from the configured value */ public IntegerRanges getRange(String name, String defaultValue) { return new IntegerRanges(get(name, defaultValue)); } /** * Get the comma delimited values of the <code>name</code> property as * a collection of <code>String</code>s. * If no such property is specified then empty collection is returned. * <p> * This is an optimized version of {@link #getStrings(String)} * * @param name property name. * @return property value as a collection of <code>String</code>s. */ public Collection<String> getStringCollection(String name) { String valueString = get(name); return StringUtils.getStringCollection(valueString); } /** * Get the comma delimited values of the <code>name</code> property as * an array of <code>String</code>s. * If no such property is specified then <code>null</code> is returned. * * @param name property name. * @return property value as an array of <code>String</code>s, * or <code>null</code>. 
*/ public String[] getStrings(String name) { String valueString = get(name); return StringUtils.getStrings(valueString); } /** * Get the comma delimited values of the <code>name</code> property as * an array of <code>String</code>s. * If no such property is specified then default value is returned. * * @param name property name. * @param defaultValue The default value * @return property value as an array of <code>String</code>s, * or default value. */ public String[] getStrings(String name, String... defaultValue) { String valueString = get(name); if (valueString == null) { return defaultValue; } else { return StringUtils.getStrings(valueString); } } /** * Set the array of string values for the <code>name</code> property as * as comma delimited values. * * @param name property name. * @param values The values */ public void setStrings(String name, String... values) { set(name, StringUtils.arrayToString(values)); } /** * Load a class by name. * * @param name the class name. * @return the class object. * @throws ClassNotFoundException if the class is not found. */ public Class<?> getClassByName(String name) throws ClassNotFoundException { return Class.forName(name, true, classLoader); } /** * Get the value of the <code>name</code> property * as an array of <code>Class</code>. * The value of the property specifies a list of comma separated class names. * If no such property is specified, then <code>defaultValue</code> is * returned. * * @param name the property name. * @param defaultValue default value. * @return property value as a <code>Class[]</code>, * or <code>defaultValue</code>. */ public Class<?>[] getClasses(String name, Class<?> ... 
defaultValue) { String[] classnames = getStrings(name); if (classnames == null) return defaultValue; try { Class<?>[] classes = new Class<?>[classnames.length]; for(int i = 0; i < classnames.length; i++) { classes[i] = getClassByName(classnames[i]); } return classes; } catch (ClassNotFoundException e) { throw new RuntimeException(e); } } /** * Get the value of the <code>name</code> property as a <code>Class</code>. * If no such property is specified, then <code>defaultValue</code> is * returned. * * @param name the class name. * @param defaultValue default value. * @return property value as a <code>Class</code>, * or <code>defaultValue</code>. */ public Class<?> getClass(String name, Class<?> defaultValue) { String valueString = get(name); if (valueString == null) return defaultValue; try { return getClassByName(valueString); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } } /** * Get the value of the <code>name</code> property as a <code>Class</code> * implementing the interface specified by <code>xface</code>. * * If no such property is specified, then <code>defaultValue</code> is * returned. * * An exception is thrown if the returned class does not implement the named * interface. * * @param name the class name. * @param defaultValue default value. * @param xface the interface implemented by the named class. * @return property value as a <code>Class</code>, * or <code>defaultValue</code>. */ public <U> Class<? extends U> getClass(String name, Class<? 
extends U> defaultValue, Class<U> xface) { try { Class<?> theClass = getClass(name, defaultValue); if (theClass != null && !xface.isAssignableFrom(theClass)) throw new RuntimeException(theClass+" not "+xface.getName()); else if (theClass != null) return theClass.asSubclass(xface); else return null; } catch (Exception e) { throw new RuntimeException(e); } } /** * Set the value of the <code>name</code> property to the name of a * <code>theClass</code> implementing the given interface <code>xface</code>. * * An exception is thrown if <code>theClass</code> does not implement the * interface <code>xface</code>. * * @param name property name. * @param theClass property value. * @param xface the interface implemented by the named class. */ public void setClass(String name, Class<?> theClass, Class<?> xface) { if (!xface.isAssignableFrom(theClass)) throw new RuntimeException(theClass+" not "+xface.getName()); set(name, theClass.getName()); } /** * Get a local file under a directory named by <i>dirsProp</i> with * the given <i>path</i>. If <i>dirsProp</i> contains multiple directories, * then one is chosen based on <i>path</i>'s hash code. If the selected * directory does not exist, an attempt is made to create it. * * @param dirsProp directory in which to locate the file. * @param path file-path. * @return local file under the directory with the given path. 
*/ public Path getLocalPath(String dirsProp, String path) throws IOException { String[] dirs = getStrings(dirsProp); int hashCode = path.hashCode(); FileSystem fs = FileSystem.getLocal(this); for (int i = 0; i < dirs.length; i++) { // try each local dir int index = (hashCode+i & Integer.MAX_VALUE) % dirs.length; Path file = new Path(dirs[index], path); Path dir = file.getParent(); if (fs.mkdirs(dir) || fs.exists(dir)) { return file; } } LOG.warn("Could not make " + path + " in local directories from " + dirsProp); for(int i=0; i < dirs.length; i++) { int index = (hashCode+i & Integer.MAX_VALUE) % dirs.length; LOG.warn(dirsProp + "[" + index + "]=" + dirs[index]); } throw new IOException("No valid local directories in property: "+dirsProp); } /** * Get a local file name under a directory named in <i>dirsProp</i> with * the given <i>path</i>. If <i>dirsProp</i> contains multiple directories, * then one is chosen based on <i>path</i>'s hash code. If the selected * directory does not exist, an attempt is made to create it. * * @param dirsProp directory in which to locate the file. * @param path file-path. * @return local file under the directory with the given path. */ public File getFile(String dirsProp, String path) throws IOException { String[] dirs = getStrings(dirsProp); int hashCode = path.hashCode(); for (int i = 0; i < dirs.length; i++) { // try each local dir int index = (hashCode+i & Integer.MAX_VALUE) % dirs.length; File file = new File(dirs[index], path); File dir = file.getParentFile(); if (dir.exists() || dir.mkdirs()) { return file; } } throw new IOException("No valid local directories in property: "+dirsProp); } /** * Get the {@link URL} for the named resource. * * @param name resource name. * @return the url for the named resource. */ public URL getResource(String name) { return classLoader.getResource(name); } /** * Get an input stream attached to the configuration resource with the * given <code>name</code>. 
* * @param name configuration resource name. * @return an input stream attached to the resource. */ public InputStream getConfResourceAsInputStream(String name) { try { URL url= getResource(name); if (url == null) { LOG.info(name + " not found"); return null; } else { LOG.info("found resource " + name + " at " + url); } return url.openStream(); } catch (Exception e) { return null; } } /** * Get a {@link Reader} attached to the configuration resource with the * given <code>name</code>. * * @param name configuration resource name. * @return a reader attached to the resource. */ public Reader getConfResourceAsReader(String name) { try { URL url= getResource(name); if (url == null) { LOG.info(name + " not found"); return null; } else { LOG.info("found resource " + name + " at " + url); } return new InputStreamReader(url.openStream()); } catch (Exception e) { return null; } } private synchronized Properties getProps() { if (properties == null) { properties = new Properties(); loadResources(properties, resources, quietmode); if (overlay!= null) { properties.putAll(overlay); if (storeResource) { for (Map.Entry<Object,Object> item: overlay.entrySet()) { updatingResource.put((String) item.getKey(), "Unknown"); } } } } return properties; } /** * Return the number of keys in the configuration. * * @return number of keys in the configuration. */ public int size() { return getProps().size(); } /** * Clears all keys from the configuration. */ public void clear() { getProps().clear(); getOverlay().clear(); } /** * Get an {@link Iterator} to go through the list of <code>String</code> * key-value pairs in the configuration. * * @return an iterator over the entries. */ public Iterator<Map.Entry<String, String>> iterator() { // Get a copy of just the string to string pairs. After the old object // methods that allow non-strings to be put into configurations are removed, // we could replace properties with a Map<String,String> and get rid of this // code. 
Map<String,String> result = new HashMap<String,String>(); for(Map.Entry<Object,Object> item: getProps().entrySet()) { if (item.getKey() instanceof String && item.getValue() instanceof String) { result.put((String) item.getKey(), (String) item.getValue()); } } return result.entrySet().iterator(); } private void loadResources(Properties properties, ArrayList resources, boolean quiet) { if(loadDefaults) { for (String resource : defaultResources) { loadResource(properties, resource, quiet); } //support the hadoop-site.xml as a deprecated case if(getResource("hadoop-site.xml")!=null) { loadResource(properties, "hadoop-site.xml", quiet); } } for (Object resource : resources) { loadResource(properties, resource, quiet); } } private void loadResource(Properties properties, Object name, boolean quiet) { try { DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance(); //ignore all comments inside the xml file docBuilderFactory.setIgnoringComments(true); //allow includes in the xml file docBuilderFactory.setNamespaceAware(true); try { docBuilderFactory.setXIncludeAware(true); } catch (UnsupportedOperationException e) { LOG.error("Failed to set setXIncludeAware(true) for parser " + docBuilderFactory + ":" + e, e); } DocumentBuilder builder = docBuilderFactory.newDocumentBuilder(); Document doc = null; Element root = null; if (name instanceof URL) { // an URL resource URL url = (URL)name; if (url != null) { if (!quiet) { LOG.info("parsing " + url); } doc = builder.parse(url.toString()); } } else if (name instanceof String) { // a CLASSPATH resource URL url = getResource((String)name); if (url != null) { if (!quiet) { LOG.info("parsing " + url); } doc = builder.parse(url.toString()); } } else if (name instanceof Path) { // a file resource // Can't use FileSystem API or we get an infinite loop // since FileSystem uses Configuration API. Use java.io.File instead. 
File file = new File(((Path)name).toUri().getPath()) .getAbsoluteFile(); if (file.exists()) { if (!quiet) { LOG.info("parsing " + file); } InputStream in = new BufferedInputStream(new FileInputStream(file)); try { doc = builder.parse(in); } finally { in.close(); } } } else if (name instanceof InputStream) { try { doc = builder.parse((InputStream)name); } finally { ((InputStream)name).close(); } diff --git a/src/test/org/apache/hadoop/conf/TestConfiguration.java b/src/test/org/apache/hadoop/conf/TestConfiguration.java index c22caf5..629e179 100644 --- a/src/test/org/apache/hadoop/conf/TestConfiguration.java +++ b/src/test/org/apache/hadoop/conf/TestConfiguration.java @@ -1,593 +1,610 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.conf; import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.io.DataInputStream; import java.io.ByteArrayOutputStream; import java.io.ByteArrayInputStream; import java.io.DataOutputStream; import java.io.StringWriter; import java.util.ArrayList; import java.util.HashMap; import java.util.Random; import junit.framework.TestCase; import org.apache.hadoop.fs.Path; import org.codehaus.jackson.map.ObjectMapper; public class TestConfiguration extends TestCase { private Configuration conf; final static String CONFIG = new File("./test-config.xml").getAbsolutePath(); final static String CONFIG2 = new File("./test-config2.xml").getAbsolutePath(); final static Random RAN = new Random(); @Override protected void setUp() throws Exception { super.setUp(); conf = new Configuration(); } @Override protected void tearDown() throws Exception { super.tearDown(); new File(CONFIG).delete(); new File(CONFIG2).delete(); } private void startConfig() throws IOException{ out.write("<?xml version=\"1.0\"?>\n"); out.write("<configuration>\n"); } private void endConfig() throws IOException{ out.write("</configuration>\n"); out.close(); } private void addInclude(String filename) throws IOException{ out.write("<xi:include href=\"" + filename + "\" xmlns:xi=\"http://www.w3.org/2001/XInclude\" />\n "); } public void testVariableSubstitution() throws IOException { out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); declareProperty("my.int", "${intvar}", "42"); declareProperty("intvar", "42", "42"); declareProperty("my.base", "/tmp/${user.name}", UNSPEC); declareProperty("my.file", "hello", "hello"); declareProperty("my.suffix", ".txt", ".txt"); declareProperty("my.relfile", "${my.file}${my.suffix}", "hello.txt"); declareProperty("my.fullfile", "${my.base}/${my.file}${my.suffix}", UNSPEC); // check that undefined variables are returned as-is declareProperty("my.failsexpand", "a${my.undefvar}b", 
"a${my.undefvar}b"); endConfig(); Path fileResource = new Path(CONFIG); conf.addResource(fileResource); for (Prop p : props) { System.out.println("p=" + p.name); String gotVal = conf.get(p.name); String gotRawVal = conf.getRaw(p.name); assertEq(p.val, gotRawVal); if (p.expectEval == UNSPEC) { // expansion is system-dependent (uses System properties) // can't do exact match so just check that all variables got expanded assertTrue(gotVal != null && -1 == gotVal.indexOf("${")); } else { assertEq(p.expectEval, gotVal); } } // check that expansion also occurs for getInt() assertTrue(conf.getInt("intvar", -1) == 42); assertTrue(conf.getInt("my.int", -1) == 42); } public void testFinalParam() throws IOException { out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); declareProperty("my.var", "", "", true); endConfig(); Path fileResource = new Path(CONFIG); Configuration conf1 = new Configuration(); conf1.addResource(fileResource); assertNull("my var is not null", conf1.get("my.var")); out=new BufferedWriter(new FileWriter(CONFIG2)); startConfig(); declareProperty("my.var", "myval", "myval", false); endConfig(); fileResource = new Path(CONFIG2); Configuration conf2 = new Configuration(conf1); conf2.addResource(fileResource); assertNull("my var is not final", conf2.get("my.var")); } public static void assertEq(Object a, Object b) { System.out.println("assertEq: " + a + ", " + b); assertEquals(a, b); } static class Prop { String name; String val; String expectEval; } final String UNSPEC = null; ArrayList<Prop> props = new ArrayList<Prop>(); void declareProperty(String name, String val, String expectEval) throws IOException { declareProperty(name, val, expectEval, false); } void declareProperty(String name, String val, String expectEval, boolean isFinal) throws IOException { appendProperty(name, val, isFinal); Prop p = new Prop(); p.name = name; p.val = val; p.expectEval = expectEval; props.add(p); } void appendProperty(String name, String val) throws IOException { 
appendProperty(name, val, false); } void appendProperty(String name, String val, boolean isFinal) throws IOException { out.write("<property>"); out.write("<name>"); out.write(name); out.write("</name>"); out.write("<value>"); out.write(val); out.write("</value>"); if (isFinal) { out.write("<final>true</final>"); } out.write("</property>\n"); } public void testOverlay() throws IOException{ out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); appendProperty("a","b"); appendProperty("b","c"); appendProperty("d","e"); appendProperty("e","f", true); endConfig(); out=new BufferedWriter(new FileWriter(CONFIG2)); startConfig(); appendProperty("a","b"); appendProperty("b","d"); appendProperty("e","e"); endConfig(); Path fileResource = new Path(CONFIG); conf.addResource(fileResource); //set dynamically something conf.set("c","d"); conf.set("a","d"); Configuration clone=new Configuration(conf); clone.addResource(new Path(CONFIG2)); assertEquals(clone.get("a"), "d"); assertEquals(clone.get("b"), "d"); assertEquals(clone.get("c"), "d"); assertEquals(clone.get("d"), "e"); assertEquals(clone.get("e"), "f"); } public void testCommentsInValue() throws IOException { out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); appendProperty("my.comment", "this <!--comment here--> contains a comment"); endConfig(); Path fileResource = new Path(CONFIG); conf.addResource(fileResource); //two spaces one after "this", one before "contains" assertEquals("this contains a comment", conf.get("my.comment")); } public void testTrim() throws IOException { out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); String[] whitespaces = {"", " ", "\n", "\t"}; String[] name = new String[100]; for(int i = 0; i < name.length; i++) { name[i] = "foo" + i; StringBuilder prefix = new StringBuilder(); StringBuilder postfix = new StringBuilder(); for(int j = 0; j < 3; j++) { prefix.append(whitespaces[RAN.nextInt(whitespaces.length)]); 
postfix.append(whitespaces[RAN.nextInt(whitespaces.length)]); } appendProperty(prefix + name[i] + postfix, name[i] + ".value"); } endConfig(); conf.addResource(new Path(CONFIG)); for(String n : name) { assertEquals(n + ".value", conf.get(n)); } } public void testToString() throws IOException { out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); endConfig(); Path fileResource = new Path(CONFIG); conf.addResource(fileResource); String expectedOutput = "Configuration: core-default.xml, core-site.xml, " + fileResource.toString(); assertEquals(expectedOutput, conf.toString()); } public void testIncludes() throws Exception { tearDown(); System.out.println("XXX testIncludes"); out=new BufferedWriter(new FileWriter(CONFIG2)); startConfig(); appendProperty("a","b"); appendProperty("c","d"); endConfig(); out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); addInclude(CONFIG2); appendProperty("e","f"); appendProperty("g","h"); endConfig(); // verify that the includes file contains all properties Path fileResource = new Path(CONFIG); conf.addResource(fileResource); assertEquals(conf.get("a"), "b"); assertEquals(conf.get("c"), "d"); assertEquals(conf.get("e"), "f"); assertEquals(conf.get("g"), "h"); tearDown(); } BufferedWriter out; public void testIntegerRanges() { Configuration conf = new Configuration(); conf.set("first", "-100"); conf.set("second", "4-6,9-10,27"); conf.set("third", "34-"); Configuration.IntegerRanges range = conf.getRange("first", null); System.out.println("first = " + range); assertEquals(true, range.isIncluded(0)); assertEquals(true, range.isIncluded(1)); assertEquals(true, range.isIncluded(100)); assertEquals(false, range.isIncluded(101)); range = conf.getRange("second", null); System.out.println("second = " + range); assertEquals(false, range.isIncluded(3)); assertEquals(true, range.isIncluded(4)); assertEquals(true, range.isIncluded(6)); assertEquals(false, range.isIncluded(7)); assertEquals(false, range.isIncluded(8)); 
assertEquals(true, range.isIncluded(9)); assertEquals(true, range.isIncluded(10)); assertEquals(false, range.isIncluded(11)); assertEquals(false, range.isIncluded(26)); assertEquals(true, range.isIncluded(27)); assertEquals(false, range.isIncluded(28)); range = conf.getRange("third", null); System.out.println("third = " + range); assertEquals(false, range.isIncluded(33)); assertEquals(true, range.isIncluded(34)); assertEquals(true, range.isIncluded(100000000)); } public void testHexValues() throws IOException{ out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); appendProperty("test.hex1", "0x10"); appendProperty("test.hex2", "0xF"); appendProperty("test.hex3", "-0x10"); endConfig(); Path fileResource = new Path(CONFIG); conf.addResource(fileResource); assertEquals(16, conf.getInt("test.hex1", 0)); assertEquals(16, conf.getLong("test.hex1", 0)); assertEquals(15, conf.getInt("test.hex2", 0)); assertEquals(15, conf.getLong("test.hex2", 0)); assertEquals(-16, conf.getInt("test.hex3", 0)); assertEquals(-16, conf.getLong("test.hex3", 0)); } public void testIntegerValues() throws IOException{ out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); appendProperty("test.int1", "20"); appendProperty("test.int2", "020"); appendProperty("test.int3", "-20"); endConfig(); Path fileResource = new Path(CONFIG); conf.addResource(fileResource); assertEquals(20, conf.getInt("test.int1", 0)); assertEquals(20, conf.getLong("test.int1", 0)); assertEquals(20, conf.getInt("test.int2", 0)); assertEquals(20, conf.getLong("test.int2", 0)); assertEquals(-20, conf.getInt("test.int3", 0)); assertEquals(-20, conf.getLong("test.int3", 0)); } + enum Dingo { FOO, BAR }; + enum Yak { RAB, FOO }; + public void testEnum() throws IOException { + Configuration conf = new Configuration(); + conf.setEnum("test.enum", Dingo.FOO); + assertSame(Dingo.FOO, conf.getEnum("test.enum", Dingo.BAR)); + assertSame(Yak.FOO, conf.getEnum("test.enum", Yak.RAB)); + boolean fail = false; + try { + 
conf.setEnum("test.enum", Dingo.BAR); + Yak y = conf.getEnum("test.enum", Yak.FOO); + } catch (IllegalArgumentException e) { + fail = true; + } + assertTrue(fail); + } + public void testReload() throws IOException { out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); appendProperty("test.key1", "final-value1", true); appendProperty("test.key2", "value2"); endConfig(); Path fileResource = new Path(CONFIG); conf.addResource(fileResource); out=new BufferedWriter(new FileWriter(CONFIG2)); startConfig(); appendProperty("test.key1", "value1"); appendProperty("test.key3", "value3"); endConfig(); Path fileResource1 = new Path(CONFIG2); conf.addResource(fileResource1); // add a few values via set. conf.set("test.key3", "value4"); conf.set("test.key4", "value5"); assertEquals("final-value1", conf.get("test.key1")); assertEquals("value2", conf.get("test.key2")); assertEquals("value4", conf.get("test.key3")); assertEquals("value5", conf.get("test.key4")); // change values in the test file... out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); appendProperty("test.key1", "final-value1"); appendProperty("test.key3", "final-value3", true); endConfig(); conf.reloadConfiguration(); assertEquals("value1", conf.get("test.key1")); // overlayed property overrides. 
assertEquals("value4", conf.get("test.key3")); assertEquals(null, conf.get("test.key2")); assertEquals("value5", conf.get("test.key4")); } public void testSize() throws IOException { Configuration conf = new Configuration(false); conf.set("a", "A"); conf.set("b", "B"); assertEquals(2, conf.size()); } public void testClear() throws IOException { Configuration conf = new Configuration(false); conf.set("a", "A"); conf.set("b", "B"); conf.clear(); assertEquals(0, conf.size()); assertFalse(conf.iterator().hasNext()); } public static void main(String[] argv) throws Exception { junit.textui.TestRunner.main(new String[]{ TestConfiguration.class.getName() }); } static class JsonConfiguration { JsonProperty[] properties; public JsonProperty[] getProperties() { return properties; } public void setProperties(JsonProperty[] properties) { this.properties = properties; } } static class JsonProperty { String key; public String getKey() { return key; } public void setKey(String key) { this.key = key; } public String getValue() { return value; } public void setValue(String value) { this.value = value; } public boolean getIsFinal() { return isFinal; } public void setIsFinal(boolean isFinal) { this.isFinal = isFinal; } public String getResource() { return resource; } public void setResource(String resource) { this.resource = resource; } String value; boolean isFinal; String resource; } public void testDumpConfiguration () throws IOException { StringWriter outWriter = new StringWriter(); Configuration.dumpConfiguration(conf, outWriter); String jsonStr = outWriter.toString(); ObjectMapper mapper = new ObjectMapper(); JsonConfiguration jconf = mapper.readValue(jsonStr, JsonConfiguration.class); int defaultLength = jconf.getProperties().length; // add 3 keys to the existing configuration properties out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); appendProperty("test.key1", "value1"); appendProperty("test.key2", "value2",true); appendProperty("test.key3", "value3"); 
endConfig(); Path fileResource = new Path(CONFIG); conf.addResource(fileResource); out.close(); outWriter = new StringWriter(); Configuration.dumpConfiguration(conf, outWriter); jsonStr = outWriter.toString(); mapper = new ObjectMapper(); jconf = mapper.readValue(jsonStr, JsonConfiguration.class); int length = jconf.getProperties().length; // check for consistency in the number of properties parsed in Json format. assertEquals(length, defaultLength+3); //change few keys in another resource file out=new BufferedWriter(new FileWriter(CONFIG2)); startConfig(); appendProperty("test.key1", "newValue1"); appendProperty("test.key2", "newValue2"); endConfig(); Path fileResource1 = new Path(CONFIG2); conf.addResource(fileResource1); out.close(); outWriter = new StringWriter(); Configuration.dumpConfiguration(conf, outWriter); jsonStr = outWriter.toString(); mapper = new ObjectMapper(); jconf = mapper.readValue(jsonStr, JsonConfiguration.class); // put the keys and their corresponding attributes into a hashmap for their // efficient retrieval HashMap<String,JsonProperty> confDump = new HashMap<String,JsonProperty>(); for(JsonProperty prop : jconf.getProperties()) { confDump.put(prop.getKey(), prop); } // check if the value and resource of test.key1 is changed assertEquals("newValue1", confDump.get("test.key1").getValue()); assertEquals(false, confDump.get("test.key1").getIsFinal()); assertEquals(fileResource1.toString(), confDump.get("test.key1").getResource()); // check if final parameter test.key2 is not changed, since it is first // loaded as final parameter assertEquals("value2", confDump.get("test.key2").getValue()); assertEquals(true, confDump.get("test.key2").getIsFinal()); assertEquals(fileResource.toString(), confDump.get("test.key2").getResource()); // check for other keys which are not modified later assertEquals("value3", confDump.get("test.key3").getValue()); assertEquals(false, confDump.get("test.key3").getIsFinal()); assertEquals(fileResource.toString(), 
confDump.get("test.key3").getResource()); // check for resource to be "Unknown" for keys which are loaded using 'set' // and expansion of properties conf.set("test.key4", "value4"); conf.set("test.key5", "value5"); conf.set("test.key6", "${test.key5}"); outWriter = new StringWriter(); Configuration.dumpConfiguration(conf, outWriter); jsonStr = outWriter.toString(); mapper = new ObjectMapper(); jconf = mapper.readValue(jsonStr, JsonConfiguration.class); confDump = new HashMap<String, JsonProperty>(); for(JsonProperty prop : jconf.getProperties()) { confDump.put(prop.getKey(), prop); } assertEquals("value5",confDump.get("test.key6").getValue()); assertEquals("Unknown", confDump.get("test.key4").getResource()); outWriter.close(); } public void testDumpConfiguratioWithoutDefaults() throws IOException { // check for case when default resources are not loaded Configuration config = new Configuration(false); StringWriter outWriter = new StringWriter(); Configuration.dumpConfiguration(config, outWriter); String jsonStr = outWriter.toString(); ObjectMapper mapper = new ObjectMapper(); JsonConfiguration jconf = mapper.readValue(jsonStr, JsonConfiguration.class); //ensure that no properties are loaded. 
assertEquals(0, jconf.getProperties().length); // add 2 keys out=new BufferedWriter(new FileWriter(CONFIG)); startConfig(); appendProperty("test.key1", "value1"); appendProperty("test.key2", "value2",true); endConfig(); Path fileResource = new Path(CONFIG); config.addResource(fileResource); out.close(); outWriter = new StringWriter(); Configuration.dumpConfiguration(config, outWriter); jsonStr = outWriter.toString(); mapper = new ObjectMapper(); jconf = mapper.readValue(jsonStr, JsonConfiguration.class); HashMap<String, JsonProperty>confDump = new HashMap<String, JsonProperty>(); for (JsonProperty prop : jconf.getProperties()) { confDump.put(prop.getKey(), prop); } //ensure only 2 keys are loaded assertEquals(2,jconf.getProperties().length); //ensure the values are consistent assertEquals(confDump.get("test.key1").getValue(),"value1"); assertEquals(confDump.get("test.key2").getValue(),"value2"); //check the final tag assertEquals(false, confDump.get("test.key1").getIsFinal()); assertEquals(true, confDump.get("test.key2").getIsFinal()); //check the resource for each property for (JsonProperty prop : jconf.getProperties()) { assertEquals(fileResource.toString(),prop.getResource()); } } }
jaxlaw/hadoop-common
07c16664bbd027bb3e50eb0fce3390dd2d9d31f1
hadoop-6498 from http://issues.apache.org/jira/secure/attachment/12431372/hangClient-0.20.patch.
diff --git a/CHANGES.txt b/CHANGES.txt index 50e9a02..2ea8e6d 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,521 +1,524 @@ Hadoop Change Log Release 0.20.2 - Unreleased BUG FIXES MAPREDUCE-112. Add counters for reduce input, output records to the new API. (Jothi Padmanabhan via cdouglas) + HADOOP-6498. IPC client bug may cause rpc call hang. (Ruyue Ma and hairong + via hairong) + Release 0.20.1 - 2009-09-01 INCOMPATIBLE CHANGES HADOOP-5726. Remove pre-emption from capacity scheduler code base. (Rahul Kumar Singh via yhemanth) HADOOP-5881. Simplify memory monitoring and scheduling related configuration. (Vinod Kumar Vavilapalli via yhemanth) NEW FEATURES HADOOP-6080. Introduce -skipTrash option to rm and rmr. (Jakob Homan via shv) HADOOP-3315. Add a new, binary file foramt, TFile. (Hong Tang via cdouglas) IMPROVEMENTS HADOOP-5711. Change Namenode file close log to info. (szetszwo) HADOOP-5736. Update the capacity scheduler documentation for features like memory based scheduling, job initialization and removal of pre-emption. (Sreekanth Ramakrishnan via yhemanth) HADOOP-4674. Fix fs help messages for -test, -text, -tail, -stat and -touchz options. (Ravi Phulari via szetszwo) HADOOP-4372. Improves the way history filenames are obtained and manipulated. (Amar Kamat via ddas) HADOOP-5897. Add name-node metrics to capture java heap usage. (Suresh Srinivas via shv) HDFS-438. Improve help message for space quota command. (Raghu Angadi) MAPREDUCE-767. Remove the dependence on the CLI 2.0 snapshot. (Amar Kamat via ddas) OPTIMIZATIONS BUG FIXES HADOOP-5691. Makes org.apache.hadoop.mapreduce.Reducer concrete class instead of abstract. (Amareshwari Sriramadasu via sharad) HADOOP-5646. Fixes a problem in TestQueueCapacities. (Vinod Kumar Vavilapalli via ddas) HADOOP-5655. TestMRServerPorts fails on java.net.BindException. (Devaraj Das via hairong) HADOOP-5654. TestReplicationPolicy.<init> fails on java.net.BindException. (hairong) HADOOP-5688. 
Fix HftpFileSystem checksum path construction. (Tsz Wo (Nicholas) Sze via cdouglas) HADOOP-5213. Fix Null pointer exception caused when bzip2compression was used and user closed a output stream without writing any data. (Zheng Shao via dhruba) HADOOP-5718. Remove the check for the default queue in capacity scheduler. (Sreekanth Ramakrishnan via yhemanth) HADOOP-5719. Remove jobs that failed initialization from the waiting queue in the capacity scheduler. (Sreekanth Ramakrishnan via yhemanth) HADOOP-4744. Attaching another fix to the jetty port issue. The TaskTracker kills itself if it ever discovers that the port to which jetty is actually bound is invalid (-1). (ddas) HADOOP-5349. Fixes a problem in LocalDirAllocator to check for the return path value that is returned for the case where the file we want to write is of an unknown size. (Vinod Kumar Vavilapalli via ddas) HADOOP-5636. Prevents a job from going to RUNNING state after it has been KILLED (this used to happen when the SetupTask would come back with a success after the job has been killed). (Amar Kamat via ddas) HADOOP-5641. Fix a NullPointerException in capacity scheduler's memory based scheduling code when jobs get retired. (yhemanth) HADOOP-5828. Use absolute path for mapred.local.dir of JobTracker in MiniMRCluster. (yhemanth) HADOOP-4981. Fix capacity scheduler to schedule speculative tasks correctly in the presence of High RAM jobs. (Sreekanth Ramakrishnan via yhemanth) HADOOP-5210. Solves a problem in the progress report of the reduce task. (Ravi Gummadi via ddas) HADOOP-5850. Fixes a problem to do with not being able to jobs with 0 maps/reduces. (Vinod K V via ddas) HADOOP-5728. Fixed FSEditLog.printStatistics IndexOutOfBoundsException. (Wang Xu via johan) HADOOP-4626. Correct the API links in hdfs forrest doc so that they point to the same version of hadoop. (szetszwo) HADOOP-5883. Fixed tasktracker memory monitoring to account for momentary spurts in memory usage due to java's fork() model. 
(yhemanth) HADOOP-5539. Fixes a problem to do with not preserving intermediate output compression for merged data. (Jothi Padmanabhan and Billy Pearson via ddas) HADOOP-5932. Fixes a problem in capacity scheduler in computing available memory on a tasktracker. (Vinod Kumar Vavilapalli via yhemanth) HADOOP-5648. Fixes a build issue in not being able to generate gridmix.jar in hadoop binary tarball. (Giridharan Kesavan via gkesavan) HADOOP-5908. Fixes a problem to do with ArithmeticException in the JobTracker when there are jobs with 0 maps. (Amar Kamat via ddas) HADOOP-5924. Fixes a corner case problem to do with job recovery with empty history files. Also, after a JT restart, sends KillTaskAction to tasks that report back but the corresponding job hasn't been initialized yet. (Amar Kamat via ddas) HADOOP-5882. Fixes a reducer progress update problem for new mapreduce api. (Amareshwari Sriramadasu via sharad) HADOOP-5746. Fixes a corner case problem in Streaming, where if an exception happens in MROutputThread after the last call to the map/reduce method, the exception goes undetected. (Amar Kamat via ddas) HADOOP-5884. Fixes accounting in capacity scheduler so that high RAM jobs take more slots. (Vinod Kumar Vavilapalli via yhemanth) HADOOP-5937. Correct a safemode message in FSNamesystem. (Ravi Phulari via szetszwo) HADOOP-5869. Fix bug in assignment of setup / cleanup task that was causing TestQueueCapacities to fail. (Sreekanth Ramakrishnan via yhemanth) HADOOP-5921. Fixes a problem in the JobTracker where it sometimes never used to come up due to a system file creation on JobTracker's system-dir failing. This problem would sometimes show up only when the FS for the system-dir (usually HDFS) is started at nearly the same time as the JobTracker. (Amar Kamat via ddas) HADOOP-5920. Fixes a testcase failure for TestJobHistory. (Amar Kamat via ddas) HDFS-26. Better error message to users when commands fail because of lack of quota. 
Allow quota to be set even if the limit is lower than current consumption. (Boris Shkolnik via rangadi) MAPREDUCE-2. Fixes a bug in KeyFieldBasedPartitioner in handling empty keys. (Amar Kamat via sharad) MAPREDUCE-130. Delete the jobconf copy from the log directory of the JobTracker when the job is retired. (Amar Kamat via sharad) MAPREDUCE-657. Fix hardcoded filesystem problem in CompletedJobStatusStore. (Amar Kamat via sharad) MAPREDUCE-179. Update progress in new RecordReaders. (cdouglas) MAPREDUCE-124. Fix a bug in failure handling of abort task of OutputCommiter. (Amareshwari Sriramadasu via sharad) HADOOP-6139. Fix the FsShell help messages for rm and rmr. (Jakob Homan via szetszwo) HADOOP-6141. Fix a few bugs in 0.20 test-patch.sh. (Hong Tang via szetszwo) HADOOP-6145. Fix FsShell rm/rmr error messages when there is a FNFE. (Jakob Homan via szetszwo) MAPREDUCE-565. Fix partitioner to work with new API. (Owen O'Malley via cdouglas) MAPREDUCE-465. Fix a bug in MultithreadedMapRunner. (Amareshwari Sriramadasu via sharad) MAPREDUCE-18. Puts some checks to detect cases where jetty serves up incorrect output during shuffle. (Ravi Gummadi via ddas) MAPREDUCE-735. Fixes a problem in the KeyFieldHelper to do with the end index for some inputs (Amar Kamat via ddas) HADOOP-6150. Users should be able to instantiate comparator using TFile API. (Hong Tang via rangadi) MAPREDUCE-383. Fix a bug in Pipes combiner due to bytes count not getting reset after the spill. (Christian Kunz via sharad) MAPREDUCE-40. Keep memory management backwards compatible for job configuration parameters and limits. (Rahul Kumar Singh via yhemanth) MAPREDUCE-796. Fixes a ClassCastException in an exception log in MultiThreadedMapRunner. (Amar Kamat via ddas) MAPREDUCE-838. Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. (Amareshwari Sriramadasu via ddas) MAPREDUCE-805. 
Fixes some deadlocks in the JobTracker due to the fact the JobTracker lock hierarchy wasn't maintained in some JobInProgress method calls. (Amar Kamat via ddas) HDFS-167. Fix a bug in DFSClient that caused infinite retries on write. (Bill Zeller via szetszwo) HDFS-527. Remove unnecessary DFSClient constructors. (szetszwo) MAPREDUCE-832. Reduce number of warning messages printed when deprecated memory variables are used. (Rahul Kumar Singh via yhemanth) MAPREDUCE-745. Fixes a testcase problem to do with generation of JobTracker IDs. (Amar Kamat via ddas) MAPREDUCE-834. Enables memory management on tasktrackers when old memory management parameters are used in configuration. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-818. Fixes Counters#getGroup API. (Amareshwari Sriramadasu via sharad) MAPREDUCE-807. Handles the AccessControlException during the deletion of mapred.system.dir in the JobTracker. The JobTracker will bail out if it encounters such an exception. (Amar Kamat via ddas) HADOOP-6213. Remove commons dependency on commons-cli2. (Amar Kamat via sharad) MAPREDUCE-430. Fix a bug related to task getting stuck in case of OOM error. (Amar Kamat via ddas) HADOOP-6215. fix GenericOptionParser to deal with -D with '=' in the value. (Amar Kamat via sharad) MAPREDUCE-421. Fix Pipes to use returned system exit code. (Christian Kunz via omalley) HDFS-525. The SimpleDateFormat object in ListPathsServlet is not thread safe. (Suresh Srinivas and cdouglas) MAPREDUCE-911. Fix a bug in TestTaskFail related to speculative execution. (Amareshwari Sriramadasu via sharad) MAPREDUCE-687. Fix an assertion in TestMiniMRMapRedDebugScript. (Amareshwari Sriramadasu via sharad) MAPREDUCE-924. Fixes the TestPipes testcase to use Tool. (Amareshwari Sriramadasu via sharad) Release 0.20.0 - 2009-04-15 INCOMPATIBLE CHANGES HADOOP-4210. Fix findbugs warnings for equals implementations of mapred ID classes. Removed public, static ID::read and ID::forName; made ID an abstract class. 
(Suresh Srinivas via cdouglas) HADOOP-4253. Fix various warnings generated by findbugs. Following deprecated methods in RawLocalFileSystem are removed: public String getName() public void lock(Path p, boolean shared) public void release(Path p) (Suresh Srinivas via johan) HADOOP-4618. Move http server from FSNamesystem into NameNode. FSNamesystem.getNameNodeInfoPort() is removed. FSNamesystem.getDFSNameNodeMachine() and FSNamesystem.getDFSNameNodePort() replaced by FSNamesystem.getDFSNameNodeAddress(). NameNode(bindAddress, conf) is removed. (shv) HADOOP-4567. GetFileBlockLocations returns the NetworkTopology information of the machines where the blocks reside. (dhruba) HADOOP-4435. The JobTracker WebUI displays the amount of heap memory in use. (dhruba) HADOOP-4628. Move Hive into a standalone subproject. (omalley) HADOOP-4188. Removes task's dependency on concrete filesystems. (Sharad Agarwal via ddas) HADOOP-1650. Upgrade to Jetty 6. (cdouglas) HADOOP-3986. Remove static Configuration from JobClient. (Amareshwari Sriramadasu via cdouglas) JobClient::setCommandLineConfig is removed JobClient::getCommandLineConfig is removed JobShell, TestJobShell classes are removed HADOOP-4422. S3 file systems should not create bucket. (David Phillips via tomwhite) HADOOP-4035. Support memory based scheduling in capacity scheduler. (Vinod Kumar Vavilapalli via yhemanth) HADOOP-3497. Fix bug in overly restrictive file globbing with a PathFilter. (tomwhite) HADOOP-4445. Replace running task counts with running task percentage in capacity scheduler UI. (Sreekanth Ramakrishnan via yhemanth) HADOOP-4631. Splits the configuration into three parts - one for core, one for mapred and the last one for HDFS. (Sharad Agarwal via cdouglas) HADOOP-3344. Fix libhdfs build to use autoconf and build the same architecture (32 vs 64 bit) of the JVM running Ant. The libraries for pipes, utils, and libhdfs are now all in c++/<os_osarch_jvmdatamodel>/lib. (Giridharan Kesavan via nigel) HADOOP-4874. 
Remove LZO codec because of licensing issues. (omalley) HADOOP-4970. The full path name of a file is preserved inside Trash. (Prasad Chakka via dhruba) HADOOP-4103. NameNode keeps a count of missing blocks. It warns on WebUI if there are such blocks. '-report' and '-metaSave' have extra info to track such blocks. (Raghu Angadi) HADOOP-4783. Change permissions on history files on the jobtracker to be only group readable instead of world readable. (Amareshwari Sriramadasu via yhemanth) HADOOP-5531. Removed Chukwa from Hadoop 0.20.0. (nigel) NEW FEATURES HADOOP-4575. Add a proxy service for relaying HsftpFileSystem requests. Includes client authentication via user certificates and config-based access control. (Kan Zhang via cdouglas) HADOOP-4661. Add DistCh, a new tool for distributed ch{mod,own,grp}. (szetszwo) HADOOP-4709. Add several new features and bug fixes to Chukwa. Added Hadoop Infrastructure Care Center (UI for visualize data collected by Chukwa) Added FileAdaptor for streaming small file in one chunk Added compression to archive and demux output Added unit tests and validation for agent, collector, and demux map reduce job Added database loader for loading demux output (sequence file) to jdbc connected database Added algorithm to distribute collector load more evenly (Jerome Boulon, Eric Yang, Andy Konwinski, Ariel Rabkin via cdouglas) HADOOP-4179. Add Vaidya tool to analyze map/reduce job logs for performanc problems. (Suhas Gogate via omalley) HADOOP-4029. Add NameNode storage information to the dfshealth page and move DataNode information to a separated page. (Boris Shkolnik via szetszwo) HADOOP-4348. Add service-level authorization for Hadoop. (acmurthy) HADOOP-4826. Introduce admin command saveNamespace. (shv) HADOOP-3063 BloomMapFile - fail-fast version of MapFile for sparsely populated key space (Andrzej Bialecki via stack) HADOOP-1230. Add new map/reduce API and deprecate the old one. Generally, the old code should work without problem. 
The new api is in org.apache.hadoop.mapreduce and the old classes in org.apache.hadoop.mapred are deprecated. Differences in the new API: 1. All of the methods take Context objects that allow us to add new methods without breaking compatability. 2. Mapper and Reducer now have a "run" method that is called once and contains the control loop for the task, which lets applications replace it. 3. Mapper and Reducer by default are Identity Mapper and Reducer. 4. The FileOutputFormats use part-r-00000 for the output of reduce 0 and part-m-00000 for the output of map 0. 5. The reduce grouping comparator now uses the raw compare instead of object compare. 6. The number of maps in FileInputFormat is controlled by min and max split size rather than min size and the desired number of maps. (omalley) HADOOP-3305. Use Ivy to manage dependencies. (Giridharan Kesavan and Steve Loughran via cutting) IMPROVEMENTS HADOOP-4565. Added CombineFileInputFormat to use data locality information to create splits. (dhruba via zshao) HADOOP-4749. Added a new counter REDUCE_INPUT_BYTES. (Yongqiang He via zshao) HADOOP-4234. Fix KFS "glue" layer to allow applications to interface with multiple KFS metaservers. (Sriram Rao via lohit) HADOOP-4245. Update to latest version of KFS "glue" library jar. (Sriram Rao via lohit) HADOOP-4244. Change test-patch.sh to check Eclipse classpath no matter it is run by Hudson or not. (szetszwo) HADOOP-3180. Add name of missing class to WritableName.getClass IOException. (Pete Wyckoff via omalley) HADOOP-4178. Make the capacity scheduler's default values configurable. (Sreekanth Ramakrishnan via omalley) HADOOP-4262. Generate better error message when client exception has null message. (stevel via omalley) HADOOP-4226. Refactor and document LineReader to make it more readily understandable. (Yuri Pradkin via cdouglas) HADOOP-4238. When listing jobs, if scheduling information isn't available print NA instead of empty output. 
(Sreekanth Ramakrishnan via johan) HADOOP-4284. Support filters that apply to all requests, or global filters, to HttpServer. (Kan Zhang via cdouglas) HADOOP-4276. Improve the hashing functions and deserialization of the mapred ID classes. (omalley) HADOOP-4485. Add a compile-native ant task, as a shorthand. (enis) HADOOP-4454. Allow # comments in slaves file. (Rama Ramasamy via omalley) HADOOP-3461. Remove hdfs.StringBytesWritable. (szetszwo) HADOOP-4437. Use Halton sequence instead of java.util.Random in PiEstimator. (szetszwo) HADOOP-4572. Change INode and its sub-classes to package private. (szetszwo) HADOOP-4187. Does a runtime lookup for JobConf/JobConfigurable, and if found, invokes the appropriate configure method. (Sharad Agarwal via ddas) HADOOP-4453. Improve ssl configuration and handling in HsftpFileSystem, particularly when used with DistCp. (Kan Zhang via cdouglas) HADOOP-4583. Several code optimizations in HDFS. (Suresh Srinivas via szetszwo) HADOOP-3923. Remove org.apache.hadoop.mapred.StatusHttpServer. (szetszwo) HADOOP-4622. Explicitly specify interpretor for non-native pipes binaries. (Fredrik Hedberg via johan) HADOOP-4505. Add a unit test to test faulty setup task and cleanup task killing the job. (Amareshwari Sriramadasu via johan) HADOOP-4608. Don't print a stack trace when the example driver gets an unknown program to run. (Edward Yoon via omalley) HADOOP-4645. Package HdfsProxy contrib project without the extra level of directories. (Kan Zhang via omalley) HADOOP-4126. Allow access to HDFS web UI on EC2 (tomwhite via omalley) HADOOP-4612. Removes RunJar's dependency on JobClient. (Sharad Agarwal via ddas) HADOOP-4185. Adds setVerifyChecksum() method to FileSystem. (Sharad Agarwal via ddas) HADOOP-4523. Prevent too many tasks scheduled on a node from bringing it down by monitoring for cumulative memory usage across tasks. (Vinod Kumar Vavilapalli via yhemanth) HADOOP-4640. Adds an input format that can split lzo compressed text files. 
(johan) HADOOP-4666. Launch reduces only after a few maps have run in the Fair Scheduler. (Matei Zaharia via johan) HADOOP-4339. Remove redundant calls from FileSystem/FsShell when generating/processing ContentSummary. (David Phillips via cdouglas) HADOOP-2774. Add counters tracking records spilled to disk in MapTask and ReduceTask. (Ravi Gummadi via cdouglas) HADOOP-4513. Initialize jobs asynchronously in the capacity scheduler. (Sreekanth Ramakrishnan via yhemanth) HADOOP-4649. Improve abstraction for spill indices. (cdouglas) HADOOP-3770. Add gridmix2, an iteration on the gridmix benchmark. (Runping Qi via cdouglas) HADOOP-4708. Add support for dfsadmin commands in TestCLI. (Boris Shkolnik via cdouglas) HADOOP-4758. Add a splitter for metrics contexts to support more than one type of collector. (cdouglas) HADOOP-4722. Add tests for dfsadmin quota error messages. (Boris Shkolnik via cdouglas) HADOOP-4690. fuse-dfs - create source file/function + utils + config + main source files. (pete wyckoff via mahadev) HADOOP-3750. Fix and enforce module dependencies. (Sharad Agarwal via tomwhite) HADOOP-4747. Speed up FsShell::ls by removing redundant calls to the filesystem. (David Phillips via cdouglas) diff --git a/src/core/org/apache/hadoop/ipc/Client.java b/src/core/org/apache/hadoop/ipc/Client.java index 5d724cc..fdfbc80 100644 --- a/src/core/org/apache/hadoop/ipc/Client.java +++ b/src/core/org/apache/hadoop/ipc/Client.java @@ -1,910 +1,911 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import java.net.Socket; import java.net.InetSocketAddress; import java.net.SocketTimeoutException; import java.net.UnknownHostException; import java.net.ConnectException; import java.io.IOException; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.FilterInputStream; import java.io.InputStream; import java.util.Hashtable; import java.util.Iterator; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import javax.net.SocketFactory; import org.apache.commons.logging.*; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ReflectionUtils; /** A client for an IPC service. IPC calls take a single {@link Writable} as a * parameter, and return a {@link Writable} as their value. A service runs on * a port and is defined by a parameter class and a value class. * * @see Server */ public class Client { public static final Log LOG = LogFactory.getLog(Client.class); private Hashtable<ConnectionId, Connection> connections = new Hashtable<ConnectionId, Connection>(); private Class<? 
extends Writable> valueClass; // class of call values private int counter; // counter for call ids private AtomicBoolean running = new AtomicBoolean(true); // if client runs final private Configuration conf; final private int maxIdleTime; //connections will be culled if it was idle for //maxIdleTime msecs final private int maxRetries; //the max. no. of retries for socket connections private boolean tcpNoDelay; // if T then disable Nagle's Algorithm private int pingInterval; // how often sends ping to the server in msecs private SocketFactory socketFactory; // how to create sockets private int refCount = 1; final private static String PING_INTERVAL_NAME = "ipc.ping.interval"; final static int DEFAULT_PING_INTERVAL = 60000; // 1 min final static int PING_CALL_ID = -1; /** * set the ping interval value in configuration * * @param conf Configuration * @param pingInterval the ping interval */ final public static void setPingInterval(Configuration conf, int pingInterval) { conf.setInt(PING_INTERVAL_NAME, pingInterval); } /** * Get the ping interval from configuration; * If not set in the configuration, return the default value. * * @param conf Configuration * @return the ping interval */ final static int getPingInterval(Configuration conf) { return conf.getInt(PING_INTERVAL_NAME, DEFAULT_PING_INTERVAL); } /** * Increment this client's reference count * */ synchronized void incCount() { refCount++; } /** * Decrement this client's reference count * */ synchronized void decCount() { refCount--; } /** * Return if this client has no reference * * @return true if this client has no reference; false otherwise */ synchronized boolean isZeroReference() { return refCount==0; } /** A call waiting for a value. 
*/ private class Call { int id; // call id Writable param; // parameter Writable value; // value, null if error IOException error; // exception, null if value boolean done; // true when call is done protected Call(Writable param) { this.param = param; synchronized (Client.this) { this.id = counter++; } } /** Indicate when the call is complete and the * value or error are available. Notifies by default. */ protected synchronized void callComplete() { this.done = true; notify(); // notify caller } /** Set the exception when there is an error. * Notify the caller the call is done. * * @param error exception thrown by the call; either local or remote */ public synchronized void setException(IOException error) { this.error = error; callComplete(); } /** Set the return value when there is no error. * Notify the caller the call is done. * * @param value return value of the call. */ public synchronized void setValue(Writable value) { this.value = value; callComplete(); } } /** Thread that reads responses and notifies callers. Each connection owns a * socket connected to a remote address. Calls are multiplexed through this * socket: responses may be delivered out of order. 
*/ private class Connection extends Thread { private InetSocketAddress server; // server ip:port private ConnectionHeader header; // connection header private ConnectionId remoteId; // connection id private Socket socket = null; // connected socket private DataInputStream in; private DataOutputStream out; // currently active calls private Hashtable<Integer, Call> calls = new Hashtable<Integer, Call>(); private AtomicLong lastActivity = new AtomicLong();// last I/O activity time private AtomicBoolean shouldCloseConnection = new AtomicBoolean(); // indicate if the connection is closed private IOException closeException; // close reason public Connection(ConnectionId remoteId) throws IOException { this.remoteId = remoteId; this.server = remoteId.getAddress(); if (server.isUnresolved()) { throw new UnknownHostException("unknown host: " + remoteId.getAddress().getHostName()); } UserGroupInformation ticket = remoteId.getTicket(); Class<?> protocol = remoteId.getProtocol(); header = new ConnectionHeader(protocol == null ? null : protocol.getName(), ticket); this.setName("IPC Client (" + socketFactory.hashCode() +") connection to " + remoteId.getAddress().toString() + " from " + ((ticket==null)?"an unknown user":ticket.getUserName())); this.setDaemon(true); } /** Update lastActivity with the current time. */ private void touch() { lastActivity.set(System.currentTimeMillis()); } /** * Add a call to this connection's call queue and notify * a listener; synchronized. * Returns false if called during shutdown. * @param call to add * @return true if the call was added. */ private synchronized boolean addCall(Call call) { if (shouldCloseConnection.get()) return false; calls.put(call.id, call); notify(); return true; } /** This class sends a ping to the remote side when timeout on * reading. If no failure is detected, it retries until at least * a byte is read. 
*/ private class PingInputStream extends FilterInputStream { /* constructor */ protected PingInputStream(InputStream in) { super(in); } /* Process timeout exception * if the connection is not going to be closed, send a ping. * otherwise, throw the timeout exception. */ private void handleTimeout(SocketTimeoutException e) throws IOException { if (shouldCloseConnection.get() || !running.get()) { throw e; } else { sendPing(); } } /** Read a byte from the stream. * Send a ping if timeout on read. Retries if no failure is detected * until a byte is read. * @throws IOException for any IO problem other than socket timeout */ public int read() throws IOException { do { try { return super.read(); } catch (SocketTimeoutException e) { handleTimeout(e); } } while (true); } /** Read bytes into a buffer starting from offset <code>off</code> * Send a ping if timeout on read. Retries if no failure is detected * until a byte is read. * * @return the total number of bytes read; -1 if the connection is closed. */ public int read(byte[] buf, int off, int len) throws IOException { do { try { return super.read(buf, off, len); } catch (SocketTimeoutException e) { handleTimeout(e); } } while (true); } } /** Connect to the server and set up the I/O streams. It then sends * a header to the server and starts * the connection thread that waits for responses. */ private synchronized void setupIOstreams() { if (socket != null || shouldCloseConnection.get()) { return; } short ioFailures = 0; short timeoutFailures = 0; try { if (LOG.isDebugEnabled()) { LOG.debug("Connecting to "+server); } while (true) { try { this.socket = socketFactory.createSocket(); this.socket.setTcpNoDelay(tcpNoDelay); // connection time out is 20s NetUtils.connect(this.socket, remoteId.getAddress(), 20000); this.socket.setSoTimeout(pingInterval); break; } catch (SocketTimeoutException toe) { /* The max number of retries is 45, * which amounts to 20s*45 = 15 minutes retries. 
*/ handleConnectionFailure(timeoutFailures++, 45, toe); } catch (IOException ie) { handleConnectionFailure(ioFailures++, maxRetries, ie); } } this.in = new DataInputStream(new BufferedInputStream (new PingInputStream(NetUtils.getInputStream(socket)))); this.out = new DataOutputStream (new BufferedOutputStream(NetUtils.getOutputStream(socket))); writeHeader(); // update last activity time touch(); // start the receiver thread after the socket connection has been set up start(); } catch (IOException e) { markClosed(e); close(); } } /* Handle connection failures * * If the current number of retries is equal to the max number of retries, * stop retrying and throw the exception; Otherwise backoff 1 second and * try connecting again. * * This Method is only called from inside setupIOstreams(), which is * synchronized. Hence the sleep is synchronized; the locks will be retained. * * @param curRetries current number of retries * @param maxRetries max number of retries allowed * @param ioe failure reason * @throws IOException if max number of retries is reached */ private void handleConnectionFailure( int curRetries, int maxRetries, IOException ioe) throws IOException { // close the current connection try { socket.close(); } catch (IOException e) { LOG.warn("Not able to close a socket", e); } // set socket to null so that the next call to setupIOstreams // can start the process of connect all over again. socket = null; // throw the exception if the maximum number of retries is reached if (curRetries >= maxRetries) { throw ioe; } // otherwise back off and retry try { Thread.sleep(1000); } catch (InterruptedException ignored) {} LOG.info("Retrying connect to server: " + server + ". Already tried " + curRetries + " time(s)."); } /* Write the header for each connection * Out is not synchronized because only the first thread does this. 
*/ private void writeHeader() throws IOException { // Write out the header and version out.write(Server.HEADER.array()); out.write(Server.CURRENT_VERSION); // Write out the ConnectionHeader DataOutputBuffer buf = new DataOutputBuffer(); header.write(buf); // Write out the payload length int bufLen = buf.getLength(); out.writeInt(bufLen); out.write(buf.getData(), 0, bufLen); } /* wait till someone signals us to start reading RPC response or * it is idle too long, it is marked as to be closed, * or the client is marked as not running. * * Return true if it is time to read a response; false otherwise. */ private synchronized boolean waitForWork() { if (calls.isEmpty() && !shouldCloseConnection.get() && running.get()) { long timeout = maxIdleTime- (System.currentTimeMillis()-lastActivity.get()); if (timeout>0) { try { wait(timeout); } catch (InterruptedException e) {} } } if (!calls.isEmpty() && !shouldCloseConnection.get() && running.get()) { return true; } else if (shouldCloseConnection.get()) { return false; } else if (calls.isEmpty()) { // idle connection closed or stopped markClosed(null); return false; } else { // get stopped but there are still pending requests markClosed((IOException)new IOException().initCause( new InterruptedException())); return false; } } public InetSocketAddress getRemoteAddress() { return server; } /* Send a ping to the server if the time elapsed * since last I/O activity is equal to or greater than the ping interval */ private synchronized void sendPing() throws IOException { long curTime = System.currentTimeMillis(); if ( curTime - lastActivity.get() >= pingInterval) { lastActivity.set(curTime); synchronized (out) { out.writeInt(PING_CALL_ID); out.flush(); } } } public void run() { if (LOG.isDebugEnabled()) LOG.debug(getName() + ": starting, having connections " + connections.size()); while (waitForWork()) {//wait here for work - read or close connection receiveResponse(); } close(); if (LOG.isDebugEnabled()) LOG.debug(getName() + ": 
stopped, remaining connections " + connections.size()); } /** Initiates a call by sending the parameter to the remote server. * Note: this is not called from the Connection thread, but by other * threads. */ public void sendParam(Call call) { if (shouldCloseConnection.get()) { return; } DataOutputBuffer d=null; try { synchronized (this.out) { if (LOG.isDebugEnabled()) LOG.debug(getName() + " sending #" + call.id); //for serializing the //data to be written d = new DataOutputBuffer(); d.writeInt(call.id); call.param.write(d); byte[] data = d.getData(); int dataLength = d.getLength(); out.writeInt(dataLength); //first put the data length out.write(data, 0, dataLength);//write the data out.flush(); } } catch(IOException e) { markClosed(e); } finally { //the buffer is just an in-memory buffer, but it is still polite to // close early IOUtils.closeStream(d); } } /* Receive a response. * Because only one receiver, so no synchronization on in. */ private void receiveResponse() { if (shouldCloseConnection.get()) { return; } touch(); try { int id = in.readInt(); // try to read an id if (LOG.isDebugEnabled()) LOG.debug(getName() + " got value #" + id); - Call call = calls.remove(id); + Call call = calls.get(id); int state = in.readInt(); // read call status if (state == Status.SUCCESS.state) { Writable value = ReflectionUtils.newInstance(valueClass, conf); value.readFields(in); // read value call.setValue(value); + calls.remove(id); } else if (state == Status.ERROR.state) { call.setException(new RemoteException(WritableUtils.readString(in), WritableUtils.readString(in))); } else if (state == Status.FATAL.state) { // Close the connection markClosed(new RemoteException(WritableUtils.readString(in), WritableUtils.readString(in))); } } catch (IOException e) { markClosed(e); } } private synchronized void markClosed(IOException e) { if (shouldCloseConnection.compareAndSet(false, true)) { closeException = e; notifyAll(); } } /** Close the connection. 
*/ private synchronized void close() { if (!shouldCloseConnection.get()) { LOG.error("The connection is not in the closed state"); return; } // release the resources // first thing to do;take the connection out of the connection list synchronized (connections) { if (connections.get(remoteId) == this) { connections.remove(remoteId); } } // close the streams and therefore the socket IOUtils.closeStream(out); IOUtils.closeStream(in); // clean up all calls if (closeException == null) { if (!calls.isEmpty()) { LOG.warn( "A connection is closed for no cause and calls are not empty"); // clean up calls anyway closeException = new IOException("Unexpected closed connection"); cleanupCalls(); } } else { // log the info if (LOG.isDebugEnabled()) { LOG.debug("closing ipc connection to " + server + ": " + closeException.getMessage(),closeException); } // cleanup calls cleanupCalls(); } if (LOG.isDebugEnabled()) LOG.debug(getName() + ": closed"); } /* Cleanup all calls and mark them as done */ private void cleanupCalls() { Iterator<Entry<Integer, Call>> itor = calls.entrySet().iterator() ; while (itor.hasNext()) { Call c = itor.next().getValue(); c.setException(closeException); // local exception itor.remove(); } } } /** Call implementation used for parallel calls. */ private class ParallelCall extends Call { private ParallelResults results; private int index; public ParallelCall(Writable param, ParallelResults results, int index) { super(param); this.results = results; this.index = index; } /** Deliver result to result collector. */ protected void callComplete() { results.callComplete(this); } } /** Result collector for parallel calls. */ private static class ParallelResults { private Writable[] values; private int size; private int count; public ParallelResults(int size) { this.values = new Writable[size]; this.size = size; } /** Collect a result. 
*/ public synchronized void callComplete(ParallelCall call) { values[call.index] = call.value; // store the value count++; // count it if (count == size) // if all values are in notify(); // then notify waiting caller } } /** Construct an IPC client whose values are of the given {@link Writable} * class. */ public Client(Class<? extends Writable> valueClass, Configuration conf, SocketFactory factory) { this.valueClass = valueClass; this.maxIdleTime = conf.getInt("ipc.client.connection.maxidletime", 10000); //10s this.maxRetries = conf.getInt("ipc.client.connect.max.retries", 10); this.tcpNoDelay = conf.getBoolean("ipc.client.tcpnodelay", false); this.pingInterval = getPingInterval(conf); if (LOG.isDebugEnabled()) { LOG.debug("The ping interval is" + this.pingInterval + "ms."); } this.conf = conf; this.socketFactory = factory; } /** * Construct an IPC client with the default SocketFactory * @param valueClass * @param conf */ public Client(Class<? extends Writable> valueClass, Configuration conf) { this(valueClass, conf, NetUtils.getDefaultSocketFactory(conf)); } /** Return the socket factory of this client * * @return this client's socket factory */ SocketFactory getSocketFactory() { return socketFactory; } /** Stop all threads related to this client. No further calls may be made * using this client. */ public void stop() { if (LOG.isDebugEnabled()) { LOG.debug("Stopping client"); } if (!running.compareAndSet(true, false)) { return; } // wake up all connections synchronized (connections) { for (Connection conn : connections.values()) { conn.interrupt(); } } // wait until all connections are closed while (!connections.isEmpty()) { try { Thread.sleep(100); } catch (InterruptedException e) { } } } /** Make a call, passing <code>param</code>, to the IPC server running at * <code>address</code>, returning the value. Throws exceptions if there are * network problems or if the remote code threw an exception. 
* @deprecated Use {@link #call(Writable, InetSocketAddress, Class, UserGroupInformation)} instead */ @Deprecated public Writable call(Writable param, InetSocketAddress address) throws InterruptedException, IOException { return call(param, address, null); } /** Make a call, passing <code>param</code>, to the IPC server running at * <code>address</code> with the <code>ticket</code> credentials, returning * the value. * Throws exceptions if there are network problems or if the remote code * threw an exception. * @deprecated Use {@link #call(Writable, InetSocketAddress, Class, UserGroupInformation)} instead */ @Deprecated public Writable call(Writable param, InetSocketAddress addr, UserGroupInformation ticket) throws InterruptedException, IOException { return call(param, addr, null, ticket); } /** Make a call, passing <code>param</code>, to the IPC server running at * <code>address</code> which is servicing the <code>protocol</code> protocol, * with the <code>ticket</code> credentials, returning the value. * Throws exceptions if there are network problems or if the remote code * threw an exception. 
*/ public Writable call(Writable param, InetSocketAddress addr, Class<?> protocol, UserGroupInformation ticket) throws InterruptedException, IOException { Call call = new Call(param); Connection connection = getConnection(addr, protocol, ticket, call); connection.sendParam(call); // send the parameter boolean interrupted = false; synchronized (call) { while (!call.done) { try { call.wait(); // wait for the result } catch (InterruptedException ie) { // save the fact that we were interrupted interrupted = true; } } if (interrupted) { // set the interrupt flag now that we are done waiting Thread.currentThread().interrupt(); } if (call.error != null) { if (call.error instanceof RemoteException) { call.error.fillInStackTrace(); throw call.error; } else { // local exception throw wrapException(addr, call.error); } } else { return call.value; } } } /** * Take an IOException and the address we were trying to connect to * and return an IOException with the input exception as the cause. * The new exception provides the stack trace of the place where * the exception is thrown and some extra diagnostics information. * If the exception is ConnectException or SocketTimeoutException, * return a new one of the same type; Otherwise return an IOException. 
* * @param addr target address * @param exception the relevant exception * @return an exception to throw */ private IOException wrapException(InetSocketAddress addr, IOException exception) { if (exception instanceof ConnectException) { //connection refused; include the host:port in the error return (ConnectException)new ConnectException( "Call to " + addr + " failed on connection exception: " + exception) .initCause(exception); } else if (exception instanceof SocketTimeoutException) { return (SocketTimeoutException)new SocketTimeoutException( "Call to " + addr + " failed on socket timeout exception: " + exception).initCause(exception); } else { return (IOException)new IOException( "Call to " + addr + " failed on local exception: " + exception) .initCause(exception); } } /** * Makes a set of calls in parallel. Each parameter is sent to the * corresponding address. When all values are available, or have timed out * or errored, the collected results are returned in an array. The array * contains nulls for calls that timed out or errored. * @deprecated Use {@link #call(Writable[], InetSocketAddress[], Class, UserGroupInformation)} instead */ @Deprecated public Writable[] call(Writable[] params, InetSocketAddress[] addresses) throws IOException { return call(params, addresses, null, null); } /** Makes a set of calls in parallel. Each parameter is sent to the * corresponding address. When all values are available, or have timed out * or errored, the collected results are returned in an array. The array * contains nulls for calls that timed out or errored. 
*/ public Writable[] call(Writable[] params, InetSocketAddress[] addresses, Class<?> protocol, UserGroupInformation ticket) throws IOException { if (addresses.length == 0) return new Writable[0]; ParallelResults results = new ParallelResults(params.length); synchronized (results) { for (int i = 0; i < params.length; i++) { ParallelCall call = new ParallelCall(params[i], results, i); try { Connection connection = getConnection(addresses[i], protocol, ticket, call); connection.sendParam(call); // send each parameter } catch (IOException e) { // log errors LOG.info("Calling "+addresses[i]+" caught: " + e.getMessage(),e); results.size--; // wait for one fewer result } } while (results.count != results.size) { try { results.wait(); // wait for all results } catch (InterruptedException e) {} } return results.values; } } /** Get a connection from the pool, or create a new one and add it to the * pool. Connections to a given host/port are reused. */ private Connection getConnection(InetSocketAddress addr, Class<?> protocol, UserGroupInformation ticket, Call call) throws IOException { if (!running.get()) { // the client is stopped throw new IOException("The client is stopped"); } Connection connection; /* we could avoid this allocation for each RPC by having a * connectionsId object and with set() method. We need to manage the * refs for keys in HashMap properly. For now its ok. */ ConnectionId remoteId = new ConnectionId(addr, protocol, ticket); do { synchronized (connections) { connection = connections.get(remoteId); if (connection == null) { connection = new Connection(remoteId); connections.put(remoteId, connection); } } } while (!connection.addCall(call)); //we don't invoke the method below inside "synchronized (connections)" //block above. The reason for that is if the server happens to be slow, //it will take longer to establish a connection and that will slow the //entire system down. 
connection.setupIOstreams(); return connection; } /** * This class holds the address and the user ticket. The client connections * to servers are uniquely identified by <remoteAddress, protocol, ticket> */ private static class ConnectionId { InetSocketAddress address; UserGroupInformation ticket; Class<?> protocol; private static final int PRIME = 16777619; ConnectionId(InetSocketAddress address, Class<?> protocol, UserGroupInformation ticket) { this.protocol = protocol; this.address = address; this.ticket = ticket; } InetSocketAddress getAddress() { return address; } Class<?> getProtocol() { return protocol; } UserGroupInformation getTicket() { return ticket; } @Override public boolean equals(Object obj) { if (obj instanceof ConnectionId) { ConnectionId id = (ConnectionId) obj; return address.equals(id.address) && protocol == id.protocol && ticket == id.ticket; //Note : ticket is a ref comparision. } return false; } @Override public int hashCode() { return (address.hashCode() + PRIME * System.identityHashCode(protocol)) ^ System.identityHashCode(ticket); } } } diff --git a/src/test/org/apache/hadoop/ipc/TestIPC.java b/src/test/org/apache/hadoop/ipc/TestIPC.java index df5a155..b672ad5 100644 --- a/src/test/org/apache/hadoop/ipc/TestIPC.java +++ b/src/test/org/apache/hadoop/ipc/TestIPC.java @@ -1,243 +1,279 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import org.apache.commons.logging.*; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.net.NetUtils; import java.util.Random; +import java.io.DataInput; import java.io.IOException; import java.net.InetSocketAddress; import junit.framework.TestCase; import org.apache.hadoop.conf.Configuration; /** Unit tests for IPC. */ public class TestIPC extends TestCase { public static final Log LOG = LogFactory.getLog(TestIPC.class); final private static Configuration conf = new Configuration(); final static private int PING_INTERVAL = 1000; static { Client.setPingInterval(conf, PING_INTERVAL); } public TestIPC(String name) { super(name); } private static final Random RANDOM = new Random(); private static final String ADDRESS = "0.0.0.0"; private static class TestServer extends Server { private boolean sleep; public TestServer(int handlerCount, boolean sleep) throws IOException { super(ADDRESS, 0, LongWritable.class, handlerCount, conf); this.sleep = sleep; } @Override public Writable call(Class<?> protocol, Writable param, long receiveTime) throws IOException { if (sleep) { try { Thread.sleep(RANDOM.nextInt(2*PING_INTERVAL)); // sleep a bit } catch (InterruptedException e) {} } return param; // echo param as result } } private static class SerialCaller extends Thread { private Client client; private InetSocketAddress server; private int count; private boolean failed; public SerialCaller(Client client, InetSocketAddress server, int count) { 
this.client = client; this.server = server; this.count = count; } public void run() { for (int i = 0; i < count; i++) { try { LongWritable param = new LongWritable(RANDOM.nextLong()); LongWritable value = - (LongWritable)client.call(param, server); + (LongWritable)client.call(param, server, null, null); if (!param.equals(value)) { LOG.fatal("Call failed!"); failed = true; break; } } catch (Exception e) { LOG.fatal("Caught: " + StringUtils.stringifyException(e)); failed = true; } } } } private static class ParallelCaller extends Thread { private Client client; private int count; private InetSocketAddress[] addresses; private boolean failed; public ParallelCaller(Client client, InetSocketAddress[] addresses, int count) { this.client = client; this.addresses = addresses; this.count = count; } public void run() { for (int i = 0; i < count; i++) { try { Writable[] params = new Writable[addresses.length]; for (int j = 0; j < addresses.length; j++) params[j] = new LongWritable(RANDOM.nextLong()); - Writable[] values = client.call(params, addresses); + Writable[] values = client.call(params, addresses, null, null); for (int j = 0; j < addresses.length; j++) { if (!params[j].equals(values[j])) { LOG.fatal("Call failed!"); failed = true; break; } } } catch (Exception e) { LOG.fatal("Caught: " + StringUtils.stringifyException(e)); failed = true; } } } } public void testSerial() throws Exception { testSerial(3, false, 2, 5, 100); } public void testSerial(int handlerCount, boolean handlerSleep, int clientCount, int callerCount, int callCount) throws Exception { Server server = new TestServer(handlerCount, handlerSleep); InetSocketAddress addr = NetUtils.getConnectAddress(server); server.start(); Client[] clients = new Client[clientCount]; for (int i = 0; i < clientCount; i++) { clients[i] = new Client(LongWritable.class, conf); } SerialCaller[] callers = new SerialCaller[callerCount]; for (int i = 0; i < callerCount; i++) { callers[i] = new SerialCaller(clients[i%clientCount], 
addr, callCount); callers[i].start(); } for (int i = 0; i < callerCount; i++) { callers[i].join(); assertFalse(callers[i].failed); } for (int i = 0; i < clientCount; i++) { clients[i].stop(); } server.stop(); } public void testParallel() throws Exception { testParallel(10, false, 2, 4, 2, 4, 100); } public void testParallel(int handlerCount, boolean handlerSleep, int serverCount, int addressCount, int clientCount, int callerCount, int callCount) throws Exception { Server[] servers = new Server[serverCount]; for (int i = 0; i < serverCount; i++) { servers[i] = new TestServer(handlerCount, handlerSleep); servers[i].start(); } InetSocketAddress[] addresses = new InetSocketAddress[addressCount]; for (int i = 0; i < addressCount; i++) { addresses[i] = NetUtils.getConnectAddress(servers[i%serverCount]); } Client[] clients = new Client[clientCount]; for (int i = 0; i < clientCount; i++) { clients[i] = new Client(LongWritable.class, conf); } ParallelCaller[] callers = new ParallelCaller[callerCount]; for (int i = 0; i < callerCount; i++) { callers[i] = new ParallelCaller(clients[i%clientCount], addresses, callCount); callers[i].start(); } for (int i = 0; i < callerCount; i++) { callers[i].join(); assertFalse(callers[i].failed); } for (int i = 0; i < clientCount; i++) { clients[i].stop(); } for (int i = 0; i < serverCount; i++) { servers[i].stop(); } } public void testStandAloneClient() throws Exception { testParallel(10, false, 2, 4, 2, 4, 100); Client client = new Client(LongWritable.class, conf); InetSocketAddress address = new InetSocketAddress("127.0.0.1", 10); try { client.call(new LongWritable(RANDOM.nextLong()), - address); + address, null, null); fail("Expected an exception to have been thrown"); } catch (IOException e) { String message = e.getMessage(); String addressText = address.toString(); assertTrue("Did not find "+addressText+" in "+message, message.contains(addressText)); Throwable cause=e.getCause(); assertNotNull("No nested exception in "+e,cause); String 
causeText=cause.getMessage(); assertTrue("Did not find " + causeText + " in " + message, message.contains(causeText)); } } + private static class LongErrorWritable extends LongWritable { + private final static String ERR_MSG = + "Come across an exception while reading"; + + LongErrorWritable() {} + + LongErrorWritable(long longValue) { + super(longValue); + } + + public void readFields(DataInput in) throws IOException { + super.readFields(in); + throw new IOException(ERR_MSG); + } + } + + public void testErrorClient() throws Exception { + // start server + Server server = new TestServer(1, false); + InetSocketAddress addr = NetUtils.getConnectAddress(server); + server.start(); + + // start client + Client client = new Client(LongErrorWritable.class, conf); + try { + client.call(new LongErrorWritable(RANDOM.nextLong()), + addr, null, null); + fail("Expected an exception to have been thrown"); + } catch (IOException e) { + // check error + Throwable cause = e.getCause(); + assertTrue(cause instanceof IOException); + assertEquals(LongErrorWritable.ERR_MSG, cause.getMessage()); + } + } public static void main(String[] args) throws Exception { //new TestIPC("test").testSerial(5, false, 2, 10, 1000); new TestIPC("test").testParallel(10, false, 2, 4, 2, 4, 1000); } }
jaxlaw/hadoop-common
e25e9653532b147c10348c05e2862c0a5e21f86a
HADOOP-6382 from patch https://issues.apache.org/jira/secure/attachment/12435121/HADOOP-6382.script.patch
diff --git a/bin/hadoop b/bin/hadoop index 78333a9..0a6b649 100755 --- a/bin/hadoop +++ b/bin/hadoop @@ -1,297 +1,297 @@ #!/usr/bin/env bash # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # The Hadoop command script # # Environment Variables # # JAVA_HOME The java implementation to use. Overrides JAVA_HOME. # # HADOOP_CLASSPATH Extra Java CLASSPATH entries. # # HADOOP_HEAPSIZE The maximum amount of heap to use, in MB. # Default is 1000. # # HADOOP_OPTS Extra Java runtime options. # # HADOOP_NAMENODE_OPTS These options are added to HADOOP_OPTS # HADOOP_CLIENT_OPTS when the respective command is run. # HADOOP_{COMMAND}_OPTS etc HADOOP_JT_OPTS applies to JobTracker # for e.g. HADOOP_CLIENT_OPTS applies to # more than one command (fs, dfs, fsck, # dfsadmin etc) # # HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf. # # HADOOP_ROOT_LOGGER The root appender. Default is INFO,console # bin=`dirname "$0"` bin=`cd "$bin"; pwd` . 
"$bin"/hadoop-config.sh cygwin=false case "`uname`" in CYGWIN*) cygwin=true;; esac # if no args specified, show usage if [ $# = 0 ]; then echo "Usage: hadoop [--config confdir] COMMAND" echo "where COMMAND is one of:" echo " namenode -format format the DFS filesystem" echo " secondarynamenode run the DFS secondary namenode" echo " namenode run the DFS namenode" echo " datanode run a DFS datanode" echo " dfsadmin run a DFS admin client" echo " mradmin run a Map-Reduce admin client" echo " fsck run a DFS filesystem checking utility" echo " fs run a generic filesystem user client" echo " balancer run a cluster balancing utility" echo " jobtracker run the MapReduce job Tracker node" echo " pipes run a Pipes job" echo " tasktracker run a MapReduce task Tracker node" echo " job manipulate MapReduce jobs" echo " queue get information regarding JobQueues" echo " version print the version" echo " jar <jar> run a jar file" echo " distcp <srcurl> <desturl> copy file or directories recursively" echo " archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive" echo " classpath prints the class path needed to get the" echo " Hadoop jar and the required libraries" echo " daemonlog get/set the log level for each daemon" echo " or" echo " CLASSNAME run the class named CLASSNAME" echo "Most commands print help when invoked w/o parameters." exit 1 fi # get arguments COMMAND=$1 shift if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then . "${HADOOP_CONF_DIR}/hadoop-env.sh" fi # some Java parameters if [ "$JAVA_HOME" != "" ]; then #echo "run java in $JAVA_HOME" JAVA_HOME=$JAVA_HOME fi if [ "$JAVA_HOME" = "" ]; then echo "Error: JAVA_HOME is not set." 
exit 1 fi JAVA=$JAVA_HOME/bin/java JAVA_HEAP_MAX=-Xmx1000m # check envvars which might override default args if [ "$HADOOP_HEAPSIZE" != "" ]; then #echo "run with heapsize $HADOOP_HEAPSIZE" JAVA_HEAP_MAX="-Xmx""$HADOOP_HEAPSIZE""m" #echo $JAVA_HEAP_MAX fi # CLASSPATH initially contains $HADOOP_CONF_DIR CLASSPATH="${HADOOP_CONF_DIR}" CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar # for developers, add Hadoop classes to CLASSPATH if [ -d "$HADOOP_HOME/build/classes" ]; then CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes fi if [ -d "$HADOOP_HOME/build/webapps" ]; then CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build fi if [ -d "$HADOOP_HOME/build/test/classes" ]; then CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes fi if [ -d "$HADOOP_HOME/build/tools" ]; then CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/tools fi # so that filenames w/ spaces are handled correctly in loops below IFS= # for releases, add core hadoop jar & webapps to CLASSPATH if [ -d "$HADOOP_HOME/webapps" ]; then CLASSPATH=${CLASSPATH}:$HADOOP_HOME fi -for f in $HADOOP_HOME/hadoop-*-core.jar; do +for f in $HADOOP_HOME/hadoop-core-*.jar; do CLASSPATH=${CLASSPATH}:$f; done # add libs to CLASSPATH for f in $HADOOP_HOME/lib/*.jar; do CLASSPATH=${CLASSPATH}:$f; done if [ -d "$HADOOP_HOME/build/ivy/lib/Hadoop/common" ]; then for f in $HADOOP_HOME/build/ivy/lib/Hadoop/common/*.jar; do CLASSPATH=${CLASSPATH}:$f; done fi for f in $HADOOP_HOME/lib/jsp-2.1/*.jar; do CLASSPATH=${CLASSPATH}:$f; done -for f in $HADOOP_HOME/hadoop-*-tools.jar; do +for f in $HADOOP_HOME/hadoop-tools-*.jar; do TOOL_PATH=${TOOL_PATH}:$f; done -for f in $HADOOP_HOME/build/hadoop-*-tools.jar; do +for f in $HADOOP_HOME/build/hadoop-tools-*.jar; do TOOL_PATH=${TOOL_PATH}:$f; done # add user-specified CLASSPATH last if [ "$HADOOP_CLASSPATH" != "" ]; then CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH} fi # default log directory & file if [ "$HADOOP_LOG_DIR" = "" ]; then HADOOP_LOG_DIR="$HADOOP_HOME/logs" fi if [ "$HADOOP_LOGFILE" = "" ]; then 
HADOOP_LOGFILE='hadoop.log' fi # default policy file for service-level authorization if [ "$HADOOP_POLICYFILE" = "" ]; then HADOOP_POLICYFILE="hadoop-policy.xml" fi # restore ordinary behaviour unset IFS # figure out which class to run if [ "$COMMAND" = "classpath" ] ; then if $cygwin; then CLASSPATH=`cygpath -p -w "$CLASSPATH"` fi echo $CLASSPATH exit elif [ "$COMMAND" = "namenode" ] ; then CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode' HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS" elif [ "$COMMAND" = "secondarynamenode" ] ; then CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode' HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS" elif [ "$COMMAND" = "datanode" ] ; then CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode' HADOOP_OPTS="$HADOOP_OPTS $HADOOP_DATANODE_OPTS" elif [ "$COMMAND" = "fs" ] ; then CLASS=org.apache.hadoop.fs.FsShell HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" elif [ "$COMMAND" = "dfs" ] ; then CLASS=org.apache.hadoop.fs.FsShell HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" elif [ "$COMMAND" = "dfsadmin" ] ; then CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" elif [ "$COMMAND" = "mradmin" ] ; then CLASS=org.apache.hadoop.mapred.tools.MRAdmin HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" elif [ "$COMMAND" = "fsck" ] ; then CLASS=org.apache.hadoop.hdfs.tools.DFSck HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" elif [ "$COMMAND" = "balancer" ] ; then CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS" elif [ "$COMMAND" = "jobtracker" ] ; then CLASS=org.apache.hadoop.mapred.JobTracker HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOBTRACKER_OPTS" elif [ "$COMMAND" = "tasktracker" ] ; then CLASS=org.apache.hadoop.mapred.TaskTracker HADOOP_OPTS="$HADOOP_OPTS $HADOOP_TASKTRACKER_OPTS" elif [ "$COMMAND" = "job" ] ; then CLASS=org.apache.hadoop.mapred.JobClient elif [ "$COMMAND" = "queue" ] ; then 
CLASS=org.apache.hadoop.mapred.JobQueueClient elif [ "$COMMAND" = "pipes" ] ; then CLASS=org.apache.hadoop.mapred.pipes.Submitter HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" elif [ "$COMMAND" = "version" ] ; then CLASS=org.apache.hadoop.util.VersionInfo HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" elif [ "$COMMAND" = "jar" ] ; then CLASS=org.apache.hadoop.util.RunJar elif [ "$COMMAND" = "distcp" ] ; then CLASS=org.apache.hadoop.tools.DistCp CLASSPATH=${CLASSPATH}:${TOOL_PATH} HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" elif [ "$COMMAND" = "daemonlog" ] ; then CLASS=org.apache.hadoop.log.LogLevel HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" elif [ "$COMMAND" = "archive" ] ; then CLASS=org.apache.hadoop.tools.HadoopArchives CLASSPATH=${CLASSPATH}:${TOOL_PATH} HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" elif [ "$COMMAND" = "sampler" ] ; then CLASS=org.apache.hadoop.mapred.lib.InputSampler HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" else CLASS=$COMMAND fi # cygwin path translation if $cygwin; then CLASSPATH=`cygpath -p -w "$CLASSPATH"` HADOOP_HOME=`cygpath -w "$HADOOP_HOME"` HADOOP_LOG_DIR=`cygpath -w "$HADOOP_LOG_DIR"` TOOL_PATH=`cygpath -p -w "$TOOL_PATH"` fi # setup 'java.library.path' for native-hadoop code if necessary JAVA_LIBRARY_PATH='' if [ -d "${HADOOP_HOME}/build/native" -o -d "${HADOOP_HOME}/lib/native" ]; then JAVA_PLATFORM=`CLASSPATH=${CLASSPATH} ${JAVA} -Xmx32m ${HADOOP_JAVA_PLATFORM_OPTS} org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"` if [ -d "$HADOOP_HOME/build/native" ]; then JAVA_LIBRARY_PATH=${HADOOP_HOME}/build/native/${JAVA_PLATFORM}/lib fi if [ -d "${HADOOP_HOME}/lib/native" ]; then if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_HOME}/lib/native/${JAVA_PLATFORM} else JAVA_LIBRARY_PATH=${HADOOP_HOME}/lib/native/${JAVA_PLATFORM} fi fi fi # cygwin path translation if $cygwin; then JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"` fi HADOOP_OPTS="$HADOOP_OPTS 
-Dhadoop.log.dir=$HADOOP_LOG_DIR" HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE" HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.home.dir=$HADOOP_HOME" HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING" HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}" if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" fi HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.policy.file=$HADOOP_POLICYFILE" # run it exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@" diff --git a/bin/rcc b/bin/rcc index a39745b..a023da0 100755 --- a/bin/rcc +++ b/bin/rcc @@ -1,99 +1,99 @@ #!/usr/bin/env bash # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # The Hadoop record compiler # # Environment Variables # # JAVA_HOME The java implementation to use. Overrides JAVA_HOME. # # HADOOP_OPTS Extra Java runtime options. # # HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf. # bin=`dirname "$0"` bin=`cd "$bin"; pwd` . "$bin"/hadoop-config.sh if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then . 
"${HADOOP_CONF_DIR}/hadoop-env.sh" fi # some Java parameters if [ "$JAVA_HOME" != "" ]; then #echo "run java in $JAVA_HOME" JAVA_HOME=$JAVA_HOME fi if [ "$JAVA_HOME" = "" ]; then echo "Error: JAVA_HOME is not set." exit 1 fi JAVA=$JAVA_HOME/bin/java JAVA_HEAP_MAX=-Xmx1000m # CLASSPATH initially contains $HADOOP_CONF_DIR CLASSPATH="${HADOOP_CONF_DIR}" CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar # for developers, add Hadoop classes to CLASSPATH if [ -d "$HADOOP_HOME/build/classes" ]; then CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes fi if [ -d "$HADOOP_HOME/build/webapps" ]; then CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build fi if [ -d "$HADOOP_HOME/build/test/classes" ]; then CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes fi # so that filenames w/ spaces are handled correctly in loops below IFS= # for releases, add core hadoop jar & webapps to CLASSPATH if [ -d "$HADOOP_HOME/webapps" ]; then CLASSPATH=${CLASSPATH}:$HADOOP_HOME fi -for f in $HADOOP_HOME/hadoop-*-core.jar; do +for f in $HADOOP_HOME/hadoop-core-*.jar; do CLASSPATH=${CLASSPATH}:$f; done # add libs to CLASSPATH for f in $HADOOP_HOME/lib/*.jar; do CLASSPATH=${CLASSPATH}:$f; done for f in $HADOOP_HOME/lib/jetty-ext/*.jar; do CLASSPATH=${CLASSPATH}:$f; done # restore ordinary behaviour unset IFS CLASS='org.apache.hadoop.record.compiler.generated.Rcc' # cygwin path translation if expr `uname` : 'CYGWIN*' > /dev/null; then CLASSPATH=`cygpath -p -w "$CLASSPATH"` fi # run it exec "$JAVA" $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"
jaxlaw/hadoop-common
e74a0612359a5a75bfd2102d8c7efbec24ecae33
HDFS-737 from https://issues.apache.org/jira/secure/attachment/12435041/HDFS-737.3.rel20.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 27d6613..393d4bf 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,509 +1,513 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383006 + HDFS-737. Add full path name of the file to the block information and + summary of total number of files, blocks, live and deadnodes to + metasave output. (Jitendra Nath Pandey via suresh) + HADOOP-6521. Fix backward compatiblity issue with umask when applications use deprecated param dfs.umask in configuration or use FsPermission.setUMask(). (suresh) MAPREDUCE-1372. Fixed a ConcurrentModificationException in jobtracker. (Arun C Murthy via yhemanth) MAPREDUCE-1316. Fix jobs' retirement from the JobTracker to prevent memory leaks via stale references. (Amar Kamat via acmurthy) MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. (Amareshwari Sriramadasu via acmurthy) HADOOP-6460. Reinitializes buffers used for serializing responses in ipc server on exceeding maximum response size to free up Java heap. (suresh) MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. 
(Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. 
Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1376. Add support for submitting jobs as configured users, pluggable mapping of trace users to target users in Gridmix. (cdouglas) yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. 
Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. 
(suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. 
Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. 
HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. 
http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. 
http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). 
http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index fad88d8..7130b4c 100644 --- a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -384,967 +384,984 @@ class FSDirectory implements FSConstants, Closeable { if (dst.startsWith(src) && dst.charAt(src.length()) == Path.SEPARATOR_CHAR) { NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + "failed to rename " + src + " to " + dst + " because destination starts with src"); return false; } byte[][] dstComponents = INode.getPathComponents(dst); INode[] dstInodes = new INode[dstComponents.length]; rootDir.getExistingPathINodes(dstComponents, dstInodes); if (dstInodes[dstInodes.length-1] != null) { NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " +"failed to rename "+src+" to "+dst+ " because destination exists"); return false; } if (dstInodes[dstInodes.length-2] == null) { NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " +"failed to rename "+src+" to "+dst+ " because destination's parent does not exist"); return false; } // Ensure dst has quota to accommodate rename verifyQuotaForRename(srcInodes,dstInodes); INode dstChild = null; INode srcChild = null; String srcChildName = null; try { // remove src srcChild = removeChild(srcInodes, srcInodes.length-1); if (srcChild == null) { NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + "failed to rename " + src + " to " + dst + " because the source can not be removed"); return false; } srcChildName = srcChild.getLocalName(); srcChild.setLocalName(dstComponents[dstInodes.length-1]); // add src to the destination dstChild = addChildNoQuotaCheck(dstInodes, dstInodes.length - 1, srcChild, -1, false); if 
(dstChild != null) { srcChild = null; if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: " + src + " is renamed to " + dst); } // update modification time of dst and the parent of src srcInodes[srcInodes.length-2].setModificationTime(timestamp); dstInodes[dstInodes.length-2].setModificationTime(timestamp); return true; } } finally { if (dstChild == null && srcChild != null) { // put it back srcChild.setLocalName(srcChildName); addChildNoQuotaCheck(srcInodes, srcInodes.length - 1, srcChild, -1, false); } } NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " +"failed to rename "+src+" to "+dst); return false; } } /** * Set file replication * * @param src file name * @param replication new replication * @param oldReplication old replication - output parameter * @return array of file blocks * @throws IOException */ Block[] setReplication(String src, short replication, int[] oldReplication ) throws IOException { waitForReady(); Block[] fileBlocks = unprotectedSetReplication(src, replication, oldReplication); if (fileBlocks != null) // log replication change fsImage.getEditLog().logSetReplication(src, replication); return fileBlocks; } Block[] unprotectedSetReplication( String src, short replication, int[] oldReplication ) throws IOException { if (oldReplication == null) oldReplication = new int[1]; oldReplication[0] = -1; Block[] fileBlocks = null; synchronized(rootDir) { INode[] inodes = rootDir.getExistingPathINodes(src); INode inode = inodes[inodes.length - 1]; if (inode == null) return null; if (inode.isDirectory()) return null; INodeFile fileNode = (INodeFile)inode; oldReplication[0] = fileNode.getReplication(); // check disk quota long dsDelta = (replication - oldReplication[0]) * (fileNode.diskspaceConsumed()/oldReplication[0]); updateCount(inodes, inodes.length-1, 0, dsDelta, true); fileNode.setReplication(replication); fileBlocks = fileNode.getBlocks(); } return fileBlocks; } 
/** * Get the blocksize of a file * @param filename the filename * @return the number of bytes * @throws IOException if it is a directory or does not exist. */ long getPreferredBlockSize(String filename) throws IOException { synchronized (rootDir) { INode fileNode = rootDir.getNode(filename); if (fileNode == null) { throw new IOException("Unknown file: " + filename); } if (fileNode.isDirectory()) { throw new IOException("Getting block size of a directory: " + filename); } return ((INodeFile)fileNode).getPreferredBlockSize(); } } boolean exists(String src) { src = normalizePath(src); synchronized(rootDir) { INode inode = rootDir.getNode(src); if (inode == null) { return false; } return inode.isDirectory()? true: ((INodeFile)inode).getBlocks() != null; } } void setPermission(String src, FsPermission permission ) throws IOException { unprotectedSetPermission(src, permission); fsImage.getEditLog().logSetPermissions(src, permission); } void unprotectedSetPermission(String src, FsPermission permissions) throws FileNotFoundException { synchronized(rootDir) { INode inode = rootDir.getNode(src); if(inode == null) throw new FileNotFoundException("File does not exist: " + src); inode.setPermission(permissions); } } void setOwner(String src, String username, String groupname ) throws IOException { unprotectedSetOwner(src, username, groupname); fsImage.getEditLog().logSetOwner(src, username, groupname); } void unprotectedSetOwner(String src, String username, String groupname) throws FileNotFoundException { synchronized(rootDir) { INode inode = rootDir.getNode(src); if(inode == null) throw new FileNotFoundException("File does not exist: " + src); if (username != null) { inode.setUser(username); } if (groupname != null) { inode.setGroup(groupname); } } } /** * Remove the file from management, return blocks */ INode delete(String src) { if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: "+src); } waitForReady(); long now = 
FSNamesystem.now(); INode deletedNode = unprotectedDelete(src, now); if (deletedNode != null) { fsImage.getEditLog().logDelete(src, now); } return deletedNode; } /** Return if a directory is empty or not **/ boolean isDirEmpty(String src) { boolean dirNotEmpty = true; if (!isDir(src)) { return true; } synchronized(rootDir) { INode targetNode = rootDir.getNode(src); assert targetNode != null : "should be taken care in isDir() above"; if (((INodeDirectory)targetNode).getChildren().size() != 0) { dirNotEmpty = false; } } return dirNotEmpty; } /** * Delete a path from the name space * Update the count at each ancestor directory with quota * @param src a string representation of a path to an inode * @param modificationTime the time the inode is removed * @param deletedBlocks the place holder for the blocks to be removed * @return if the deletion succeeds */ INode unprotectedDelete(String src, long modificationTime) { src = normalizePath(src); synchronized (rootDir) { INode[] inodes = rootDir.getExistingPathINodes(src); INode targetNode = inodes[inodes.length-1]; if (targetNode == null) { // non-existent src NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: " +"failed to remove "+src+" because it does not exist"); return null; } else if (inodes.length == 1) { // src is the root NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " + "failed to remove " + src + " because the root is not allowed to be deleted"); return null; } else { try { // Remove the node from the namespace removeChild(inodes, inodes.length-1); // set the parent's modification time inodes[inodes.length-2].setModificationTime(modificationTime); // GC all the blocks underneath the node. 
ArrayList<Block> v = new ArrayList<Block>(); int filesRemoved = targetNode.collectSubtreeBlocksAndClear(v); incrDeletedFileCount(filesRemoved); namesystem.removePathAndBlocks(src, v); if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: " +src+" is removed"); } return targetNode; } catch (IOException e) { NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " + "failed to remove " + src + " because " + e.getMessage()); return null; } } } } /** * Replaces the specified inode with the specified one. */ void replaceNode(String path, INodeFile oldnode, INodeFile newnode) throws IOException { replaceNode(path, oldnode, newnode, true); } /** * @see #replaceNode(String, INodeFile, INodeFile) */ private void replaceNode(String path, INodeFile oldnode, INodeFile newnode, boolean updateDiskspace) throws IOException { synchronized (rootDir) { long dsOld = oldnode.diskspaceConsumed(); // // Remove the node from the namespace // if (!oldnode.removeNode()) { NameNode.stateChangeLog.warn("DIR* FSDirectory.replaceNode: " + "failed to remove " + path); throw new IOException("FSDirectory.replaceNode: " + "failed to remove " + path); } /* Currently oldnode and newnode are assumed to contain the same * blocks. Otherwise, blocks need to be removed from the blocksMap. */ rootDir.addNode(path, newnode); //check if disk space needs to be updated. long dsNew = 0; if (updateDiskspace && (dsNew = newnode.diskspaceConsumed()) != dsOld) { try { updateSpaceConsumed(path, 0, dsNew-dsOld); } catch (QuotaExceededException e) { // undo replaceNode(path, newnode, oldnode, false); throw e; } } int index = 0; for (Block b : newnode.getBlocks()) { BlockInfo info = namesystem.blocksMap.addINode(b, newnode); newnode.setBlock(index, info); // inode refers to the block in BlocksMap index++; } } } /** * Get a listing of files given path 'src' * * This function is admittedly very inefficient right now. We'll * make it better later. 
*/ FileStatus[] getListing(String src) { String srcs = normalizePath(src); synchronized (rootDir) { INode targetNode = rootDir.getNode(srcs); if (targetNode == null) return null; if (!targetNode.isDirectory()) { return new FileStatus[]{createFileStatus(srcs, targetNode)}; } List<INode> contents = ((INodeDirectory)targetNode).getChildren(); FileStatus listing[] = new FileStatus[contents.size()]; if(! srcs.endsWith(Path.SEPARATOR)) srcs += Path.SEPARATOR; int i = 0; for (INode cur : contents) { listing[i] = createFileStatus(srcs+cur.getLocalName(), cur); i++; } return listing; } } /** Get the file info for a specific file. * @param src The string representation of the path to the file * @return object containing information regarding the file * or null if file not found */ FileStatus getFileInfo(String src) { String srcs = normalizePath(src); synchronized (rootDir) { INode targetNode = rootDir.getNode(srcs); if (targetNode == null) { return null; } else { return createFileStatus(srcs, targetNode); } } } /** * Get the blocks associated with the file. */ Block[] getFileBlocks(String src) { waitForReady(); synchronized (rootDir) { INode targetNode = rootDir.getNode(src); if (targetNode == null) return null; if(targetNode.isDirectory()) return null; return ((INodeFile)targetNode).getBlocks(); } } /** * Get {@link INode} associated with the file. */ INodeFile getFileINode(String src) { synchronized (rootDir) { INode inode = rootDir.getNode(src); if (inode == null || inode.isDirectory()) return null; return (INodeFile)inode; } } /** * Retrieve the existing INodes along the given path. * * @param path the path to explore * @return INodes array containing the existing INodes in the order they * appear when following the path from the root INode to the * deepest INodes. 
The array size will be the number of expected * components in the path, and non existing components will be * filled with null * * @see INodeDirectory#getExistingPathINodes(byte[][], INode[]) */ INode[] getExistingPathINodes(String path) { synchronized (rootDir){ return rootDir.getExistingPathINodes(path); } } /** * Check whether the filepath could be created */ boolean isValidToCreate(String src) { String srcs = normalizePath(src); synchronized (rootDir) { if (srcs.startsWith("/") && !srcs.endsWith("/") && rootDir.getNode(srcs) == null) { return true; } else { return false; } } } /** * Check whether the path specifies a directory */ boolean isDir(String src) { synchronized (rootDir) { INode node = rootDir.getNode(normalizePath(src)); return node != null && node.isDirectory(); } } /** Updates namespace and diskspace consumed for all * directories until the parent directory of file represented by path. * * @param path path for the file. * @param nsDelta the delta change of namespace * @param dsDelta the delta change of diskspace * @throws QuotaExceededException if the new count violates any quota limit * @throws FileNotFound if path does not exist. 
*/
  void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
                                         throws QuotaExceededException,
                                                FileNotFoundException {
    synchronized (rootDir) {
      INode[] inodes = rootDir.getExistingPathINodes(path);
      int len = inodes.length;
      if (inodes[len - 1] == null) {
        throw new FileNotFoundException(path +
                                        " does not exist under rootDir.");
      }
      // apply the delta to every quota directory on the path
      updateCount(inodes, len-1, nsDelta, dsDelta, true);
    }
  }

  /** update count of each inode with quota
   *
   * @param inodes an array of inodes on a path
   * @param numOfINodes the number of inodes to update starting from index 0
   * @param nsDelta the delta change of namespace
   * @param dsDelta the delta change of diskspace
   * @param checkQuota if true then check if quota is exceeded
   * @throws QuotaExceededException if the new count violates any quota limit
   */
  private void updateCount(INode[] inodes, int numOfINodes,
                           long nsDelta, long dsDelta, boolean checkQuota)
                           throws QuotaExceededException {
    if (!ready) {
      // still initializing (image/edits being loaded) - do not check or
      // update quotas
      return;
    }
    // clamp so we never walk past the end of the supplied path
    if (numOfINodes > inodes.length) {
      numOfINodes = inodes.length;
    }
    // verify first so the update below is all-or-nothing
    if (checkQuota) {
      verifyQuota(inodes, numOfINodes, nsDelta, dsDelta, null);
    }
    for(int i = 0; i < numOfINodes; i++) {
      if (inodes[i].isQuotaSet()) { // a directory with quota
        INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i];
        node.updateNumItemsInTree(nsDelta, dsDelta);
      }
    }
  }

  /**
   * update quota of each inode and check to see if quota is exceeded.
* See {@link #updateCount(INode[], int, long, long, boolean)} */ private void updateCountNoQuotaCheck(INode[] inodes, int numOfINodes, long nsDelta, long dsDelta) { try { updateCount(inodes, numOfINodes, nsDelta, dsDelta, false); } catch (QuotaExceededException e) { NameNode.LOG.warn("FSDirectory.updateCountNoQuotaCheck - unexpected ", e); } } /** Return the name of the path represented by inodes at [0, pos] */ private static String getFullPathName(INode[] inodes, int pos) { StringBuilder fullPathName = new StringBuilder(); for (int i=1; i<=pos; i++) { fullPathName.append(Path.SEPARATOR_CHAR).append(inodes[i].getLocalName()); } return fullPathName.toString(); } + /** Return the full path name of the specified inode */ + static String getFullPathName(INode inode) { + // calculate the depth of this inode from root + int depth = 0; + for (INode i = inode; i != null; i = i.parent) { + depth++; + } + INode[] inodes = new INode[depth]; + + // fill up the inodes in the path from this inode to root + for (int i = 0; i < depth; i++) { + inodes[depth-i-1] = inode; + inode = inode.parent; + } + return getFullPathName(inodes, depth-1); + } + /** * Create a directory * If ancestor directories do not exist, automatically create them. * @param src string representation of the path to the directory * @param permissions the permission of the directory * @param inheritPermission if the permission of the directory should inherit * from its parent or not. 
The automatically created * ones always inherit its permission from its parent * @param now creation time * @return true if the operation succeeds false otherwise * @throws FileNotFoundException if an ancestor or itself is a file * @throws QuotaExceededException if directory creation violates * any quota limit */ boolean mkdirs(String src, PermissionStatus permissions, boolean inheritPermission, long now) throws FileNotFoundException, QuotaExceededException { src = normalizePath(src); String[] names = INode.getPathNames(src); byte[][] components = INode.getPathComponents(names); INode[] inodes = new INode[components.length]; synchronized(rootDir) { rootDir.getExistingPathINodes(components, inodes); // find the index of the first null in inodes[] StringBuilder pathbuilder = new StringBuilder(); int i = 1; for(; i < inodes.length && inodes[i] != null; i++) { pathbuilder.append(Path.SEPARATOR + names[i]); if (!inodes[i].isDirectory()) { throw new FileNotFoundException("Parent path is not a directory: " + pathbuilder); } } // create directories beginning from the first null index for(; i < inodes.length; i++) { pathbuilder.append(Path.SEPARATOR + names[i]); String cur = pathbuilder.toString(); unprotectedMkdir(inodes, i, components[i], permissions, inheritPermission || i != components.length-1, now); if (inodes[i] == null) { return false; } // Directory creation also count towards FilesCreated // to match count of files_deleted metric. 
if (namesystem != null) NameNode.getNameNodeMetrics().numFilesCreated.inc(); fsImage.getEditLog().logMkDir(cur, inodes[i]); NameNode.stateChangeLog.debug( "DIR* FSDirectory.mkdirs: created directory " + cur); } } return true; } /** */ INode unprotectedMkdir(String src, PermissionStatus permissions, long timestamp) throws QuotaExceededException { byte[][] components = INode.getPathComponents(src); INode[] inodes = new INode[components.length]; synchronized (rootDir) { rootDir.getExistingPathINodes(components, inodes); unprotectedMkdir(inodes, inodes.length-1, components[inodes.length-1], permissions, false, timestamp); return inodes[inodes.length-1]; } } /** create a directory at index pos. * The parent path to the directory is at [0, pos-1]. * All ancestors exist. Newly created one stored at index pos. */ private void unprotectedMkdir(INode[] inodes, int pos, byte[] name, PermissionStatus permission, boolean inheritPermission, long timestamp) throws QuotaExceededException { inodes[pos] = addChild(inodes, pos, new INodeDirectory(name, permission, timestamp), -1, inheritPermission ); } /** Add a node child to the namespace. The full path name of the node is src. * childDiskspace should be -1, if unknown. * QuotaExceededException is thrown if it violates quota limit */ private <T extends INode> T addNode(String src, T child, long childDiskspace, boolean inheritPermission) throws QuotaExceededException { byte[][] components = INode.getPathComponents(src); child.setLocalName(components[components.length-1]); INode[] inodes = new INode[components.length]; synchronized (rootDir) { rootDir.getExistingPathINodes(components, inodes); return addChild(inodes, inodes.length-1, child, childDiskspace, inheritPermission); } } /** * Verify quota for adding or moving a new INode with required * namespace and diskspace to a given position. 
* * @param inodes INodes corresponding to a path * @param pos position where a new INode will be added * @param nsDelta needed namespace * @param dsDelta needed diskspace * @param commonAncestor Last node in inodes array that is a common ancestor * for a INode that is being moved from one location to the other. * Pass null if a node is not being moved. * @throws QuotaExceededException if quota limit is exceeded. */ private void verifyQuota(INode[] inodes, int pos, long nsDelta, long dsDelta, INode commonAncestor) throws QuotaExceededException { if (!ready) { // Do not check quota if edits log is still being processed return; } if (pos>inodes.length) { pos = inodes.length; } int i = pos - 1; try { // check existing components in the path for(; i >= 0; i--) { if (commonAncestor == inodes[i]) { // Moving an existing node. Stop checking for quota when common // ancestor is reached return; } if (inodes[i].isQuotaSet()) { // a directory with quota INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i]; node.verifyQuota(nsDelta, dsDelta); } } } catch (QuotaExceededException e) { e.setPathName(getFullPathName(inodes, i)); throw e; } } /** * Verify quota for rename operation where srcInodes[srcInodes.length-1] moves * dstInodes[dstInodes.length-1] * * @param srcInodes directory from where node is being moved. * @param dstInodes directory to where node is moved to. * @throws QuotaExceededException if quota limit is exceeded. 
*/
  private void verifyQuotaForRename(INode[] srcInodes, INode[]dstInodes)
                                                 throws QuotaExceededException {
    if (!ready) {
      // Do not check quota if edits log is still being processed
      return;
    }
    INode srcInode = srcInodes[srcInodes.length - 1];
    INode commonAncestor = null;
    // walk down from the root until the two paths diverge; above the common
    // ancestor the moved subtree's usage does not change, so the quota walk
    // may stop there.
    // NOTE(review): assumes src and dst paths differ so the loop terminates
    // before running off either array - verify against callers
    for(int i =0;srcInodes[i] == dstInodes[i]; i++) {
      commonAncestor = srcInodes[i];
    }
    INode.DirCounts counts = new INode.DirCounts();
    srcInode.spaceConsumedInTree(counts);
    // charge the whole subtree's namespace/diskspace against dst's quotas
    verifyQuota(dstInodes, dstInodes.length - 1, counts.getNsCount(),
            counts.getDsCount(), commonAncestor);
  }

  /** Add a node child to the inodes at index pos.
   * Its ancestors are stored at [0, pos-1].
   * QuotaExceededException is thrown if it violates quota limit */
  private <T extends INode> T addChild(INode[] pathComponents, int pos,
      T child, long childDiskspace, boolean inheritPermission,
      boolean checkQuota) throws QuotaExceededException {
    INode.DirCounts counts = new INode.DirCounts();
    child.spaceConsumedInTree(counts);
    // childDiskspace < 0 means "unknown": fall back to the child's own usage
    if (childDiskspace < 0) {
      childDiskspace = counts.getDsCount();
    }
    updateCount(pathComponents, pos, counts.getNsCount(), childDiskspace,
        checkQuota);
    T addedNode = ((INodeDirectory)pathComponents[pos-1]).addChild(
        child, inheritPermission);
    if (addedNode == null) {
      // insertion failed (e.g. name collision): roll the counts back
      updateCount(pathComponents, pos,
                  -counts.getNsCount(), -childDiskspace, true);
    }
    return addedNode;
  }

  /** Same as
   * {@link #addChild(INode[], int, INode, long, boolean, boolean)}
   * with quota checking enabled. */
  private <T extends INode> T addChild(INode[] pathComponents, int pos,
      T child, long childDiskspace, boolean inheritPermission)
      throws QuotaExceededException {
    return addChild(pathComponents, pos, child, childDiskspace,
        inheritPermission, true);
  }

  /** Add a child without checking quota; a QuotaExceededException is
   * unexpected here and is logged, and null is returned on failure. */
  private <T extends INode> T addChildNoQuotaCheck(INode[] pathComponents,
      int pos, T child, long childDiskspace, boolean inheritPermission) {
    T inode = null;
    try {
      inode = addChild(pathComponents, pos, child, childDiskspace,
          inheritPermission, false);
    } catch (QuotaExceededException e) {
      NameNode.LOG.warn("FSDirectory.addChildNoQuotaCheck - unexpected", e);
    }
    return inode;
  }

  /** Remove an inode at index pos from the
namespace.
   * Its ancestors are stored at [0, pos-1].
   * Count of each ancestor with quota is also updated.
   * Return the removed node; null if the removal fails.
   */
  private INode removeChild(INode[] pathComponents, int pos) {
    INode removedNode =
      ((INodeDirectory)pathComponents[pos-1]).removeChild(pathComponents[pos]);
    if (removedNode != null) {
      // give the removed subtree's usage back to every quota ancestor
      INode.DirCounts counts = new INode.DirCounts();
      removedNode.spaceConsumedInTree(counts);
      updateCountNoQuotaCheck(pathComponents, pos,
                  -counts.getNsCount(), -counts.getDsCount());
    }
    return removedNode;
  }

  /** Strip a single trailing separator; "/" itself is left intact. */
  String normalizePath(String src) {
    if (src.length() > 1 && src.endsWith("/")) {
      src = src.substring(0, src.length() - 1);
    }
    return src;
  }

  /** Compute the content summary of the inode at src.
   * @throws FileNotFoundException if the path does not exist */
  ContentSummary getContentSummary(String src) throws IOException {
    String srcs = normalizePath(src);
    synchronized (rootDir) {
      INode targetNode = rootDir.getNode(srcs);
      if (targetNode == null) {
        throw new FileNotFoundException("File does not exist: " + srcs);
      }
      else {
        return targetNode.computeContentSummary();
      }
    }
  }

  /** Update the count of each directory with quota in the namespace
   * A directory's count is defined as the total number inodes in the tree
   * rooted at the directory.
   *
   * This is an update of existing state of the filesystem and does not
   * throw QuotaExceededException.
   */
  void updateCountForINodeWithQuota() {
    updateCountForINodeWithQuota(rootDir, new INode.DirCounts(),
                                 new ArrayList<INode>(50));
  }

  /**
   * Update the count of the directory if it has a quota and return the count
   *
   * This does not throw a QuotaExceededException. This is just an update
   * of existing state and throwing QuotaExceededException does not help
   * with fixing the state, if there is a problem.
   *
   * @param dir the root of the tree that represents the directory
   * @param counters counters for name space and disk space
   * @param nodesInPath INodes for the each of components in the path.
* @return the size of the tree */ private static void updateCountForINodeWithQuota(INodeDirectory dir, INode.DirCounts counts, ArrayList<INode> nodesInPath) { long parentNamespace = counts.nsCount; long parentDiskspace = counts.dsCount; counts.nsCount = 1L;//for self. should not call node.spaceConsumedInTree() counts.dsCount = 0L; /* We don't need nodesInPath if we could use 'parent' field in * INode. using 'parent' is not currently recommended. */ nodesInPath.add(dir); for (INode child : dir.getChildren()) { if (child.isDirectory()) { updateCountForINodeWithQuota((INodeDirectory)child, counts, nodesInPath); } else { // reduce recursive calls counts.nsCount += 1; counts.dsCount += ((INodeFile)child).diskspaceConsumed(); } } if (dir.isQuotaSet()) { ((INodeDirectoryWithQuota)dir).setSpaceConsumed(counts.nsCount, counts.dsCount); // check if quota is violated for some reason. if ((dir.getNsQuota() >= 0 && counts.nsCount > dir.getNsQuota()) || (dir.getDsQuota() >= 0 && counts.dsCount > dir.getDsQuota())) { // can only happen because of a software bug. the bug should be fixed. StringBuilder path = new StringBuilder(512); for (INode n : nodesInPath) { path.append('/'); path.append(n.getLocalName()); } NameNode.LOG.warn("Quota violation in image for " + path + " (Namespace quota : " + dir.getNsQuota() + " consumed : " + counts.nsCount + ")" + " (Diskspace quota : " + dir.getDsQuota() + " consumed : " + counts.dsCount + ")."); } } // pop nodesInPath.remove(nodesInPath.size()-1); counts.nsCount += parentNamespace; counts.dsCount += parentDiskspace; } /** * See {@link ClientProtocol#setQuota(String, long, long)} for the contract. * Sets quota for for a directory. * @returns INodeDirectory if any of the quotas have changed. null other wise. 
* @throws FileNotFoundException if the path does not exist or is a file * @throws QuotaExceededException if the directory tree size is * greater than the given quota */ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota) throws FileNotFoundException, QuotaExceededException { // sanity check if ((nsQuota < 0 && nsQuota != FSConstants.QUOTA_DONT_SET && nsQuota < FSConstants.QUOTA_RESET) || (dsQuota < 0 && dsQuota != FSConstants.QUOTA_DONT_SET && dsQuota < FSConstants.QUOTA_RESET)) { throw new IllegalArgumentException("Illegal value for nsQuota or " + "dsQuota : " + nsQuota + " and " + dsQuota); } String srcs = normalizePath(src); INode[] inodes = rootDir.getExistingPathINodes(src); INode targetNode = inodes[inodes.length-1]; if (targetNode == null) { throw new FileNotFoundException("Directory does not exist: " + srcs); } else if (!targetNode.isDirectory()) { throw new FileNotFoundException("Cannot set quota on a file: " + srcs); } else { // a directory inode INodeDirectory dirNode = (INodeDirectory)targetNode; long oldNsQuota = dirNode.getNsQuota(); long oldDsQuota = dirNode.getDsQuota(); if (nsQuota == FSConstants.QUOTA_DONT_SET) { nsQuota = oldNsQuota; } if (dsQuota == FSConstants.QUOTA_DONT_SET) { dsQuota = oldDsQuota; } if (dirNode instanceof INodeDirectoryWithQuota) { // a directory with quota; so set the quota to the new value ((INodeDirectoryWithQuota)dirNode).setQuota(nsQuota, dsQuota); } else { // a non-quota directory; so replace it with a directory with quota INodeDirectoryWithQuota newNode = new INodeDirectoryWithQuota(nsQuota, dsQuota, dirNode); // non-root directory node; parent != null INodeDirectory parent = (INodeDirectory)inodes[inodes.length-2]; dirNode = newNode; parent.replaceChild(newNode); } return (oldNsQuota != nsQuota || oldDsQuota != dsQuota) ? dirNode : null; } } /** * See {@link ClientProtocol#setQuota(String, long, long)} for the * contract. 
* @see #unprotectedSetQuota(String, long, long) */ void setQuota(String src, long nsQuota, long dsQuota) throws FileNotFoundException, QuotaExceededException { synchronized (rootDir) { INodeDirectory dir = unprotectedSetQuota(src, nsQuota, dsQuota); if (dir != null) { fsImage.getEditLog().logSetQuota(src, dir.getNsQuota(), dir.getDsQuota()); } } } long totalInodes() { synchronized (rootDir) { return rootDir.numItemsInTree(); } } /** * Sets the access time on the file. Logs it in the transaction log */ void setTimes(String src, INodeFile inode, long mtime, long atime, boolean force) throws IOException { if (unprotectedSetTimes(src, inode, mtime, atime, force)) { fsImage.getEditLog().logTimes(src, mtime, atime); } } boolean unprotectedSetTimes(String src, long mtime, long atime, boolean force) throws IOException { INodeFile inode = getFileINode(src); return unprotectedSetTimes(src, inode, mtime, atime, force); } private boolean unprotectedSetTimes(String src, INodeFile inode, long mtime, long atime, boolean force) throws IOException { boolean status = false; if (mtime != -1) { inode.setModificationTimeForce(mtime); status = true; } if (atime != -1) { long inodeTime = inode.getAccessTime(); // if the last access time update was within the last precision interval, then // no need to store access time if (atime <= inodeTime + namesystem.getAccessTimePrecision() && !force) { status = false; } else { inode.setAccessTime(atime); status = true; } } return status; } /** * Create FileStatus by file INode */ private static FileStatus createFileStatus(String path, INode node) { // length is zero for directories return new FileStatus(node.isDirectory() ? 0 : node.computeContentSummary().getLength(), node.isDirectory(), node.isDirectory() ? 0 : ((INodeFile)node).getReplication(), node.isDirectory() ? 
0 : ((INodeFile)node).getPreferredBlockSize(), node.getModificationTime(), node.getAccessTime(), node.getFsPermission(), node.getUserName(), node.getGroupName(), new Path(path)); } } diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 48a4118..3aaed16 100644 --- a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1,1036 +1,1055 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.hdfs.server.namenode; import org.apache.commons.logging.*; import org.apache.hadoop.conf.*; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.*; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean; import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.PermissionChecker; import org.apache.hadoop.security.UnixUserGroupInformation; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.*; import org.apache.hadoop.metrics.util.MBeanUtil; import org.apache.hadoop.net.CachedDNSToSwitchMapping; import org.apache.hadoop.net.DNSToSwitchMapping; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.ScriptBasedMapping; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.*; import 
org.apache.hadoop.ipc.Server; import org.apache.hadoop.io.IOUtils; import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; import java.io.FileNotFoundException; import java.io.IOException; import java.io.PrintWriter; import java.io.DataOutputStream; import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.*; import java.util.Map.Entry; import javax.management.NotCompliantMBeanException; import javax.management.ObjectName; import javax.management.StandardMBean; import javax.security.auth.login.LoginException; /*************************************************** * FSNamesystem does the actual bookkeeping work for the * DataNode. * * It tracks several important tables. * * 1) valid fsname --> blocklist (kept on disk, logged) * 2) Set of all valid blocks (inverted #1) * 3) block --> machinelist (kept in memory, rebuilt dynamically from reports) * 4) machine --> blocklist (inverted #2) * 5) LRU cache of updated-heartbeat machines ***************************************************/ public class FSNamesystem implements FSConstants, FSNamesystemMBean { public static final Log LOG = LogFactory.getLog(FSNamesystem.class); public static final String AUDIT_FORMAT = "ugi=%s\t" + // ugi "ip=%s\t" + // remote IP "cmd=%s\t" + // command "src=%s\t" + // src path "dst=%s\t" + // dst path (optional) "perm=%s"; // permissions (optional) private static final ThreadLocal<Formatter> auditFormatter = new ThreadLocal<Formatter>() { protected Formatter initialValue() { return new Formatter(new StringBuilder(AUDIT_FORMAT.length() * 4)); } }; private static final void logAuditEvent(UserGroupInformation ugi, InetAddress addr, String cmd, String src, String dst, FileStatus stat) { final Formatter fmt = auditFormatter.get(); ((StringBuilder)fmt.out()).setLength(0); auditLog.info(fmt.format(AUDIT_FORMAT, ugi, addr, cmd, src, dst, (stat == null) ? 
null : stat.getOwner() + ':' + stat.getGroup() + ':' + stat.getPermission() ).toString()); } public static final Log auditLog = LogFactory.getLog( FSNamesystem.class.getName() + ".audit"); // Default initial capacity and load factor of map public static final int DEFAULT_INITIAL_MAP_CAPACITY = 16; public static final float DEFAULT_MAP_LOAD_FACTOR = 0.75f; private boolean isPermissionEnabled; private UserGroupInformation fsOwner; private String supergroup; private PermissionStatus defaultPermission; // FSNamesystemMetrics counter variables private FSNamesystemMetrics myFSMetrics; private long capacityTotal = 0L, capacityUsed = 0L, capacityRemaining = 0L; private int totalLoad = 0; volatile long pendingReplicationBlocksCount = 0L; volatile long corruptReplicaBlocksCount = 0L; volatile long underReplicatedBlocksCount = 0L; volatile long scheduledReplicationBlocksCount = 0L; volatile long excessBlocksCount = 0L; volatile long pendingDeletionBlocksCount = 0L; // // Stores the correct file name hierarchy // public FSDirectory dir; // // Mapping: Block -> { INode, datanodes, self ref } // Updated only in response to client-sent information. // final BlocksMap blocksMap = new BlocksMap(DEFAULT_INITIAL_MAP_CAPACITY, DEFAULT_MAP_LOAD_FACTOR); // // Store blocks-->datanodedescriptor(s) map of corrupt replicas // public CorruptReplicasMap corruptReplicas = new CorruptReplicasMap(); /** * Stores the datanode -> block map. * <p> * Done by storing a set of {@link DatanodeDescriptor} objects, sorted by * storage id. In order to keep the storage map consistent it tracks * all storages ever registered with the namenode. 
* A descriptor corresponding to a specific storage id can be * <ul> * <li>added to the map if it is a new storage id;</li> * <li>updated with a new datanode started as a replacement for the old one * with the same storage id; and </li> * <li>removed if and only if an existing datanode is restarted to serve a * different storage id.</li> * </ul> <br> * The list of the {@link DatanodeDescriptor}s in the map is checkpointed * in the namespace image file. Only the {@link DatanodeInfo} part is * persistent, the list of blocks is restored from the datanode block * reports. * <p> * Mapping: StorageID -> DatanodeDescriptor */ NavigableMap<String, DatanodeDescriptor> datanodeMap = new TreeMap<String, DatanodeDescriptor>(); // // Keeps a Collection for every named machine containing // blocks that have recently been invalidated and are thought to live // on the machine in question. // Mapping: StorageID -> ArrayList<Block> // private Map<String, Collection<Block>> recentInvalidateSets = new TreeMap<String, Collection<Block>>(); // // Keeps a TreeSet for every named node. Each treeset contains // a list of the blocks that are "extra" at that location. We'll // eventually remove these extras. // Mapping: StorageID -> TreeSet<Block> // Map<String, Collection<Block>> excessReplicateMap = new TreeMap<String, Collection<Block>>(); Random r = new Random(); /** * Stores a set of DatanodeDescriptor objects. * This is a subset of {@link #datanodeMap}, containing nodes that are * considered alive. * The {@link HeartbeatMonitor} periodically checks for outdated entries, * and removes them from the list. */ ArrayList<DatanodeDescriptor> heartbeats = new ArrayList<DatanodeDescriptor>(); // // Store set of Blocks that need to be replicated 1 or more times. // We also store pending replication-orders. 
// Set of: Block // private UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks(); private PendingReplicationBlocks pendingReplications; public LeaseManager leaseManager = new LeaseManager(this); // // Threaded object that checks to see if we have been // getting heartbeats from all clients. // Daemon hbthread = null; // HeartbeatMonitor thread public Daemon lmthread = null; // LeaseMonitor thread Daemon smmthread = null; // SafeModeMonitor thread public Daemon replthread = null; // Replication thread private volatile boolean fsRunning = true; long systemStart = 0; // The maximum number of replicates we should allow for a single block private int maxReplication; // How many outgoing replication streams a given node should have at one time private int maxReplicationStreams; // MIN_REPLICATION is how many copies we need in place or else we disallow the write private int minReplication; // Default replication private int defaultReplication; // heartbeatRecheckInterval is how often namenode checks for expired datanodes private long heartbeatRecheckInterval; // heartbeatExpireInterval is how long namenode waits for datanode to report // heartbeat private long heartbeatExpireInterval; //replicationRecheckInterval is how often namenode checks for new replication work private long replicationRecheckInterval; // default block size of a file private long defaultBlockSize = 0; // allow appending to hdfs files private boolean supportAppends = true; /** * Last block index used for replication work. 
*/ private int replIndex = 0; private long missingBlocksInCurIter = 0; private long missingBlocksInPrevIter = 0; public static FSNamesystem fsNamesystemObject; /** NameNode RPC address */ private InetSocketAddress nameNodeAddress = null; // TODO: name-node has this field, it should be removed here private SafeModeInfo safeMode; // safe mode information private Host2NodesMap host2DataNodeMap = new Host2NodesMap(); // datanode networktoplogy NetworkTopology clusterMap = new NetworkTopology(); private DNSToSwitchMapping dnsToSwitchMapping; // for block replicas placement ReplicationTargetChooser replicator; private HostsFileReader hostsReader; private Daemon dnthread = null; private long maxFsObjects = 0; // maximum number of fs objects /** * The global generation stamp for this file system. */ private final GenerationStamp generationStamp = new GenerationStamp(); // Ask Datanode only up to this many blocks to delete. private int blockInvalidateLimit = FSConstants.BLOCK_INVALIDATE_CHUNK; // precision of access times. private long accessTimePrecision = 0; /** * FSNamesystem constructor. */ FSNamesystem(NameNode nn, Configuration conf) throws IOException { try { initialize(nn, conf); } catch(IOException e) { LOG.error(getClass().getSimpleName() + " initialization failed.", e); close(); throw e; } } /** * Initialize FSNamesystem. 
*/ private void initialize(NameNode nn, Configuration conf) throws IOException { this.systemStart = now(); setConfigurationParameters(conf); this.nameNodeAddress = nn.getNameNodeAddress(); this.registerMBean(conf); // register the MBean for the FSNamesystemStutus this.dir = new FSDirectory(this, conf); StartupOption startOpt = NameNode.getStartupOption(conf); this.dir.loadFSImage(getNamespaceDirs(conf), getNamespaceEditsDirs(conf), startOpt); long timeTakenToLoadFSImage = now() - systemStart; LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs"); NameNode.getNameNodeMetrics().fsImageLoadTime.set( (int) timeTakenToLoadFSImage); this.safeMode = new SafeModeInfo(conf); setBlockTotal(); pendingReplications = new PendingReplicationBlocks( conf.getInt("dfs.replication.pending.timeout.sec", -1) * 1000L); this.hbthread = new Daemon(new HeartbeatMonitor()); this.lmthread = new Daemon(leaseManager.new Monitor()); this.replthread = new Daemon(new ReplicationMonitor()); hbthread.start(); lmthread.start(); replthread.start(); this.hostsReader = new HostsFileReader(conf.get("dfs.hosts",""), conf.get("dfs.hosts.exclude","")); this.dnthread = new Daemon(new DecommissionManager(this).new Monitor( conf.getInt("dfs.namenode.decommission.interval", 30), conf.getInt("dfs.namenode.decommission.nodes.per.interval", 5))); dnthread.start(); this.dnsToSwitchMapping = ReflectionUtils.newInstance( conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class, DNSToSwitchMapping.class), conf); /* If the dns to swith mapping supports cache, resolve network * locations of those hosts in the include list, * and store the mapping in the cache; so future calls to resolve * will be fast. 
*/ if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) { dnsToSwitchMapping.resolve(new ArrayList<String>(hostsReader.getHosts())); } } public static Collection<File> getNamespaceDirs(Configuration conf) { Collection<String> dirNames = conf.getStringCollection("dfs.name.dir"); if (dirNames.isEmpty()) dirNames.add("/tmp/hadoop/dfs/name"); Collection<File> dirs = new ArrayList<File>(dirNames.size()); for(String name : dirNames) { dirs.add(new File(name)); } return dirs; } public static Collection<File> getNamespaceEditsDirs(Configuration conf) { Collection<String> editsDirNames = conf.getStringCollection("dfs.name.edits.dir"); if (editsDirNames.isEmpty()) editsDirNames.add("/tmp/hadoop/dfs/name"); Collection<File> dirs = new ArrayList<File>(editsDirNames.size()); for(String name : editsDirNames) { dirs.add(new File(name)); } return dirs; } /** * dirs is a list of directories where the filesystem directory state * is stored */ FSNamesystem(FSImage fsImage, Configuration conf) throws IOException { setConfigurationParameters(conf); this.dir = new FSDirectory(fsImage, this, conf); } /** * Initializes some of the members from configuration */ private void setConfigurationParameters(Configuration conf) throws IOException { fsNamesystemObject = this; try { fsOwner = UnixUserGroupInformation.login(conf); } catch (LoginException e) { throw new IOException(StringUtils.stringifyException(e)); } LOG.info("fsOwner=" + fsOwner); this.supergroup = conf.get("dfs.permissions.supergroup", "supergroup"); this.isPermissionEnabled = conf.getBoolean("dfs.permissions", true); LOG.info("supergroup=" + supergroup); LOG.info("isPermissionEnabled=" + isPermissionEnabled); short filePermission = (short)conf.getInt("dfs.upgrade.permission", 0777); this.defaultPermission = PermissionStatus.createImmutable( fsOwner.getUserName(), supergroup, new FsPermission(filePermission)); this.replicator = new ReplicationTargetChooser( conf.getBoolean("dfs.replication.considerLoad", true), this, 
clusterMap); this.defaultReplication = conf.getInt("dfs.replication", 3); this.maxReplication = conf.getInt("dfs.replication.max", 512); this.minReplication = conf.getInt("dfs.replication.min", 1); if (minReplication <= 0) throw new IOException( "Unexpected configuration parameters: dfs.replication.min = " + minReplication + " must be greater than 0"); if (maxReplication >= (int)Short.MAX_VALUE) throw new IOException( "Unexpected configuration parameters: dfs.replication.max = " + maxReplication + " must be less than " + (Short.MAX_VALUE)); if (maxReplication < minReplication) throw new IOException( "Unexpected configuration parameters: dfs.replication.min = " + minReplication + " must be less than dfs.replication.max = " + maxReplication); this.maxReplicationStreams = conf.getInt("dfs.max-repl-streams", 2); long heartbeatInterval = conf.getLong("dfs.heartbeat.interval", 3) * 1000; this.heartbeatRecheckInterval = conf.getInt( "heartbeat.recheck.interval", 5 * 60 * 1000); // 5 minutes this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval + 10 * heartbeatInterval; this.replicationRecheckInterval = conf.getInt("dfs.replication.interval", 3) * 1000L; this.defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE); this.maxFsObjects = conf.getLong("dfs.max.objects", 0); this.blockInvalidateLimit = Math.max(this.blockInvalidateLimit, 20*(int)(heartbeatInterval/1000)); this.accessTimePrecision = conf.getLong("dfs.access.time.precision", 0); this.supportAppends = conf.getBoolean("dfs.support.append", false); } /** * Return the default path permission when upgrading from releases with no * permissions (<=0.15) to releases with permissions (>=0.16) */ protected PermissionStatus getUpgradePermission() { return defaultPermission; } /** Return the FSNamesystem object * */ public static FSNamesystem getFSNamesystem() { return fsNamesystemObject; } NamespaceInfo getNamespaceInfo() { return new NamespaceInfo(dir.fsImage.getNamespaceID(), dir.fsImage.getCTime(), 
getDistributedUpgradeVersion()); } /** * Close down this file system manager. * Causes heartbeat and lease daemons to stop; waits briefly for * them to finish, but a short timeout returns control back to caller. */ public void close() { fsRunning = false; try { if (pendingReplications != null) pendingReplications.stop(); if (hbthread != null) hbthread.interrupt(); if (replthread != null) replthread.interrupt(); if (dnthread != null) dnthread.interrupt(); if (smmthread != null) smmthread.interrupt(); } catch (Exception e) { LOG.warn("Exception shutting down FSNamesystem", e); } finally { // using finally to ensure we also wait for lease daemon try { if (lmthread != null) { lmthread.interrupt(); lmthread.join(3000); } dir.close(); } catch (InterruptedException ie) { } catch (IOException ie) { LOG.error("Error closing FSDirectory", ie); IOUtils.cleanup(LOG, dir); } } } /** Is this name system running? */ boolean isRunning() { return fsRunning; } /** * Dump all metadata into specified file */ synchronized void metaSave(String filename) throws IOException { checkSuperuserPrivilege(); File file = new File(System.getProperty("hadoop.log.dir"), filename); PrintWriter out = new PrintWriter(new BufferedWriter( new FileWriter(file, true))); + long totalInodes = this.dir.totalInodes(); + long totalBlocks = this.getBlocksTotal(); + + ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); + ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>(); + this.DFSNodesStatus(live, dead); + + String str = totalInodes + " files and directories, " + totalBlocks + + " blocks = " + (totalInodes + totalBlocks) + " total"; + out.println(str); + out.println("Live Datanodes: "+live.size()); + out.println("Dead Datanodes: "+dead.size()); // // Dump contents of neededReplication // synchronized (neededReplications) { out.println("Metasave: Blocks waiting for replication: " + neededReplications.size()); for (Block block : neededReplications) { 
List<DatanodeDescriptor> containingNodes = new ArrayList<DatanodeDescriptor>(); NumberReplicas numReplicas = new NumberReplicas(); // source node returned is not used chooseSourceDatanode(block, containingNodes, numReplicas); int usableReplicas = numReplicas.liveReplicas() + numReplicas.decommissionedReplicas(); + + if (block instanceof BlockInfo) { + String fileName = FSDirectory.getFullPathName(((BlockInfo) block) + .getINode()); + out.print(fileName + ": "); + } + // l: == live:, d: == decommissioned c: == corrupt e: == excess out.print(block + ((usableReplicas > 0)? "" : " MISSING") + " (replicas:" + " l: " + numReplicas.liveReplicas() + " d: " + numReplicas.decommissionedReplicas() + " c: " + numReplicas.corruptReplicas() + " e: " + numReplicas.excessReplicas() + ") "); Collection<DatanodeDescriptor> corruptNodes = corruptReplicas.getNodes(block); for (Iterator<DatanodeDescriptor> jt = blocksMap.nodeIterator(block); jt.hasNext();) { DatanodeDescriptor node = jt.next(); String state = ""; if (corruptNodes != null && corruptNodes.contains(node)) { state = "(corrupt)"; } else if (node.isDecommissioned() || node.isDecommissionInProgress()) { state = "(decommissioned)"; } out.print(" " + node + state + " : "); } out.println(""); } } // // Dump blocks from pendingReplication // pendingReplications.metaSave(out); // // Dump blocks that are waiting to be deleted // dumpRecentInvalidateSets(out); // // Dump all datanodes // datanodeDump(out); out.flush(); out.close(); } long getDefaultBlockSize() { return defaultBlockSize; } long getAccessTimePrecision() { return accessTimePrecision; } private boolean isAccessTimeSupported() { return accessTimePrecision > 0; } /* get replication factor of a block */ private int getReplication(Block block) { INodeFile fileINode = blocksMap.getINode(block); if (fileINode == null) { // block does not belong to any file return 0; } assert !fileINode.isDirectory() : "Block cannot belong to a directory."; return fileINode.getReplication(); } 
/* updates a block in under replication queue */ synchronized void updateNeededReplications(Block block, int curReplicasDelta, int expectedReplicasDelta) { NumberReplicas repl = countNodes(block); int curExpectedReplicas = getReplication(block); neededReplications.update(block, repl.liveReplicas(), repl.decommissionedReplicas(), curExpectedReplicas, curReplicasDelta, expectedReplicasDelta); } ///////////////////////////////////////////////////////// // // These methods are called by secondary namenodes // ///////////////////////////////////////////////////////// /** * return a list of blocks & their locations on <code>datanode</code> whose * total size is <code>size</code> * * @param datanode on which blocks are located * @param size total size of blocks */ synchronized BlocksWithLocations getBlocks(DatanodeID datanode, long size) throws IOException { checkSuperuserPrivilege(); DatanodeDescriptor node = getDatanode(datanode); if (node == null) { NameNode.stateChangeLog.warn("BLOCK* NameSystem.getBlocks: " + "Asking for blocks from an unrecorded node " + datanode.getName()); throw new IllegalArgumentException( "Unexpected exception. 
Got getBlocks message for datanode " + datanode.getName() + ", but there is no info for it"); } int numBlocks = node.numBlocks(); if(numBlocks == 0) { return new BlocksWithLocations(new BlockWithLocations[0]); } Iterator<Block> iter = node.getBlockIterator(); int startBlock = r.nextInt(numBlocks); // starting from a random block // skip blocks for(int i=0; i<startBlock; i++) { iter.next(); } List<BlockWithLocations> results = new ArrayList<BlockWithLocations>(); long totalSize = 0; while(totalSize<size && iter.hasNext()) { totalSize += addBlock(iter.next(), results); } if(totalSize<size) { iter = node.getBlockIterator(); // start from the beginning for(int i=0; i<startBlock&&totalSize<size; i++) { totalSize += addBlock(iter.next(), results); } } return new BlocksWithLocations( results.toArray(new BlockWithLocations[results.size()])); } /** * Get all valid locations of the block & add the block to results * return the length of the added block; 0 if the block is not added */ private long addBlock(Block block, List<BlockWithLocations> results) { ArrayList<String> machineSet = new ArrayList<String>(blocksMap.numNodes(block)); for(Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block); it.hasNext();) { String storageID = it.next().getStorageID(); // filter invalidate replicas Collection<Block> blocks = recentInvalidateSets.get(storageID); if(blocks==null || !blocks.contains(block)) { machineSet.add(storageID); } } if(machineSet.size() == 0) { return 0; } else { results.add(new BlockWithLocations(block, machineSet.toArray(new String[machineSet.size()]))); return block.getNumBytes(); } } ///////////////////////////////////////////////////////// // // These methods are called by HadoopFS clients // ///////////////////////////////////////////////////////// /** * Set permissions for an existing file. 
* @throws IOException */ public synchronized void setPermission(String src, FsPermission permission ) throws IOException { checkOwner(src); dir.setPermission(src, permission); getEditLog().logSync(); if (auditLog.isInfoEnabled()) { final FileStatus stat = dir.getFileInfo(src); logAuditEvent(UserGroupInformation.getCurrentUGI(), Server.getRemoteIp(), "setPermission", src, null, stat); } } /** * Set owner for an existing file. * @throws IOException */ public synchronized void setOwner(String src, String username, String group ) throws IOException { FSPermissionChecker pc = checkOwner(src); if (!pc.isSuper) { if (username != null && !pc.user.equals(username)) { throw new AccessControlException("Non-super user cannot change owner."); } if (group != null && !pc.containsGroup(group)) { throw new AccessControlException("User does not belong to " + group + " ."); } } dir.setOwner(src, username, group); getEditLog().logSync(); if (auditLog.isInfoEnabled()) { final FileStatus stat = dir.getFileInfo(src); logAuditEvent(UserGroupInformation.getCurrentUGI(), Server.getRemoteIp(), "setOwner", src, null, stat); } } /** * Get block locations within the specified range. * * @see #getBlockLocations(String, long, long) */ LocatedBlocks getBlockLocations(String clientMachine, String src, long offset, long length) throws IOException { if (isPermissionEnabled) { checkPathAccess(src, FsAction.READ); } LocatedBlocks blocks = getBlockLocations(src, offset, length, true); if (blocks != null) { //sort the blocks DatanodeDescriptor client = host2DataNodeMap.getDatanodeByHost( clientMachine); for (LocatedBlock b : blocks.getLocatedBlocks()) { clusterMap.pseudoSortByDistance(client, b.getLocations()); } } return blocks; } /** * Get block locations within the specified range. 
* @see ClientProtocol#getBlockLocations(String, long, long) */ public LocatedBlocks getBlockLocations(String src, long offset, long length ) throws IOException { return getBlockLocations(src, offset, length, false); } /** * Get block locations within the specified range. * @see ClientProtocol#getBlockLocations(String, long, long) */ public LocatedBlocks getBlockLocations(String src, long offset, long length, boolean doAccessTime) throws IOException { if (offset < 0) { throw new IOException("Negative offset is not supported. File: " + src ); } if (length < 0) { throw new IOException("Negative length is not supported. File: " + src ); } final LocatedBlocks ret = getBlockLocationsInternal(src, offset, length, Integer.MAX_VALUE, doAccessTime); if (auditLog.isInfoEnabled()) { logAuditEvent(UserGroupInformation.getCurrentUGI(), Server.getRemoteIp(), "open", src, null, null); } return ret; } private synchronized LocatedBlocks getBlockLocationsInternal(String src, long offset, long length, int nrBlocksToReturn, boolean doAccessTime) throws IOException { INodeFile inode = dir.getFileINode(src); if(inode == null) { return null; } if (doAccessTime && isAccessTimeSupported()) { dir.setTimes(src, inode, -1, now(), false); } Block[] blocks = inode.getBlocks(); if (blocks == null) { return null; } if (blocks.length == 0) { return inode.createLocatedBlocks(new ArrayList<LocatedBlock>(blocks.length)); } List<LocatedBlock> results; results = new ArrayList<LocatedBlock>(blocks.length); int curBlk = 0; long curPos = 0, blkSize = 0; int nrBlocks = (blocks[0].getNumBytes() == 0) ? 
0 : blocks.length; for (curBlk = 0; curBlk < nrBlocks; curBlk++) { blkSize = blocks[curBlk].getNumBytes(); assert blkSize > 0 : "Block of size 0"; if (curPos + blkSize > offset) { break; } curPos += blkSize; } if (nrBlocks > 0 && curBlk == nrBlocks) // offset >= end of file return null; long endOff = offset + length; do { // get block locations int numNodes = blocksMap.numNodes(blocks[curBlk]); int numCorruptNodes = countNodes(blocks[curBlk]).corruptReplicas(); int numCorruptReplicas = corruptReplicas.numCorruptReplicas(blocks[curBlk]); if (numCorruptNodes != numCorruptReplicas) { LOG.warn("Inconsistent number of corrupt replicas for " + blocks[curBlk] + "blockMap has " + numCorruptNodes + " but corrupt replicas map has " + numCorruptReplicas); } boolean blockCorrupt = (numCorruptNodes == numNodes); int numMachineSet = blockCorrupt ? numNodes : (numNodes - numCorruptNodes); DatanodeDescriptor[] machineSet = new DatanodeDescriptor[numMachineSet]; if (numMachineSet > 0) { numNodes = 0; for(Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(blocks[curBlk]); it.hasNext();) { DatanodeDescriptor dn = it.next(); boolean replicaCorrupt = corruptReplicas.isReplicaCorrupt(blocks[curBlk], dn); if (blockCorrupt || (!blockCorrupt && !replicaCorrupt)) machineSet[numNodes++] = dn; } } results.add(new LocatedBlock(blocks[curBlk], machineSet, curPos, blockCorrupt)); curPos += blocks[curBlk].getNumBytes(); curBlk++; } while (curPos < endOff && curBlk < blocks.length && results.size() < nrBlocksToReturn); return inode.createLocatedBlocks(results); } /** * stores the modification and access time for this inode. * The access time is precise upto an hour. The transaction, if needed, is * written to the edits log but is not flushed. */ public synchronized void setTimes(String src, long mtime, long atime) throws IOException { if (!isAccessTimeSupported() && atime != -1) { throw new IOException("Access time for hdfs is not configured. 
" + " Please set dfs.support.accessTime configuration parameter."); } // // The caller needs to have write access to set access & modification times. if (isPermissionEnabled) { checkPathAccess(src, FsAction.WRITE); } INodeFile inode = dir.getFileINode(src); if (inode != null) { dir.setTimes(src, inode, mtime, atime, true); if (auditLog.isInfoEnabled()) { final FileStatus stat = dir.getFileInfo(src); logAuditEvent(UserGroupInformation.getCurrentUGI(), Server.getRemoteIp(), "setTimes", src, null, stat); } } else { throw new FileNotFoundException("File " + src + " does not exist."); } } /** * Set replication for an existing file. * * The NameNode sets new replication and schedules either replication of * under-replicated data blocks or removal of the eccessive block copies * if the blocks are over-replicated. * * @see ClientProtocol#setReplication(String, short) * @param src file name * @param replication new replication * @return true if successful; * false if file does not exist or is a directory */ public boolean setReplication(String src, short replication) throws IOException { boolean status = setReplicationInternal(src, replication); getEditLog().logSync(); if (status && auditLog.isInfoEnabled()) { logAuditEvent(UserGroupInformation.getCurrentUGI(), Server.getRemoteIp(), "setReplication", src, null, null); } return status; } private synchronized boolean setReplicationInternal(String src, short replication ) throws IOException { if (isInSafeMode()) throw new SafeModeException("Cannot set replication for " + src, safeMode); verifyReplication(src, replication, null); if (isPermissionEnabled) { checkPathAccess(src, FsAction.WRITE); } int[] oldReplication = new int[1]; Block[] fileBlocks; fileBlocks = dir.setReplication(src, replication, oldReplication); if (fileBlocks == null) // file not found or is a directory return false; int oldRepl = oldReplication[0]; if (oldRepl == replication) // the same replication return true; // update needReplication priority queues 
for(int idx = 0; idx < fileBlocks.length; idx++) updateNeededReplications(fileBlocks[idx], 0, replication-oldRepl); if (oldRepl > replication) { // old replication > the new one; need to remove copies LOG.info("Reducing replication for file " + src + ". New replication is " + replication); for(int idx = 0; idx < fileBlocks.length; idx++) processOverReplicatedBlock(fileBlocks[idx], replication, null, null); } else { // replication factor is increased LOG.info("Increasing replication for file " + src + ". New replication is " + replication); } return true; } long getPreferredBlockSize(String filename) throws IOException { if (isPermissionEnabled) { checkTraverse(filename); } return dir.getPreferredBlockSize(filename); } /** * Check whether the replication parameter is within the range * determined by system configuration. */ private void verifyReplication(String src, short replication, String clientName ) throws IOException { String text = "file " + src + ((clientName != null) ? " on client " + clientName : "") + ".\n" + "Requested replication " + replication; if (replication > maxReplication) throw new IOException(text + " exceeds maximum " + maxReplication); if (replication < minReplication) throw new IOException( text + " is less than the required minimum " + minReplication); } /** * Create a new file entry in the namespace. * * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long) * * @throws IOException if file name is invalid * {@link FSDirectory#isValidToCreate(String)}. 
*/ void startFile(String src, PermissionStatus permissions, String holder, String clientMachine, boolean overwrite, short replication, long blockSize ) throws IOException { startFileInternal(src, permissions, holder, clientMachine, overwrite, false, replication, blockSize); getEditLog().logSync(); if (auditLog.isInfoEnabled()) { final FileStatus stat = dir.getFileInfo(src); logAuditEvent(UserGroupInformation.getCurrentUGI(), Server.getRemoteIp(), "create", src, null, stat); } } private synchronized void startFileInternal(String src, PermissionStatus permissions, String holder, String clientMachine, boolean overwrite, boolean append, short replication, long blockSize ) throws IOException { if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: src=" + src + ", holder=" + holder + ", clientMachine=" + clientMachine + ", replication=" + replication + ", overwrite=" + overwrite + ", append=" + append); } if (isInSafeMode()) throw new SafeModeException("Cannot create file" + src, safeMode); if (!DFSUtil.isValidName(src)) { throw new IOException("Invalid file name: " + src); } // Verify that the destination does not exist as a directory already. boolean pathExists = dir.exists(src); if (pathExists && dir.isDir(src)) { throw new IOException("Cannot create file "+ src + "; already exists as a directory."); } if (isPermissionEnabled) { if (append || (overwrite && pathExists)) { checkPathAccess(src, FsAction.WRITE); } else { checkAncestorAccess(src, FsAction.WRITE); } } diff --git a/src/test/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/src/test/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java new file mode 100644 index 0000000..4a5718f --- /dev/null +++ b/src/test/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.junit.Test; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import java.io.BufferedReader; +import java.io.FileInputStream; +import java.io.DataInputStream; +import java.io.InputStreamReader; +import java.io.IOException; +import java.lang.InterruptedException; +import java.util.Random; +import static org.junit.Assert.assertTrue; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.fs.FSDataOutputStream; + +/** + * This class tests the creation and validation of metasave + */ +public class TestMetaSave { + static final int NUM_DATA_NODES = 2; + static final long seed = 0xDEADBEEFL; + static final int blockSize = 8192; + private static MiniDFSCluster cluster = null; + private static FileSystem fileSys = null; + + private void createFile(FileSystem fileSys, Path name) throws IOException { + FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() + .getInt("io.file.buffer.size", 4096), (short) 2, (long) blockSize); + byte[] buffer = new byte[1024]; + Random rand = new Random(seed); + 
rand.nextBytes(buffer); + stm.write(buffer); + stm.close(); + } + + @BeforeClass + public static void setUp() throws IOException { + // start a cluster + Configuration conf = new Configuration(); + + // High value of replication interval + // so that blocks remain under-replicated + conf.setInt("dfs.replication.interval", 1000); + conf.setLong("dfs.heartbeat.interval", 1L); + conf.setLong("heartbeat.recheck.interval", 1L); + cluster = new MiniDFSCluster(conf, NUM_DATA_NODES, true, null); + cluster.waitActive(); + fileSys = cluster.getFileSystem(); + } + + /** + * Tests metasave + */ + @Test + public void testMetaSave() throws IOException, InterruptedException { + + final FSNamesystem namesystem = cluster.getNameNode().getNamesystem(); + + for (int i = 0; i < 2; i++) { + Path file = new Path("/filestatus" + i); + createFile(fileSys, file); + } + + cluster.stopDataNode(1); + // wait for namenode to discover that a datanode is dead + Thread.sleep(15000); + namesystem.setReplication("/filestatus0", (short) 4); + + namesystem.metaSave("metasave.out.txt"); + + // Verification + String logFile = System.getProperty("hadoop.log.dir") + "/" + + "metasave.out.txt"; + FileInputStream fstream = new FileInputStream(logFile); + DataInputStream in = new DataInputStream(fstream); + BufferedReader reader = new BufferedReader(new InputStreamReader(in)); + String line = reader.readLine(); + assertTrue(line.equals("3 files and directories, 2 blocks = 5 total")); + line = reader.readLine(); + assertTrue(line.equals("Live Datanodes: 1")); + line = reader.readLine(); + assertTrue(line.equals("Dead Datanodes: 1")); + line = reader.readLine(); + line = reader.readLine(); + assertTrue(line.matches("^/filestatus[01]:.*")); + } + + @AfterClass + public static void tearDown() throws IOException { + if (fileSys != null) + fileSys.close(); + if (cluster != null) + cluster.shutdown(); + } +}
jaxlaw/hadoop-common
e5d1ce782066858146f1cbb852d9a03356b09bfc
HADOOP:6521 from https://issues.apache.org/jira/secure/attachment/12434469/hadoop-6521.rel20.1.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 0f2c362..27d6613 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,506 +1,509 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383006 + HADOOP-6521. Fix backward compatiblity issue with umask when applications + use deprecated param dfs.umask in configuration or use + FsPermission.setUMask(). (suresh) MAPREDUCE-1372. Fixed a ConcurrentModificationException in jobtracker. (Arun C Murthy via yhemanth) MAPREDUCE-1316. Fix jobs' retirement from the JobTracker to prevent memory leaks via stale references. (Amar Kamat via acmurthy) MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. (Amareshwari Sriramadasu via acmurthy) HADOOP-6460. Reinitializes buffers used for serializing responses in ipc server on exceeding maximum response size to free up Java heap. (suresh) MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. 
Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) HADOOP-6203. 
FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1376. Add support for submitting jobs as configured users, pluggable mapping of trace users to target users in Gridmix. (cdouglas) yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. 
Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. 
(suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. 
Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. 
HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. 
http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. 
http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). 
http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/core/org/apache/hadoop/fs/permission/FsPermission.java b/src/core/org/apache/hadoop/fs/permission/FsPermission.java index fdb2616..bac25ec 100644 --- a/src/core/org/apache/hadoop/fs/permission/FsPermission.java +++ b/src/core/org/apache/hadoop/fs/permission/FsPermission.java @@ -1,220 +1,220 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableFactories; import org.apache.hadoop.io.WritableFactory; /** * A class for file/directory permissions. */ public class FsPermission implements Writable { private static final Log LOG = LogFactory.getLog(FsPermission.class); static final WritableFactory FACTORY = new WritableFactory() { public Writable newInstance() { return new FsPermission(); } }; static { // register a ctor WritableFactories.setFactory(FsPermission.class, FACTORY); } /** Create an immutable {@link FsPermission} object. */ public static FsPermission createImmutable(short permission) { return new FsPermission(permission) { public FsPermission applyUMask(FsPermission umask) { throw new UnsupportedOperationException(); } public void readFields(DataInput in) throws IOException { throw new UnsupportedOperationException(); } }; } //POSIX permission style private FsAction useraction = null; private FsAction groupaction = null; private FsAction otheraction = null; private FsPermission() {} /** * Construct by the given {@link FsAction}. * @param u user action * @param g group action * @param o other action */ public FsPermission(FsAction u, FsAction g, FsAction o) {set(u, g, o);} /** * Construct by the given mode. 
* @param mode * @see #toShort() */ public FsPermission(short mode) { fromShort(mode); } /** * Copy constructor * * @param other other permission */ public FsPermission(FsPermission other) { this.useraction = other.useraction; this.groupaction = other.groupaction; this.otheraction = other.otheraction; } /** Return user {@link FsAction}. */ public FsAction getUserAction() {return useraction;} /** Return group {@link FsAction}. */ public FsAction getGroupAction() {return groupaction;} /** Return other {@link FsAction}. */ public FsAction getOtherAction() {return otheraction;} private void set(FsAction u, FsAction g, FsAction o) { useraction = u; groupaction = g; otheraction = o; } public void fromShort(short n) { FsAction[] v = FsAction.values(); set(v[(n >>> 6) & 7], v[(n >>> 3) & 7], v[n & 7]); } /** {@inheritDoc} */ public void write(DataOutput out) throws IOException { out.writeShort(toShort()); } /** {@inheritDoc} */ public void readFields(DataInput in) throws IOException { fromShort(in.readShort()); } /** * Create and initialize a {@link FsPermission} from {@link DataInput}. */ public static FsPermission read(DataInput in) throws IOException { FsPermission p = new FsPermission(); p.readFields(in); return p; } /** * Encode the object to a short. 
*/ public short toShort() { int s = (useraction.ordinal() << 6) | (groupaction.ordinal() << 3) | otheraction.ordinal(); return (short)s; } /** {@inheritDoc} */ public boolean equals(Object obj) { if (obj instanceof FsPermission) { FsPermission that = (FsPermission)obj; return this.useraction == that.useraction && this.groupaction == that.groupaction && this.otheraction == that.otheraction; } return false; } /** {@inheritDoc} */ public int hashCode() {return toShort();} /** {@inheritDoc} */ public String toString() { return useraction.SYMBOL + groupaction.SYMBOL + otheraction.SYMBOL; } /** Apply a umask to this permission and return a new one */ public FsPermission applyUMask(FsPermission umask) { return new FsPermission(useraction.and(umask.useraction.not()), groupaction.and(umask.groupaction.not()), otheraction.and(umask.otheraction.not())); } /** umask property label */ public static final String DEPRECATED_UMASK_LABEL = "dfs.umask"; public static final String UMASK_LABEL = "dfs.umaskmode"; public static final int DEFAULT_UMASK = 0022; /** Get the user file creation mask (umask) */ public static FsPermission getUMask(Configuration conf) { int umask = DEFAULT_UMASK; - // Attempt to pull value from configuration, trying new key first and then - // deprecated key, along with a warning, if not present + // To ensure backward compatibility first use the deprecated key. + // If the deprecated key is not present then check for the new key if(conf != null) { - String confUmask = conf.get(UMASK_LABEL); - if(confUmask != null) { // UMASK_LABEL is set - umask = new UmaskParser(confUmask).getUMask(); - } else { // check for deprecated key label - int oldStyleValue = conf.getInt(DEPRECATED_UMASK_LABEL, Integer.MIN_VALUE); - if(oldStyleValue != Integer.MIN_VALUE) { // Property was set with old key - LOG.warn(DEPRECATED_UMASK_LABEL + " configuration key is deprecated. 
" + - "Convert to " + UMASK_LABEL + ", using octal or symbolic umask " + - "specifications."); - umask = oldStyleValue; + int oldStyleValue = conf.getInt(DEPRECATED_UMASK_LABEL, Integer.MIN_VALUE); + if(oldStyleValue != Integer.MIN_VALUE) { // Property was set with old key + LOG.warn(DEPRECATED_UMASK_LABEL + " configuration key is deprecated. " + + "Convert to " + UMASK_LABEL + ", using octal or symbolic umask " + + "specifications."); + umask = oldStyleValue; + } else { + String confUmask = conf.get(UMASK_LABEL); + if(confUmask != null) { // UMASK_LABEL is set + umask = new UmaskParser(confUmask).getUMask(); } } } return new FsPermission((short)umask); } /** Set the user file creation mask (umask) */ public static void setUMask(Configuration conf, FsPermission umask) { - conf.setInt(UMASK_LABEL, umask.toShort()); + conf.set(UMASK_LABEL, String.format("%1$03o", umask.toShort())); } /** Get the default permission. */ public static FsPermission getDefault() { return new FsPermission((short)0777); } /** * Create a FsPermission from a Unix symbolic permission string * @param unixSymbolicPermission e.g. "-rw-rw-rw-" */ public static FsPermission valueOf(String unixSymbolicPermission) { if (unixSymbolicPermission == null) { return null; } else if (unixSymbolicPermission.length() != 10) { throw new IllegalArgumentException("length != 10(unixSymbolicPermission=" + unixSymbolicPermission + ")"); } int n = 0; for(int i = 1; i < unixSymbolicPermission.length(); i++) { n = n << 1; char c = unixSymbolicPermission.charAt(i); n += (c == '-' || c == 'T' || c == 'S') ? 
0: 1; } return new FsPermission((short)n); } } diff --git a/src/test/org/apache/hadoop/security/TestPermission.java b/src/test/org/apache/hadoop/security/TestPermission.java index b6138a0..74a42f7 100644 --- a/src/test/org/apache/hadoop/security/TestPermission.java +++ b/src/test/org/apache/hadoop/security/TestPermission.java @@ -1,229 +1,262 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.security; import java.io.IOException; import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.permission.*; import org.apache.hadoop.util.StringUtils; import org.apache.log4j.Level; import junit.framework.TestCase; /** Unit tests for permission */ public class TestPermission extends TestCase { public static final Log LOG = LogFactory.getLog(TestPermission.class); { ((Log4JLogger)UserGroupInformation.LOG).getLogger().setLevel(Level.ALL); } final private static Path ROOT_PATH = new Path("/data"); final private static Path CHILD_DIR1 = new Path(ROOT_PATH, "child1"); final private static Path CHILD_DIR2 = new Path(ROOT_PATH, "child2"); final private static Path CHILD_FILE1 = new Path(ROOT_PATH, "file1"); final private static Path CHILD_FILE2 = new Path(ROOT_PATH, "file2"); final private static int FILE_LEN = 100; final private static Random RAN = new Random(); final private static String USER_NAME = "user" + RAN.nextInt(); final private static String[] GROUP_NAMES = {"group1", "group2"}; static FsPermission checkPermission(FileSystem fs, String path, FsPermission expected) throws IOException { FileStatus s = fs.getFileStatus(new Path(path)); LOG.info(s.getPath() + ": " + s.isDir() + " " + s.getPermission() + ":" + s.getOwner() + ":" + s.getGroup()); if (expected != null) { assertEquals(expected, s.getPermission()); assertEquals(expected.toShort(), s.getPermission().toShort()); } return s.getPermission(); } + /** + * Tests backward compatibility. Configuration can be + * either set with old param dfs.umask that takes decimal umasks + * or dfs.umaskmode that takes symbolic or octal umask. 
+ */ + public void testBackwardCompatibility() { + // Test 1 - old configuration key with decimal + // umask value should be handled when set using + // FSPermission.setUMask() API + FsPermission perm = new FsPermission((short)18); + Configuration conf = new Configuration(); + FsPermission.setUMask(conf, perm); + assertEquals(18, FsPermission.getUMask(conf).toShort()); + + // Test 2 - old configuration key set with decimal + // umask value should be handled + perm = new FsPermission((short)18); + conf = new Configuration(); + conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "18"); + assertEquals(18, FsPermission.getUMask(conf).toShort()); + + // Test 3 - old configuration key overrides the new one + conf = new Configuration(); + conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "18"); + conf.set(FsPermission.UMASK_LABEL, "000"); + assertEquals(18, FsPermission.getUMask(conf).toShort()); + + // Test 4 - new configuration key is handled + conf = new Configuration(); + conf.set(FsPermission.UMASK_LABEL, "022"); + assertEquals(18, FsPermission.getUMask(conf).toShort()); + } + public void testCreate() throws Exception { Configuration conf = new Configuration(); conf.setBoolean("dfs.permissions", true); conf.set(FsPermission.UMASK_LABEL, "000"); MiniDFSCluster cluster = null; FileSystem fs = null; try { cluster = new MiniDFSCluster(conf, 3, true, null); cluster.waitActive(); fs = FileSystem.get(conf); FsPermission rootPerm = checkPermission(fs, "/", null); FsPermission inheritPerm = FsPermission.createImmutable( (short)(rootPerm.toShort() | 0300)); FsPermission dirPerm = new FsPermission((short)0777); fs.mkdirs(new Path("/a1/a2/a3"), dirPerm); checkPermission(fs, "/a1", inheritPerm); checkPermission(fs, "/a1/a2", inheritPerm); checkPermission(fs, "/a1/a2/a3", dirPerm); FsPermission filePerm = new FsPermission((short)0444); FSDataOutputStream out = fs.create(new Path("/b1/b2/b3.txt"), filePerm, true, conf.getInt("io.file.buffer.size", 4096), fs.getDefaultReplication(), 
fs.getDefaultBlockSize(), null); out.write(123); out.close(); checkPermission(fs, "/b1", inheritPerm); checkPermission(fs, "/b1/b2", inheritPerm); checkPermission(fs, "/b1/b2/b3.txt", filePerm); conf.set(FsPermission.UMASK_LABEL, "022"); FsPermission permission = FsPermission.createImmutable((short)0666); FileSystem.mkdirs(fs, new Path("/c1"), new FsPermission(permission)); FileSystem.create(fs, new Path("/c1/c2.txt"), new FsPermission(permission)); checkPermission(fs, "/c1", permission); checkPermission(fs, "/c1/c2.txt", permission); } finally { try { if(fs != null) fs.close(); } catch(Exception e) { LOG.error(StringUtils.stringifyException(e)); } try { if(cluster != null) cluster.shutdown(); } catch(Exception e) { LOG.error(StringUtils.stringifyException(e)); } } } public void testFilePermision() throws Exception { Configuration conf = new Configuration(); conf.setBoolean("dfs.permissions", true); MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null); cluster.waitActive(); try { FileSystem nnfs = FileSystem.get(conf); // test permissions on files that do not exist assertFalse(nnfs.exists(CHILD_FILE1)); try { nnfs.setOwner(CHILD_FILE1, "foo", "bar"); assertTrue(false); } catch(java.io.FileNotFoundException e) { LOG.info("GOOD: got " + e); } try { nnfs.setPermission(CHILD_FILE1, new FsPermission((short)0777)); assertTrue(false); } catch(java.io.FileNotFoundException e) { LOG.info("GOOD: got " + e); } // following dir/file creations are legal nnfs.mkdirs(CHILD_DIR1); FSDataOutputStream out = nnfs.create(CHILD_FILE1); byte data[] = new byte[FILE_LEN]; RAN.nextBytes(data); out.write(data); out.close(); nnfs.setPermission(CHILD_FILE1, new FsPermission((short)0700)); // following read is legal byte dataIn[] = new byte[FILE_LEN]; FSDataInputStream fin = nnfs.open(CHILD_FILE1); int bytesRead = fin.read(dataIn); assertTrue(bytesRead == FILE_LEN); for(int i=0; i<FILE_LEN; i++) { assertEquals(data[i], dataIn[i]); } 
//////////////////////////////////////////////////////////////// // test illegal file/dir creation UnixUserGroupInformation userGroupInfo = new UnixUserGroupInformation( USER_NAME, GROUP_NAMES ); UnixUserGroupInformation.saveToConf(conf, UnixUserGroupInformation.UGI_PROPERTY_NAME, userGroupInfo); FileSystem userfs = FileSystem.get(conf); // make sure mkdir of a existing directory that is not owned by // this user does not throw an exception. userfs.mkdirs(CHILD_DIR1); // illegal mkdir assertTrue(!canMkdirs(userfs, CHILD_DIR2)); // illegal file creation assertTrue(!canCreate(userfs, CHILD_FILE2)); // illegal file open assertTrue(!canOpen(userfs, CHILD_FILE1)); nnfs.setPermission(ROOT_PATH, new FsPermission((short)0755)); nnfs.setPermission(CHILD_DIR1, new FsPermission((short)0777)); nnfs.setPermission(new Path("/"), new FsPermission((short)0777)); final Path RENAME_PATH = new Path("/foo/bar"); userfs.mkdirs(RENAME_PATH); assertTrue(canRename(userfs, RENAME_PATH, CHILD_DIR1)); } finally { if(cluster != null) cluster.shutdown(); } } static boolean canMkdirs(FileSystem fs, Path p) throws IOException { try { fs.mkdirs(p); return true; } catch(AccessControlException e) { return false; } } static boolean canCreate(FileSystem fs, Path p) throws IOException { try { fs.create(p); return true; } catch(AccessControlException e) { return false; } } static boolean canOpen(FileSystem fs, Path p) throws IOException { try { fs.open(p); return true; } catch(AccessControlException e) { return false; } } static boolean canRename(FileSystem fs, Path src, Path dst ) throws IOException { try { fs.rename(src, dst); return true; } catch(AccessControlException e) { return false; } } }
jaxlaw/hadoop-common
3b0d36afbcd8555df46c0eca57ec7bc613711b97
HADOOP:6382 patched from https://issues.apache.org/jira/secure/attachment/12434978/HADOOP-6382-findbugs.patch to correct 2 locations where jar file naming convention wasn't correctly changed.
diff --git a/build.xml b/build.xml index c05f625..b3c154c 100644 --- a/build.xml +++ b/build.xml @@ -385,1390 +385,1390 @@ <classpath refid="classpath" /> </taskdef> </target> <target name="compile-core-classes" depends="init, compile-rcc-compiler"> <taskdef classname="org.apache.jasper.JspC" name="jsp-compile" > <classpath refid="test.classpath"/> </taskdef> <!-- Compile Java files (excluding JSPs) checking warnings --> <javac encoding="${build.encoding}" srcdir="${core.src.dir}" includes="org/apache/hadoop/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> </javac> <copy todir="${build.classes}"> <fileset dir="${core.src.dir}" includes="**/*.properties"/> <fileset dir="${core.src.dir}" includes="core-default.xml"/> </copy> </target> <target name="compile-mapred-classes" depends="compile-core-classes"> <jsp-compile uriroot="${src.webapps}/task" outputdir="${build.src}" package="org.apache.hadoop.mapred" webxml="${build.webapps}/task/WEB-INF/web.xml"> </jsp-compile> <jsp-compile uriroot="${src.webapps}/job" outputdir="${build.src}" package="org.apache.hadoop.mapred" webxml="${build.webapps}/job/WEB-INF/web.xml"> </jsp-compile> <!-- Compile Java files (excluding JSPs) checking warnings --> <javac encoding="${build.encoding}" srcdir="${mapred.src.dir};${build.src}" includes="org/apache/hadoop/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> </javac> <copy todir="${build.classes}"> <fileset dir="${mapred.src.dir}" includes="**/*.properties"/> <fileset dir="${mapred.src.dir}" includes="mapred-default.xml"/> </copy> </target> <target 
name="compile-hdfs-classes" depends="compile-core-classes"> <jsp-compile uriroot="${src.webapps}/hdfs" outputdir="${build.src}" package="org.apache.hadoop.hdfs.server.namenode" webxml="${build.webapps}/hdfs/WEB-INF/web.xml"> </jsp-compile> <jsp-compile uriroot="${src.webapps}/datanode" outputdir="${build.src}" package="org.apache.hadoop.hdfs.server.datanode" webxml="${build.webapps}/datanode/WEB-INF/web.xml"> </jsp-compile> <!-- Compile Java files (excluding JSPs) checking warnings --> <javac encoding="${build.encoding}" srcdir="${hdfs.src.dir};${build.src}" includes="org/apache/hadoop/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> </javac> <copy todir="${build.classes}"> <fileset dir="${hdfs.src.dir}" includes="**/*.properties"/> <fileset dir="${hdfs.src.dir}" includes="hdfs-default.xml"/> </copy> </target> <target name="compile-tools" depends="init"> <javac encoding="${build.encoding}" srcdir="${tools.src}" includes="org/apache/hadoop/**/*.java" destdir="${build.tools}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> </javac> <copy todir="${build.tools}"> <fileset dir="${tools.src}" includes="**/*.properties" /> </copy> </target> <target name="compile-native"> <antcall target="compile-core-native"> <param name="compile.native" value="true"/> </antcall> </target> <target name="compile-core-native" depends="compile-core-classes" if="compile.native"> <mkdir dir="${build.native}/lib"/> <mkdir dir="${build.native}/src/org/apache/hadoop/io/compress/zlib"/> <javah classpath="${build.classes}" destdir="${build.native}/src/org/apache/hadoop/io/compress/zlib" force="yes" 
verbose="yes" > <class name="org.apache.hadoop.io.compress.zlib.ZlibCompressor" /> <class name="org.apache.hadoop.io.compress.zlib.ZlibDecompressor" /> </javah> <exec dir="${build.native}" executable="sh" failonerror="true"> <env key="OS_NAME" value="${os.name}"/> <env key="OS_ARCH" value="${os.arch}"/> <env key="JVM_DATA_MODEL" value="${sun.arch.data.model}"/> <env key="HADOOP_NATIVE_SRCDIR" value="${native.src.dir}"/> <arg line="${native.src.dir}/configure"/> </exec> <exec dir="${build.native}" executable="${make.cmd}" failonerror="true"> <env key="OS_NAME" value="${os.name}"/> <env key="OS_ARCH" value="${os.arch}"/> <env key="JVM_DATA_MODEL" value="${sun.arch.data.model}"/> <env key="HADOOP_NATIVE_SRCDIR" value="${native.src.dir}"/> </exec> <exec dir="${build.native}" executable="sh" failonerror="true"> <arg line="${build.native}/libtool --mode=install cp ${build.native}/lib/libhadoop.la ${build.native}/lib"/> </exec> </target> <target name="compile-core" depends="clover,compile-core-classes,compile-mapred-classes, compile-hdfs-classes,compile-core-native,compile-c++" description="Compile core only"> </target> <target name="compile-contrib" depends="compile-core,tools-jar,compile-c++-libhdfs"> <subant target="compile"> <property name="version" value="${version}"/> <fileset file="${contrib.dir}/build.xml"/> </subant> </target> <target name="compile" depends="compile-core, compile-contrib, compile-ant-tasks, compile-tools" description="Compile core, contrib"> </target> <target name="compile-examples" depends="compile-core,compile-tools,compile-c++-examples"> <javac encoding="${build.encoding}" srcdir="${examples.dir}" includes="org/apache/hadoop/**/*.java" destdir="${build.examples}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath> <path refid="classpath"/> <pathelement location="${build.tools}"/> 
</classpath> </javac> </target> <!-- ================================================================== --> <!-- Make hadoop.jar --> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> <target name="jar" depends="compile-core" description="Make hadoop.jar"> <tar compression="gzip" destfile="${build.classes}/bin.tgz"> <tarfileset dir="bin" mode="755"/> </tar> <jar jarfile="${build.dir}/${core.final.name}.jar" basedir="${build.classes}"> <manifest> <section name="org/apache/hadoop"> <attribute name="Implementation-Title" value="Hadoop"/> <attribute name="Implementation-Version" value="${version}"/> <attribute name="Implementation-Vendor" value="Yahoo!"/> </section> </manifest> <fileset file="${conf.dir}/commons-logging.properties"/> <fileset file="${conf.dir}/log4j.properties"/> <fileset file="${conf.dir}/hadoop-metrics.properties"/> <zipfileset dir="${build.webapps}" prefix="webapps"/> </jar> </target> <!-- ================================================================== --> <!-- Make the Hadoop examples jar. 
--> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> <target name="examples" depends="jar, compile-examples" description="Make the Hadoop examples jar."> <jar jarfile="${build.dir}/${examples.final.name}.jar" basedir="${build.examples}"> <manifest> <attribute name="Main-Class" value="org/apache/hadoop/examples/ExampleDriver"/> </manifest> </jar> </target> <target name="tools-jar" depends="jar, compile-tools" description="Make the Hadoop tools jar."> <jar jarfile="${build.dir}/${tools.final.name}.jar" basedir="${build.tools}"> <manifest> <attribute name="Main-Class" value="org/apache/hadoop/examples/ExampleDriver"/> </manifest> </jar> </target> <!-- ================================================================== --> <!-- Make the Hadoop metrics jar. (for use outside Hadoop) --> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> <target name="metrics.jar" depends="compile-core" description="Make the Hadoop metrics jar. 
(for use outside Hadoop)"> <jar jarfile="${build.dir}/hadoop-metrics-${version}.jar" basedir="${build.classes}"> <include name="**/metrics/**" /> <exclude name="**/package.html" /> </jar> </target> <target name="generate-test-records" depends="compile-rcc-compiler"> <recordcc destdir="${test.generated.dir}"> <fileset dir="${test.src.dir}" includes="**/*.jr" /> </recordcc> </target> <!-- ================================================================== --> <!-- Compile test code --> <!-- ================================================================== --> <target name="compile-core-test" depends="compile-examples, compile-tools, generate-test-records"> <javac encoding="${build.encoding}" srcdir="${test.generated.dir}" includes="org/apache/hadoop/**/*.java" destdir="${test.build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args}" /> <classpath refid="test.classpath"/> </javac> <javac encoding="${build.encoding}" srcdir="${test.src.dir}" includes="org/apache/hadoop/**/*.java" destdir="${test.build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="test.classpath"/> </javac> <javac encoding="${build.encoding}" srcdir="${test.src.dir}/testjar" includes="*.java" destdir="${test.build.testjar}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="test.classpath"/> </javac> <delete file="${test.build.testjar}/testjob.jar"/> <jar jarfile="${test.build.testjar}/testjob.jar" basedir="${test.build.testjar}"> </jar> <javac encoding="${build.encoding}" srcdir="${test.src.dir}/testshell" includes="*.java" 
destdir="${test.build.testshell}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}"/> <classpath refid="test.classpath"/> </javac> <delete file="${test.build.testshell}/testshell.jar"/> <jar jarfile="${test.build.testshell}/testshell.jar" basedir="${test.build.testshell}"> </jar> <delete dir="${test.cache.data}"/> <mkdir dir="${test.cache.data}"/> <delete dir="${test.debug.data}"/> <mkdir dir="${test.debug.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/testscript.txt" todir="${test.debug.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.txt" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.jar" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.zip" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.tar" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.tgz" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.tar.gz" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/hdfs/hadoop-14-dfs-dir.tgz" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/testConf.xml" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data15bytes" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data30bytes" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data60bytes" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data120bytes" todir="${test.cache.data}"/> </target> <!-- 
================================================================== --> <!-- Make hadoop-test.jar --> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> <target name="jar-test" depends="compile-core-test" description="Make hadoop-test.jar"> <jar jarfile="${build.dir}/${test.final.name}.jar" basedir="${test.build.classes}"> <manifest> <attribute name="Main-Class" value="org/apache/hadoop/test/AllTestDriver"/> <section name="org/apache/hadoop"> <attribute name="Implementation-Title" value="Hadoop"/> <attribute name="Implementation-Version" value="${version}"/> <attribute name="Implementation-Vendor" value="Yahoo!"/> </section> </manifest> </jar> </target> <!-- ================================================================== --> <!-- Run unit tests --> <!-- ================================================================== --> <target name="test-core" depends="jar-test" description="Run core unit tests"> <delete dir="${test.build.data}"/> <mkdir dir="${test.build.data}"/> <delete dir="${test.log.dir}"/> <mkdir dir="${test.log.dir}"/> <copy file="${test.src.dir}/hadoop-policy.xml" todir="${test.build.extraconf}" /> <junit showoutput="${test.output}" printsummary="${test.junit.printsummary}" haltonfailure="${test.junit.haltonfailure}" fork="yes" forkmode="${test.junit.fork.mode}" maxmemory="${test.junit.maxmemory}" dir="${basedir}" timeout="${test.timeout}" errorProperty="tests.failed" failureProperty="tests.failed"> <sysproperty key="test.build.data" value="${test.build.data}"/> <sysproperty key="test.tools.input.dir" value="${test.tools.input.dir}"/> <sysproperty key="test.cache.data" value="${test.cache.data}"/> <sysproperty key="test.debug.data" value="${test.debug.data}"/> <sysproperty key="hadoop.log.dir" value="${test.log.dir}"/> <sysproperty key="test.src.dir" value="${test.src.dir}"/> <sysproperty key="taskcontroller-path" 
value="${taskcontroller-path}"/> <sysproperty key="taskcontroller-user" value="${taskcontroller-user}"/> <sysproperty key="test.build.extraconf" value="${test.build.extraconf}" /> <sysproperty key="hadoop.policy.file" value="hadoop-policy.xml"/> <sysproperty key="java.library.path" value="${build.native}/lib:${lib.dir}/native/${build.platform}"/> <sysproperty key="install.c++.examples" value="${install.c++.examples}"/> <!-- set io.compression.codec.lzo.class in the child jvm only if it is set --> <syspropertyset dynamic="no"> <propertyref name="io.compression.codec.lzo.class"/> </syspropertyset> <!-- set compile.c++ in the child jvm only if it is set --> <syspropertyset dynamic="no"> <propertyref name="compile.c++"/> </syspropertyset> <classpath refid="${test.classpath.id}"/> <formatter type="${test.junit.output.format}" /> <batchtest todir="${test.build.dir}" unless="testcase"> <fileset dir="${test.src.dir}" includes="**/${test.include}.java" excludes="**/${test.exclude}.java" /> </batchtest> <batchtest todir="${test.build.dir}" if="testcase"> <fileset dir="${test.src.dir}" includes="**/${testcase}.java"/> </batchtest> </junit> <fail if="tests.failed">Tests failed!</fail> </target> <target name="test-contrib" depends="compile, compile-core-test" description="Run contrib unit tests"> <subant target="test"> <property name="version" value="${version}"/> <fileset file="${contrib.dir}/build.xml"/> </subant> </target> <target name="test" depends="test-core, test-contrib" description="Run core, contrib unit tests"> </target> <!-- Run all unit tests, not just Test*, and use non-test configuration. 
--> <target name="test-cluster" description="Run all unit tests, not just Test*, and use non-test configuration."> <antcall target="test"> <param name="test.include" value="*"/> <param name="test.classpath.id" value="test.cluster.classpath"/> </antcall> </target> <target name="nightly" depends="test, tar"> </target> <!-- ================================================================== --> <!-- Run optional third-party tool targets --> <!-- ================================================================== --> <target name="checkstyle" depends="ivy-retrieve-checkstyle,check-for-checkstyle" if="checkstyle.present" description="Run optional third-party tool targets"> <taskdef resource="checkstyletask.properties"> <classpath refid="checkstyle-classpath"/> </taskdef> <mkdir dir="${test.build.dir}"/> <checkstyle config="${test.src.dir}/checkstyle.xml" failOnViolation="false"> <fileset dir="${core.src.dir}" includes="**/*.java" excludes="**/generated/**"/> <fileset dir="${mapred.src.dir}" includes="**/*.java" excludes="**/generated/**"/> <fileset dir="${hdfs.src.dir}" includes="**/*.java" excludes="**/generated/**"/> <formatter type="xml" toFile="${test.build.dir}/checkstyle-errors.xml"/> </checkstyle> <xslt style="${test.src.dir}/checkstyle-noframes-sorted.xsl" in="${test.build.dir}/checkstyle-errors.xml" out="${test.build.dir}/checkstyle-errors.html"/> </target> <target name="check-for-checkstyle"> <available property="checkstyle.present" resource="checkstyletask.properties"> <classpath refid="checkstyle-classpath"/> </available> </target> <property name="findbugs.home" value=""/> <target name="findbugs" depends="check-for-findbugs, tar" if="findbugs.present" description="Run findbugs if present"> <property name="findbugs.out.dir" value="${test.build.dir}/findbugs"/> <property name="findbugs.exclude.file" value="${test.src.dir}/findbugsExcludeFile.xml"/> <property name="findbugs.report.htmlfile" value="${findbugs.out.dir}/hadoop-findbugs-report.html"/> <property 
name="findbugs.report.xmlfile" value="${findbugs.out.dir}/hadoop-findbugs-report.xml"/> <taskdef name="findbugs" classname="edu.umd.cs.findbugs.anttask.FindBugsTask" classpath="${findbugs.home}/lib/findbugs-ant.jar" /> <mkdir dir="${findbugs.out.dir}"/> <findbugs home="${findbugs.home}" output="xml:withMessages" outputFile="${findbugs.report.xmlfile}" effort="max" excludeFilter="${findbugs.exclude.file}" jvmargs="-Xmx512M"> <auxClasspath> <fileset dir="${lib.dir}"> <include name="**/*.jar"/> </fileset> <fileset dir="${build.ivy.lib.dir}/${ant.project.name}/common"> <include name="**/*.jar"/> </fileset> </auxClasspath> <sourcePath path="${core.src.dir}"/> <sourcePath path="${mapred.src.dir}"/> <sourcePath path="${hdfs.src.dir}"/> <sourcePath path="${examples.dir}" /> <sourcePath path="${tools.src}" /> <sourcePath path="${basedir}/src/contrib/streaming/src/java" /> - <class location="${basedir}/build/${final.name}-core.jar" /> - <class location="${basedir}/build/${final.name}-examples.jar" /> - <class location="${basedir}/build/${final.name}-tools.jar" /> - <class location="${basedir}/build/contrib/streaming/${final.name}-streaming.jar" /> + <class location="${build.dir}/${core.final.name}.jar" /> + <class location="${build.dir}/${examples.final.name}.jar" /> + <class location="${build.dir}/${tools.final.name}.jar" /> + <class location="${build.dir}/contrib/streaming/${streaming.final.name}.jar" /> </findbugs> <xslt style="${findbugs.home}/src/xsl/default.xsl" in="${findbugs.report.xmlfile}" out="${findbugs.report.htmlfile}"/> </target> <target name="check-for-findbugs"> <available property="findbugs.present" file="${findbugs.home}/lib/findbugs.jar" /> </target> <!-- ================================================================== --> <!-- Documentation --> <!-- ================================================================== --> <target name="docs" depends="forrest.check" description="Generate forrest-based documentation. 
To use, specify -Dforrest.home=&lt;base of Apache Forrest installation&gt; on the command line." if="forrest.home"> <exec dir="${docs.src}" executable="${forrest.home}/bin/forrest" failonerror="true"> <env key="JAVA_HOME" value="${java5.home}"/> </exec> <copy todir="${build.docs}"> <fileset dir="${docs.src}/build/site/" /> </copy> <copy file="${docs.src}/releasenotes.html" todir="${build.docs}"/> <style basedir="${core.src.dir}" destdir="${build.docs}" includes="core-default.xml" style="conf/configuration.xsl"/> <style basedir="${hdfs.src.dir}" destdir="${build.docs}" includes="hdfs-default.xml" style="conf/configuration.xsl"/> <style basedir="${mapred.src.dir}" destdir="${build.docs}" includes="mapred-default.xml" style="conf/configuration.xsl"/> <antcall target="changes-to-html"/> <antcall target="cn-docs"/> </target> <target name="cn-docs" depends="forrest.check, init" description="Generate forrest-based Chinese documentation. To use, specify -Dforrest.home=&lt;base of Apache Forrest installation&gt; on the command line." if="forrest.home"> <exec dir="${src.docs.cn}" executable="${forrest.home}/bin/forrest" failonerror="true"> <env key="LANG" value="en_US.utf8"/> <env key="JAVA_HOME" value="${java5.home}"/> </exec> <copy todir="${build.docs.cn}"> <fileset dir="${src.docs.cn}/build/site/" /> </copy> <style basedir="${core.src.dir}" destdir="${build.docs.cn}" includes="core-default.xml" style="conf/configuration.xsl"/> <style basedir="${hdfs.src.dir}" destdir="${build.docs.cn}" includes="hdfs-default.xml" style="conf/configuration.xsl"/> <style basedir="${mapred.src.dir}" destdir="${build.docs.cn}" includes="mapred-default.xml" style="conf/configuration.xsl"/> <antcall target="changes-to-html"/> </target> <target name="forrest.check" unless="forrest.home" depends="java5.check"> <fail message="'forrest.home' is not defined. Please pass -Dforrest.home=&lt;base of Apache Forrest installation&gt; to Ant on the command-line." 
/> </target> <target name="java5.check" unless="java5.home"> <fail message="'java5.home' is not defined. Forrest requires Java 5. Please pass -Djava5.home=&lt;base of Java 5 distribution&gt; to Ant on the command-line." /> </target> <target name="javadoc-dev" description="Generate javadoc for hadoop developers"> <mkdir dir="${build.javadoc.dev}"/> <javadoc overview="${core.src.dir}/overview.html" packagenames="org.apache.hadoop.*" destdir="${build.javadoc.dev}" author="true" version="true" use="true" windowtitle="${Name} ${version} API" doctitle="${Name} ${version} Developer API" bottom="This release is based on the Yahoo! Distribution of Hadoop, powering the largest Hadoop clusters in the Universe!&lt;br>Copyright &amp;copy; ${year} The Apache Software Foundation." > <packageset dir="${core.src.dir}"/> <packageset dir="${mapred.src.dir}"/> <packageset dir="${hdfs.src.dir}"/> <packageset dir="${examples.dir}"/> <packageset dir="src/contrib/streaming/src/java"/> <packageset dir="src/contrib/data_join/src/java"/> <packageset dir="src/contrib/index/src/java"/> <link href="${javadoc.link.java}"/> <classpath > <path refid="classpath" /> <fileset dir="src/contrib/"> <include name="*/lib/*.jar" /> </fileset> <pathelement path="${java.class.path}"/> </classpath> <group title="Core" packages="org.apache.*"/> <group title="Examples" packages="org.apache.hadoop.examples*"/> <group title="contrib: Streaming" packages="org.apache.hadoop.streaming*"/> <group title="contrib: DataJoin" packages="org.apache.hadoop.contrib.utils.join*"/> <group title="contrib: Index" packages="org.apache.hadoop.contrib.index*"/> </javadoc> </target> <target name="javadoc" depends="compile, ivy-retrieve-javadoc" description="Generate javadoc"> <mkdir dir="${build.javadoc}"/> <javadoc overview="${core.src.dir}/overview.html" packagenames="org.apache.hadoop.*" destdir="${build.javadoc}" author="true" version="true" use="true" windowtitle="${Name} ${version} API" doctitle="${Name} ${version} API" 
bottom="This release is based on the Yahoo! Distribution of Hadoop, powering the largest Hadoop clusters in the Universe!&lt;br>Copyright &amp;copy; ${year} The Apache Software Foundation." > <packageset dir="${core.src.dir}"/> <packageset dir="${mapred.src.dir}"/> <packageset dir="${examples.dir}"/> <packageset dir="src/contrib/streaming/src/java"/> <packageset dir="src/contrib/data_join/src/java"/> <packageset dir="src/contrib/index/src/java"/> <packageset dir="src/contrib/failmon/src/java/"/> <link href="${javadoc.link.java}"/> <classpath > <path refid="classpath" /> <fileset dir="src/contrib/"> <include name="*/lib/*.jar" /> </fileset> <path refid="javadoc-classpath"/> <pathelement path="${java.class.path}"/> <pathelement location="${build.tools}"/> </classpath> <group title="Core" packages="org.apache.*"/> <group title="Examples" packages="org.apache.hadoop.examples*"/> <group title="contrib: Streaming" packages="org.apache.hadoop.streaming*"/> <group title="contrib: DataJoin" packages="org.apache.hadoop.contrib.utils.join*"/> <group title="contrib: Index" packages="org.apache.hadoop.contrib.index*"/> <group title="contrib: FailMon" packages="org.apache.hadoop.contrib.failmon*"/> </javadoc> </target> <target name="api-xml" depends="ivy-retrieve-jdiff,javadoc,write-null"> <javadoc> <doclet name="jdiff.JDiff" path="${jdiff.jar}:${xerces.jar}"> <param name="-apidir" value="${jdiff.xml.dir}"/> <param name="-apiname" value="hadoop ${version}"/> </doclet> <packageset dir="src/core"/> <packageset dir="src/mapred"/> <packageset dir="src/tools"/> <classpath > <path refid="classpath" /> <path refid="jdiff-classpath" /> <pathelement path="${java.class.path}"/> </classpath> </javadoc> </target> <target name="write-null"> <exec executable="touch"> <arg value="${jdiff.home}/Null.java"/> </exec> </target> <target name="api-report" depends="ivy-retrieve-jdiff,api-xml"> <mkdir dir="${jdiff.build.dir}"/> <javadoc sourcepath="src/core,src/hdfs,src,mapred,src/tools" 
destdir="${jdiff.build.dir}" sourceFiles="${jdiff.home}/Null.java"> <doclet name="jdiff.JDiff" path="${jdiff.jar}:${xerces.jar}"> <param name="-oldapi" value="hadoop ${jdiff.stable}"/> <param name="-newapi" value="hadoop ${version}"/> <param name="-oldapidir" value="${jdiff.xml.dir}"/> <param name="-newapidir" value="${jdiff.xml.dir}"/> <param name="-javadocold" value="${jdiff.stable.javadoc}"/> <param name="-javadocnew" value="../../api/"/> <param name="-stats"/> </doclet> <classpath > <path refid="classpath" /> <path refid="jdiff-classpath"/> <pathelement path="${java.class.path}"/> </classpath> </javadoc> </target> <target name="changes-to-html" description="Convert CHANGES.txt into an html file"> <mkdir dir="${build.docs}"/> <exec executable="perl" input="CHANGES.txt" output="${build.docs}/changes.html" failonerror="true"> <arg value="${changes.src}/changes2html.pl"/> </exec> <copy todir="${build.docs}"> <fileset dir="${changes.src}" includes="*.css"/> </copy> </target> <!-- ================================================================== --> <!-- D I S T R I B U T I O N --> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> <target name="package" depends="compile, jar, javadoc, examples, tools-jar, jar-test, ant-tasks, package-librecordio" description="Build distribution"> <mkdir dir="${dist.dir}"/> <mkdir dir="${dist.dir}/lib"/> <mkdir dir="${dist.dir}/contrib"/> <mkdir dir="${dist.dir}/bin"/> <mkdir dir="${dist.dir}/docs"/> <mkdir dir="${dist.dir}/docs/api"/> <mkdir dir="${dist.dir}/docs/jdiff"/> <copy todir="${dist.dir}/lib" includeEmptyDirs="false" flatten="true"> <fileset dir="${common.ivy.lib.dir}"/> </copy> <copy todir="${dist.dir}/lib" includeEmptyDirs="false"> <fileset dir="lib"> <exclude name="**/native/**"/> </fileset> </copy> <exec dir="${dist.dir}" executable="sh" failonerror="true"> <env key="BASE_NATIVE_LIB_DIR" 
value="${lib.dir}/native"/> <env key="BUILD_NATIVE_DIR" value="${build.dir}/native"/> <env key="DIST_LIB_DIR" value="${dist.dir}/lib/native"/> <arg line="${native.src.dir}/packageNativeHadoop.sh"/> </exec> <subant target="package"> <!--Pass down the version in case its needed again and the target distribution directory so contribs know where to install to.--> <property name="version" value="${version}"/> <property name="dist.dir" value="${dist.dir}"/> <fileset file="${contrib.dir}/build.xml"/> </subant> <copy todir="${dist.dir}/webapps"> <fileset dir="${build.webapps}"/> </copy> <copy todir="${dist.dir}"> - <fileset file="${build.dir}/${final.name}-*.jar"/> + <fileset file="${build.dir}/${name}-*-${version}.jar"/> </copy> <copy todir="${dist.dir}/bin"> <fileset dir="bin"/> </copy> <copy todir="${dist.dir}/conf"> <fileset dir="${conf.dir}" excludes="**/*.template"/> </copy> <copy todir="${dist.dir}/docs"> <fileset dir="${build.docs}"/> </copy> <copy file="ivy.xml" tofile="${dist.dir}/ivy.xml"/> <copy todir="${dist.dir}/ivy"> <fileset dir="ivy"/> </copy> <copy todir="${dist.dir}"> <fileset dir="."> <include name="*.txt" /> </fileset> </copy> <copy todir="${dist.dir}/src" includeEmptyDirs="true"> <fileset dir="src" excludes="**/*.template **/docs/build/**/*"/> </copy> <copy todir="${dist.dir}/c++" includeEmptyDirs="false"> <fileset dir="${build.dir}/c++"/> </copy> <copy todir="${dist.dir}/" file="build.xml"/> <chmod perm="ugo+x" type="file" parallel="false"> <fileset dir="${dist.dir}/bin"/> <fileset dir="${dist.dir}/src/contrib/"> <include name="*/bin/*" /> </fileset> <fileset dir="${dist.dir}/src/contrib/ec2/bin/image"/> </chmod> <chmod perm="ugo+x" type="file"> <fileset dir="${dist.dir}/src/c++/pipes/debug"/> </chmod> </target> <!-- ================================================================== --> <!-- Make release tarball --> <!-- ================================================================== --> <target name="tar" depends="package" description="Make 
release tarball"> <macro_tar param.destfile="${build.dir}/${final.name}.tar.gz"> <param.listofitems> <tarfileset dir="${build.dir}" mode="664"> <exclude name="${final.name}/bin/*" /> <exclude name="${final.name}/contrib/*/bin/*" /> <exclude name="${final.name}/src/contrib/ec2/bin/*" /> <exclude name="${final.name}/src/contrib/ec2/bin/image/*" /> <include name="${final.name}/**" /> </tarfileset> <tarfileset dir="${build.dir}" mode="755"> <include name="${final.name}/bin/*" /> <include name="${final.name}/contrib/*/bin/*" /> <include name="${final.name}/src/contrib/ec2/bin/*" /> <include name="${final.name}/src/contrib/ec2/bin/image/*" /> </tarfileset> </param.listofitems> </macro_tar> </target> <target name="bin-package" depends="compile, jar, examples, tools-jar, jar-test, ant-tasks, package-librecordio" description="assembles artifacts for binary target"> <mkdir dir="${dist.dir}"/> <mkdir dir="${dist.dir}/lib"/> <mkdir dir="${dist.dir}/contrib"/> <mkdir dir="${dist.dir}/bin"/> <copy todir="${dist.dir}/lib" includeEmptyDirs="false" flatten="true"> <fileset dir="${common.ivy.lib.dir}"/> </copy> <copy todir="${dist.dir}/lib" includeEmptyDirs="false"> <fileset dir="lib"> <exclude name="**/native/**"/> </fileset> </copy> <exec dir="${dist.dir}" executable="sh" failonerror="true"> <env key="BASE_NATIVE_LIB_DIR" value="${lib.dir}/native"/> <env key="BUILD_NATIVE_DIR" value="${build.dir}/native"/> <env key="DIST_LIB_DIR" value="${dist.dir}/lib/native"/> <arg line="${native.src.dir}/packageNativeHadoop.sh"/> </exec> <subant target="package"> <!--Pass down the version in case its needed again and the target distribution directory so contribs know where to install to.--> <property name="version" value="${version}"/> <property name="dist.dir" value="${dist.dir}"/> <fileset file="${contrib.dir}/build.xml"/> </subant> <copy todir="${dist.dir}/webapps"> <fileset dir="${build.webapps}"/> </copy> <copy todir="${dist.dir}"> - <fileset file="${build.dir}/${final.name}-*.jar"/> + 
<fileset file="${build.dir}/${name}-*-${version}.jar"/> </copy> <copy todir="${dist.dir}/bin"> <fileset dir="bin"/> </copy> <copy todir="${dist.dir}/conf"> <fileset dir="${conf.dir}" excludes="**/*.template"/> </copy> <copy file="ivy.xml" tofile="${dist.dir}/ivy.xml"/> <copy todir="${dist.dir}/ivy"> <fileset dir="ivy"/> </copy> <copy todir="${dist.dir}"> <fileset dir="."> <include name="*.txt" /> </fileset> </copy> <copy todir="${dist.dir}/c++" includeEmptyDirs="false"> <fileset dir="${build.dir}/c++"/> </copy> <copy todir="${dist.dir}/" file="build.xml"/> <chmod perm="ugo+x" type="file" parallel="false"> <fileset dir="${dist.dir}/bin"/> </chmod> </target> <target name="binary" depends="bin-package" description="Make tarball without source and documentation"> <macro_tar param.destfile="${build.dir}/${final.name}-bin.tar.gz"> <param.listofitems> <tarfileset dir="${build.dir}" mode="664"> <exclude name="${final.name}/bin/*" /> <exclude name="${final.name}/src/**" /> <exclude name="${final.name}/docs/**" /> <include name="${final.name}/**" /> </tarfileset> <tarfileset dir="${build.dir}" mode="755"> <include name="${final.name}/bin/*" /> </tarfileset> </param.listofitems> </macro_tar> </target> <!-- ================================================================== --> <!-- Perform audit activities for the release --> <!-- ================================================================== --> <target name="releaseaudit" depends="package,ivy-retrieve-releaseaudit" description="Release Audit activities"> <fail unless="rat.present" message="Failed to load class [${rat.reporting.classname}]."/> <java classname="${rat.reporting.classname}" fork="true"> <classpath refid="releaseaudit-classpath"/> <arg value="${build.dir}/${final.name}"/> </java> </target> <!-- ================================================================== --> <!-- Clean. 
Delete the build files, and their directories --> <!-- ================================================================== --> <target name="clean" depends="clean-contrib, clean-sign" description="Clean. Delete the build files, and their directories"> <delete dir="${build.dir}"/> <delete dir="${docs.src}/build"/> <delete dir="${src.docs.cn}/build"/> <delete file="${basedir}/ivy/hadoop-core-pom.xml"/> <delete file="${basedir}/ivy/hadoop-test-pom.xml"/> <delete file="${basedir}/ivy/hadoop-examples-pom.xml"/> <delete file="${basedir}/ivy/hadoop-tools-pom.xml"/> <delete file="${basedir}/ivy/hadoop-streaming-pom.xml"/> </target> <target name="clean-sign" description="Clean. Delete .asc files"> <delete> <fileset dir="." includes="**/**/*.asc"/> </delete> </target> <target name="veryclean" depends="clean" description="Delete mvn ant task jar and ivy ant taks jar"> <delete file="${ant_task.jar}"/> <delete file="${ivy.jar}"/> </target> <!-- ================================================================== --> <!-- Clean contrib target. 
For now, must be called explicitly --> <!-- Using subant instead of ant as a workaround for 30569 --> <!-- ================================================================== --> <target name="clean-contrib"> <subant target="clean"> <fileset file="src/contrib/build.xml"/> </subant> </target> <target name="test-c++-libhdfs" depends="compile-c++-libhdfs, compile-core" if="islibhdfs"> <delete dir="${test.libhdfs.dir}"/> <mkdir dir="${test.libhdfs.dir}"/> <mkdir dir="${test.libhdfs.dir}/logs"/> <mkdir dir="${test.libhdfs.dir}/hdfs/name"/> <exec dir="${build.c++.libhdfs}" executable="${make.cmd}" failonerror="true"> <env key="OS_NAME" value="${os.name}"/> <env key="OS_ARCH" value="${os.arch}"/> <env key="JVM_ARCH" value="${jvm.arch}"/> <env key="LIBHDFS_BUILD_DIR" value="${build.c++.libhdfs}"/> <env key="HADOOP_HOME" value="${basedir}"/> <env key="HADOOP_CONF_DIR" value="${test.libhdfs.conf.dir}"/> <env key="HADOOP_LOG_DIR" value="${test.libhdfs.dir}/logs"/> <env key="LIBHDFS_SRC_DIR" value="${c++.libhdfs.src}"/> <env key="LIBHDFS_INSTALL_DIR" value="${install.c++}/lib"/> <env key="LIB_DIR" value="${common.ivy.lib.dir}"/> <arg value="test"/> </exec> </target> <!-- ================================================================== --> <!-- librecordio targets. 
--> <!-- ================================================================== --> <target name="compile-librecordio" depends="init" if="librecordio" > <mkdir dir="${build.librecordio}"/> <exec dir="${librecordio.src}" executable="${make.cmd}" failonerror="true"> <env key="XERCESCROOT" value="${xercescroot}"/> <env key="LIBRECORDIO_BUILD_DIR" value="${build.librecordio}"/> </exec> </target> <target name="test-librecordio" depends="compile-librecordio, compile-core" if="librecordio"> <delete dir="${librecordio.test.dir}"/> <mkdir dir="${librecordio.test.dir}"/> <exec dir="${librecordio.src}/test" executable="${make.cmd}" failonerror="true"> <env key="HADOOP_HOME" value="${basedir}"/> <env key="XERCESCROOT" value="${xercescroot}"/> <env key="LIBRECORDIO_BUILD_DIR" value="${build.librecordio}"/> <env key="LIBRECORDIO_TEST_DIR" value="${librecordio.test.dir}"/> <arg value="all"/> </exec> </target> <target name="package-librecordio" depends="compile-librecordio" if="librecordio"> <mkdir dir="${dist.dir}/librecordio"/> <copy todir="${dist.dir}/librecordio"> <fileset dir="${build.librecordio}" casesensitive="yes" followsymlinks="false"> <exclude name="**/tests/**"/> <exclude name="*.so"/> <exclude name="*.o"/> </fileset> </copy> <chmod perm="ugo+x" type="file"> <fileset dir="${dist.dir}/librecordio"/> </chmod> </target> <target name="create-c++-configure" depends="init" if="compile.c++"> <exec executable="autoreconf" dir="${c++.utils.src}" searchpath="yes" failonerror="yes"> <arg value="-if"/> </exec> <exec executable="autoreconf" dir="${c++.pipes.src}" searchpath="yes" failonerror="yes"> <arg value="-if"/> </exec> <exec executable="autoreconf" dir="${c++.examples.pipes.src}" searchpath="yes" failonerror="yes"> <arg value="-if"/> </exec> <antcall target="create-c++-configure-libhdfs"/> </target> <target name="create-c++-configure-libhdfs" depends="check-c++-libhdfs" if="islibhdfs"> <exec executable="autoreconf" dir="${c++.libhdfs.src}" searchpath="yes" failonerror="yes"> 
<arg value="-if"/> </exec> </target> <target name="check-c++-makefiles" depends="init" if="compile.c++"> <condition property="need.c++.utils.makefile"> <not> <available file="${build.c++.utils}/Makefile"/> </not> </condition> <condition property="need.c++.pipes.makefile"> <not> <available file="${build.c++.pipes}/Makefile"/> </not> </condition> <condition property="need.c++.examples.pipes.makefile"> <not> <available file="${build.c++.examples.pipes}/Makefile"/> </not> </condition> </target> <target name="check-c++-libhdfs"> <condition property="islibhdfs"> <and> <isset property="compile.c++"/> <isset property="libhdfs"/> </and> </condition> </target> <target name="check-c++-makefile-libhdfs" depends="init,check-c++-libhdfs" if="islibhdfs"> <condition property="need.c++.libhdfs.makefile"> <not> <available file="${build.c++.libhdfs}/Makefile"/> </not> </condition> </target> <target name="create-c++-libhdfs-makefile" depends="check-c++-makefile-libhdfs" if="need.c++.libhdfs.makefile"> <mkdir dir="${build.c++.libhdfs}"/> <chmod file="${c++.libhdfs.src}/configure" perm="ugo+x"/> <exec executable="${c++.libhdfs.src}/configure" dir="${build.c++.libhdfs}" failonerror="yes"> <env key="ac_cv_func_malloc_0_nonnull" value="yes"/> <env key="JVM_ARCH" value="${jvm.arch}"/> <arg value="--prefix=${install.c++}"/> </exec> </target> <target name="create-c++-utils-makefile" depends="check-c++-makefiles" if="need.c++.utils.makefile"> <mkdir dir="${build.c++.utils}"/> <exec executable="${c++.utils.src}/configure" dir="${build.c++.utils}" failonerror="yes"> <arg value="--prefix=${install.c++}"/> </exec> </target> <target name="compile-c++-utils" depends="create-c++-utils-makefile" if="compile.c++"> <exec executable="${make.cmd}" dir="${build.c++.utils}" searchpath="yes" failonerror="yes"> <arg value="install"/> </exec> </target> <target name="create-c++-pipes-makefile" depends="check-c++-makefiles" if="need.c++.pipes.makefile"> <mkdir dir="${build.c++.pipes}"/> <exec 
executable="${c++.pipes.src}/configure" dir="${build.c++.pipes}" failonerror="yes"> <arg value="--prefix=${install.c++}"/> </exec> </target> <target name="compile-c++-pipes" depends="create-c++-pipes-makefile,compile-c++-utils" if="compile.c++"> <exec executable="${make.cmd}" dir="${build.c++.pipes}" searchpath="yes" failonerror="yes"> <arg value="install"/> </exec> </target> <target name="compile-c++" depends="compile-c++-pipes"/> <target name="create-c++-examples-pipes-makefile" depends="check-c++-makefiles" if="need.c++.examples.pipes.makefile"> <mkdir dir="${build.c++.examples.pipes}"/> <exec executable="${c++.examples.pipes.src}/configure" dir="${build.c++.examples.pipes}" failonerror="yes"> <arg value="--prefix=${install.c++.examples}"/> <arg value="--with-hadoop-utils=${install.c++}"/> <arg value="--with-hadoop-pipes=${install.c++}"/> </exec> </target> <target name="compile-c++-examples-pipes" depends="create-c++-examples-pipes-makefile,compile-c++-pipes" if="compile.c++"> <exec executable="${make.cmd}" dir="${build.c++.examples.pipes}" searchpath="yes" failonerror="yes"> <arg value="install"/> </exec> </target> <target name="compile-c++-examples" depends="compile-c++-examples-pipes"/> <target name="compile-c++-libhdfs" depends="create-c++-libhdfs-makefile" if="islibhdfs"> <exec executable="${make.cmd}" dir="${build.c++.libhdfs}" searchpath="yes" failonerror="yes"> <env key="ac_cv_func_malloc_0_nonnull" value="yes"/> <env key="JVM_ARCH" value="${jvm.arch}"/> <arg value="install"/> </exec> </target> <target name="compile-ant-tasks" depends="compile-core"> <javac encoding="${build.encoding}" srcdir="${anttasks.dir}" includes="org/apache/hadoop/ant/**/*.java" destdir="${build.anttasks}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args}"/> <classpath refid="classpath"/> </javac> </target> <target name="ant-tasks" depends="jar, 
compile-ant-tasks"> <copy file="${anttasks.dir}/org/apache/hadoop/ant/antlib.xml" todir="${build.anttasks}/org/apache/hadoop/ant"/> <jar destfile="${build.dir}/${ant.final.name}.jar"> <fileset dir="${build.anttasks}"/> </jar> </target> <target name="clover" depends="clover.setup, clover.info" description="Instrument the Unit tests using Clover. To use, specify -Dclover.home=&lt;base of clover installation&gt; -Drun.clover=true on the command line."/> <target name="clover.setup" if="clover.enabled"> <taskdef resource="cloverlib.xml" classpath="${clover.jar}"/> <mkdir dir="${clover.db.dir}"/> <clover-setup initString="${clover.db.dir}/hadoop_coverage.db"> <fileset dir="src" includes="core/**/* tools/**/* hdfs/**/* mapred/**/*"/> </clover-setup> </target> <target name="clover.info" unless="clover.present"> <echo> Clover not found. Code coverage reports disabled. </echo> </target> <target name="clover.check"> <fail unless="clover.present"> ################################################################## Clover not found. Please specify -Dclover.home=&lt;base of clover installation&gt; on the command line. ################################################################## </fail> </target> <target name="generate-clover-reports" depends="clover.check, clover"> <mkdir dir="${clover.report.dir}"/> <clover-report> <current outfile="${clover.report.dir}" title="${final.name}"> <format type="html"/> </current> </clover-report> <clover-report> <current outfile="${clover.report.dir}/clover.xml" title="${final.name}"> <format type="xml"/> </current> </clover-report> </target> <target name="findbugs.check" depends="check-for-findbugs" unless="findbugs.present"> <fail message="'findbugs.home' is not defined. Please pass -Dfindbugs.home=&lt;base of Findbugs installation&gt; to Ant on the command-line." /> </target> <target name="patch.check" unless="patch.file"> <fail message="'patch.file' is not defined. 
Please pass -Dpatch.file=&lt;location of patch file&gt; to Ant on the command-line." /> </target> <target name="test-patch" depends="patch.check,findbugs.check,forrest.check"> <exec executable="bash" failonerror="true"> <arg value="${basedir}/src/test/bin/test-patch.sh"/> <arg value="DEVELOPER"/> <arg value="${patch.file}"/> <arg value="${scratch.dir}"/> <arg value="${svn.cmd}"/> <arg value="${grep.cmd}"/> <arg value="${patch.cmd}"/> <arg value="${findbugs.home}"/> <arg value="${forrest.home}"/> <arg value="${basedir}"/> <arg value="${java5.home}"/> </exec> </target> <target name="hudson-test-patch" depends="findbugs.check,forrest.check"> <exec executable="bash" failonerror="true"> <arg value="${basedir}/src/test/bin/test-patch.sh"/> <arg value="HUDSON"/> <arg value="${scratch.dir}"/> <arg value="${support.dir}"/> <arg value="${ps.cmd}"/> <arg value="${wget.cmd}"/> <arg value="${jiracli.cmd}"/> <arg value="${svn.cmd}"/> <arg value="${grep.cmd}"/> <arg value="${patch.cmd}"/> <arg value="${findbugs.home}"/> <arg value="${forrest.home}"/> <arg value="${eclipse.home}"/> <arg value="${python.home}"/> <arg value="${basedir}"/> <arg value="${trigger.url}"/> <arg value="${jira.passwd}"/> <arg value="${java5.home}"/> </exec> </target> <target name="eclipse-files" depends="init" description="Generate files for Eclipse"> <pathconvert property="eclipse.project"> <path path="${basedir}"/> <regexpmapper from="^.*/([^/]+)$$" to="\1" handledirsep="yes"/> </pathconvert> <copy todir="." 
overwrite="true"> <fileset dir=".eclipse.templates"> <exclude name="**/README.txt"/> </fileset> <filterset> <filter token="PROJECT" value="${eclipse.project}"/> </filterset> </copy> </target> <target name="ivy-init-dirs"> <mkdir dir="${build.ivy.dir}" /> <mkdir dir="${build.ivy.lib.dir}" /> <mkdir dir="${build.ivy.report.dir}" /> </target> <target name="ivy-probe-antlib" > <condition property="ivy.found"> <typefound uri="antlib:org.apache.ivy.ant" name="cleancache"/> </condition> </target> <target name="ivy-download" description="To download ivy" unless="offline"> <get src="${ivy_repo_url}" dest="${ivy.jar}" usetimestamp="true"/> </target> <!-- To avoid Ivy leaking things across big projects, always load Ivy in the same classloader. Also note how we skip loading Ivy if it is already there, just to make sure all is well. --> <target name="ivy-init-antlib" depends="ivy-download,ivy-init-dirs,ivy-probe-antlib" unless="ivy.found"> <typedef uri="antlib:org.apache.ivy.ant" onerror="fail" loaderRef="ivyLoader"> <classpath> <pathelement location="${ivy.jar}"/> </classpath> </typedef> <fail > <condition > <not> <typefound uri="antlib:org.apache.ivy.ant" name="cleancache"/> </not> </condition> You need Apache Ivy 2.0 or later from http://ant.apache.org/ It could not be loaded from ${ivy_repo_url} </fail> </target> <target name="ivy-init" depends="ivy-init-antlib" > <!--Configure Ivy by reading in the settings file If anyone has already read in a settings file into this settings ID, it gets priority --> <ivy:configure settingsid="${ant.project.name}.ivy.settings" file="${ivysettings.xml}" override='false'/> </target> <target name="ivy-resolve" depends="ivy-init"> <ivy:resolve settingsRef="${ant.project.name}.ivy.settings"/> </target> <target name="ivy-resolve-javadoc" depends="ivy-init"> <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="javadoc"/> </target> <target name="ivy-resolve-releaseaudit" depends="ivy-init"> <ivy:resolve 
settingsRef="${ant.project.name}.ivy.settings" conf="releaseaudit"/> </target> <target name="ivy-resolve-test" depends="ivy-init"> <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="test" /> </target> <target name="ivy-resolve-common" depends="ivy-init"> <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="common" /> </target> <target name="ivy-resolve-jdiff" depends="ivy-init"> <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="jdiff" /> </target> <target name="ivy-resolve-checkstyle" depends="ivy-init"> <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="checkstyle"/> </target> <target name="ivy-retrieve" depends="ivy-resolve" description="Retrieve Ivy-managed artifacts"> <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings" pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/> </target> <target name="ivy-retrieve-checkstyle" depends="ivy-resolve-checkstyle" description="Retrieve Ivy-managed artifacts for the checkstyle configurations"> <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings" pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/> <ivy:cachepath pathid="checkstyle-classpath" conf="checkstyle"/> </target> <target name="ivy-retrieve-jdiff" depends="ivy-resolve-jdiff" description="Retrieve Ivy-managed artifacts for the javadoc configurations"> <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings" pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/>
jaxlaw/hadoop-common
46b7fcabef02b381dfbd2d48bc5c4abbc8169e4a
HADOOP-6382 from http://issues.apache.org/jira/secure/attachment/12430016/hadoop-6382-v1.patch
diff --git a/build.xml b/build.xml index 034c33f..c05f625 100644 --- a/build.xml +++ b/build.xml @@ -1,1867 +1,2041 @@ <?xml version="1.0"?> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <project name="Hadoop" default="compile" + xmlns:artifact="urn:maven-artifact-ant" xmlns:ivy="antlib:org.apache.ivy.ant"> <!-- Load all the default properties, and any the user wants --> <!-- to contribute (without having to type -D or edit this file --> <property file="${user.home}/build.properties" /> <property file="${basedir}/build.properties" /> <property name="Name" value="Yahoo! 
Distribution of Hadoop"/> <property name="name" value="hadoop"/> - <property name="version" value="0.20.1-dev"/> + <property name="version" value="0.20.9-SNAPSHOT"/> <property name="final.name" value="${name}-${version}"/> <property name="year" value="2009"/> + + <property name="core.final.name" value="${name}-core-${version}"/> + <property name="test.final.name" value="${name}-test-${version}"/> + <property name="examples.final.name" value="${name}-examples-${version}"/> + <property name="tools.final.name" value="${name}-tools-${version}"/> + <property name="ant.final.name" value="${name}-ant-${version}"/> + <property name="streaming.final.name" value="${name}-streaming-${version}"/> <property name="src.dir" value="${basedir}/src"/> <property name="core.src.dir" value="${src.dir}/core"/> <property name="mapred.src.dir" value="${src.dir}/mapred"/> <property name="hdfs.src.dir" value="${src.dir}/hdfs"/> <property name="native.src.dir" value="${basedir}/src/native"/> <property name="examples.dir" value="${basedir}/src/examples"/> <property name="anttasks.dir" value="${basedir}/src/ant"/> <property name="lib.dir" value="${basedir}/lib"/> <property name="conf.dir" value="${basedir}/conf"/> <property name="contrib.dir" value="${basedir}/src/contrib"/> <property name="docs.src" value="${basedir}/src/docs"/> <property name="src.docs.cn" value="${basedir}/src/docs/cn"/> <property name="changes.src" value="${docs.src}/changes"/> <property name="c++.src" value="${basedir}/src/c++"/> <property name="c++.utils.src" value="${c++.src}/utils"/> <property name="c++.pipes.src" value="${c++.src}/pipes"/> <property name="c++.examples.pipes.src" value="${examples.dir}/pipes"/> <property name="c++.libhdfs.src" value="${c++.src}/libhdfs"/> <property name="librecordio.src" value="${c++.src}/librecordio"/> <property name="tools.src" value="${basedir}/src/tools"/> <property name="xercescroot" value=""/> <property name="build.dir" value="${basedir}/build"/> <property name="build.classes" 
value="${build.dir}/classes"/> <property name="build.src" value="${build.dir}/src"/> <property name="build.tools" value="${build.dir}/tools"/> <property name="build.webapps" value="${build.dir}/webapps"/> <property name="build.examples" value="${build.dir}/examples"/> <property name="build.anttasks" value="${build.dir}/ant"/> <property name="build.librecordio" value="${build.dir}/librecordio"/> <!-- convert spaces to _ so that mac os doesn't break things --> <exec executable="sed" inputstring="${os.name}" outputproperty="nonspace.os"> <arg value="s/ /_/g"/> </exec> <property name="build.platform" value="${nonspace.os}-${os.arch}-${sun.arch.data.model}"/> <property name="jvm.arch" value="${sun.arch.data.model}"/> <property name="build.native" value="${build.dir}/native/${build.platform}"/> <property name="build.c++" value="${build.dir}/c++-build/${build.platform}"/> <property name="build.c++.utils" value="${build.c++}/utils"/> <property name="build.c++.pipes" value="${build.c++}/pipes"/> <property name="build.c++.libhdfs" value="${build.c++}/libhdfs"/> <property name="build.c++.examples.pipes" value="${build.c++}/examples/pipes"/> <property name="build.docs" value="${build.dir}/docs"/> <property name="build.docs.cn" value="${build.dir}/docs/cn"/> <property name="build.javadoc" value="${build.docs}/api"/> <property name="build.javadoc.dev" value="${build.docs}/dev-api"/> <property name="build.encoding" value="ISO-8859-1"/> <property name="install.c++" value="${build.dir}/c++/${build.platform}"/> <property name="install.c++.examples" value="${build.dir}/c++-examples/${build.platform}"/> <property name="test.src.dir" value="${basedir}/src/test"/> <property name="test.lib.dir" value="${basedir}/src/test/lib"/> <property name="test.build.dir" value="${build.dir}/test"/> <property name="test.generated.dir" value="${test.build.dir}/src"/> <property name="test.build.data" value="${test.build.dir}/data"/> <property name="test.cache.data" value="${test.build.dir}/cache"/> 
<property name="test.debug.data" value="${test.build.dir}/debug"/> <property name="test.log.dir" value="${test.build.dir}/logs"/> <property name="test.build.classes" value="${test.build.dir}/classes"/> <property name="test.build.testjar" value="${test.build.dir}/testjar"/> <property name="test.build.testshell" value="${test.build.dir}/testshell"/> <property name="test.build.extraconf" value="${test.build.dir}/extraconf"/> <property name="test.build.javadoc" value="${test.build.dir}/docs/api"/> <property name="test.build.javadoc.dev" value="${test.build.dir}/docs/dev-api"/> <property name="test.include" value="Test*"/> <property name="test.classpath.id" value="test.classpath"/> <property name="test.output" value="no"/> <property name="test.timeout" value="900000"/> <property name="test.junit.output.format" value="plain"/> <property name="test.junit.fork.mode" value="perTest" /> <property name="test.junit.printsummary" value="yes" /> <property name="test.junit.haltonfailure" value="no" /> <property name="test.junit.maxmemory" value="512m" /> <property name="test.tools.input.dir" value="${basedir}/src/test/tools/data"/> <property name="test.libhdfs.conf.dir" value="${c++.libhdfs.src}/tests/conf"/> <property name="test.libhdfs.dir" value="${test.build.dir}/libhdfs"/> <property name="librecordio.test.dir" value="${test.build.dir}/librecordio"/> <property name="web.src.dir" value="${basedir}/src/web"/> <property name="src.webapps" value="${basedir}/src/webapps"/> <property name="javadoc.link.java" value="http://java.sun.com/javase/6/docs/api/"/> <property name="javadoc.packages" value="org.apache.hadoop.*"/> <property name="dist.dir" value="${build.dir}/${final.name}"/> <property name="javac.debug" value="on"/> <property name="javac.optimize" value="on"/> <property name="javac.deprecation" value="off"/> <property name="javac.version" value="1.6"/> <property name="javac.args" value=""/> <property name="javac.args.warnings" value="-Xlint:unchecked"/> <property 
name="clover.db.dir" location="${build.dir}/test/clover/db"/> <property name="clover.report.dir" location="${build.dir}/test/clover/reports"/> <property name="rat.reporting.classname" value="rat.Report"/> <property name="jdiff.build.dir" value="${build.docs}/jdiff"/> <property name="jdiff.xml.dir" value="${lib.dir}/jdiff"/> <property name="jdiff.stable" value="0.19.2"/> <property name="jdiff.stable.javadoc" value="http://hadoop.apache.org/core/docs/r${jdiff.stable}/api/"/> <property name="scratch.dir" value="${user.home}/tmp"/> <property name="svn.cmd" value="svn"/> <property name="grep.cmd" value="grep"/> <property name="patch.cmd" value="patch"/> <property name="make.cmd" value="make"/> <!-- task-controller properties set here --> <!-- Source directory from where configure is run and files are copied --> <property name="c++.task-controller.src" value="${basedir}/src/c++/task-controller" /> <!-- directory where autoconf files + temporary files and src is stored for compilation --> <property name="build.c++.task-controller" value="${build.c++}/task-controller" /> <!-- the default install dir is build directory override it using -Dtask-controller.install.dir=$HADOOP_HOME/bin --> <property name="task-controller.install.dir" value="${dist.dir}/bin" /> <!-- end of task-controller properties --> <!-- IVY properteis set here --> <property name="ivy.dir" location="ivy" /> <loadproperties srcfile="${ivy.dir}/libraries.properties"/> + <property name="mvnrepo" value="http://repo2.maven.org/maven2"/> + <property name="asfrepo" value="https://repository.apache.org"/> <property name="ivy.jar" location="${ivy.dir}/ivy-${ivy.version}.jar"/> - <property name="ivy_repo_url" value="http://repo2.maven.org/maven2/org/apache/ivy/ivy/${ivy.version}/ivy-${ivy.version}.jar"/> - <property name="ivysettings.xml" location="${ivy.dir}/ivysettings.xml" /> + <property name="ivy_repo_url" + value="${mvnrepo}/org/apache/ivy/ivy/${ivy.version}/ivy-${ivy.version}.jar"/> + <property 
name="ant_task.jar" + location="${ivy.dir}/maven-ant-tasks-${ant-task.version}.jar"/> + <property name="tsk.org" value="/org/apache/maven/maven-ant-tasks/"/> + <property name="ant_task_repo_url" + value="${mvnrepo}${tsk.org}${ant-task.version}/maven-ant-tasks-${ant-task.version}.jar"/> + <property name="repo" value="snapshots"/> + <property name="asfsnapshotrepo" + value="${asfrepo}/content/repositories/snapshots"/> + <property name="asfstagingrepo" + value="${asfrepo}/service/local/staging/deploy/maven2"/> + <property name="ivysettings.xml" location="${ivy.dir}/ivysettings.xml"/> <property name="ivy.org" value="org.apache.hadoop"/> <property name="build.dir" location="build" /> <property name="dist.dir" value="${build.dir}/${final.name}"/> <property name="build.ivy.dir" location="${build.dir}/ivy" /> - <property name="build.ivy.lib.dir" location="${build.ivy.dir}/lib" /> - <property name="common.ivy.lib.dir" location="${build.ivy.lib.dir}/${ant.project.name}/common"/> - <property name="build.ivy.report.dir" location="${build.ivy.dir}/report" /> - <property name="build.ivy.maven.dir" location="${build.ivy.dir}/maven" /> - <property name="build.ivy.maven.pom" location="${build.ivy.maven.dir}/hadoop-core-${hadoop.version}.pom" /> - <property name="build.ivy.maven.jar" location="${build.ivy.maven.dir}/hadoop-core-${hadoop.version}.jar" /> - + <property name="build.ivy.lib.dir" location="${build.ivy.dir}/lib"/> + <property name="common.ivy.lib.dir" + location="${build.ivy.lib.dir}/${ant.project.name}/common"/> + <property name="build.ivy.report.dir" location="${build.ivy.dir}/report"/> + + <property name="hadoop-core.pom" location="${ivy.dir}/hadoop-core-pom.xml"/> + <property name="hadoop-core-pom-template.xml" + location="${ivy.dir}/hadoop-core-pom-template.xml"/> + <property name="hadoop-core.jar" location="${build.dir}/${core.final.name}.jar"/> + <property name="hadoop-test.pom" location="${ivy.dir}/hadoop-test-pom.xml"/> + <property 
name="hadoop-test-pom-template.xml" + location="${ivy.dir}/hadoop-test-pom-template.xml" /> + <property name="hadoop-test.jar" location="${build.dir}/${test.final.name}.jar"/> + <property name="hadoop-tools.pom" location="${ivy.dir}/hadoop-tools-pom.xml"/> + <property name="hadoop-tools-pom-template.xml" + location="${ivy.dir}/hadoop-tools-pom-template.xml" /> + <property name="hadoop-tools.jar" location="${build.dir}/${tools.final.name}.jar"/> + <property name="hadoop-examples.pom" location="${ivy.dir}/hadoop-examples-pom.xml"/> + <property name="hadoop-examples-pom-template.xml" + location="${ivy.dir}/hadoop-examples-pom-template.xml"/> + <property name="hadoop-examples.jar" + location="${build.dir}/${examples.final.name}.jar"/> + <property name="hadoop-streaming.pom" + location="${ivy.dir}/hadoop-streaming-pom.xml"/> + <property name="hadoop-streaming-pom-template.xml" + location="${ivy.dir}/hadoop-streaming-pom-template.xml"/> + <property name="hadoop-streaming.jar" + location="${build.dir}/contrib/streaming/${streaming.final.name}.jar"/> + <!--this is the naming policy for artifacts we want pulled down--> - <property name="ivy.artifact.retrieve.pattern" value="${ant.project.name}/[conf]/[artifact]-[revision].[ext]"/> + <property name="ivy.artifact.retrieve.pattern" + value="${ant.project.name}/[conf]/[artifact]-[revision].[ext]"/> <!--this is how artifacts that get built are named--> <property name="ivy.publish.pattern" value="hadoop-[revision]-core.[ext]"/> - <property name="hadoop.jar" location="${build.dir}/hadoop-${hadoop.version}-core.jar" /> + <property name="hadoop.jar" + location="${build.dir}/hadoop-${hadoop.version}-core.jar"/> <!-- jdiff.home property set --> - <property name="jdiff.home" value="${build.ivy.lib.dir}/${ant.project.name}/jdiff"/> + <property name="jdiff.home" + value="${build.ivy.lib.dir}/${ant.project.name}/jdiff"/> <property name="jdiff.jar" value="${jdiff.home}/jdiff-${jdiff.version}.jar"/> <property name="xerces.jar" 
value="${jdiff.home}/xerces-${xerces.version}.jar"/> <property name="clover.jar" location="${clover.home}/lib/clover.jar"/> <available property="clover.present" file="${clover.jar}" /> <!-- check if clover reports should be generated --> <condition property="clover.enabled"> <and> <isset property="run.clover"/> <isset property="clover.present"/> </and> </condition> + <condition property="staging"> + <equals arg1="${repo}" arg2="staging"/> + </condition> + <!-- the normal classpath --> <path id="classpath"> <pathelement location="${build.classes}"/> <fileset dir="${lib.dir}"> <include name="**/*.jar" /> <exclude name="**/excluded/" /> </fileset> <pathelement location="${conf.dir}"/> <path refid="ivy-common.classpath"/> </path> <!-- the unit test classpath: uses test.src.dir for configuration --> <path id="test.classpath"> <pathelement location="${test.build.extraconf}"/> <pathelement location="${test.build.classes}" /> <pathelement location="${test.src.dir}"/> <pathelement location="${build.dir}"/> <pathelement location="${build.examples}"/> <pathelement location="${build.tools}"/> <pathelement path="${clover.jar}"/> <fileset dir="${test.lib.dir}"> - <include name="**/*.jar" /> - <exclude name="**/excluded/" /> + <include name="**/*.jar"/> + <exclude name="**/excluded/"/> </fileset> <path refid="classpath"/> </path> <!-- the cluster test classpath: uses conf.dir for configuration --> <path id="test.cluster.classpath"> <path refid="classpath"/> <pathelement location="${test.build.classes}" /> <pathelement location="${test.src.dir}"/> <pathelement location="${build.dir}"/> </path> - <!-- properties dependent on the items defined above. 
--> - <!--<available classname="${rat.reporting.classname}" classpathref="classpath" property="rat.present" value="true"/> --> - <!-- ====================================================== --> <!-- Macro definitions --> <!-- ====================================================== --> <macrodef name="macro_tar" description="Worker Macro for tar"> <attribute name="param.destfile"/> <element name="param.listofitems"/> <sequential> <tar compression="gzip" longfile="gnu" destfile="@{param.destfile}"> <param.listofitems/> </tar> </sequential> </macrodef> <!-- ====================================================== --> <!-- Stuff needed by all targets --> <!-- ====================================================== --> <target name="init" depends="ivy-retrieve-common"> <mkdir dir="${build.dir}"/> <mkdir dir="${build.classes}"/> <mkdir dir="${build.tools}"/> <mkdir dir="${build.src}"/> <mkdir dir="${build.webapps}/task/WEB-INF"/> <mkdir dir="${build.webapps}/job/WEB-INF"/> <mkdir dir="${build.webapps}/hdfs/WEB-INF"/> <mkdir dir="${build.webapps}/datanode/WEB-INF"/> <mkdir dir="${build.webapps}/secondary/WEB-INF"/> <mkdir dir="${build.examples}"/> <mkdir dir="${build.anttasks}"/> <mkdir dir="${build.dir}/c++"/> <mkdir dir="${test.build.dir}"/> <mkdir dir="${test.build.classes}"/> <mkdir dir="${test.build.testjar}"/> <mkdir dir="${test.build.testshell}"/> <mkdir dir="${test.build.extraconf}"/> <tempfile property="touch.temp.file" destDir="${java.io.tmpdir}"/> <touch millis="0" file="${touch.temp.file}"> <fileset dir="${conf.dir}" includes="**/*.template"/> <fileset dir="${contrib.dir}" includes="**/*.template"/> </touch> <delete file="${touch.temp.file}"/> <!-- copy all of the jsp and static files --> <copy todir="${build.webapps}"> <fileset dir="${src.webapps}"> <exclude name="**/*.jsp" /> </fileset> </copy> <copy todir="${conf.dir}" verbose="true"> <fileset dir="${conf.dir}" includes="**/*.template"/> <mapper type="glob" from="*.template" to="*"/> </copy> <copy 
todir="${contrib.dir}" verbose="true"> <fileset dir="${contrib.dir}" includes="**/*.template"/> <mapper type="glob" from="*.template" to="*"/> </copy> <exec executable="sh"> <arg line="src/saveVersion.sh ${version}"/> </exec> <exec executable="sh"> <arg line="src/fixFontsPath.sh ${src.docs.cn}"/> </exec> </target> <!-- ====================================================== --> <!-- Compile the Java files --> <!-- ====================================================== --> <target name="record-parser" depends="init" if="javacc.home"> <javacc target="${core.src.dir}/org/apache/hadoop/record/compiler/generated/rcc.jj" outputdirectory="${core.src.dir}/org/apache/hadoop/record/compiler/generated" javacchome="${javacc.home}" /> </target> <target name="compile-rcc-compiler" depends="init, record-parser"> <javac encoding="${build.encoding}" srcdir="${core.src.dir}" includes="org/apache/hadoop/record/compiler/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args}"/> <classpath refid="classpath"/> </javac> <taskdef name="recordcc" classname="org.apache.hadoop.record.compiler.ant.RccTask"> <classpath refid="classpath" /> </taskdef> </target> <target name="compile-core-classes" depends="init, compile-rcc-compiler"> <taskdef classname="org.apache.jasper.JspC" name="jsp-compile" > <classpath refid="test.classpath"/> </taskdef> <!-- Compile Java files (excluding JSPs) checking warnings --> <javac encoding="${build.encoding}" srcdir="${core.src.dir}" includes="org/apache/hadoop/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> </javac> <copy todir="${build.classes}"> <fileset dir="${core.src.dir}" 
includes="**/*.properties"/> <fileset dir="${core.src.dir}" includes="core-default.xml"/> </copy> </target> <target name="compile-mapred-classes" depends="compile-core-classes"> <jsp-compile uriroot="${src.webapps}/task" outputdir="${build.src}" package="org.apache.hadoop.mapred" webxml="${build.webapps}/task/WEB-INF/web.xml"> </jsp-compile> <jsp-compile uriroot="${src.webapps}/job" outputdir="${build.src}" package="org.apache.hadoop.mapred" webxml="${build.webapps}/job/WEB-INF/web.xml"> </jsp-compile> <!-- Compile Java files (excluding JSPs) checking warnings --> <javac encoding="${build.encoding}" srcdir="${mapred.src.dir};${build.src}" includes="org/apache/hadoop/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> </javac> <copy todir="${build.classes}"> <fileset dir="${mapred.src.dir}" includes="**/*.properties"/> <fileset dir="${mapred.src.dir}" includes="mapred-default.xml"/> </copy> </target> <target name="compile-hdfs-classes" depends="compile-core-classes"> <jsp-compile uriroot="${src.webapps}/hdfs" outputdir="${build.src}" package="org.apache.hadoop.hdfs.server.namenode" webxml="${build.webapps}/hdfs/WEB-INF/web.xml"> </jsp-compile> <jsp-compile uriroot="${src.webapps}/datanode" outputdir="${build.src}" package="org.apache.hadoop.hdfs.server.datanode" webxml="${build.webapps}/datanode/WEB-INF/web.xml"> </jsp-compile> <!-- Compile Java files (excluding JSPs) checking warnings --> <javac encoding="${build.encoding}" srcdir="${hdfs.src.dir};${build.src}" includes="org/apache/hadoop/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> 
</javac> <copy todir="${build.classes}"> <fileset dir="${hdfs.src.dir}" includes="**/*.properties"/> <fileset dir="${hdfs.src.dir}" includes="hdfs-default.xml"/> </copy> </target> <target name="compile-tools" depends="init"> <javac encoding="${build.encoding}" srcdir="${tools.src}" includes="org/apache/hadoop/**/*.java" destdir="${build.tools}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> </javac> <copy todir="${build.tools}"> <fileset dir="${tools.src}" includes="**/*.properties" /> </copy> </target> <target name="compile-native"> <antcall target="compile-core-native"> <param name="compile.native" value="true"/> </antcall> </target> <target name="compile-core-native" depends="compile-core-classes" if="compile.native"> <mkdir dir="${build.native}/lib"/> <mkdir dir="${build.native}/src/org/apache/hadoop/io/compress/zlib"/> <javah classpath="${build.classes}" destdir="${build.native}/src/org/apache/hadoop/io/compress/zlib" force="yes" verbose="yes" > <class name="org.apache.hadoop.io.compress.zlib.ZlibCompressor" /> <class name="org.apache.hadoop.io.compress.zlib.ZlibDecompressor" /> </javah> <exec dir="${build.native}" executable="sh" failonerror="true"> <env key="OS_NAME" value="${os.name}"/> <env key="OS_ARCH" value="${os.arch}"/> <env key="JVM_DATA_MODEL" value="${sun.arch.data.model}"/> <env key="HADOOP_NATIVE_SRCDIR" value="${native.src.dir}"/> <arg line="${native.src.dir}/configure"/> </exec> <exec dir="${build.native}" executable="${make.cmd}" failonerror="true"> <env key="OS_NAME" value="${os.name}"/> <env key="OS_ARCH" value="${os.arch}"/> <env key="JVM_DATA_MODEL" value="${sun.arch.data.model}"/> <env key="HADOOP_NATIVE_SRCDIR" value="${native.src.dir}"/> </exec> <exec dir="${build.native}" executable="sh" failonerror="true"> <arg line="${build.native}/libtool 
--mode=install cp ${build.native}/lib/libhadoop.la ${build.native}/lib"/> </exec> </target> <target name="compile-core" depends="clover,compile-core-classes,compile-mapred-classes, compile-hdfs-classes,compile-core-native,compile-c++" description="Compile core only"> </target> <target name="compile-contrib" depends="compile-core,tools-jar,compile-c++-libhdfs"> <subant target="compile"> <property name="version" value="${version}"/> <fileset file="${contrib.dir}/build.xml"/> </subant> </target> <target name="compile" depends="compile-core, compile-contrib, compile-ant-tasks, compile-tools" description="Compile core, contrib"> </target> <target name="compile-examples" depends="compile-core,compile-tools,compile-c++-examples"> <javac encoding="${build.encoding}" srcdir="${examples.dir}" includes="org/apache/hadoop/**/*.java" destdir="${build.examples}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath> <path refid="classpath"/> <pathelement location="${build.tools}"/> </classpath> </javac> </target> <!-- ================================================================== --> <!-- Make hadoop.jar --> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> <target name="jar" depends="compile-core" description="Make hadoop.jar"> <tar compression="gzip" destfile="${build.classes}/bin.tgz"> <tarfileset dir="bin" mode="755"/> </tar> - <jar jarfile="${build.dir}/${final.name}-core.jar" + <jar jarfile="${build.dir}/${core.final.name}.jar" basedir="${build.classes}"> <manifest> <section name="org/apache/hadoop"> <attribute name="Implementation-Title" value="Hadoop"/> <attribute name="Implementation-Version" value="${version}"/> <attribute name="Implementation-Vendor" value="Yahoo!"/> </section> </manifest> <fileset 
file="${conf.dir}/commons-logging.properties"/> <fileset file="${conf.dir}/log4j.properties"/> <fileset file="${conf.dir}/hadoop-metrics.properties"/> <zipfileset dir="${build.webapps}" prefix="webapps"/> </jar> </target> <!-- ================================================================== --> <!-- Make the Hadoop examples jar. --> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> <target name="examples" depends="jar, compile-examples" description="Make the Hadoop examples jar."> - <jar jarfile="${build.dir}/${final.name}-examples.jar" + <jar jarfile="${build.dir}/${examples.final.name}.jar" basedir="${build.examples}"> <manifest> <attribute name="Main-Class" value="org/apache/hadoop/examples/ExampleDriver"/> </manifest> </jar> </target> <target name="tools-jar" depends="jar, compile-tools" description="Make the Hadoop tools jar."> - <jar jarfile="${build.dir}/${final.name}-tools.jar" + <jar jarfile="${build.dir}/${tools.final.name}.jar" basedir="${build.tools}"> <manifest> <attribute name="Main-Class" value="org/apache/hadoop/examples/ExampleDriver"/> </manifest> </jar> </target> <!-- ================================================================== --> <!-- Make the Hadoop metrics jar. (for use outside Hadoop) --> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> <target name="metrics.jar" depends="compile-core" description="Make the Hadoop metrics jar. 
(for use outside Hadoop)"> <jar jarfile="${build.dir}/hadoop-metrics-${version}.jar" basedir="${build.classes}"> <include name="**/metrics/**" /> <exclude name="**/package.html" /> </jar> </target> <target name="generate-test-records" depends="compile-rcc-compiler"> <recordcc destdir="${test.generated.dir}"> <fileset dir="${test.src.dir}" includes="**/*.jr" /> </recordcc> </target> <!-- ================================================================== --> <!-- Compile test code --> <!-- ================================================================== --> <target name="compile-core-test" depends="compile-examples, compile-tools, generate-test-records"> <javac encoding="${build.encoding}" srcdir="${test.generated.dir}" includes="org/apache/hadoop/**/*.java" destdir="${test.build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args}" /> <classpath refid="test.classpath"/> </javac> <javac encoding="${build.encoding}" srcdir="${test.src.dir}" includes="org/apache/hadoop/**/*.java" destdir="${test.build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="test.classpath"/> </javac> <javac encoding="${build.encoding}" srcdir="${test.src.dir}/testjar" includes="*.java" destdir="${test.build.testjar}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="test.classpath"/> </javac> <delete file="${test.build.testjar}/testjob.jar"/> <jar jarfile="${test.build.testjar}/testjob.jar" basedir="${test.build.testjar}"> </jar> <javac encoding="${build.encoding}" srcdir="${test.src.dir}/testshell" includes="*.java" 
destdir="${test.build.testshell}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}"/> <classpath refid="test.classpath"/> </javac> <delete file="${test.build.testshell}/testshell.jar"/> <jar jarfile="${test.build.testshell}/testshell.jar" basedir="${test.build.testshell}"> </jar> <delete dir="${test.cache.data}"/> <mkdir dir="${test.cache.data}"/> <delete dir="${test.debug.data}"/> <mkdir dir="${test.debug.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/testscript.txt" todir="${test.debug.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.txt" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.jar" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.zip" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.tar" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.tgz" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.tar.gz" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/hdfs/hadoop-14-dfs-dir.tgz" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/testConf.xml" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data15bytes" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data30bytes" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data60bytes" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data120bytes" todir="${test.cache.data}"/> </target> <!-- 
================================================================== --> <!-- Make hadoop-test.jar --> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> <target name="jar-test" depends="compile-core-test" description="Make hadoop-test.jar"> - <jar jarfile="${build.dir}/${final.name}-test.jar" + <jar jarfile="${build.dir}/${test.final.name}.jar" basedir="${test.build.classes}"> <manifest> <attribute name="Main-Class" value="org/apache/hadoop/test/AllTestDriver"/> <section name="org/apache/hadoop"> <attribute name="Implementation-Title" value="Hadoop"/> <attribute name="Implementation-Version" value="${version}"/> <attribute name="Implementation-Vendor" value="Yahoo!"/> </section> </manifest> </jar> </target> <!-- ================================================================== --> <!-- Run unit tests --> <!-- ================================================================== --> <target name="test-core" depends="jar-test" description="Run core unit tests"> <delete dir="${test.build.data}"/> <mkdir dir="${test.build.data}"/> <delete dir="${test.log.dir}"/> <mkdir dir="${test.log.dir}"/> <copy file="${test.src.dir}/hadoop-policy.xml" todir="${test.build.extraconf}" /> <junit showoutput="${test.output}" printsummary="${test.junit.printsummary}" haltonfailure="${test.junit.haltonfailure}" fork="yes" forkmode="${test.junit.fork.mode}" maxmemory="${test.junit.maxmemory}" dir="${basedir}" timeout="${test.timeout}" errorProperty="tests.failed" failureProperty="tests.failed"> <sysproperty key="test.build.data" value="${test.build.data}"/> <sysproperty key="test.tools.input.dir" value="${test.tools.input.dir}"/> <sysproperty key="test.cache.data" value="${test.cache.data}"/> <sysproperty key="test.debug.data" value="${test.debug.data}"/> <sysproperty key="hadoop.log.dir" value="${test.log.dir}"/> <sysproperty key="test.src.dir" value="${test.src.dir}"/> <sysproperty 
key="taskcontroller-path" value="${taskcontroller-path}"/> <sysproperty key="taskcontroller-user" value="${taskcontroller-user}"/> <sysproperty key="test.build.extraconf" value="${test.build.extraconf}" /> <sysproperty key="hadoop.policy.file" value="hadoop-policy.xml"/> <sysproperty key="java.library.path" value="${build.native}/lib:${lib.dir}/native/${build.platform}"/> <sysproperty key="install.c++.examples" value="${install.c++.examples}"/> <!-- set io.compression.codec.lzo.class in the child jvm only if it is set --> <syspropertyset dynamic="no"> <propertyref name="io.compression.codec.lzo.class"/> </syspropertyset> <!-- set compile.c++ in the child jvm only if it is set --> <syspropertyset dynamic="no"> <propertyref name="compile.c++"/> </syspropertyset> <classpath refid="${test.classpath.id}"/> <formatter type="${test.junit.output.format}" /> <batchtest todir="${test.build.dir}" unless="testcase"> <fileset dir="${test.src.dir}" includes="**/${test.include}.java" excludes="**/${test.exclude}.java" /> </batchtest> <batchtest todir="${test.build.dir}" if="testcase"> <fileset dir="${test.src.dir}" includes="**/${testcase}.java"/> </batchtest> </junit> <fail if="tests.failed">Tests failed!</fail> </target> <target name="test-contrib" depends="compile, compile-core-test" description="Run contrib unit tests"> <subant target="test"> <property name="version" value="${version}"/> <fileset file="${contrib.dir}/build.xml"/> </subant> </target> <target name="test" depends="test-core, test-contrib" description="Run core, contrib unit tests"> </target> <!-- Run all unit tests, not just Test*, and use non-test configuration. 
--> <target name="test-cluster" description="Run all unit tests, not just Test*, and use non-test configuration."> <antcall target="test"> <param name="test.include" value="*"/> <param name="test.classpath.id" value="test.cluster.classpath"/> </antcall> </target> <target name="nightly" depends="test, tar"> </target> <!-- ================================================================== --> <!-- Run optional third-party tool targets --> <!-- ================================================================== --> <target name="checkstyle" depends="ivy-retrieve-checkstyle,check-for-checkstyle" if="checkstyle.present" description="Run optional third-party tool targets"> <taskdef resource="checkstyletask.properties"> <classpath refid="checkstyle-classpath"/> </taskdef> <mkdir dir="${test.build.dir}"/> <checkstyle config="${test.src.dir}/checkstyle.xml" failOnViolation="false"> <fileset dir="${core.src.dir}" includes="**/*.java" excludes="**/generated/**"/> <fileset dir="${mapred.src.dir}" includes="**/*.java" excludes="**/generated/**"/> <fileset dir="${hdfs.src.dir}" includes="**/*.java" excludes="**/generated/**"/> <formatter type="xml" toFile="${test.build.dir}/checkstyle-errors.xml"/> </checkstyle> <xslt style="${test.src.dir}/checkstyle-noframes-sorted.xsl" in="${test.build.dir}/checkstyle-errors.xml" out="${test.build.dir}/checkstyle-errors.html"/> </target> <target name="check-for-checkstyle"> <available property="checkstyle.present" resource="checkstyletask.properties"> <classpath refid="checkstyle-classpath"/> </available> </target> <property name="findbugs.home" value=""/> <target name="findbugs" depends="check-for-findbugs, tar" if="findbugs.present" description="Run findbugs if present"> <property name="findbugs.out.dir" value="${test.build.dir}/findbugs"/> <property name="findbugs.exclude.file" value="${test.src.dir}/findbugsExcludeFile.xml"/> <property name="findbugs.report.htmlfile" value="${findbugs.out.dir}/hadoop-findbugs-report.html"/> <property 
name="findbugs.report.xmlfile" value="${findbugs.out.dir}/hadoop-findbugs-report.xml"/> <taskdef name="findbugs" classname="edu.umd.cs.findbugs.anttask.FindBugsTask" classpath="${findbugs.home}/lib/findbugs-ant.jar" /> <mkdir dir="${findbugs.out.dir}"/> <findbugs home="${findbugs.home}" output="xml:withMessages" outputFile="${findbugs.report.xmlfile}" effort="max" excludeFilter="${findbugs.exclude.file}" jvmargs="-Xmx512M"> <auxClasspath> <fileset dir="${lib.dir}"> <include name="**/*.jar"/> </fileset> <fileset dir="${build.ivy.lib.dir}/${ant.project.name}/common"> <include name="**/*.jar"/> </fileset> </auxClasspath> <sourcePath path="${core.src.dir}"/> <sourcePath path="${mapred.src.dir}"/> <sourcePath path="${hdfs.src.dir}"/> <sourcePath path="${examples.dir}" /> <sourcePath path="${tools.src}" /> <sourcePath path="${basedir}/src/contrib/streaming/src/java" /> <class location="${basedir}/build/${final.name}-core.jar" /> <class location="${basedir}/build/${final.name}-examples.jar" /> <class location="${basedir}/build/${final.name}-tools.jar" /> <class location="${basedir}/build/contrib/streaming/${final.name}-streaming.jar" /> </findbugs> <xslt style="${findbugs.home}/src/xsl/default.xsl" in="${findbugs.report.xmlfile}" out="${findbugs.report.htmlfile}"/> </target> <target name="check-for-findbugs"> <available property="findbugs.present" file="${findbugs.home}/lib/findbugs.jar" /> </target> <!-- ================================================================== --> <!-- Documentation --> <!-- ================================================================== --> <target name="docs" depends="forrest.check" description="Generate forrest-based documentation. To use, specify -Dforrest.home=&lt;base of Apache Forrest installation&gt; on the command line." 
if="forrest.home"> <exec dir="${docs.src}" executable="${forrest.home}/bin/forrest" failonerror="true"> <env key="JAVA_HOME" value="${java5.home}"/> </exec> <copy todir="${build.docs}"> <fileset dir="${docs.src}/build/site/" /> </copy> <copy file="${docs.src}/releasenotes.html" todir="${build.docs}"/> <style basedir="${core.src.dir}" destdir="${build.docs}" includes="core-default.xml" style="conf/configuration.xsl"/> <style basedir="${hdfs.src.dir}" destdir="${build.docs}" includes="hdfs-default.xml" style="conf/configuration.xsl"/> <style basedir="${mapred.src.dir}" destdir="${build.docs}" includes="mapred-default.xml" style="conf/configuration.xsl"/> <antcall target="changes-to-html"/> <antcall target="cn-docs"/> </target> <target name="cn-docs" depends="forrest.check, init" description="Generate forrest-based Chinese documentation. To use, specify -Dforrest.home=&lt;base of Apache Forrest installation&gt; on the command line." if="forrest.home"> <exec dir="${src.docs.cn}" executable="${forrest.home}/bin/forrest" failonerror="true"> <env key="LANG" value="en_US.utf8"/> <env key="JAVA_HOME" value="${java5.home}"/> </exec> <copy todir="${build.docs.cn}"> <fileset dir="${src.docs.cn}/build/site/" /> </copy> <style basedir="${core.src.dir}" destdir="${build.docs.cn}" includes="core-default.xml" style="conf/configuration.xsl"/> <style basedir="${hdfs.src.dir}" destdir="${build.docs.cn}" includes="hdfs-default.xml" style="conf/configuration.xsl"/> <style basedir="${mapred.src.dir}" destdir="${build.docs.cn}" includes="mapred-default.xml" style="conf/configuration.xsl"/> <antcall target="changes-to-html"/> </target> <target name="forrest.check" unless="forrest.home" depends="java5.check"> <fail message="'forrest.home' is not defined. Please pass -Dforrest.home=&lt;base of Apache Forrest installation&gt; to Ant on the command-line." /> </target> <target name="java5.check" unless="java5.home"> <fail message="'java5.home' is not defined. Forrest requires Java 5. 
Please pass -Djava5.home=&lt;base of Java 5 distribution&gt; to Ant on the command-line." /> </target> <target name="javadoc-dev" description="Generate javadoc for hadoop developers"> <mkdir dir="${build.javadoc.dev}"/> <javadoc overview="${core.src.dir}/overview.html" packagenames="org.apache.hadoop.*" destdir="${build.javadoc.dev}" author="true" version="true" use="true" windowtitle="${Name} ${version} API" doctitle="${Name} ${version} Developer API" bottom="This release is based on the Yahoo! Distribution of Hadoop, powering the largest Hadoop clusters in the Universe!&lt;br>Copyright &amp;copy; ${year} The Apache Software Foundation." > <packageset dir="${core.src.dir}"/> <packageset dir="${mapred.src.dir}"/> <packageset dir="${hdfs.src.dir}"/> <packageset dir="${examples.dir}"/> <packageset dir="src/contrib/streaming/src/java"/> <packageset dir="src/contrib/data_join/src/java"/> <packageset dir="src/contrib/index/src/java"/> <link href="${javadoc.link.java}"/> <classpath > <path refid="classpath" /> <fileset dir="src/contrib/"> <include name="*/lib/*.jar" /> </fileset> <pathelement path="${java.class.path}"/> </classpath> <group title="Core" packages="org.apache.*"/> <group title="Examples" packages="org.apache.hadoop.examples*"/> <group title="contrib: Streaming" packages="org.apache.hadoop.streaming*"/> <group title="contrib: DataJoin" packages="org.apache.hadoop.contrib.utils.join*"/> <group title="contrib: Index" packages="org.apache.hadoop.contrib.index*"/> </javadoc> </target> <target name="javadoc" depends="compile, ivy-retrieve-javadoc" description="Generate javadoc"> <mkdir dir="${build.javadoc}"/> <javadoc overview="${core.src.dir}/overview.html" packagenames="org.apache.hadoop.*" destdir="${build.javadoc}" author="true" version="true" use="true" windowtitle="${Name} ${version} API" doctitle="${Name} ${version} API" bottom="This release is based on the Yahoo! 
Distribution of Hadoop, powering the largest Hadoop clusters in the Universe!&lt;br>Copyright &amp;copy; ${year} The Apache Software Foundation." > <packageset dir="${core.src.dir}"/> <packageset dir="${mapred.src.dir}"/> <packageset dir="${examples.dir}"/> <packageset dir="src/contrib/streaming/src/java"/> <packageset dir="src/contrib/data_join/src/java"/> <packageset dir="src/contrib/index/src/java"/> <packageset dir="src/contrib/failmon/src/java/"/> <link href="${javadoc.link.java}"/> <classpath > <path refid="classpath" /> <fileset dir="src/contrib/"> <include name="*/lib/*.jar" /> </fileset> <path refid="javadoc-classpath"/> <pathelement path="${java.class.path}"/> <pathelement location="${build.tools}"/> </classpath> <group title="Core" packages="org.apache.*"/> <group title="Examples" packages="org.apache.hadoop.examples*"/> <group title="contrib: Streaming" packages="org.apache.hadoop.streaming*"/> <group title="contrib: DataJoin" packages="org.apache.hadoop.contrib.utils.join*"/> <group title="contrib: Index" packages="org.apache.hadoop.contrib.index*"/> <group title="contrib: FailMon" packages="org.apache.hadoop.contrib.failmon*"/> </javadoc> </target> <target name="api-xml" depends="ivy-retrieve-jdiff,javadoc,write-null"> <javadoc> <doclet name="jdiff.JDiff" path="${jdiff.jar}:${xerces.jar}"> <param name="-apidir" value="${jdiff.xml.dir}"/> <param name="-apiname" value="hadoop ${version}"/> </doclet> <packageset dir="src/core"/> <packageset dir="src/mapred"/> <packageset dir="src/tools"/> <classpath > <path refid="classpath" /> <path refid="jdiff-classpath" /> <pathelement path="${java.class.path}"/> </classpath> </javadoc> </target> <target name="write-null"> <exec executable="touch"> <arg value="${jdiff.home}/Null.java"/> </exec> </target> <target name="api-report" depends="ivy-retrieve-jdiff,api-xml"> <mkdir dir="${jdiff.build.dir}"/> <javadoc sourcepath="src/core,src/hdfs,src,mapred,src/tools" destdir="${jdiff.build.dir}" 
sourceFiles="${jdiff.home}/Null.java"> <doclet name="jdiff.JDiff" path="${jdiff.jar}:${xerces.jar}"> <param name="-oldapi" value="hadoop ${jdiff.stable}"/> <param name="-newapi" value="hadoop ${version}"/> <param name="-oldapidir" value="${jdiff.xml.dir}"/> <param name="-newapidir" value="${jdiff.xml.dir}"/> <param name="-javadocold" value="${jdiff.stable.javadoc}"/> <param name="-javadocnew" value="../../api/"/> <param name="-stats"/> </doclet> <classpath > <path refid="classpath" /> <path refid="jdiff-classpath"/> <pathelement path="${java.class.path}"/> </classpath> </javadoc> </target> <target name="changes-to-html" description="Convert CHANGES.txt into an html file"> <mkdir dir="${build.docs}"/> <exec executable="perl" input="CHANGES.txt" output="${build.docs}/changes.html" failonerror="true"> <arg value="${changes.src}/changes2html.pl"/> </exec> <copy todir="${build.docs}"> <fileset dir="${changes.src}" includes="*.css"/> </copy> </target> <!-- ================================================================== --> <!-- D I S T R I B U T I O N --> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> - <target name="package" depends="compile, jar, javadoc, docs, cn-docs, api-report, examples, tools-jar, jar-test, ant-tasks, package-librecordio" + <target name="package" depends="compile, jar, javadoc, examples, tools-jar, jar-test, ant-tasks, package-librecordio" description="Build distribution"> <mkdir dir="${dist.dir}"/> <mkdir dir="${dist.dir}/lib"/> <mkdir dir="${dist.dir}/contrib"/> <mkdir dir="${dist.dir}/bin"/> <mkdir dir="${dist.dir}/docs"/> <mkdir dir="${dist.dir}/docs/api"/> <mkdir dir="${dist.dir}/docs/jdiff"/> <copy todir="${dist.dir}/lib" includeEmptyDirs="false" flatten="true"> <fileset dir="${common.ivy.lib.dir}"/> </copy> <copy todir="${dist.dir}/lib" includeEmptyDirs="false"> <fileset dir="lib"> <exclude name="**/native/**"/> </fileset> 
</copy> <exec dir="${dist.dir}" executable="sh" failonerror="true"> <env key="BASE_NATIVE_LIB_DIR" value="${lib.dir}/native"/> <env key="BUILD_NATIVE_DIR" value="${build.dir}/native"/> <env key="DIST_LIB_DIR" value="${dist.dir}/lib/native"/> <arg line="${native.src.dir}/packageNativeHadoop.sh"/> </exec> <subant target="package"> <!--Pass down the version in case its needed again and the target distribution directory so contribs know where to install to.--> <property name="version" value="${version}"/> <property name="dist.dir" value="${dist.dir}"/> <fileset file="${contrib.dir}/build.xml"/> </subant> <copy todir="${dist.dir}/webapps"> <fileset dir="${build.webapps}"/> </copy> <copy todir="${dist.dir}"> <fileset file="${build.dir}/${final.name}-*.jar"/> </copy> <copy todir="${dist.dir}/bin"> <fileset dir="bin"/> </copy> <copy todir="${dist.dir}/conf"> <fileset dir="${conf.dir}" excludes="**/*.template"/> </copy> <copy todir="${dist.dir}/docs"> <fileset dir="${build.docs}"/> </copy> <copy file="ivy.xml" tofile="${dist.dir}/ivy.xml"/> <copy todir="${dist.dir}/ivy"> <fileset dir="ivy"/> </copy> <copy todir="${dist.dir}"> <fileset dir="."> <include name="*.txt" /> </fileset> </copy> <copy todir="${dist.dir}/src" includeEmptyDirs="true"> <fileset dir="src" excludes="**/*.template **/docs/build/**/*"/> </copy> <copy todir="${dist.dir}/c++" includeEmptyDirs="false"> <fileset dir="${build.dir}/c++"/> </copy> <copy todir="${dist.dir}/" file="build.xml"/> <chmod perm="ugo+x" type="file" parallel="false"> <fileset dir="${dist.dir}/bin"/> <fileset dir="${dist.dir}/src/contrib/"> <include name="*/bin/*" /> </fileset> <fileset dir="${dist.dir}/src/contrib/ec2/bin/image"/> </chmod> <chmod perm="ugo+x" type="file"> <fileset dir="${dist.dir}/src/c++/pipes/debug"/> </chmod> </target> <!-- ================================================================== --> <!-- Make release tarball --> <!-- ================================================================== --> <target name="tar" 
depends="package" description="Make release tarball"> <macro_tar param.destfile="${build.dir}/${final.name}.tar.gz"> <param.listofitems> <tarfileset dir="${build.dir}" mode="664"> <exclude name="${final.name}/bin/*" /> <exclude name="${final.name}/contrib/*/bin/*" /> <exclude name="${final.name}/src/contrib/ec2/bin/*" /> <exclude name="${final.name}/src/contrib/ec2/bin/image/*" /> <include name="${final.name}/**" /> </tarfileset> <tarfileset dir="${build.dir}" mode="755"> <include name="${final.name}/bin/*" /> <include name="${final.name}/contrib/*/bin/*" /> <include name="${final.name}/src/contrib/ec2/bin/*" /> <include name="${final.name}/src/contrib/ec2/bin/image/*" /> </tarfileset> </param.listofitems> </macro_tar> </target> <target name="bin-package" depends="compile, jar, examples, tools-jar, jar-test, ant-tasks, package-librecordio" description="assembles artifacts for binary target"> <mkdir dir="${dist.dir}"/> <mkdir dir="${dist.dir}/lib"/> <mkdir dir="${dist.dir}/contrib"/> <mkdir dir="${dist.dir}/bin"/> <copy todir="${dist.dir}/lib" includeEmptyDirs="false" flatten="true"> <fileset dir="${common.ivy.lib.dir}"/> </copy> <copy todir="${dist.dir}/lib" includeEmptyDirs="false"> <fileset dir="lib"> <exclude name="**/native/**"/> </fileset> </copy> <exec dir="${dist.dir}" executable="sh" failonerror="true"> <env key="BASE_NATIVE_LIB_DIR" value="${lib.dir}/native"/> <env key="BUILD_NATIVE_DIR" value="${build.dir}/native"/> <env key="DIST_LIB_DIR" value="${dist.dir}/lib/native"/> <arg line="${native.src.dir}/packageNativeHadoop.sh"/> </exec> <subant target="package"> <!--Pass down the version in case its needed again and the target distribution directory so contribs know where to install to.--> <property name="version" value="${version}"/> <property name="dist.dir" value="${dist.dir}"/> <fileset file="${contrib.dir}/build.xml"/> </subant> <copy todir="${dist.dir}/webapps"> <fileset dir="${build.webapps}"/> </copy> <copy todir="${dist.dir}"> <fileset 
file="${build.dir}/${final.name}-*.jar"/> </copy> <copy todir="${dist.dir}/bin"> <fileset dir="bin"/> </copy> <copy todir="${dist.dir}/conf"> <fileset dir="${conf.dir}" excludes="**/*.template"/> </copy> <copy file="ivy.xml" tofile="${dist.dir}/ivy.xml"/> <copy todir="${dist.dir}/ivy"> <fileset dir="ivy"/> </copy> <copy todir="${dist.dir}"> <fileset dir="."> <include name="*.txt" /> </fileset> </copy> <copy todir="${dist.dir}/c++" includeEmptyDirs="false"> <fileset dir="${build.dir}/c++"/> </copy> <copy todir="${dist.dir}/" file="build.xml"/> <chmod perm="ugo+x" type="file" parallel="false"> <fileset dir="${dist.dir}/bin"/> </chmod> </target> <target name="binary" depends="bin-package" description="Make tarball without source and documentation"> <macro_tar param.destfile="${build.dir}/${final.name}-bin.tar.gz"> <param.listofitems> <tarfileset dir="${build.dir}" mode="664"> <exclude name="${final.name}/bin/*" /> <exclude name="${final.name}/src/**" /> <exclude name="${final.name}/docs/**" /> <include name="${final.name}/**" /> </tarfileset> <tarfileset dir="${build.dir}" mode="755"> <include name="${final.name}/bin/*" /> </tarfileset> </param.listofitems> </macro_tar> </target> <!-- ================================================================== --> <!-- Perform audit activities for the release --> <!-- ================================================================== --> <target name="releaseaudit" depends="package,ivy-retrieve-releaseaudit" description="Release Audit activities"> <fail unless="rat.present" message="Failed to load class [${rat.reporting.classname}]."/> <java classname="${rat.reporting.classname}" fork="true"> <classpath refid="releaseaudit-classpath"/> <arg value="${build.dir}/${final.name}"/> </java> </target> <!-- ================================================================== --> <!-- Clean. 
Delete the build files, and their directories --> <!-- ================================================================== --> - <target name="clean" depends="clean-contrib" description="Clean. Delete the build files, and their directories"> + <target name="clean" depends="clean-contrib, clean-sign" description="Clean. Delete the build files, and their directories"> <delete dir="${build.dir}"/> <delete dir="${docs.src}/build"/> <delete dir="${src.docs.cn}/build"/> + <delete file="${basedir}/ivy/hadoop-core-pom.xml"/> + <delete file="${basedir}/ivy/hadoop-test-pom.xml"/> + <delete file="${basedir}/ivy/hadoop-examples-pom.xml"/> + <delete file="${basedir}/ivy/hadoop-tools-pom.xml"/> + <delete file="${basedir}/ivy/hadoop-streaming-pom.xml"/> + </target> + + <target name="clean-sign" description="Clean. Delete .asc files"> + <delete> + <fileset dir="." includes="**/**/*.asc"/> + </delete> + </target> + + <target name="veryclean" depends="clean" description="Delete mvn ant task jar and ivy ant taks jar"> + <delete file="${ant_task.jar}"/> + <delete file="${ivy.jar}"/> </target> + <!-- ================================================================== --> <!-- Clean contrib target. 
For now, must be called explicitly --> <!-- Using subant instead of ant as a workaround for 30569 --> <!-- ================================================================== --> <target name="clean-contrib"> <subant target="clean"> <fileset file="src/contrib/build.xml"/> </subant> </target> <target name="test-c++-libhdfs" depends="compile-c++-libhdfs, compile-core" if="islibhdfs"> <delete dir="${test.libhdfs.dir}"/> <mkdir dir="${test.libhdfs.dir}"/> <mkdir dir="${test.libhdfs.dir}/logs"/> <mkdir dir="${test.libhdfs.dir}/hdfs/name"/> <exec dir="${build.c++.libhdfs}" executable="${make.cmd}" failonerror="true"> <env key="OS_NAME" value="${os.name}"/> <env key="OS_ARCH" value="${os.arch}"/> <env key="JVM_ARCH" value="${jvm.arch}"/> <env key="LIBHDFS_BUILD_DIR" value="${build.c++.libhdfs}"/> <env key="HADOOP_HOME" value="${basedir}"/> <env key="HADOOP_CONF_DIR" value="${test.libhdfs.conf.dir}"/> <env key="HADOOP_LOG_DIR" value="${test.libhdfs.dir}/logs"/> <env key="LIBHDFS_SRC_DIR" value="${c++.libhdfs.src}"/> <env key="LIBHDFS_INSTALL_DIR" value="${install.c++}/lib"/> <env key="LIB_DIR" value="${common.ivy.lib.dir}"/> <arg value="test"/> </exec> </target> <!-- ================================================================== --> <!-- librecordio targets. 
--> <!-- ================================================================== --> <target name="compile-librecordio" depends="init" if="librecordio" > <mkdir dir="${build.librecordio}"/> <exec dir="${librecordio.src}" executable="${make.cmd}" failonerror="true"> <env key="XERCESCROOT" value="${xercescroot}"/> <env key="LIBRECORDIO_BUILD_DIR" value="${build.librecordio}"/> </exec> </target> <target name="test-librecordio" depends="compile-librecordio, compile-core" if="librecordio"> <delete dir="${librecordio.test.dir}"/> <mkdir dir="${librecordio.test.dir}"/> <exec dir="${librecordio.src}/test" executable="${make.cmd}" failonerror="true"> <env key="HADOOP_HOME" value="${basedir}"/> <env key="XERCESCROOT" value="${xercescroot}"/> <env key="LIBRECORDIO_BUILD_DIR" value="${build.librecordio}"/> <env key="LIBRECORDIO_TEST_DIR" value="${librecordio.test.dir}"/> <arg value="all"/> </exec> </target> <target name="package-librecordio" depends="compile-librecordio" if="librecordio"> <mkdir dir="${dist.dir}/librecordio"/> <copy todir="${dist.dir}/librecordio"> <fileset dir="${build.librecordio}" casesensitive="yes" followsymlinks="false"> <exclude name="**/tests/**"/> <exclude name="*.so"/> <exclude name="*.o"/> </fileset> </copy> <chmod perm="ugo+x" type="file"> <fileset dir="${dist.dir}/librecordio"/> </chmod> </target> <target name="create-c++-configure" depends="init" if="compile.c++"> <exec executable="autoreconf" dir="${c++.utils.src}" searchpath="yes" failonerror="yes"> <arg value="-if"/> </exec> <exec executable="autoreconf" dir="${c++.pipes.src}" searchpath="yes" failonerror="yes"> <arg value="-if"/> </exec> <exec executable="autoreconf" dir="${c++.examples.pipes.src}" searchpath="yes" failonerror="yes"> <arg value="-if"/> </exec> <antcall target="create-c++-configure-libhdfs"/> </target> <target name="create-c++-configure-libhdfs" depends="check-c++-libhdfs" if="islibhdfs"> <exec executable="autoreconf" dir="${c++.libhdfs.src}" searchpath="yes" failonerror="yes"> 
<arg value="-if"/> </exec> </target> <target name="check-c++-makefiles" depends="init" if="compile.c++"> <condition property="need.c++.utils.makefile"> <not> <available file="${build.c++.utils}/Makefile"/> </not> </condition> <condition property="need.c++.pipes.makefile"> <not> <available file="${build.c++.pipes}/Makefile"/> </not> </condition> <condition property="need.c++.examples.pipes.makefile"> <not> <available file="${build.c++.examples.pipes}/Makefile"/> </not> </condition> </target> <target name="check-c++-libhdfs"> <condition property="islibhdfs"> <and> <isset property="compile.c++"/> <isset property="libhdfs"/> </and> </condition> </target> <target name="check-c++-makefile-libhdfs" depends="init,check-c++-libhdfs" if="islibhdfs"> <condition property="need.c++.libhdfs.makefile"> <not> <available file="${build.c++.libhdfs}/Makefile"/> </not> </condition> </target> <target name="create-c++-libhdfs-makefile" depends="check-c++-makefile-libhdfs" if="need.c++.libhdfs.makefile"> <mkdir dir="${build.c++.libhdfs}"/> <chmod file="${c++.libhdfs.src}/configure" perm="ugo+x"/> <exec executable="${c++.libhdfs.src}/configure" dir="${build.c++.libhdfs}" failonerror="yes"> <env key="ac_cv_func_malloc_0_nonnull" value="yes"/> <env key="JVM_ARCH" value="${jvm.arch}"/> <arg value="--prefix=${install.c++}"/> </exec> </target> <target name="create-c++-utils-makefile" depends="check-c++-makefiles" if="need.c++.utils.makefile"> <mkdir dir="${build.c++.utils}"/> <exec executable="${c++.utils.src}/configure" dir="${build.c++.utils}" failonerror="yes"> <arg value="--prefix=${install.c++}"/> </exec> </target> <target name="compile-c++-utils" depends="create-c++-utils-makefile" if="compile.c++"> <exec executable="${make.cmd}" dir="${build.c++.utils}" searchpath="yes" failonerror="yes"> <arg value="install"/> </exec> </target> <target name="create-c++-pipes-makefile" depends="check-c++-makefiles" if="need.c++.pipes.makefile"> <mkdir dir="${build.c++.pipes}"/> <exec 
executable="${c++.pipes.src}/configure" dir="${build.c++.pipes}" failonerror="yes"> <arg value="--prefix=${install.c++}"/> </exec> </target> <target name="compile-c++-pipes" depends="create-c++-pipes-makefile,compile-c++-utils" if="compile.c++"> <exec executable="${make.cmd}" dir="${build.c++.pipes}" searchpath="yes" failonerror="yes"> <arg value="install"/> </exec> </target> <target name="compile-c++" depends="compile-c++-pipes"/> <target name="create-c++-examples-pipes-makefile" depends="check-c++-makefiles" if="need.c++.examples.pipes.makefile"> <mkdir dir="${build.c++.examples.pipes}"/> <exec executable="${c++.examples.pipes.src}/configure" dir="${build.c++.examples.pipes}" failonerror="yes"> <arg value="--prefix=${install.c++.examples}"/> <arg value="--with-hadoop-utils=${install.c++}"/> <arg value="--with-hadoop-pipes=${install.c++}"/> </exec> </target> <target name="compile-c++-examples-pipes" depends="create-c++-examples-pipes-makefile,compile-c++-pipes" if="compile.c++"> <exec executable="${make.cmd}" dir="${build.c++.examples.pipes}" searchpath="yes" failonerror="yes"> <arg value="install"/> </exec> </target> <target name="compile-c++-examples" depends="compile-c++-examples-pipes"/> <target name="compile-c++-libhdfs" depends="create-c++-libhdfs-makefile" if="islibhdfs"> <exec executable="${make.cmd}" dir="${build.c++.libhdfs}" searchpath="yes" failonerror="yes"> <env key="ac_cv_func_malloc_0_nonnull" value="yes"/> <env key="JVM_ARCH" value="${jvm.arch}"/> <arg value="install"/> </exec> </target> <target name="compile-ant-tasks" depends="compile-core"> <javac encoding="${build.encoding}" srcdir="${anttasks.dir}" includes="org/apache/hadoop/ant/**/*.java" destdir="${build.anttasks}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args}"/> <classpath refid="classpath"/> </javac> </target> <target name="ant-tasks" depends="jar, 
compile-ant-tasks"> <copy file="${anttasks.dir}/org/apache/hadoop/ant/antlib.xml" todir="${build.anttasks}/org/apache/hadoop/ant"/> - <jar destfile="${build.dir}/${final.name}-ant.jar"> + <jar destfile="${build.dir}/${ant.final.name}.jar"> <fileset dir="${build.anttasks}"/> </jar> </target> <target name="clover" depends="clover.setup, clover.info" description="Instrument the Unit tests using Clover. To use, specify -Dclover.home=&lt;base of clover installation&gt; -Drun.clover=true on the command line."/> <target name="clover.setup" if="clover.enabled"> <taskdef resource="cloverlib.xml" classpath="${clover.jar}"/> <mkdir dir="${clover.db.dir}"/> <clover-setup initString="${clover.db.dir}/hadoop_coverage.db"> <fileset dir="src" includes="core/**/* tools/**/* hdfs/**/* mapred/**/*"/> </clover-setup> </target> <target name="clover.info" unless="clover.present"> <echo> Clover not found. Code coverage reports disabled. </echo> </target> <target name="clover.check"> <fail unless="clover.present"> ################################################################## Clover not found. Please specify -Dclover.home=&lt;base of clover installation&gt; on the command line. ################################################################## </fail> </target> <target name="generate-clover-reports" depends="clover.check, clover"> <mkdir dir="${clover.report.dir}"/> <clover-report> <current outfile="${clover.report.dir}" title="${final.name}"> <format type="html"/> </current> </clover-report> <clover-report> <current outfile="${clover.report.dir}/clover.xml" title="${final.name}"> <format type="xml"/> </current> </clover-report> </target> <target name="findbugs.check" depends="check-for-findbugs" unless="findbugs.present"> <fail message="'findbugs.home' is not defined. Please pass -Dfindbugs.home=&lt;base of Findbugs installation&gt; to Ant on the command-line." /> </target> <target name="patch.check" unless="patch.file"> <fail message="'patch.file' is not defined. 
Please pass -Dpatch.file=&lt;location of patch file&gt; to Ant on the command-line." /> </target> <target name="test-patch" depends="patch.check,findbugs.check,forrest.check"> <exec executable="bash" failonerror="true"> <arg value="${basedir}/src/test/bin/test-patch.sh"/> <arg value="DEVELOPER"/> <arg value="${patch.file}"/> <arg value="${scratch.dir}"/> <arg value="${svn.cmd}"/> <arg value="${grep.cmd}"/> <arg value="${patch.cmd}"/> <arg value="${findbugs.home}"/> <arg value="${forrest.home}"/> <arg value="${basedir}"/> <arg value="${java5.home}"/> </exec> </target> <target name="hudson-test-patch" depends="findbugs.check,forrest.check"> <exec executable="bash" failonerror="true"> <arg value="${basedir}/src/test/bin/test-patch.sh"/> <arg value="HUDSON"/> <arg value="${scratch.dir}"/> <arg value="${support.dir}"/> <arg value="${ps.cmd}"/> <arg value="${wget.cmd}"/> <arg value="${jiracli.cmd}"/> <arg value="${svn.cmd}"/> <arg value="${grep.cmd}"/> <arg value="${patch.cmd}"/> <arg value="${findbugs.home}"/> <arg value="${forrest.home}"/> <arg value="${eclipse.home}"/> <arg value="${python.home}"/> <arg value="${basedir}"/> <arg value="${trigger.url}"/> <arg value="${jira.passwd}"/> <arg value="${java5.home}"/> </exec> </target> <target name="eclipse-files" depends="init" description="Generate files for Eclipse"> <pathconvert property="eclipse.project"> <path path="${basedir}"/> <regexpmapper from="^.*/([^/]+)$$" to="\1" handledirsep="yes"/> </pathconvert> <copy todir="." 
overwrite="true"> <fileset dir=".eclipse.templates"> <exclude name="**/README.txt"/> </fileset> <filterset> <filter token="PROJECT" value="${eclipse.project}"/> </filterset> </copy> </target> <target name="ivy-init-dirs"> <mkdir dir="${build.ivy.dir}" /> <mkdir dir="${build.ivy.lib.dir}" /> <mkdir dir="${build.ivy.report.dir}" /> - <mkdir dir="${build.ivy.maven.dir}" /> </target> <target name="ivy-probe-antlib" > <condition property="ivy.found"> <typefound uri="antlib:org.apache.ivy.ant" name="cleancache"/> </condition> </target> <target name="ivy-download" description="To download ivy" unless="offline"> <get src="${ivy_repo_url}" dest="${ivy.jar}" usetimestamp="true"/> </target> <!-- To avoid Ivy leaking things across big projects, always load Ivy in the same classloader. Also note how we skip loading Ivy if it is already there, just to make sure all is well. --> <target name="ivy-init-antlib" depends="ivy-download,ivy-init-dirs,ivy-probe-antlib" unless="ivy.found"> <typedef uri="antlib:org.apache.ivy.ant" onerror="fail" loaderRef="ivyLoader"> <classpath> <pathelement location="${ivy.jar}"/> </classpath> </typedef> <fail > <condition > <not> <typefound uri="antlib:org.apache.ivy.ant" name="cleancache"/> </not> </condition> You need Apache Ivy 2.0 or later from http://ant.apache.org/ It could not be loaded from ${ivy_repo_url} </fail> </target> <target name="ivy-init" depends="ivy-init-antlib" > <!--Configure Ivy by reading in the settings file If anyone has already read in a settings file into this settings ID, it gets priority --> <ivy:configure settingsid="${ant.project.name}.ivy.settings" file="${ivysettings.xml}" override='false'/> </target> <target name="ivy-resolve" depends="ivy-init"> <ivy:resolve settingsRef="${ant.project.name}.ivy.settings"/> </target> <target name="ivy-resolve-javadoc" depends="ivy-init"> <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="javadoc"/> </target> <target name="ivy-resolve-releaseaudit" depends="ivy-init"> 
<ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="releaseaudit"/> </target> <target name="ivy-resolve-test" depends="ivy-init"> <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="test" /> </target> <target name="ivy-resolve-common" depends="ivy-init"> <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="common" /> </target> <target name="ivy-resolve-jdiff" depends="ivy-init"> <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="jdiff" /> </target> <target name="ivy-resolve-checkstyle" depends="ivy-init"> <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="checkstyle"/> </target> <target name="ivy-retrieve" depends="ivy-resolve" description="Retrieve Ivy-managed artifacts"> <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings" pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/> </target> <target name="ivy-retrieve-checkstyle" depends="ivy-resolve-checkstyle" description="Retrieve Ivy-managed artifacts for the checkstyle configurations"> <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings" pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/> <ivy:cachepath pathid="checkstyle-classpath" conf="checkstyle"/> </target> <target name="ivy-retrieve-jdiff" depends="ivy-resolve-jdiff" description="Retrieve Ivy-managed artifacts for the javadoc configurations"> <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings" pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/> <ivy:cachepath pathid="jdiff-classpath" conf="jdiff"/> </target> <target name="ivy-retrieve-javadoc" depends="ivy-resolve-javadoc" description="Retrieve Ivy-managed artifacts for the javadoc configurations"> <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings" pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/> <ivy:cachepath pathid="javadoc-classpath" conf="javadoc"/> </target> <target name="ivy-retrieve-test" depends="ivy-resolve-test" description="Retrieve 
Ivy-managed artifacts for the test configurations"> <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings" pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/> <ivy:cachepath pathid="test.classpath" conf="test"/> </target> <target name="ivy-retrieve-common" depends="ivy-resolve-common" description="Retrieve Ivy-managed artifacts for the compile configurations"> <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings" pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/> <ivy:cachepath pathid="ivy-common.classpath" conf="common"/> </target> <target name="ivy-retrieve-releaseaudit" depends="ivy-resolve-releaseaudit" description="Retrieve Ivy-managed artifacts for the compile configurations"> <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings" pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}" /> <ivy:cachepath pathid="releaseaudit-classpath" conf="releaseaudit"/> <available classname="${rat.reporting.classname}" classpathref="releaseaudit-classpath" property="rat.present" value="true"/> </target> <target name="ivy-report" depends="ivy-resolve-releaseaudit" description="Generate"> <ivy:report todir="${build.ivy.report.dir}" settingsRef="${ant.project.name}.ivy.settings"/> <echo> Reports generated:${build.ivy.report.dir} </echo> </target> - <target name="assert-hadoop-jar-exists" depends="ivy-init"> - <fail> - <condition > - <not> - <available file="${hadoop.jar}" /> - </not> - </condition> - Not found: ${hadoop.jar} - Please run the target "jar" in the main build file - </fail> - - </target> - - <target name="ready-to-publish" depends="jar,assert-hadoop-jar-exists,ivy-resolve"/> - - <target name="ivy-publish-local" depends="ready-to-publish,ivy-resolve"> - <ivy:publish - settingsRef="${ant.project.name}.ivy.settings" - resolver="local" - pubrevision="${hadoop.version}" - overwrite="true" - artifactspattern="${build.dir}/${ivy.publish.pattern}" /> - </target> - - - <!-- this is here for curiosity, to see how well 
the makepom task works - Answer: it depends whether you want transitive dependencies excluded or not - --> - <target name="makepom" depends="ivy-resolve"> - <ivy:makepom settingsRef="${ant.project.name}.ivy.settings" - ivyfile="ivy.xml" - pomfile="${build.ivy.maven.dir}/generated.pom"> - <ivy:mapping conf="default" scope="default"/> - <ivy:mapping conf="master" scope="master"/> - <ivy:mapping conf="runtime" scope="runtime"/> - </ivy:makepom> - </target> - - - <target name="copy-jar-to-maven" depends="ready-to-publish"> - <copy file="${hadoop.jar}" - tofile="${build.ivy.maven.jar}"/> - <checksum file="${build.ivy.maven.jar}" algorithm="md5"/> - </target> - - <target name="copypom" depends="ivy-init-dirs"> - - <presetdef name="expandingcopy" > - <copy overwrite="true"> - <filterchain> - <expandproperties/> - </filterchain> - </copy> - </presetdef> - - <expandingcopy file="ivy/hadoop-core.pom" - tofile="${build.ivy.maven.pom}"/> - <checksum file="${build.ivy.maven.pom}" algorithm="md5"/> - </target> - - <target name="maven-artifacts" depends="copy-jar-to-maven,copypom" /> - - <target name="published" depends="ivy-publish-local,maven-artifacts"> - + <target name="ant-task-download" description="To download mvn-ant-task"> + <get src="${ant_task_repo_url}" dest="${ant_task.jar}" usetimestamp="true"/> + </target> + + <target name="mvn-taskdef" depends="ant-task-download"> + <path id="mvn-ant-task.classpath" path="${ant_task.jar}"/> + <typedef resource="org/apache/maven/artifact/ant/antlib.xml" + uri="urn:maven-artifact-ant" + classpathref="mvn-ant-task.classpath"/> + </target> + + <target name="mvn-install" depends="mvn-taskdef,bin-package,set-version" + description="To install hadoop core and test jars to local filesystem's m2 cache"> + <artifact:pom file="${hadoop-core.pom}" id="hadoop.core"/> + <artifact:pom file="${hadoop-test.pom}" id="hadoop.test"/> + <artifact:pom file="${hadoop-examples.pom}" id="hadoop.examples"/> + <artifact:pom file="${hadoop-tools.pom}" 
id="hadoop.tools"/> + <artifact:pom file="${hadoop-streaming.pom}" id="hadoop.streaming"/> + + <artifact:install file="${hadoop-core.jar}"> + <pom refid="hadoop.core"/> + </artifact:install> + <artifact:install file="${hadoop-test.jar}"> + <pom refid="hadoop.test"/> + </artifact:install> + <artifact:install file="${hadoop-tools.jar}"> + <pom refid="hadoop.tools"/> + </artifact:install> + <artifact:install file="${hadoop-examples.jar}"> + <pom refid="hadoop.examples"/> + </artifact:install> + <artifact:install file="${hadoop-streaming.jar}"> + <pom refid="hadoop.streaming"/> + </artifact:install> + </target> + + <target name="mvn-deploy" depends="mvn-taskdef, bin-package, set-version, signanddeploy, simpledeploy" + description="To deploy hadoop core and test jar's to apache maven repository"/> + + <target name="signanddeploy" if="staging" depends="sign"> + <artifact:pom file="${hadoop-core.pom}" id="hadoop.core"/> + <artifact:pom file="${hadoop-test.pom}" id="hadoop.core.test"/> + <artifact:pom file="${hadoop-examples.pom}" id="hadoop.examples"/> + <artifact:pom file="${hadoop-tools.pom}" id="hadoop.tools"/> + <artifact:pom file="${hadoop-streaming.pom}" id="hadoop.streaming"/> + <artifact:install-provider artifactId="wagon-http" + version="${wagon-http.version}"/> + <artifact:deploy file="${hadoop-core.jar}"> + <remoteRepository id="apache.staging.https" url="${asfstagingrepo}"/> + <pom refid="hadoop.core"/> + <attach file="${hadoop-core.jar}.asc" type="jar.asc"/> + <attach file="${hadoop-core.pom}.asc" type="pom.asc"/> + </artifact:deploy> + <artifact:deploy file="${hadoop-test.jar}"> + <remoteRepository id="apache.staging.https" url="${asfstagingrepo}"/> + <pom refid="hadoop.core.test"/> + <attach file="${hadoop-test.jar}.asc" type="jar.asc"/> + <attach file="${hadoop-test.pom}.asc" type="pom.asc"/> + </artifact:deploy> + <artifact:deploy file="${hadoop-tools.jar}"> + <remoteRepository id="apache.staging.https" url="${asfstagingrepo}"/> + <pom 
refid="hadoop.tools"/> + <attach file="${hadoop-tools.jar}.asc" type="jar.asc"/> + <attach file="${hadoop-tools.pom}.asc" type="pom.asc"/> + </artifact:deploy> + <artifact:deploy file="${hadoop-examples.jar}"> + <remoteRepository id="apache.staging.https" url="${asfstagingrepo}"/> + <pom refid="hadoop.examples"/> + <attach file="${hadoop-examples.jar}.asc" type="jar.asc"/> + <attach file="${hadoop-examples.pom}.asc" type="pom.asc"/> + </artifact:deploy> + <artifact:deploy file="${hadoop-streaming.jar}"> + <remoteRepository id="apache.staging.https" url="${asfstagingrepo}"/> + <pom refid="hadoop.streaming"/> + <attach file="${hadoop-streaming.jar}.asc" type="jar.asc"/> + <attach file="${hadoop-streaming.pom}.asc" type="pom.asc"/> + </artifact:deploy> + </target> + + <target name="sign" depends="clean-sign" if="staging"> + <input message="password:>" addproperty="gpg.passphrase"> + <handler classname="org.apache.tools.ant.input.SecureInputHandler" /> + </input> + <macrodef name="sign-artifact" description="Signs the artifact"> + <attribute name="input.file"/> + <attribute name="output.file" default="@{input.file}.asc"/> + <attribute name="gpg.passphrase"/> + <sequential> + <echo>Signing @{input.file} Sig File: @{output.file}</echo> + <exec executable="gpg" > + <arg value="--armor"/> + <arg value="--output"/> + <arg value="@{output.file}"/> + <arg value="--passphrase"/> + <arg value="@{gpg.passphrase}"/> + <arg value="--detach-sig"/> + <arg value="@{input.file}"/> + </exec> + </sequential> + </macrodef> + <echo> phassphrase : ${gpg.passphrase} </echo> + <sign-artifact input.file="${hadoop-core.jar}" + output.file="${hadoop-core.jar}.asc" gpg.passphrase="${gpg.passphrase}"/> + <sign-artifact input.file="${hadoop-test.jar}" + output.file="${hadoop-test.jar}.asc" gpg.passphrase="${gpg.passphrase}"/> + <sign-artifact input.file="${hadoop-tools.jar}" + output.file="${hadoop-tools.jar}.asc" gpg.passphrase="${gpg.passphrase}"/> + <sign-artifact 
input.file="${hadoop-examples.jar}" + output.file="${hadoop-examples.jar}.asc" gpg.passphrase="${gpg.passphrase}"/> + <sign-artifact input.file="${hadoop-streaming.jar}" + output.file="${hadoop-streaming.jar}.asc" gpg.passphrase="${gpg.passphrase}"/> + <sign-artifact input.file="${hadoop-core.pom}" + output.file="${hadoop-core.pom}.asc" gpg.passphrase="${gpg.passphrase}"/> + <sign-artifact input.file="${hadoop-test.pom}" + output.file="${hadoop-test.pom}.asc" gpg.passphrase="${gpg.passphrase}"/> + <sign-artifact input.file="${hadoop-tools.pom}" + output.file="${hadoop-tools.pom}.asc" gpg.passphrase="${gpg.passphrase}"/> + <sign-artifact input.file="${hadoop-examples.pom}" + output.file="${hadoop-examples.pom}.asc" gpg.passphrase="${gpg.passphrase}"/> + <sign-artifact input.file="${hadoop-streaming.pom}" + output.file="${hadoop-streaming.pom}.asc" gpg.passphrase="${gpg.passphrase}"/> + </target> + + <target name="simpledeploy" unless="staging"> + <artifact:pom file="${hadoop-core.pom}" id="hadoop.core"/> + <artifact:pom file="${hadoop-test.pom}" id="hadoop.test"/> + <artifact:pom file="${hadoop-examples.pom}" id="hadoop.examples"/> + <artifact:pom file="${hadoop-tools.pom}" id="hadoop.tools"/> + <artifact:pom file="${hadoop-streaming.pom}" id="hadoop.streaming"/> + + <artifact:install-provider artifactId="wagon-http" version="${wagon-http.version}"/> + <artifact:deploy file="${hadoop-core.jar}"> + <remoteRepository id="apache.snapshots.https" url="${asfsnapshotrepo}"/> + <pom refid="hadoop.core"/> + </artifact:deploy> + <artifact:deploy file="${hadoop-test.jar}"> + <remoteRepository id="apache.snapshosts.https" url="${asfsnapshotrepo}"/> + <pom refid="hadoop.test"/> + </artifact:deploy> + <artifact:deploy file="${hadoop-examples.jar}"> + <remoteRepository id="apache.snapshots.https" url="${asfsnapshotrepo}"/> + <pom refid="hadoop.examples"/> + </artifact:deploy> + <artifact:deploy file="${hadoop-tools.jar}"> + <remoteRepository id="apache.snapshots.https" 
url="${asfsnapshotrepo}"/> + <pom refid="hadoop.tools"/> + </artifact:deploy> + <artifact:deploy file="${hadoop-streaming.jar}"> + <remoteRepository id="apache.snapshosts.https" url="${asfsnapshotrepo}"/> + <pom refid="hadoop.streaming"/> + </artifact:deploy> + </target> + + <target name="set-version"> + <delete file="${hadoop-core.pom}"/> + <delete file="${hadoop-test.pom}"/> + <delete file="${hadoop-examples.pom}"/> + <delete file="${hadoop-tools.pom}"/> + <delete file="${hadoop-streaming.pom}"/> + <copy file="${hadoop-core-pom-template.xml}" tofile="${hadoop-core.pom}"/> + <copy file="${hadoop-test-pom-template.xml}" tofile="${hadoop-test.pom}"/> + <copy file="${hadoop-examples-pom-template.xml}" tofile="${hadoop-examples.pom}"/> + <copy file="${hadoop-tools-pom-template.xml}" tofile="${hadoop-tools.pom}"/> + <copy file="${hadoop-streaming-pom-template.xml}" tofile="${hadoop-streaming.pom}"/> + <replaceregexp byline="true"> + <regexp pattern="@version"/> + <substitution expression="${version}"/> + <fileset dir="${basedir}/ivy"> + <include name="hadoop-core-pom.xml"/> + <include name="hadoop-test-pom.xml"/> + <include name="hadoop-tools-pom.xml"/> + <include name="hadoop-examples-pom.xml"/> + <include name="hadoop-streaming-pom.xml"/> + </fileset> + </replaceregexp> </target> <!-- taskcontroller targets --> <target name="init-task-controller-build"> <mkdir dir="${build.c++.task-controller}" /> <copy todir="${build.c++.task-controller}"> <fileset dir="${c++.task-controller.src}" includes="*.c"> </fileset> <fileset dir="${c++.task-controller.src}" includes="*.h"> </fileset> </copy> <chmod file="${c++.task-controller.src}/configure" perm="ugo+x"/> <condition property="task-controller.conf.dir.passed"> <not> <equals arg1="${hadoop.conf.dir}" arg2="$${hadoop.conf.dir}"/> </not> </condition> </target> <target name="configure-task-controller" depends="init, init-task-controller-build, task-controller-configuration-with-confdir, 
task-controller-configuration-with-no-confdir"> </target> <target name="task-controller-configuration-with-confdir" if="task-controller.conf.dir.passed" > <exec executable="${c++.task-controller.src}/configure" dir="${build.c++.task-controller}" failonerror="yes"> <arg value="--prefix=${task-controller.install.dir}" /> <arg value="--with-confdir=${hadoop.conf.dir}" /> </exec> </target> <target name="task-controller-configuration-with-no-confdir" unless="task-controller.conf.dir.passed"> <exec executable="${c++.task-controller.src}/configure" dir="${build.c++.task-controller}" failonerror="yes"> <arg value="--prefix=${task-controller.install.dir}" /> </exec> </target> <!-- * Create the installation directory. * Do a make install. --> <target name="task-controller" depends="configure-task-controller"> <mkdir dir="${task-controller.install.dir}" /> <exec executable="${make.cmd}" dir="${build.c++.task-controller}" searchpath="yes" failonerror="yes"> <arg value="install" /> </exec> </target> <!-- end of task-controller target --> </project> diff --git a/ivy/hadoop-core-pom-template.xml b/ivy/hadoop-core-pom-template.xml new file mode 100644 index 0000000..79b6601 --- /dev/null +++ b/ivy/hadoop-core-pom-template.xml @@ -0,0 +1,127 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +--> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> + <modelVersion>4.0.0</modelVersion> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-core</artifactId> + <packaging>jar</packaging> + <version>@version</version> + <dependencies> + <dependency> + <groupId>commons-cli</groupId> + <artifactId>commons-cli</artifactId> + <version>1.2</version> + </dependency> + <dependency> + <groupId>xmlenc</groupId> + <artifactId>xmlenc</artifactId> + <version>0.52</version> + </dependency> + <dependency> + <groupId>commons-httpclient</groupId> + <artifactId>commons-httpclient</artifactId> + <version>3.0.1</version> + </dependency> + <dependency> + <groupId>commons-codec</groupId> + <artifactId>commons-codec</artifactId> + <version>1.3</version> + </dependency> + <dependency> + <groupId>commons-net</groupId> + <artifactId>commons-net</artifactId> + <version>1.4.1</version> + </dependency> + <dependency> + <groupId>org.mortbay.jetty</groupId> + <artifactId>jetty</artifactId> + <version>6.1.14</version> + </dependency> + <dependency> + <groupId>org.mortbay.jetty</groupId> + <artifactId>jetty-util</artifactId> + <version>6.1.14</version> + </dependency> + <dependency> + <groupId>tomcat</groupId> + <artifactId>jasper-runtime</artifactId> + <version>5.5.12</version> + </dependency> + <dependency> + <groupId>tomcat</groupId> + <artifactId>jasper-compiler</artifactId> + <version>5.5.12</version> + </dependency> + <dependency> + <groupId>org.mortbay.jetty</groupId> + <artifactId>jsp-api-2.1</artifactId> + <version>6.1.14</version> + </dependency> + <dependency> + <groupId>org.mortbay.jetty</groupId> + <artifactId>jsp-2.1</artifactId> + <version>6.1.14</version> + </dependency> + <dependency> + <groupId>commons-el</groupId> 
+ <artifactId>commons-el</artifactId> + <version>1.0</version> + </dependency> + <dependency> + <groupId>net.java.dev.jets3t</groupId> + <artifactId>jets3t</artifactId> + <version>0.7.1</version> + </dependency> + <dependency> + <groupId>commons-net</groupId> + <artifactId>commons-net</artifactId> + <version>1.4.1</version> + </dependency> + <dependency> + <groupId>org.mortbay.jetty</groupId> + <artifactId>servlet-api-2.5</artifactId> + <version>6.1.14</version> + </dependency> + <dependency> + <groupId>net.sf.kosmosfs</groupId> + <artifactId>kfs</artifactId> + <version>0.3</version> + </dependency> + <dependency> + <groupId>junit</groupId> + <artifactId>junit</artifactId> + <version>4.5</version> + </dependency> + <dependency> + <groupId>hsqldb</groupId> + <artifactId>hsqldb</artifactId> + <version>1.8.0.10</version> + </dependency> + <dependency> + <groupId>oro</groupId> + <artifactId>oro</artifactId> + <version>2.0.8</version> + </dependency> + <dependency> + <groupId>org.eclipse.jdt</groupId> + <artifactId>core</artifactId> + <version>3.1.1</version> + </dependency> + </dependencies> +</project> diff --git a/ivy/hadoop-examples-pom-template.xml b/ivy/hadoop-examples-pom-template.xml new file mode 100644 index 0000000..5646f4f --- /dev/null +++ b/ivy/hadoop-examples-pom-template.xml @@ -0,0 +1,28 @@ +<?xml version="1.0" encoding="UTF-8"?> + <!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> + +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> + + <modelVersion>4.0.0</modelVersion> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-examples</artifactId> + <packaging>jar</packaging> + <version>@version</version> + <dependencies/> +</project> diff --git a/ivy/hadoop-streaming-pom-template.xml b/ivy/hadoop-streaming-pom-template.xml new file mode 100644 index 0000000..b1691a1 --- /dev/null +++ b/ivy/hadoop-streaming-pom-template.xml @@ -0,0 +1,34 @@ +<?xml version="1.0" encoding="UTF-8"?> + <!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> + +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> + + <modelVersion>4.0.0</modelVersion> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-streaming</artifactId> + <packaging>jar</packaging> + <version>@version</version> + <dependencies> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-core</artifactId> + <version>@version</version> + </dependency> + </dependencies> +</project> diff --git a/ivy/hadoop-test-pom-template.xml b/ivy/hadoop-test-pom-template.xml new file mode 100644 index 0000000..3795e54 --- /dev/null +++ b/ivy/hadoop-test-pom-template.xml @@ -0,0 +1,53 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> + +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> + <modelVersion>4.0.0</modelVersion> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-test</artifactId> + <packaging>jar</packaging> + <version>@version</version> + <dependencies> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-core</artifactId> + <version>@version</version> + </dependency> + <dependency> + <groupId>org.apache.ftpserver</groupId> + <artifactId>ftplet-api</artifactId> + <version>1.0.0</version> + </dependency> + <dependency> + <groupId>org.apache.mina</groupId> + <artifactId>mina-core</artifactId> + <version>2.0.0-M5</version> + </dependency> + <dependency> + <groupId>org.apache.ftpserver</groupId> + <artifactId>ftpserver-core</artifactId> + <version>1.0.0</version> + </dependency> + <dependency> + <groupId>org.apache.ftpserver</groupId> + <artifactId>ftpserver-deprecated</artifactId> + <version>1.0.0-M2</version> + </dependency> + </dependencies> +</project> diff --git a/ivy/hadoop-tools-pom-template.xml b/ivy/hadoop-tools-pom-template.xml new file mode 100644 index 0000000..44c0693 --- /dev/null +++ b/ivy/hadoop-tools-pom-template.xml @@ -0,0 +1,28 @@ +<?xml version="1.0" encoding="UTF-8"?> + <!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> + +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> + + <modelVersion>4.0.0</modelVersion> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-tools</artifactId> + <packaging>jar</packaging> + <version>@version</version> + <dependencies/> +</project> diff --git a/ivy/libraries.properties b/ivy/libraries.properties index 358f48b..e6d2d24 100644 --- a/ivy/libraries.properties +++ b/ivy/libraries.properties @@ -1,71 +1,73 @@ # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #This properties file lists the versions of the various artifacts used by hadoop and components. 
#It drives ivy and the generation of a maven POM # This is the version of hadoop we are generating hadoop.version=0.20.0 #These are the versions of our dependencies (in alphabetical order) apacheant.version=1.7.0 +ant-task.version=2.0.10 checkstyle.version=4.2 commons-cli.version=1.2 commons-codec.version=1.3 commons-collections.version=3.1 commons-httpclient.version=3.0.1 commons-lang.version=2.4 commons-logging.version=1.0.4 commons-logging-api.version=1.0.4 commons-el.version=1.0 commons-fileupload.version=1.2 commons-io.version=1.4 commons-net.version=1.4.1 core.version=3.1.1 coreplugin.version=1.3.2 hsqldb.version=1.8.0.10 #ivy.version=2.0.0-beta2 ivy.version=2.0.0-rc2 jasper.version=5.5.12 #not able to figureout the version of jsp & jsp-api version to get it resolved throught ivy # but still declared here as we are going to have a local copy from the lib folder jsp.version=2.1 jsp-api.version=5.5.12 jets3t.version=0.6.1 jetty.version=6.1.14 jetty-util.version=6.1.14 junit.version=4.5 jdiff.version=1.0.9 json.version=1.0 kfs.version=0.1 log4j.version=1.2.15 lucene-core.version=2.3.1 oro.version=2.0.8 rats-lib.version=0.5.1 servlet.version=4.0.6 servlet-api-2.5.version=6.1.14 servlet-api.version=2.5 slf4j-api.version=1.4.3 slf4j-log4j12.version=1.4.3 +wagon-http.version=1.0-beta-2 xmlenc.version=0.52 xerces.version=1.4.4 diff --git a/src/contrib/build-contrib.xml b/src/contrib/build-contrib.xml index 086cf68..485dfb6 100644 --- a/src/contrib/build-contrib.xml +++ b/src/contrib/build-contrib.xml @@ -1,302 +1,302 @@ <?xml version="1.0"?> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <!-- Imported by contrib/*/build.xml files to share generic targets. --> <project name="hadoopbuildcontrib" xmlns:ivy="antlib:org.apache.ivy.ant"> <property name="name" value="${ant.project.name}"/> <property name="root" value="${basedir}"/> <!-- Load all the default properties, and any the user wants --> <!-- to contribute (without having to type -D or edit this file --> <property file="${user.home}/${name}.build.properties" /> <property file="${root}/build.properties" /> <property name="hadoop.root" location="${root}/../../../"/> <property name="src.dir" location="${root}/src/java"/> <property name="src.test" location="${root}/src/test"/> <property name="src.examples" location="${root}/src/examples"/> <available file="${src.examples}" type="dir" property="examples.available"/> <available file="${src.test}" type="dir" property="test.available"/> <property name="conf.dir" location="${hadoop.root}/conf"/> <property name="test.junit.output.format" value="plain"/> <property name="test.output" value="no"/> <property name="test.timeout" value="900000"/> <property name="build.dir" location="${hadoop.root}/build/contrib/${name}"/> <property name="build.classes" location="${build.dir}/classes"/> <property name="build.test" location="${build.dir}/test"/> <property name="build.examples" location="${build.dir}/examples"/> <property name="hadoop.log.dir" location="${build.dir}/test/logs"/> <!-- all jars together --> <property name="javac.deprecation" value="off"/> <property name="javac.debug" value="on"/> <property name="build.ivy.lib.dir" value="${hadoop.root}/build/ivy/lib"/> <property 
name="javadoc.link" value="http://java.sun.com/j2se/1.4/docs/api/"/> <property name="build.encoding" value="ISO-8859-1"/> <fileset id="lib.jars" dir="${root}" includes="lib/*.jar"/> <!-- IVY properties set here --> <property name="ivy.dir" location="ivy" /> <property name="ivysettings.xml" location="${hadoop.root}/ivy/ivysettings.xml"/> <loadproperties srcfile="${ivy.dir}/libraries.properties"/> <loadproperties srcfile="${hadoop.root}/ivy/libraries.properties"/> <property name="ivy.jar" location="${hadoop.root}/ivy/ivy-${ivy.version}.jar"/> <property name="ivy_repo_url" value="http://repo2.maven.org/maven2/org/apache/ivy/ivy/${ivy.version}/ivy-${ivy.version}.jar" /> <property name="build.dir" location="build" /> <property name="build.ivy.dir" location="${build.dir}/ivy" /> <property name="build.ivy.lib.dir" location="${build.ivy.dir}/lib" /> <property name="build.ivy.report.dir" location="${build.ivy.dir}/report" /> <property name="common.ivy.lib.dir" location="${build.ivy.lib.dir}/${ant.project.name}/common"/> <!--this is the naming policy for artifacts we want pulled down--> <property name="ivy.artifact.retrieve.pattern" value="${ant.project.name}/[conf]/[artifact]-[revision].[ext]"/> <!-- the normal classpath --> <path id="contrib-classpath"> <pathelement location="${build.classes}"/> <pathelement location="${hadoop.root}/build/tools"/> <fileset refid="lib.jars"/> <pathelement location="${hadoop.root}/build/classes"/> <fileset dir="${hadoop.root}/lib"> <include name="**/*.jar" /> </fileset> <path refid="${ant.project.name}.common-classpath"/> </path> <!-- the unit test classpath --> <path id="test.classpath"> <pathelement location="${build.test}" /> <pathelement location="${hadoop.root}/build/test/classes"/> <pathelement location="${hadoop.root}/src/contrib/test"/> <pathelement location="${conf.dir}"/> <pathelement location="${hadoop.root}/build"/> <pathelement location="${build.examples}"/> <pathelement location="${hadoop.root}/build/examples"/> <path 
refid="contrib-classpath"/> </path> <!-- to be overridden by sub-projects --> <target name="check-contrib"/> <target name="init-contrib"/> <!-- ====================================================== --> <!-- Stuff needed by all targets --> <!-- ====================================================== --> <target name="init" depends="check-contrib" unless="skip.contrib"> <echo message="contrib: ${name}"/> <mkdir dir="${build.dir}"/> <mkdir dir="${build.classes}"/> <mkdir dir="${build.test}"/> <mkdir dir="${build.examples}"/> <mkdir dir="${hadoop.log.dir}"/> <antcall target="init-contrib"/> </target> <!-- ====================================================== --> <!-- Compile a Hadoop contrib's files --> <!-- ====================================================== --> <target name="compile" depends="init, ivy-retrieve-common" unless="skip.contrib"> <echo message="contrib: ${name}"/> <javac encoding="${build.encoding}" srcdir="${src.dir}" includes="**/*.java" destdir="${build.classes}" debug="${javac.debug}" deprecation="${javac.deprecation}"> <classpath refid="contrib-classpath"/> </javac> </target> <!-- ======================================================= --> <!-- Compile a Hadoop contrib's example files (if available) --> <!-- ======================================================= --> <target name="compile-examples" depends="compile" if="examples.available"> <echo message="contrib: ${name}"/> <javac encoding="${build.encoding}" srcdir="${src.examples}" includes="**/*.java" destdir="${build.examples}" debug="${javac.debug}"> <classpath refid="contrib-classpath"/> </javac> </target> <!-- ================================================================== --> <!-- Compile test code --> <!-- ================================================================== --> <target name="compile-test" depends="compile-examples" if="test.available"> <echo message="contrib: ${name}"/> <javac encoding="${build.encoding}" srcdir="${src.test}" includes="**/*.java" 
destdir="${build.test}" debug="${javac.debug}"> <classpath refid="test.classpath"/> </javac> </target> <!-- ====================================================== --> <!-- Make a Hadoop contrib's jar --> <!-- ====================================================== --> <target name="jar" depends="compile" unless="skip.contrib"> <echo message="contrib: ${name}"/> <jar - jarfile="${build.dir}/hadoop-${version}-${name}.jar" + jarfile="${build.dir}/hadoop-${name}-${version}.jar" basedir="${build.classes}" /> </target> <!-- ====================================================== --> <!-- Make a Hadoop contrib's examples jar --> <!-- ====================================================== --> <target name="jar-examples" depends="compile-examples" if="examples.available" unless="skip.contrib"> <echo message="contrib: ${name}"/> - <jar jarfile="${build.dir}/hadoop-${version}-${name}-examples.jar"> + <jar jarfile="${build.dir}/hadoop-${name}-examples-${version}.jar"> <fileset dir="${build.classes}"> </fileset> <fileset dir="${build.examples}"> </fileset> </jar> </target> <!-- ====================================================== --> <!-- Package a Hadoop contrib --> <!-- ====================================================== --> <target name="package" depends="jar, jar-examples" unless="skip.contrib"> <mkdir dir="${dist.dir}/contrib/${name}"/> <copy todir="${dist.dir}/contrib/${name}" includeEmptyDirs="false" flatten="true"> <fileset dir="${build.dir}"> - <include name="hadoop-${version}-${name}.jar" /> + <include name="hadoop-${name}-${version}.jar" /> </fileset> </copy> </target> <!-- ================================================================== --> <!-- Run unit tests --> <!-- ================================================================== --> <target name="test" depends="compile-test, compile" if="test.available"> <echo message="contrib: ${name}"/> <delete dir="${hadoop.log.dir}"/> <mkdir dir="${hadoop.log.dir}"/> <junit printsummary="yes" 
showoutput="${test.output}" haltonfailure="no" fork="yes" maxmemory="256m" errorProperty="tests.failed" failureProperty="tests.failed" timeout="${test.timeout}"> <sysproperty key="test.build.data" value="${build.test}/data"/> <sysproperty key="build.test" value="${build.test}"/> <sysproperty key="contrib.name" value="${name}"/> <!-- requires fork=yes for: relative File paths to use the specified user.dir classpath to use build/contrib/*.jar --> <sysproperty key="user.dir" value="${build.test}/data"/> <sysproperty key="fs.default.name" value="${fs.default.name}"/> <sysproperty key="hadoop.test.localoutputfile" value="${hadoop.test.localoutputfile}"/> <sysproperty key="hadoop.log.dir" value="${hadoop.log.dir}"/> <sysproperty key="taskcontroller-path" value="${taskcontroller-path}"/> <sysproperty key="taskcontroller-user" value="${taskcontroller-user}"/> <classpath refid="test.classpath"/> <formatter type="${test.junit.output.format}" /> <batchtest todir="${build.test}" unless="testcase"> <fileset dir="${src.test}" includes="**/Test*.java" excludes="**/${test.exclude}.java" /> </batchtest> <batchtest todir="${build.test}" if="testcase"> <fileset dir="${src.test}" includes="**/${testcase}.java"/> </batchtest> </junit> <fail if="tests.failed">Tests failed!</fail> </target> <!-- ================================================================== --> <!-- Clean. 
Delete the build files, and their directories --> <!-- ================================================================== --> <target name="clean"> <echo message="contrib: ${name}"/> <delete dir="${build.dir}"/> </target> <target name="ivy-probe-antlib" > <condition property="ivy.found"> <typefound uri="antlib:org.apache.ivy.ant" name="cleancache"/> </condition> </target> <target name="ivy-download" description="To download ivy " unless="offline"> <get src="${ivy_repo_url}" dest="${ivy.jar}" usetimestamp="true"/> </target> <target name="ivy-init-antlib" depends="ivy-download,ivy-probe-antlib" unless="ivy.found"> <typedef uri="antlib:org.apache.ivy.ant" onerror="fail" loaderRef="ivyLoader"> <classpath> <pathelement location="${ivy.jar}"/> </classpath> </typedef> <fail > <condition > <not> <typefound uri="antlib:org.apache.ivy.ant" name="cleancache"/> </not> </condition> You need Apache Ivy 2.0 or later from http://ant.apache.org/ It could not be loaded from ${ivy_repo_url} </fail> </target> <target name="ivy-init" depends="ivy-init-antlib"> <ivy:configure settingsid="${ant.project.name}.ivy.settings" file="${ivysettings.xml}"/> </target> <target name="ivy-resolve-common" depends="ivy-init"> <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="common" /> </target> <target name="ivy-retrieve-common" depends="ivy-resolve-common" description="Retrieve Ivy-managed artifacts for the compile/test configurations"> <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings" pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}" sync="true" /> <ivy:cachepath pathid="${ant.project.name}.common-classpath" conf="common" /> </target> </project> diff --git a/src/contrib/data_join/build.xml b/src/contrib/data_join/build.xml index db9ce2c..00d758a 100644 --- a/src/contrib/data_join/build.xml +++ b/src/contrib/data_join/build.xml @@ -1,45 +1,44 @@ <?xml version="1.0"?> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license 
agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <!-- Before you can run these subtargets directly, you need to call at top-level: ant deploy-contrib compile-core-test --> <project name="datajoin" default="jar"> <import file="../build-contrib.xml"/> <!-- Override jar target to specify main class --> <target name="jar" depends="compile"> <jar - jarfile="${build.dir}/hadoop-${version}-${name}.jar" - basedir="${build.classes}" - > + jarfile="${build.dir}/hadoop-${name}-${version}.jar" + basedir="${build.classes}"> <manifest> <attribute name="Main-Class" value="org.apache.hadoop.contrib.utils.join.DataJoinJob"/> </manifest> </jar> </target> <target name="jar-examples" depends="jar"> <antcall target="hadoopbuildcontrib.jar-examples"> </antcall> </target> </project> diff --git a/src/contrib/eclipse-plugin/build.xml b/src/contrib/eclipse-plugin/build.xml index 783cb23..f04a74a 100644 --- a/src/contrib/eclipse-plugin/build.xml +++ b/src/contrib/eclipse-plugin/build.xml @@ -1,79 +1,79 @@ <?xml version="1.0" encoding="UTF-8" standalone="no"?> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. 
The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <project default="jar" name="eclipse-plugin"> <import file="../build-contrib.xml"/> <path id="eclipse-sdk-jars"> <fileset dir="${eclipse.home}/plugins/"> <include name="org.eclipse.ui*.jar"/> <include name="org.eclipse.jdt*.jar"/> <include name="org.eclipse.core*.jar"/> <include name="org.eclipse.equinox*.jar"/> <include name="org.eclipse.debug*.jar"/> <include name="org.eclipse.osgi*.jar"/> <include name="org.eclipse.swt*.jar"/> <include name="org.eclipse.jface*.jar"/> <include name="org.eclipse.team.cvs.ssh2*.jar"/> <include name="com.jcraft.jsch*.jar"/> </fileset> </path> <!-- Override classpath to include Eclipse SDK jars --> <path id="classpath"> <pathelement location="${build.classes}"/> <pathelement location="${hadoop.root}/build/classes"/> <path refid="eclipse-sdk-jars"/> </path> <!-- Skip building if eclipse.home is unset. 
--> <target name="check-contrib" unless="eclipse.home"> <property name="skip.contrib" value="yes"/> <echo message="eclipse.home unset: skipping eclipse plugin"/> </target> <target name="compile" depends="init, ivy-retrieve-common" unless="skip.contrib"> <echo message="contrib: ${name}"/> <javac encoding="${build.encoding}" srcdir="${src.dir}" includes="**/*.java" destdir="${build.classes}" debug="${javac.debug}" deprecation="${javac.deprecation}"> <classpath refid="classpath"/> </javac> </target> <!-- Override jar target to specify manifest --> <target name="jar" depends="compile" unless="skip.contrib"> <mkdir dir="${build.dir}/lib"/> - <copy file="${hadoop.root}/build/hadoop-${version}-core.jar" tofile="${build.dir}/lib/hadoop-core.jar" verbose="true"/> + <copy file="${hadoop.root}/build/hadoop-core-${version}.jar" tofile="${build.dir}/lib/hadoop-core.jar" verbose="true"/> <copy file="${hadoop.root}/build/ivy/lib/Hadoop/common/commons-cli-${commons-cli.version}.jar" todir="${build.dir}/lib" verbose="true"/> <jar - jarfile="${build.dir}/hadoop-${version}-${name}.jar" + jarfile="${build.dir}/hadoop-${name}-${version}.jar" manifest="${root}/META-INF/MANIFEST.MF"> <fileset dir="${build.dir}" includes="classes/ lib/"/> <fileset dir="${root}" includes="resources/ plugin.xml"/> </jar> </target> </project> diff --git a/src/contrib/failmon/build.xml b/src/contrib/failmon/build.xml index b2248f1..c77ff46 100644 --- a/src/contrib/failmon/build.xml +++ b/src/contrib/failmon/build.xml @@ -1,120 +1,120 @@ <?xml version="1.0"?> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <project name="failmon" default="compile"> <import file="../build-contrib.xml"/> - <property name="jarfile" value="${build.dir}/${name}.jar"/> + <property name="jarfile" value="${build.dir}/hadoop-${name}-${version}.jar"/> <target name="jar" depends="compile" unless="skip.contrib"> <!-- Make sure that the hadoop jar has been created --> <!-- This works, but causes findbugs to fail <subant antfile="build.xml" target="jar"> <fileset dir="../../.." includes="build.xml"/> </subant> --> <!-- Copy the required files so that the jar can run independently of Hadoop source code --> <!-- create the list of files to add to the classpath --> <fileset dir="${hadoop.root}/lib" id="class.path"> <include name="**/*.jar" /> <exclude name="**/excluded/" /> </fileset> <pathconvert pathsep=" " property="failmon-class-path" refid="class.path"> <map from="${basedir}/" to=""/> </pathconvert> <echo message="contrib: ${name}"/> <jar jarfile="${jarfile}" basedir="${build.classes}"> <manifest> <attribute name="Main-Class" value="org.apache.hadoop.contrib.failmon.RunOnce"/> <attribute name="Class-Path" value="${failmon-class-path}"/> </manifest> </jar> </target> <!-- Override test target to copy sample data --> <target name="test" depends="compile-test, compile, compile-examples" if="test.available"> <echo message="contrib: ${name}"/> <delete dir="${hadoop.log.dir}"/> <mkdir dir="${hadoop.log.dir}"/> <delete dir="${build.test}/sample"/> <mkdir dir="${build.test}/sample"/> <copy todir="${build.test}/sample"> <fileset dir="${root}/sample"/> </copy> <junit printsummary="yes" showoutput="${test.output}" 
haltonfailure="no" fork="yes" maxmemory="256m" errorProperty="tests.failed" failureProperty="tests.failed" timeout="${test.timeout}"> <sysproperty key="test.build.data" value="${build.test}/data"/> <sysproperty key="build.test" value="${build.test}"/> <sysproperty key="contrib.name" value="${name}"/> <!-- requires fork=yes for: relative File paths to use the specified user.dir classpath to use build/contrib/*.jar --> <sysproperty key="user.dir" value="${build.test}/data"/> <sysproperty key="fs.default.name" value="${fs.default.name}"/> <sysproperty key="hadoop.test.localoutputfile" value="${hadoop.test.localoutputfile}"/> <sysproperty key="hadoop.log.dir" value="${hadoop.log.dir}"/> <classpath refid="test.classpath"/> <formatter type="${test.junit.output.format}" /> <batchtest todir="${build.test}" unless="testcase"> <fileset dir="${src.test}" includes="**/Test*.java" excludes="**/${test.exclude}.java" /> </batchtest> <batchtest todir="${build.test}" if="testcase"> <fileset dir="${src.test}" includes="**/${testcase}.java"/> </batchtest> </junit> <fail if="tests.failed">Tests failed!</fail> </target> <target name="tar" depends="jar"> <copy todir="."> <fileset dir="${hadoop.root}/build/contrib/failmon/" includes="failmon.jar"/> </copy> <tar tarfile="${name}.tar" basedir=".." 
includes="${name}/**" excludes="${name}/${name}.tar.gz, ${name}/src/**, ${name}/logs/**, ${name}/build.xml*"/> <gzip zipfile="${name}.tar.gz" src="${name}.tar"/> <delete file="${name}.tar"/> <delete file="${name}.jar"/> <move file="${name}.tar.gz" todir="${build.dir}"/> - <echo message= "${hadoop.root}/build/contrib/failmon/${name}.jar"/> + <echo message= "${hadoop.root}/build/contrib/failmon/hadoop-${name}-${version}.jar"/> </target> </project> diff --git a/src/contrib/index/build.xml b/src/contrib/index/build.xml index 3b3cbb7..d0bfdcf 100755 --- a/src/contrib/index/build.xml +++ b/src/contrib/index/build.xml @@ -1,80 +1,80 @@ <?xml version="1.0"?> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
--> <project name="index" default="jar"> <import file="../build-contrib.xml"/> <!-- Override jar target to specify main class --> <target name="jar" depends="compile" unless="skip.contrib"> <echo message="contrib: ${name}"/> <jar - jarfile="${build.dir}/hadoop-${version}-${name}.jar" + jarfile="${build.dir}/hadoop-${name}-${version}.jar" basedir="${build.classes}" > <manifest> <attribute name="Main-Class" value="org.apache.hadoop.contrib.index.main.UpdateIndex"/> </manifest> </jar> </target> <!-- Override test target to copy sample data --> <target name="test" depends="compile-test, compile, compile-examples" if="test.available"> <echo message="contrib: ${name}"/> <delete dir="${hadoop.log.dir}"/> <mkdir dir="${hadoop.log.dir}"/> <delete dir="${build.test}/sample"/> <mkdir dir="${build.test}/sample"/> <copy todir="${build.test}/sample"> <fileset dir="${root}/sample"/> </copy> <junit printsummary="yes" showoutput="${test.output}" haltonfailure="no" fork="yes" maxmemory="256m" errorProperty="tests.failed" failureProperty="tests.failed" timeout="${test.timeout}"> <sysproperty key="test.build.data" value="${build.test}/data"/> <sysproperty key="build.test" value="${build.test}"/> <sysproperty key="contrib.name" value="${name}"/> <!-- requires fork=yes for: relative File paths to use the specified user.dir classpath to use build/contrib/*.jar --> <sysproperty key="user.dir" value="${build.test}/data"/> <sysproperty key="fs.default.name" value="${fs.default.name}"/> <sysproperty key="hadoop.test.localoutputfile" value="${hadoop.test.localoutputfile}"/> <sysproperty key="hadoop.log.dir" value="${hadoop.log.dir}"/> <classpath refid="test.classpath"/> <formatter type="${test.junit.output.format}" /> <batchtest todir="${build.test}" unless="testcase"> <fileset dir="${src.test}" includes="**/Test*.java" excludes="**/${test.exclude}.java" /> </batchtest> <batchtest todir="${build.test}" if="testcase"> <fileset dir="${src.test}" includes="**/${testcase}.java"/> </batchtest> 
</junit> <fail if="tests.failed">Tests failed!</fail> </target> </project> diff --git a/src/contrib/streaming/build.xml b/src/contrib/streaming/build.xml index 0e351da..a4c5cd9 100644 --- a/src/contrib/streaming/build.xml +++ b/src/contrib/streaming/build.xml @@ -1,58 +1,56 @@ <?xml version="1.0"?> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <!-- Before you can run these subtargets directly, you need to call at top-level: ant deploy-contrib compile-core-test --> <project name="streaming" default="jar"> <import file="../build-contrib.xml"/> <!-- Override jar target to specify main class --> <target name="jar" depends="compile"> - <jar - jarfile="${build.dir}/hadoop-${version}-${name}.jar" - basedir="${build.classes}" - > + <jar jarfile="${build.dir}/hadoop-${name}-${version}.jar" + basedir="${build.classes}"> <manifest> <attribute name="Main-Class" value="org.apache.hadoop.streaming.HadoopStreaming"/> </manifest> </jar> </target> <!-- Run only pure-Java unit tests. 
superdottest --> <target name="test"> <antcall target="hadoopbuildcontrib.test"> <param name="test.exclude" value="TestStreamedMerge"/> </antcall> </target> <!-- Run all unit tests This is not called as part of the nightly build because it will only run on platforms that have standard Unix utilities available. --> <target name="test-unix"> <antcall target="hadoopbuildcontrib.test"> </antcall> </target> </project> diff --git a/src/contrib/thriftfs/build.xml b/src/contrib/thriftfs/build.xml index 89e4ebd..b8ac0d9 100644 --- a/src/contrib/thriftfs/build.xml +++ b/src/contrib/thriftfs/build.xml @@ -1,64 +1,64 @@ <?xml version="1.0"?> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
--> <!-- Before you can run these subtargets directly, you need to call at top-level: ant deploy-contrib compile-core-test --> <project name="thriftfs" default="jar"> <import file="../build-contrib.xml"/> <!-- create the list of files to add to the classpath --> <fileset dir="${hadoop.root}/lib" id="class.path"> <include name="**/*.jar" /> <exclude name="**/excluded/" /> </fileset> <!-- Override jar target to specify main class --> <target name="jar" depends="compile"> <jar - jarfile="${build.dir}/hadoop-${version}-${name}.jar" + jarfile="${build.dir}/hadoop-${name}-${version}.jar" basedir="${build.classes}" > <manifest> <attribute name="Main-Class" value="org.apache.hadoop.thriftfs.HadooopThriftServer"/> </manifest> </jar> </target> <!-- Run only pure-Java unit tests. superdottest --> <target name="test"> <antcall target="hadoopbuildcontrib.test"> <param name="test.exclude" value="TestStreamedMerge"/> </antcall> </target> <!-- Run all unit tests This is not called as part of the nightly build because it will only run on platforms that have standard Unix utilities available. --> <target name="test-unix"> <antcall target="hadoopbuildcontrib.test"> </antcall> </target> </project> diff --git a/src/contrib/vaidya/build.xml b/src/contrib/vaidya/build.xml index f150e05..eda5991 100644 --- a/src/contrib/vaidya/build.xml +++ b/src/contrib/vaidya/build.xml @@ -1,69 +1,69 @@ <?xml version="1.0" ?> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <project name="vaidya" default="jar"> <import file="../build-contrib.xml" /> <import file="../../../build.xml" /> <target name="init"> <mkdir dir="${build.dir}" /> <mkdir dir="${build.classes}" /> <mkdir dir="${build.dir}/bin" /> <mkdir dir="${build.dir}/conf" /> <copy todir="${build.dir}/bin"> <!-- copy hadoop vaidya command script file to hadoop-vaidya/bin --> <fileset dir="${basedir}/src/java/org/apache/hadoop/vaidya"> <include name="vaidya.sh" /> </fileset> </copy> <copy todir="${build.dir}/conf"> <!-- copy hadoop vaidya tests config file to chuckwa/conf --> <fileset dir="${basedir}/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests"> <include name="postex_diagnosis_tests.xml" /> </fileset> </copy> </target> <!-- ====================================================== --> <!-- Override jar target to include the tests conf xml file --> <!-- ====================================================== --> <target name="jar" depends="compile" unless="skip.contrib"> <echo message="contrib: ${name}" /> - <jar jarfile="${build.dir}/hadoop-${version}-${name}.jar"> + <jar jarfile="${build.dir}/hadoop-${name}-${version}.jar"> <fileset dir="${build.classes}" /> <fileset dir="${basedir}/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests"> <include name="postex_diagnosis_tests.xml" /> </fileset> </jar> </target> <target name="package" depends="jar"> <mkdir dir="${dist.dir}/contrib/${name}" /> <copy todir="${dist.dir}/contrib/${name}" includeEmptyDirs="false"> <fileset dir="${build.dir}"> <exclude name="**/classes/" /> </fileset> </copy> <chmod dir="${dist.dir}/contrib/${name}/bin" 
perm="a+x" includes="*" /> </target> </project>
jaxlaw/hadoop-common
b9f3243d2ee69a64886f8bbd23a987c267c201a4
MAPREDUCE:1376 from https://issues.apache.org/jira/secure/attachment/12431174/M1376-4.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 0c6107a..0f2c362 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,503 +1,506 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383006 MAPREDUCE-1372. Fixed a ConcurrentModificationException in jobtracker. (Arun C Murthy via yhemanth) MAPREDUCE-1316. Fix jobs' retirement from the JobTracker to prevent memory leaks via stale references. (Amar Kamat via acmurthy) MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. (Amareshwari Sriramadasu via acmurthy) HADOOP-6460. Reinitializes buffers used for serializing responses in ipc server on exceeding maximum response size to free up Java heap. (suresh) MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. 
(Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. 
Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey + MAPREDUCE-1376. Add support for submitting jobs as configured users, + pluggable mapping of trace users to target users in Gridmix. (cdouglas) + yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. 
(Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. 
Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. 
Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. 
yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. 
http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. 
http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. 
Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. 
(Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/EchoUserResolver.java b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/EchoUserResolver.java new file mode 100644 index 0000000..e7a903f --- /dev/null +++ b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/EchoUserResolver.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapred.gridmix; + +import java.io.IOException; +import java.net.URI; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * Echos the UGI offered. + */ +public class EchoUserResolver extends UserResolver { + + public EchoUserResolver() { } + + public synchronized boolean setTargetUsers(URI userdesc, Configuration conf) + throws IOException { + return false; + } + + public synchronized UserGroupInformation getTargetUgi( + UserGroupInformation ugi) { + return ugi; + } + +} diff --git a/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateData.java b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateData.java index dacd07a..ca74d0b 100644 --- a/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateData.java +++ b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateData.java @@ -1,293 +1,310 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred.gridmix; import java.io.IOException; import java.io.DataInput; import java.io.DataOutput; import java.io.OutputStream; import java.util.Arrays; import java.util.ArrayList; import java.util.List; import java.util.Random; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapred.ClusterStatus; import org.apache.hadoop.mapred.JobClient; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.RecordWriter; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; // TODO can replace with form of GridmixJob class GenerateData extends GridmixJob { /** * Total bytes to write. */ public static final String GRIDMIX_GEN_BYTES = "gridmix.gen.bytes"; /** * Maximum size per file written. */ public static final String GRIDMIX_GEN_CHUNK = "gridmix.gen.bytes.per.file"; /** * Size of writes to output file. */ public static final String GRIDMIX_VAL_BYTES = "gendata.val.bytes"; /** * Status reporting interval, in megabytes. */ public static final String GRIDMIX_GEN_INTERVAL = "gendata.interval.mb"; + /** + * Blocksize of generated data. 
+ */ + public static final String GRIDMIX_GEN_BLOCKSIZE = "gridmix.gen.blocksize"; + + /** + * Replication of generated data. + */ + public static final String GRIDMIX_GEN_REPLICATION = "gridmix.gen.replicas"; + public GenerateData(Configuration conf, Path outdir, long genbytes) throws IOException { super(conf, 0L, "GRIDMIX_GENDATA"); job.getConfiguration().setLong(GRIDMIX_GEN_BYTES, genbytes); FileOutputFormat.setOutputPath(job, outdir); } @Override public Job call() throws IOException, InterruptedException, ClassNotFoundException { job.setMapperClass(GenDataMapper.class); job.setNumReduceTasks(0); job.setMapOutputKeyClass(NullWritable.class); job.setMapOutputValueClass(BytesWritable.class); job.setInputFormatClass(GenDataFormat.class); job.setOutputFormatClass(RawBytesOutputFormat.class); job.setJarByClass(GenerateData.class); FileInputFormat.addInputPath(job, new Path("ignored")); job.submit(); return job; } public static class GenDataMapper extends Mapper<NullWritable,LongWritable,NullWritable,BytesWritable> { private BytesWritable val; private final Random r = new Random(); @Override protected void setup(Context context) throws IOException, InterruptedException { val = new BytesWritable(new byte[ context.getConfiguration().getInt(GRIDMIX_VAL_BYTES, 1024 * 1024)]); } @Override public void map(NullWritable key, LongWritable value, Context context) throws IOException, InterruptedException { for (long bytes = value.get(); bytes > 0; bytes -= val.getLength()) { r.nextBytes(val.getBytes()); val.setSize((int)Math.min(val.getLength(), bytes)); context.write(key, val); } } } static class GenDataFormat extends InputFormat<NullWritable,LongWritable> { @Override public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException { final JobClient client = new JobClient(new JobConf(jobCtxt.getConfiguration())); ClusterStatus stat = client.getClusterStatus(true); final long toGen = jobCtxt.getConfiguration().getLong(GRIDMIX_GEN_BYTES, -1); if (toGen < 0) { throw new 
IOException("Invalid/missing generation bytes: " + toGen); } final int nTrackers = stat.getTaskTrackers(); final long bytesPerTracker = toGen / nTrackers; final ArrayList<InputSplit> splits = new ArrayList<InputSplit>(nTrackers); final Pattern trackerPattern = Pattern.compile("tracker_([^:]*):.*"); final Matcher m = trackerPattern.matcher(""); for (String tracker : stat.getActiveTrackerNames()) { m.reset(tracker); if (!m.find()) { System.err.println("Skipping node: " + tracker); continue; } final String name = m.group(1); splits.add(new GenSplit(bytesPerTracker, new String[] { name })); } return splits; } @Override public RecordReader<NullWritable,LongWritable> createRecordReader( InputSplit split, final TaskAttemptContext taskContext) throws IOException { return new RecordReader<NullWritable,LongWritable>() { long written = 0L; long write = 0L; long RINTERVAL; long toWrite; final NullWritable key = NullWritable.get(); final LongWritable val = new LongWritable(); @Override public void initialize(InputSplit split, TaskAttemptContext ctxt) throws IOException, InterruptedException { toWrite = split.getLength(); RINTERVAL = ctxt.getConfiguration().getInt( GRIDMIX_GEN_INTERVAL, 10) << 20; } @Override public boolean nextKeyValue() throws IOException { written += write; write = Math.min(toWrite - written, RINTERVAL); val.set(write); return written < toWrite; } @Override public float getProgress() throws IOException { return written / ((float)toWrite); } @Override public NullWritable getCurrentKey() { return key; } @Override public LongWritable getCurrentValue() { return val; } @Override public void close() throws IOException { taskContext.setStatus("Wrote " + toWrite); } }; } } static class GenSplit extends InputSplit implements Writable { private long bytes; private int nLoc; private String[] locations; public GenSplit() { } public GenSplit(long bytes, String[] locations) { this(bytes, locations.length, locations); } public GenSplit(long bytes, int nLoc, String[] 
locations) { this.bytes = bytes; this.nLoc = nLoc; this.locations = Arrays.copyOf(locations, nLoc); } @Override public long getLength() { return bytes; } @Override public String[] getLocations() { return locations; } @Override public void readFields(DataInput in) throws IOException { bytes = in.readLong(); nLoc = in.readInt(); if (null == locations || locations.length < nLoc) { locations = new String[nLoc]; } for (int i = 0; i < nLoc; ++i) { locations[i] = Text.readString(in); } } @Override public void write(DataOutput out) throws IOException { out.writeLong(bytes); out.writeInt(nLoc); for (int i = 0; i < nLoc; ++i) { Text.writeString(out, locations[i]); } } } static class RawBytesOutputFormat extends FileOutputFormat<NullWritable,BytesWritable> { @Override public RecordWriter<NullWritable,BytesWritable> getRecordWriter( TaskAttemptContext job) throws IOException { return new ChunkWriter(getDefaultWorkFile(job, ""), job.getConfiguration()); } static class ChunkWriter extends RecordWriter<NullWritable,BytesWritable> { private final Path outDir; private final FileSystem fs; + private final int blocksize; + private final short replicas; private final long maxFileBytes; + private final FsPermission genPerms = new FsPermission((short) 0755); private long accFileBytes = 0L; private long fileIdx = -1L; private OutputStream fileOut = null; public ChunkWriter(Path outDir, Configuration conf) throws IOException { this.outDir = outDir; fs = outDir.getFileSystem(conf); + blocksize = conf.getInt(GRIDMIX_GEN_BLOCKSIZE, 1 << 28); + replicas = (short) conf.getInt(GRIDMIX_GEN_REPLICATION, 3); maxFileBytes = conf.getLong(GRIDMIX_GEN_CHUNK, 1L << 30); nextDestination(); } private void nextDestination() throws IOException { if (fileOut != null) { fileOut.close(); } - fileOut = fs.create(new Path(outDir, "segment-" + (++fileIdx)), false); + fileOut = fs.create(new Path(outDir, "segment-" + (++fileIdx)), + genPerms, false, 64 * 1024, replicas, blocksize, null); accFileBytes = 0L; } 
@Override public void write(NullWritable key, BytesWritable value) throws IOException { int written = 0; final int total = value.getLength(); while (written < total) { + if (accFileBytes >= maxFileBytes) { + nextDestination(); + } final int write = (int) Math.min(total - written, maxFileBytes - accFileBytes); fileOut.write(value.getBytes(), written, write); written += write; accFileBytes += write; - if (accFileBytes >= maxFileBytes) { - nextDestination(); - } } } @Override public void close(TaskAttemptContext ctxt) throws IOException { fileOut.close(); } } } } diff --git a/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java index de653ae..0773e45 100644 --- a/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java +++ b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java @@ -1,351 +1,385 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred.gridmix; import java.io.IOException; import java.io.InputStream; import java.io.PrintStream; +import java.net.URI; +import java.util.Collection; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; /** * Driver class for the Gridmix3 benchmark. Gridmix accepts a timestamped * stream (trace) of job/task descriptions. For each job in the trace, the * client will submit a corresponding, synthetic job to the target cluster at * the rate in the original trace. The intent is to provide a benchmark that * can be configured and extended to closely match the measured resource * profile of actual, production loads. */ public class Gridmix extends Configured implements Tool { public static final Log LOG = LogFactory.getLog(Gridmix.class); /** * Output (scratch) directory for submitted jobs. Relative paths are * resolved against the path provided as input and absolute paths remain * independent of it. The default is &quot;gridmix&quot;. */ public static final String GRIDMIX_OUT_DIR = "gridmix.output.directory"; /** * Number of submitting threads at the client and upper bound for * in-memory split data. Submitting threads precompute InputSplits for * submitted jobs. This limits the number of splits held in memory waiting * for submission and also permits parallel computation of split data. 
*/ public static final String GRIDMIX_SUB_THR = "gridmix.client.submit.threads"; /** * The depth of the queue of job descriptions. Before splits are computed, * a queue of pending descriptions is stored in memoory. This parameter * limits the depth of that queue. */ public static final String GRIDMIX_QUE_DEP = "gridmix.client.pending.queue.depth"; /** * Multiplier to accelerate or decelerate job submission. As a crude means of * sizing a job trace to a cluster, the time separating two jobs is * multiplied by this factor. */ public static final String GRIDMIX_SUB_MUL = "gridmix.submit.multiplier"; + /** + * Class used to resolve users in the trace to the list of target users + * on the cluster. + */ + public static final String GRIDMIX_USR_RSV = "gridmix.user.resolve.class"; + // Submit data structures private JobFactory factory; private JobSubmitter submitter; private JobMonitor monitor; // Shutdown hook private final Shutdown sdh = new Shutdown(); /** * Write random bytes at the path provided. * @see org.apache.hadoop.mapred.gridmix.GenerateData */ protected void writeInputData(long genbytes, Path ioPath) throws IOException, InterruptedException { final Configuration conf = getConf(); final GridmixJob genData = new GenerateData(conf, ioPath, genbytes); submitter.add(genData); LOG.info("Generating " + StringUtils.humanReadableInt(genbytes) + " of test data..."); // TODO add listeners, use for job dependencies TimeUnit.SECONDS.sleep(10); try { genData.getJob().waitForCompletion(false); } catch (ClassNotFoundException e) { throw new IOException("Internal error", e); } if (!genData.getJob().isSuccessful()) { throw new IOException("Data generation failed!"); } LOG.info("Done."); } protected InputStream createInputStream(String in) throws IOException { if ("-".equals(in)) { return System.in; } final Path pin = new Path(in); return pin.getFileSystem(getConf()).open(pin); } /** * Create each component in the pipeline and start it. 
* @param conf Configuration data, no keys specific to this context * @param traceIn Either a Path to the trace data or &quot;-&quot; for * stdin * @param ioPath Path from which input data is read * @param scratchDir Path into which job output is written * @param startFlag Semaphore for starting job trace pipeline */ private void startThreads(Configuration conf, String traceIn, Path ioPath, - Path scratchDir, CountDownLatch startFlag) throws IOException { + Path scratchDir, CountDownLatch startFlag, UserResolver userResolver) + throws IOException { monitor = createJobMonitor(); submitter = createJobSubmitter(monitor, conf.getInt(GRIDMIX_SUB_THR, Runtime.getRuntime().availableProcessors() + 1), conf.getInt(GRIDMIX_QUE_DEP, 5), new FilePool(conf, ioPath)); - factory = createJobFactory(submitter, traceIn, scratchDir, conf, startFlag); + factory = createJobFactory(submitter, traceIn, scratchDir, conf, startFlag, + userResolver); monitor.start(); submitter.start(); factory.start(); } protected JobMonitor createJobMonitor() throws IOException { return new JobMonitor(); } protected JobSubmitter createJobSubmitter(JobMonitor monitor, int threads, int queueDepth, FilePool pool) throws IOException { return new JobSubmitter(monitor, threads, queueDepth, pool); } protected JobFactory createJobFactory(JobSubmitter submitter, String traceIn, - Path scratchDir, Configuration conf, CountDownLatch startFlag) - throws IOException { + Path scratchDir, Configuration conf, CountDownLatch startFlag, + UserResolver userResolver) throws IOException { return new JobFactory(submitter, createInputStream(traceIn), scratchDir, - conf, startFlag); + conf, startFlag, userResolver); } - public int run(String[] argv) throws IOException, InterruptedException { + public int run(final String[] argv) throws IOException, InterruptedException { if (argv.length < 2) { printUsage(System.err); return 1; } - long genbytes = 0; + final Configuration conf = getConf(); + long genbytes = -1L; String traceIn = 
null; Path ioPath = null; + URI userRsrc = null; + final UserResolver userResolver = ReflectionUtils.newInstance( + conf.getClass(GRIDMIX_USR_RSV, SubmitterUserResolver.class, + UserResolver.class), conf); try { - int i = 0; - genbytes = "-generate".equals(argv[i++]) - ? StringUtils.TraditionalBinaryPrefix.string2long(argv[i++]) - : --i; - ioPath = new Path(argv[i++]); - traceIn = argv[i++]; - if (i != argv.length) { - printUsage(System.err); - return 1; + for (int i = 0; i < argv.length - 2; ++i) { + if ("-generate".equals(argv[i])) { + genbytes = StringUtils.TraditionalBinaryPrefix.string2long(argv[++i]); + } else if ("-users".equals(argv[i])) { + userRsrc = new URI(argv[++i]); + } else { + printUsage(System.err); + return 1; + } + } + if (!userResolver.setTargetUsers(userRsrc, conf)) { + LOG.warn("Resource " + userRsrc + " ignored"); } + ioPath = new Path(argv[argv.length - 2]); + traceIn = argv[argv.length - 1]; } catch (Exception e) { + e.printStackTrace(); printUsage(System.err); return 1; } + return start(conf, traceIn, ioPath, genbytes, userResolver); + } + + int start(Configuration conf, String traceIn, Path ioPath, long genbytes, + UserResolver userResolver) throws IOException, InterruptedException { InputStream trace = null; try { - final Configuration conf = getConf(); Path scratchDir = new Path(ioPath, conf.get(GRIDMIX_OUT_DIR, "gridmix")); + final FileSystem scratchFs = scratchDir.getFileSystem(conf); + scratchFs.mkdirs(scratchDir, new FsPermission((short) 0777)); + scratchFs.setPermission(scratchDir, new FsPermission((short) 0777)); // add shutdown hook for SIGINT, etc. 
Runtime.getRuntime().addShutdownHook(sdh); CountDownLatch startFlag = new CountDownLatch(1); try { // Create, start job submission threads - startThreads(conf, traceIn, ioPath, scratchDir, startFlag); + startThreads(conf, traceIn, ioPath, scratchDir, startFlag, + userResolver); // Write input data if specified if (genbytes > 0) { writeInputData(genbytes, ioPath); } // scan input dir contents submitter.refreshFilePool(); } catch (Throwable e) { LOG.error("Startup failed", e); if (factory != null) factory.abort(); // abort pipeline } finally { // signal for factory to start; sets start time startFlag.countDown(); } if (factory != null) { // wait for input exhaustion factory.join(Long.MAX_VALUE); final Throwable badTraceException = factory.error(); if (null != badTraceException) { LOG.error("Error in trace", badTraceException); throw new IOException("Error in trace", badTraceException); } // wait for pending tasks to be submitted submitter.shutdown(); submitter.join(Long.MAX_VALUE); // wait for running tasks to complete monitor.shutdown(); monitor.join(Long.MAX_VALUE); } } finally { IOUtils.cleanup(LOG, trace); } return 0; } /** * Handles orderly shutdown by requesting that each component in the * pipeline abort its progress, waiting for each to exit and killing * any jobs still running on the cluster. 
*/ class Shutdown extends Thread { static final long FAC_SLEEP = 1000; static final long SUB_SLEEP = 4000; static final long MON_SLEEP = 15000; private void killComponent(Component<?> component, long maxwait) { if (component == null) { return; } component.abort(); try { component.join(maxwait); } catch (InterruptedException e) { LOG.warn("Interrupted waiting for " + component); } } @Override public void run() { LOG.info("Exiting..."); try { killComponent(factory, FAC_SLEEP); // read no more tasks killComponent(submitter, SUB_SLEEP); // submit no more tasks killComponent(monitor, MON_SLEEP); // process remaining jobs here } finally { if (monitor == null) { return; } List<Job> remainingJobs = monitor.getRemainingJobs(); if (remainingJobs.isEmpty()) { return; } LOG.info("Killing running jobs..."); for (Job job : remainingJobs) { try { if (!job.isComplete()) { job.killJob(); LOG.info("Killed " + job.getJobName() + " (" + job.getJobID() + ")"); } else { if (job.isSuccessful()) { monitor.onSuccess(job); } else { monitor.onFailure(job); } } } catch (IOException e) { LOG.warn("Failure killing " + job.getJobName(), e); } catch (Exception e) { LOG.error("Unexcpected exception", e); } } LOG.info("Done."); } } } public static void main(String[] argv) throws Exception { int res = -1; try { res = ToolRunner.run(new Configuration(), new Gridmix(), argv); } finally { System.exit(res); } } protected void printUsage(PrintStream out) { ToolRunner.printGenericCommandUsage(out); - out.println("Usage: gridmix [-generate <MiB>] <iopath> <trace>"); + out.println("Usage: gridmix [-generate <MiB>] [-users URI] <iopath> <trace>"); out.println(" e.g. 
gridmix -generate 100m foo -"); out.println("Configuration parameters:"); out.printf(" %-40s : Output directory\n", GRIDMIX_OUT_DIR); out.printf(" %-40s : Submitting threads\n", GRIDMIX_SUB_THR); out.printf(" %-40s : Queued job desc\n", GRIDMIX_QUE_DEP); out.printf(" %-40s : Key fraction of rec\n", AvgRecordFactory.GRIDMIX_KEY_FRC); + out.printf(" %-40s : User resolution class\n", GRIDMIX_USR_RSV); } /** * Components in the pipeline must support the following operations for * orderly startup and shutdown. */ interface Component<T> { /** * Accept an item into this component from an upstream component. If * shutdown or abort have been called, this may fail, depending on the * semantics for the component. */ void add(T item) throws InterruptedException; /** * Attempt to start the service. */ void start(); /** * Wait until the service completes. It is assumed that either a * {@link #shutdown} or {@link #abort} has been requested. */ void join(long millis) throws InterruptedException; /** * Shut down gracefully, finishing all pending work. Reject new requests. */ void shutdown(); /** * Shut down immediately, aborting any work in progress and discarding * all pending work. It is legal to store pending work for another * thread to process. */ void abort(); } } diff --git a/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java index 24c5516..ef5b7fe 100644 --- a/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java +++ b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GridmixJob.java @@ -1,523 +1,540 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. 
The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapred.gridmix; import java.io.IOException; import java.util.ArrayList; import java.util.Formatter; import java.util.List; import java.util.Random; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Delayed; import java.util.concurrent.TimeUnit; +import javax.security.auth.login.LoginException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.RawComparator; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparator; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.Partitioner; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.RecordWriter; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import 
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UnixUserGroupInformation; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.TaskInfo; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; /** * Synthetic job generated from a trace description. */ class GridmixJob implements Callable<Job>, Delayed { public static final String JOBNAME = "GRIDMIX"; public static final String ORIGNAME = "gridmix.job.name.original"; public static final Log LOG = LogFactory.getLog(GridmixJob.class); private static final ThreadLocal<Formatter> nameFormat = new ThreadLocal<Formatter>() { @Override protected Formatter initialValue() { final StringBuilder sb = new StringBuilder(JOBNAME.length() + 5); sb.append(JOBNAME); return new Formatter(sb); } }; private final int seq; private final Path outdir; protected final Job job; private final JobStory jobdesc; + private final UserGroupInformation ugi; private final long submissionTimeNanos; public GridmixJob(Configuration conf, long submissionMillis, - JobStory jobdesc, Path outRoot, int seq) throws IOException { + JobStory jobdesc, Path outRoot, UserGroupInformation ugi, int seq) + throws IOException { + this.ugi = ugi; + UserGroupInformation.setCurrentUser(ugi); + conf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi.toString()); ((StringBuilder)nameFormat.get().out()).setLength(JOBNAME.length()); job = new Job(conf, nameFormat.get().format("%05d", seq).toString()); submissionTimeNanos = TimeUnit.NANOSECONDS.convert( submissionMillis, TimeUnit.MILLISECONDS); this.jobdesc = jobdesc; this.seq = seq; outdir = new Path(outRoot, "" + seq); } protected GridmixJob(Configuration conf, long submissionMillis, String name) throws IOException { job = new Job(conf, name); submissionTimeNanos = TimeUnit.NANOSECONDS.convert( submissionMillis, TimeUnit.MILLISECONDS); jobdesc = null; outdir = 
null; seq = -1; + try { + ugi = UnixUserGroupInformation.login(conf); + } catch (LoginException e) { + throw new IOException("Could not identify submitter", e); + } + } + + public UserGroupInformation getUgi() { + return ugi; } public String toString() { return job.getJobName(); } public long getDelay(TimeUnit unit) { return unit.convert(submissionTimeNanos - System.nanoTime(), TimeUnit.NANOSECONDS); } @Override public int compareTo(Delayed other) { if (this == other) { return 0; } if (other instanceof GridmixJob) { final long otherNanos = ((GridmixJob)other).submissionTimeNanos; if (otherNanos < submissionTimeNanos) { return 1; } if (otherNanos > submissionTimeNanos) { return -1; } return id() - ((GridmixJob)other).id(); } final long diff = getDelay(TimeUnit.NANOSECONDS) - other.getDelay(TimeUnit.NANOSECONDS); return 0 == diff ? 0 : (diff > 0 ? 1 : -1); } @Override public boolean equals(Object other) { if (this == other) { return true; } // not possible unless job is cloned; all jobs should be unique return other instanceof GridmixJob && id() == ((GridmixJob)other).id(); } @Override public int hashCode() { return id(); } int id() { return seq; } Job getJob() { return job; } JobStory getJobDesc() { return jobdesc; } public Job call() throws IOException, InterruptedException, ClassNotFoundException { job.setMapperClass(GridmixMapper.class); job.setReducerClass(GridmixReducer.class); job.setNumReduceTasks(jobdesc.getNumberReduces()); job.setMapOutputKeyClass(GridmixKey.class); job.setMapOutputValueClass(GridmixRecord.class); job.setSortComparatorClass(GridmixKey.Comparator.class); job.setGroupingComparatorClass(SpecGroupingComparator.class); job.setInputFormatClass(GridmixInputFormat.class); job.setOutputFormatClass(RawBytesOutputFormat.class); job.setPartitionerClass(DraftPartitioner.class); job.setJarByClass(GridmixJob.class); job.getConfiguration().setInt("gridmix.job.seq", seq); job.getConfiguration().set(ORIGNAME, null == jobdesc.getJobID() ? 
"<unknown>" : jobdesc.getJobID().toString()); job.getConfiguration().setBoolean("mapred.used.genericoptionsparser", true); FileInputFormat.addInputPath(job, new Path("ignored")); FileOutputFormat.setOutputPath(job, outdir); job.submit(); return job; } public static class DraftPartitioner<V> extends Partitioner<GridmixKey,V> { public int getPartition(GridmixKey key, V value, int numReduceTasks) { return key.getPartition(); } } public static class SpecGroupingComparator implements RawComparator<GridmixKey> { private final DataInputBuffer di = new DataInputBuffer(); private final byte[] reset = di.getData(); @Override public int compare(GridmixKey g1, GridmixKey g2) { final byte t1 = g1.getType(); final byte t2 = g2.getType(); if (t1 == GridmixKey.REDUCE_SPEC || t2 == GridmixKey.REDUCE_SPEC) { return t1 - t2; } assert t1 == GridmixKey.DATA; assert t2 == GridmixKey.DATA; return g1.compareTo(g2); } @Override public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { try { final int ret; di.reset(b1, s1, l1); final int x1 = WritableUtils.readVInt(di); di.reset(b2, s2, l2); final int x2 = WritableUtils.readVInt(di); final int t1 = b1[s1 + x1]; final int t2 = b2[s2 + x2]; if (t1 == GridmixKey.REDUCE_SPEC || t2 == GridmixKey.REDUCE_SPEC) { ret = t1 - t2; } else { assert t1 == GridmixKey.DATA; assert t2 == GridmixKey.DATA; ret = WritableComparator.compareBytes(b1, s1, x1, b2, s2, x2); } di.reset(reset, 0, 0); return ret; } catch (IOException e) { throw new RuntimeException(e); } } } public static class GridmixMapper extends Mapper<NullWritable,GridmixRecord,GridmixKey,GridmixRecord> { private double acc; private double ratio; private final ArrayList<RecordFactory> reduces = new ArrayList<RecordFactory>(); private final Random r = new Random(); private final GridmixKey key = new GridmixKey(); private final GridmixRecord val = new GridmixRecord(); @Override protected void setup(Context ctxt) throws IOException, InterruptedException { final Configuration conf = 
ctxt.getConfiguration(); final GridmixSplit split = (GridmixSplit) ctxt.getInputSplit(); final int maps = split.getMapCount(); final long[] reduceBytes = split.getOutputBytes(); final long[] reduceRecords = split.getOutputRecords(); long totalRecords = 0L; final int nReduces = ctxt.getNumReduceTasks(); if (nReduces > 0) { int idx = 0; int id = split.getId(); for (int i = 0; i < nReduces; ++i) { final GridmixKey.Spec spec = new GridmixKey.Spec(); if (i == id) { spec.bytes_out = split.getReduceBytes(idx); spec.rec_out = split.getReduceRecords(idx); ++idx; id += maps; } reduces.add(new IntermediateRecordFactory( new AvgRecordFactory(reduceBytes[i], reduceRecords[i], conf), i, reduceRecords[i], spec, conf)); totalRecords += reduceRecords[i]; } } else { reduces.add(new AvgRecordFactory(reduceBytes[0], reduceRecords[0], conf)); totalRecords = reduceRecords[0]; } final long splitRecords = split.getInputRecords(); final long inputRecords = splitRecords <= 0 && split.getLength() >= 0 ? Math.max(1, split.getLength() / conf.getInt("gridmix.missing.rec.size", 64*1024)) : splitRecords; ratio = totalRecords / (1.0 * inputRecords); acc = 0.0; } @Override public void map(NullWritable ignored, GridmixRecord rec, Context context) throws IOException, InterruptedException { acc += ratio; while (acc >= 1.0 && !reduces.isEmpty()) { key.setSeed(r.nextLong()); val.setSeed(r.nextLong()); final int idx = r.nextInt(reduces.size()); final RecordFactory f = reduces.get(idx); if (!f.next(key, val)) { reduces.remove(idx); continue; } context.write(key, val); acc -= 1.0; } } @Override public void cleanup(Context context) throws IOException, InterruptedException { for (RecordFactory factory : reduces) { key.setSeed(r.nextLong()); while (factory.next(key, val)) { context.write(key, val); key.setSeed(r.nextLong()); } } } } public static class GridmixReducer extends Reducer<GridmixKey,GridmixRecord,NullWritable,GridmixRecord> { private final Random r = new Random(); private final GridmixRecord val = 
new GridmixRecord(); private double acc; private double ratio; private RecordFactory factory; @Override protected void setup(Context context) throws IOException, InterruptedException { if (!context.nextKey() || context.getCurrentKey().getType() != GridmixKey.REDUCE_SPEC) { throw new IOException("Missing reduce spec"); } long outBytes = 0L; long outRecords = 0L; long inRecords = 0L; for (GridmixRecord ignored : context.getValues()) { final GridmixKey spec = context.getCurrentKey(); inRecords += spec.getReduceInputRecords(); outBytes += spec.getReduceOutputBytes(); outRecords += spec.getReduceOutputRecords(); } if (0 == outRecords && inRecords > 0) { LOG.info("Spec output bytes w/o records. Using input record count"); outRecords = inRecords; } factory = new AvgRecordFactory(outBytes, outRecords, context.getConfiguration()); ratio = outRecords / (1.0 * inRecords); acc = 0.0; } @Override protected void reduce(GridmixKey key, Iterable<GridmixRecord> values, Context context) throws IOException, InterruptedException { for (GridmixRecord ignored : values) { acc += ratio; while (acc >= 1.0 && factory.next(null, val)) { context.write(NullWritable.get(), val); acc -= 1.0; } } } @Override protected void cleanup(Context context) throws IOException, InterruptedException { val.setSeed(r.nextLong()); while (factory.next(null, val)) { context.write(NullWritable.get(), val); val.setSeed(r.nextLong()); } } } static class GridmixRecordReader extends RecordReader<NullWritable,GridmixRecord> { private RecordFactory factory; private final Random r = new Random(); private final GridmixRecord val = new GridmixRecord(); public GridmixRecordReader() { } @Override public void initialize(InputSplit genericSplit, TaskAttemptContext ctxt) throws IOException, InterruptedException { final GridmixSplit split = (GridmixSplit)genericSplit; final Configuration conf = ctxt.getConfiguration(); factory = new ReadRecordFactory(split.getLength(), split.getInputRecords(), new FileQueue(split, conf), conf); 
} @Override public boolean nextKeyValue() throws IOException { val.setSeed(r.nextLong()); return factory.next(null, val); } @Override public float getProgress() throws IOException { return factory.getProgress(); } @Override public NullWritable getCurrentKey() { return NullWritable.get(); } @Override public GridmixRecord getCurrentValue() { return val; } @Override public void close() throws IOException { factory.close(); } } static class GridmixInputFormat extends InputFormat<NullWritable,GridmixRecord> { @Override public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException { return pullDescription(jobCtxt.getConfiguration().getInt( "gridmix.job.seq", -1)); } @Override public RecordReader<NullWritable,GridmixRecord> createRecordReader( InputSplit split, final TaskAttemptContext taskContext) throws IOException { return new GridmixRecordReader(); } } static class RawBytesOutputFormat<K> extends FileOutputFormat<K,GridmixRecord> { @Override public RecordWriter<K,GridmixRecord> getRecordWriter( TaskAttemptContext job) throws IOException { Path file = getDefaultWorkFile(job, ""); FileSystem fs = file.getFileSystem(job.getConfiguration()); final FSDataOutputStream fileOut = fs.create(file, false); return new RecordWriter<K,GridmixRecord>() { @Override public void write(K ignored, GridmixRecord value) throws IOException { value.writeRandom(fileOut, value.getSize()); } @Override public void close(TaskAttemptContext ctxt) throws IOException { fileOut.close(); } }; } } // TODO replace with ThreadLocal submitter? 
private static final ConcurrentHashMap<Integer,List<InputSplit>> descCache = new ConcurrentHashMap<Integer,List<InputSplit>>(); static void pushDescription(int seq, List<InputSplit> splits) { if (null != descCache.putIfAbsent(seq, splits)) { throw new IllegalArgumentException("Description exists for id " + seq); } } static List<InputSplit> pullDescription(int seq) { return descCache.remove(seq); } // not nesc when TL static void clearAll() { descCache.clear(); } void buildSplits(FilePool inputDir) throws IOException { long mapInputBytesTotal = 0L; long mapOutputBytesTotal = 0L; long mapOutputRecordsTotal = 0L; final JobStory jobdesc = getJobDesc(); if (null == jobdesc) { return; } final int maps = jobdesc.getNumberMaps(); final int reds = jobdesc.getNumberReduces(); for (int i = 0; i < maps; ++i) { final TaskInfo info = jobdesc.getTaskInfo(TaskType.MAP, i); mapInputBytesTotal += info.getInputBytes(); mapOutputBytesTotal += info.getOutputBytes(); mapOutputRecordsTotal += info.getOutputRecords(); } final double[] reduceRecordRatio = new double[reds]; final double[] reduceByteRatio = new double[reds]; for (int i = 0; i < reds; ++i) { final TaskInfo info = jobdesc.getTaskInfo(TaskType.REDUCE, i); reduceByteRatio[i] = info.getInputBytes() / (1.0 * mapOutputBytesTotal); reduceRecordRatio[i] = info.getInputRecords() / (1.0 * mapOutputRecordsTotal); } final InputStriper striper = new InputStriper(inputDir, mapInputBytesTotal); final List<InputSplit> splits = new ArrayList<InputSplit>(); for (int i = 0; i < maps; ++i) { final int nSpec = reds / maps + ((reds % maps) > i ? 
1 : 0); final long[] specBytes = new long[nSpec]; final long[] specRecords = new long[nSpec]; for (int j = 0; j < nSpec; ++j) { final TaskInfo info = jobdesc.getTaskInfo(TaskType.REDUCE, i + j * maps); specBytes[j] = info.getOutputBytes(); specRecords[j] = info.getOutputRecords(); if (LOG.isDebugEnabled()) { LOG.debug(String.format("SPEC(%d) %d -> %d %d %d", id(), i, i + j * maps, info.getOutputRecords(), info.getOutputBytes())); } } final TaskInfo info = jobdesc.getTaskInfo(TaskType.MAP, i); splits.add(new GridmixSplit(striper.splitFor(inputDir, info.getInputBytes(), 3), maps, i, info.getInputBytes(), info.getInputRecords(), info.getOutputBytes(), info.getOutputRecords(), reduceByteRatio, reduceRecordRatio, specBytes, specRecords)); } pushDescription(id(), splits); } } diff --git a/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobFactory.java b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobFactory.java index 859d406..ac5fbfe 100644 --- a/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobFactory.java +++ b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobFactory.java @@ -1,272 +1,276 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred.gridmix; import java.io.IOException; import java.io.InputStream; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStoryProducer; import org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants.Values; import org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants; import org.apache.hadoop.tools.rumen.TaskAttemptInfo; import org.apache.hadoop.tools.rumen.TaskInfo; import org.apache.hadoop.tools.rumen.ZombieJobProducer; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; /** * Component reading job traces generated by Rumen. Each job in the trace is * assigned a sequence number and given a submission time relative to the * job that preceded it. Jobs are enqueued in the JobSubmitter provided at * construction. * @see org.apache.hadoop.tools.rumen.HadoopLogsAnalyzer */ class JobFactory implements Gridmix.Component<Void> { public static final Log LOG = LogFactory.getLog(JobFactory.class); private final Path scratch; private final float rateFactor; private final Configuration conf; private final ReaderThread rThread; private final AtomicInteger sequence; private final JobSubmitter submitter; private final CountDownLatch startFlag; + private final UserResolver userResolver; private volatile IOException error = null; protected final JobStoryProducer jobProducer; /** * Creating a new instance does not start the thread. 
* @param submitter Component to which deserialized jobs are passed * @param jobTrace Stream of job traces with which to construct a * {@link org.apache.hadoop.tools.rumen.ZombieJobProducer} * @param scratch Directory into which to write output from simulated jobs * @param conf Config passed to all jobs to be submitted * @param startFlag Latch released from main to start pipeline */ public JobFactory(JobSubmitter submitter, InputStream jobTrace, - Path scratch, Configuration conf, CountDownLatch startFlag) - throws IOException { + Path scratch, Configuration conf, CountDownLatch startFlag, + UserResolver userResolver) throws IOException { this(submitter, new ZombieJobProducer(jobTrace, null), scratch, conf, - startFlag); + startFlag, userResolver); } /** * Constructor permitting JobStoryProducer to be mocked. * @param submitter Component to which deserialized jobs are passed * @param jobProducer Producer generating JobStory objects. * @param scratch Directory into which to write output from simulated jobs * @param conf Config passed to all jobs to be submitted * @param startFlag Latch released from main to start pipeline */ protected JobFactory(JobSubmitter submitter, JobStoryProducer jobProducer, - Path scratch, Configuration conf, CountDownLatch startFlag) { + Path scratch, Configuration conf, CountDownLatch startFlag, + UserResolver userResolver) { sequence = new AtomicInteger(0); this.scratch = scratch; this.rateFactor = conf.getFloat(Gridmix.GRIDMIX_SUB_MUL, 1.0f); this.jobProducer = jobProducer; this.conf = new Configuration(conf); this.submitter = submitter; this.startFlag = startFlag; this.rThread = new ReaderThread(); + this.userResolver = userResolver; } static class MinTaskInfo extends TaskInfo { public MinTaskInfo(TaskInfo info) { super(info.getInputBytes(), info.getInputRecords(), info.getOutputBytes(), info.getOutputRecords(), info.getTaskMemory()); } public long getInputBytes() { return Math.max(0, super.getInputBytes()); } public int 
getInputRecords() { return Math.max(0, super.getInputRecords()); } public long getOutputBytes() { return Math.max(0, super.getOutputBytes()); } public int getOutputRecords() { return Math.max(0, super.getOutputRecords()); } public long getTaskMemory() { return Math.max(0, super.getTaskMemory()); } } static class FilterJobStory implements JobStory { protected final JobStory job; public FilterJobStory(JobStory job) { this.job = job; } public JobConf getJobConf() { return job.getJobConf(); } public String getName() { return job.getName(); } public JobID getJobID() { return job.getJobID(); } public String getUser() { return job.getUser(); } public long getSubmissionTime() { return job.getSubmissionTime(); } public InputSplit[] getInputSplits() { return job.getInputSplits(); } public int getNumberMaps() { return job.getNumberMaps(); } public int getNumberReduces() { return job.getNumberReduces(); } public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) { return job.getTaskInfo(taskType, taskNumber); } public TaskAttemptInfo getTaskAttemptInfo(TaskType taskType, int taskNumber, int taskAttemptNumber) { return job.getTaskAttemptInfo(taskType, taskNumber, taskAttemptNumber); } public TaskAttemptInfo getMapTaskAttemptInfoAdjusted( int taskNumber, int taskAttemptNumber, int locality) { return job.getMapTaskAttemptInfoAdjusted( taskNumber, taskAttemptNumber, locality); } public Values getOutcome() { return job.getOutcome(); } } /** * Worker thread responsible for reading descriptions, assigning sequence * numbers, and normalizing time. */ private class ReaderThread extends Thread { public ReaderThread() { super("GridmixJobFactory"); } private JobStory getNextJobFiltered() throws IOException { JobStory job; do { job = jobProducer.getNextJob(); } while (job != null && (job.getOutcome() != Pre21JobHistoryConstants.Values.SUCCESS || job.getSubmissionTime() < 0)); return null == job ? 
null : new FilterJobStory(job) { @Override public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) { return new MinTaskInfo(this.job.getTaskInfo(taskType, taskNumber)); } }; } @Override public void run() { try { startFlag.await(); if (Thread.currentThread().isInterrupted()) { return; } final long initTime = TimeUnit.MILLISECONDS.convert( System.nanoTime(), TimeUnit.NANOSECONDS); LOG.debug("START @ " + initTime); long first = -1; long last = -1; while (!Thread.currentThread().isInterrupted()) { try { final JobStory job = getNextJobFiltered(); if (null == job) { return; } if (first < 0) { first = job.getSubmissionTime(); } final long current = job.getSubmissionTime(); if (current < last) { LOG.warn("Job " + job.getJobID() + " out of order"); continue; } last = current; - submitter.add(new GridmixJob(conf, initTime + - Math.round(rateFactor * (current - first)), - job, scratch, sequence.getAndIncrement())); + submitter.add(new GridmixJob(new Configuration(conf), initTime + + Math.round(rateFactor * (current - first)), job, scratch, + userResolver.getTargetUgi(job.getUser()), + sequence.getAndIncrement())); } catch (IOException e) { JobFactory.this.error = e; return; } } } catch (InterruptedException e) { // exit thread; ignore any jobs remaining in the trace return; } finally { IOUtils.cleanup(null, jobProducer); } } } /** * Obtain the error that caused the thread to exit unexpectedly. */ public IOException error() { return error; } /** * Add is disabled. * @throws UnsupportedOperationException */ public void add(Void ignored) { throw new UnsupportedOperationException(getClass().getName() + " is at the start of the pipeline and accepts no events"); } /** * Start the reader thread, wait for latch if necessary. */ public void start() { rThread.start(); } /** * Wait for the reader thread to exhaust the job trace. */ public void join(long millis) throws InterruptedException { rThread.join(millis); } /** * Interrupt the reader thread. 
*/ public void shutdown() { rThread.interrupt(); } /** * Interrupt the reader thread. This requires no special consideration, as * the thread has no pending work queue. */ public void abort() { // Currently no special work rThread.interrupt(); } } diff --git a/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java index 7990d50..e272cc2 100644 --- a/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java +++ b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/JobSubmitter.java @@ -1,177 +1,182 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred.gridmix; import java.io.IOException; import java.nio.channels.ClosedByInterruptException; import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.Semaphore; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.security.UserGroupInformation; + /** * Component accepting deserialized job traces, computing split data, and * submitting to the cluster on deadline. Each job added from an upstream * factory must be submitted to the cluster by the deadline recorded on it. * Once submitted, jobs must be added to a downstream component for * monitoring. */ class JobSubmitter implements Gridmix.Component<GridmixJob> { public static final Log LOG = LogFactory.getLog(JobSubmitter.class); final Semaphore sem; private final FilePool inputDir; private final JobMonitor monitor; private final ExecutorService sched; private volatile boolean shutdown = false; /** * Initialize the submission component with downstream monitor and pool of * files from which split data may be read. * @param monitor Monitor component to which jobs should be passed * @param threads Number of submission threads * See {@link Gridmix#GRIDMIX_SUB_THR}. * @param queueDepth Max depth of pending work queue * See {@link Gridmix#GRIDMIX_QUE_DEP}. * @param inputDir Set of files from which split data may be mined for * synthetic jobs. */ public JobSubmitter(JobMonitor monitor, int threads, int queueDepth, FilePool inputDir) { sem = new Semaphore(queueDepth); sched = new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>()); this.inputDir = inputDir; this.monitor = monitor; } /** * Runnable wrapping a job to be submitted to the cluster. 
*/ private class SubmitTask implements Runnable { final GridmixJob job; public SubmitTask(GridmixJob job) { this.job = job; } public void run() { try { // pre-compute split information try { + UserGroupInformation.setCurrentUser(job.getUgi()); job.buildSplits(inputDir); } catch (IOException e) { - LOG.warn("Failed to submit " + job.getJob().getJobName(), e); + LOG.warn("Failed to submit " + job.getJob().getJobName() + " as " + + job.getUgi(), e); return; } // Sleep until deadline long nsDelay = job.getDelay(TimeUnit.NANOSECONDS); while (nsDelay > 0) { TimeUnit.NANOSECONDS.sleep(nsDelay); nsDelay = job.getDelay(TimeUnit.NANOSECONDS); } try { // submit job monitor.add(job.call()); LOG.debug("SUBMIT " + job + "@" + System.currentTimeMillis() + " (" + job.getJob().getJobID() + ")"); } catch (IOException e) { - LOG.warn("Failed to submit " + job.getJob().getJobName(), e); + LOG.warn("Failed to submit " + job.getJob().getJobName() + " as " + + job.getUgi(), e); if (e.getCause() instanceof ClosedByInterruptException) { throw new InterruptedException("Failed to submit " + job.getJob().getJobName()); } } catch (ClassNotFoundException e) { LOG.warn("Failed to submit " + job.getJob().getJobName(), e); } } catch (InterruptedException e) { // abort execution, remove splits if nesc // TODO release ThdLoc GridmixJob.pullDescription(job.id()); Thread.currentThread().interrupt(); return; } finally { sem.release(); } } } /** * Enqueue the job to be submitted per the deadline associated with it. */ public void add(final GridmixJob job) throws InterruptedException { final boolean addToQueue = !shutdown; if (addToQueue) { final SubmitTask task = new SubmitTask(job); sem.acquire(); try { sched.execute(task); } catch (RejectedExecutionException e) { sem.release(); } } } /** * (Re)scan the set of input files from which splits are derived. 
*/ public void refreshFilePool() throws IOException { inputDir.refresh(); } /** * Does nothing, as the threadpool is already initialized and waiting for * work from the upstream factory. */ public void start() { } /** * Continue running until all queued jobs have been submitted to the * cluster. */ public void join(long millis) throws InterruptedException { if (!shutdown) { throw new IllegalStateException("Cannot wait for active submit thread"); } sched.awaitTermination(millis, TimeUnit.MILLISECONDS); } /** * Finish all jobs pending submission, but do not accept new work. */ public void shutdown() { // complete pending tasks, but accept no new tasks shutdown = true; sched.shutdown(); } /** * Discard pending work, including precomputed work waiting to be * submitted. */ public void abort() { //pendingJobs.clear(); shutdown = true; sched.shutdownNow(); } } diff --git a/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java new file mode 100644 index 0000000..f192452 --- /dev/null +++ b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/RoundRobinUserResolver.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred.gridmix; + +import java.io.IOException; +import java.net.URI; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UnixUserGroupInformation; + +public class RoundRobinUserResolver extends UserResolver { + + private int uidx = 0; + private List<UserGroupInformation> users = Collections.emptyList(); + private final HashMap<UserGroupInformation,UserGroupInformation> usercache = + new HashMap<UserGroupInformation,UserGroupInformation>(); + + public RoundRobinUserResolver() { } + + @Override + public synchronized boolean setTargetUsers(URI userloc, Configuration conf) + throws IOException { + users = parseUserList(userloc, conf); + if (users.size() == 0) { + throw new IOException("Empty user list"); + } + usercache.keySet().retainAll(users); + return true; + } + + @Override + public synchronized UserGroupInformation getTargetUgi( + UserGroupInformation ugi) { + UserGroupInformation ret = usercache.get(ugi); + if (null == ret) { + ret = users.get(uidx++ % users.size()); + usercache.put(ugi, ret); + } + return ret; + } + +} diff --git a/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SubmitterUserResolver.java b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SubmitterUserResolver.java new file mode 100644 index 0000000..3c6f46e --- /dev/null +++ b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/SubmitterUserResolver.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred.gridmix; + +import java.io.IOException; +import java.net.URI; +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UnixUserGroupInformation; + +/** + * Resolves all UGIs to the submitting user. 
+ */ +public class SubmitterUserResolver extends UserResolver { + + private UserGroupInformation ugi = null; + + public SubmitterUserResolver() { } + + public synchronized boolean setTargetUsers(URI userdesc, Configuration conf) + throws IOException { + try { + ugi = UnixUserGroupInformation.login(conf, false); + } catch (LoginException e) { + throw new IOException("Failed to get submitter UGI", e); + } + return false; + } + + public synchronized UserGroupInformation getTargetUgi( + UserGroupInformation ugi) { + return this.ugi; + } + +} diff --git a/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/UserResolver.java b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/UserResolver.java new file mode 100644 index 0000000..692c74f --- /dev/null +++ b/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/UserResolver.java @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapred.gridmix; + +import java.io.IOException; +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.LineReader; + +/** + * Maps users in the trace to a set of valid target users on the test cluster. + */ +public abstract class UserResolver { + + /** + * Userlist assumes one UGI per line, each UGI matching + * &lt;username&gt;,&lt;group&gt;[,group]* + */ + protected List<UserGroupInformation> parseUserList( + URI userUri, Configuration conf) throws IOException { + if (null == userUri) { + return Collections.emptyList(); + } + final Path userloc = new Path(userUri.toString()); + final Text rawUgi = new Text(); + final FileSystem fs = userloc.getFileSystem(conf); + final ArrayList<UserGroupInformation> ret = new ArrayList(); + + LineReader in = null; + try { + final ArrayList<String> groups = new ArrayList(); + in = new LineReader(fs.open(userloc)); + while (in.readLine(rawUgi) > 0) { + int e = rawUgi.find(","); + if (e <= 0) { + throw new IOException("Missing username: " + rawUgi); + } + final String username = Text.decode(rawUgi.getBytes(), 0, e); + int s = e; + while ((e = rawUgi.find(",", ++s)) != -1) { + groups.add(Text.decode(rawUgi.getBytes(), s, e - s)); + s = e; + } + groups.add(Text.decode(rawUgi.getBytes(), s, rawUgi.getLength() - s)); + if (groups.size() == 0) { + throw new IOException("Missing groups: " + rawUgi); + } + ret.add(new UnixUserGroupInformation( + username, groups.toArray(new String[groups.size()]))); + groups.clear(); + } + } finally { + if (in != null) { + in.close(); + } + } + return ret; + } + + /** + * Configure the user map given the URI and 
configuration. The resolver's + * contract will define how the resource will be interpreted, but the default + * will typically interpret the URI as a {@link org.apache.hadoop.fs.Path} + * listing target users. The format of this file is defined by {@link + * #parseUserList}. + * @param userdesc URI (possibly null) from which user information may be + * loaded per the subclass contract. + * @param conf The tool configuration. + * @return true if the resource provided was used in building the list of + * target users + */ + public abstract boolean setTargetUsers(URI userdesc, Configuration conf) + throws IOException; + + // tmp compatibility hack prior to UGI from Rumen + public UserGroupInformation getTargetUgi(String user) + throws IOException { + return getTargetUgi(new UnixUserGroupInformation( + user, new String[] { "users" })); + } + + /** + * Map the given UGI to another per the subclass contract. + * @param ugi User information from the trace. + */ + public abstract UserGroupInformation getTargetUgi(UserGroupInformation ugi); + +} diff --git a/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobFactory.java b/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobFactory.java index 8ba3b10..ec632da 100644 --- a/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobFactory.java +++ b/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/DebugJobFactory.java @@ -1,277 +1,278 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapred.gridmix; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Random; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStoryProducer; import org.apache.hadoop.tools.rumen.TaskAttemptInfo; import org.apache.hadoop.tools.rumen.TaskInfo; import org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants.Values;; /** * Component generating random job traces for testing on a single node. 
*/ class DebugJobFactory extends JobFactory { public DebugJobFactory(JobSubmitter submitter, Path scratch, int numJobs, - Configuration conf, CountDownLatch startFlag) throws IOException { + Configuration conf, CountDownLatch startFlag, UserResolver userResolver) + throws IOException { super(submitter, new DebugJobProducer(numJobs, conf), scratch, conf, - startFlag); + startFlag, userResolver); } ArrayList<JobStory> getSubmitted() { return ((DebugJobProducer)jobProducer).submitted; } private static class DebugJobProducer implements JobStoryProducer { final ArrayList<JobStory> submitted; private final Configuration conf; private final AtomicInteger numJobs; public DebugJobProducer(int numJobs, Configuration conf) { super(); this.conf = conf; this.numJobs = new AtomicInteger(numJobs); this.submitted = new ArrayList<JobStory>(); } @Override public JobStory getNextJob() throws IOException { if (numJobs.getAndDecrement() > 0) { final MockJob ret = new MockJob(conf); submitted.add(ret); return ret; } return null; } @Override public void close() { } } static double[] getDistr(Random r, double mindist, int size) { assert 0.0 <= mindist && mindist <= 1.0; final double min = mindist / size; final double rem = 1.0 - min * size; final double[] tmp = new double[size]; for (int i = 0; i < tmp.length - 1; ++i) { tmp[i] = r.nextDouble() * rem; } tmp[tmp.length - 1] = rem; Arrays.sort(tmp); final double[] ret = new double[size]; ret[0] = tmp[0] + min; for (int i = 1; i < size; ++i) { ret[i] = tmp[i] - tmp[i-1] + min; } return ret; } /** * Generate random task data for a synthetic job. 
*/ static class MockJob implements JobStory { static final int MIN_REC = 1 << 14; static final int MIN_BYTES = 1 << 20; static final int VAR_REC = 1 << 14; static final int VAR_BYTES = 4 << 20; static final int MAX_MAP = 5; static final int MAX_RED = 3; static void initDist(Random r, double min, int[] recs, long[] bytes, long tot_recs, long tot_bytes) { final double[] recs_dist = getDistr(r, min, recs.length); final double[] bytes_dist = getDistr(r, min, recs.length); long totalbytes = 0L; int totalrecs = 0; for (int i = 0; i < recs.length; ++i) { recs[i] = (int) Math.round(tot_recs * recs_dist[i]); bytes[i] = Math.round(tot_bytes * bytes_dist[i]); totalrecs += recs[i]; totalbytes += bytes[i]; } // Add/remove excess recs[0] += totalrecs - tot_recs; bytes[0] += totalbytes - tot_bytes; if (LOG.isInfoEnabled()) { LOG.info("DIST: " + Arrays.toString(recs) + " " + tot_recs + "/" + totalrecs + " " + Arrays.toString(bytes) + " " + tot_bytes + "/" + totalbytes); } } private static final AtomicInteger seq = new AtomicInteger(0); // set timestamps in the past private static final AtomicLong timestamp = new AtomicLong(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(60, TimeUnit.DAYS)); private final int id; private final String name; private final int[] m_recsIn, m_recsOut, r_recsIn, r_recsOut; private final long[] m_bytesIn, m_bytesOut, r_bytesIn, r_bytesOut; private final long submitTime; public MockJob(Configuration conf) { final Random r = new Random(); final long seed = r.nextLong(); r.setSeed(seed); id = seq.getAndIncrement(); name = String.format("MOCKJOB%05d", id); LOG.info(name + " (" + seed + ")"); submitTime = timestamp.addAndGet(TimeUnit.MILLISECONDS.convert( r.nextInt(10), TimeUnit.SECONDS)); m_recsIn = new int[r.nextInt(MAX_MAP) + 1]; m_bytesIn = new long[m_recsIn.length]; m_recsOut = new int[m_recsIn.length]; m_bytesOut = new long[m_recsIn.length]; r_recsIn = new int[r.nextInt(MAX_RED) + 1]; r_bytesIn = new long[r_recsIn.length]; r_recsOut = new 
int[r_recsIn.length]; r_bytesOut = new long[r_recsIn.length]; // map input final long map_recs = r.nextInt(VAR_REC) + MIN_REC; final long map_bytes = r.nextInt(VAR_BYTES) + MIN_BYTES; initDist(r, 0.5, m_recsIn, m_bytesIn, map_recs, map_bytes); // shuffle final long shuffle_recs = r.nextInt(VAR_REC) + MIN_REC; final long shuffle_bytes = r.nextInt(VAR_BYTES) + MIN_BYTES; initDist(r, 0.4, m_recsOut, m_bytesOut, shuffle_recs, shuffle_bytes); initDist(r, 0.8, r_recsIn, r_bytesIn, shuffle_recs, shuffle_bytes); // reduce output final long red_recs = r.nextInt(VAR_REC) + MIN_REC; final long red_bytes = r.nextInt(VAR_BYTES) + MIN_BYTES; initDist(r, 0.4, r_recsOut, r_bytesOut, red_recs, red_bytes); if (LOG.isDebugEnabled()) { int iMapBTotal = 0, oMapBTotal = 0, iRedBTotal = 0, oRedBTotal = 0; int iMapRTotal = 0, oMapRTotal = 0, iRedRTotal = 0, oRedRTotal = 0; for (int i = 0; i < m_recsIn.length; ++i) { iMapRTotal += m_recsIn[i]; iMapBTotal += m_bytesIn[i]; oMapRTotal += m_recsOut[i]; oMapBTotal += m_bytesOut[i]; } for (int i = 0; i < r_recsIn.length; ++i) { iRedRTotal += r_recsIn[i]; iRedBTotal += r_bytesIn[i]; oRedRTotal += r_recsOut[i]; oRedBTotal += r_bytesOut[i]; } LOG.debug(String.format("%s: M (%03d) %6d/%10d -> %6d/%10d" + " R (%03d) %6d/%10d -> %6d/%10d @%d", name, m_bytesIn.length, iMapRTotal, iMapBTotal, oMapRTotal, oMapBTotal, r_bytesIn.length, iRedRTotal, iRedBTotal, oRedRTotal, oRedBTotal, submitTime)); } } @Override public String getName() { return name; } @Override public String getUser() { - return "FOOBAR"; + return String.format("foobar%d", id); } @Override public JobID getJobID() { return new JobID("job_mock_" + name, id); } @Override public Values getOutcome() { return Values.SUCCESS; } @Override public long getSubmissionTime() { return submitTime; } @Override public int getNumberMaps() { return m_bytesIn.length; } @Override public int getNumberReduces() { return r_bytesIn.length; } @Override public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) 
{ switch (taskType) { case MAP: return new TaskInfo(m_bytesIn[taskNumber], m_recsIn[taskNumber], m_bytesOut[taskNumber], m_recsOut[taskNumber], -1); case REDUCE: return new TaskInfo(r_bytesIn[taskNumber], r_recsIn[taskNumber], r_bytesOut[taskNumber], r_recsOut[taskNumber], -1); default: throw new IllegalArgumentException("Not interested"); } } @Override public InputSplit[] getInputSplits() { throw new UnsupportedOperationException(); } @Override public TaskAttemptInfo getTaskAttemptInfo(TaskType taskType, int taskNumber, int taskAttemptNumber) { throw new UnsupportedOperationException(); } @Override public TaskAttemptInfo getMapTaskAttemptInfoAdjusted(int taskNumber, int taskAttemptNumber, int locality) { throw new UnsupportedOperationException(); } @Override public org.apache.hadoop.mapred.JobConf getJobConf() { throw new UnsupportedOperationException(); } } } diff --git a/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java b/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java index d5c2d68..edca525 100644 --- a/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java +++ b/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java @@ -1,322 +1,336 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapred.gridmix; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.concurrent.BlockingQueue; import java.util.concurrent.CountDownLatch; import java.util.concurrent.LinkedBlockingQueue; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.mapred.Counters; import org.apache.hadoop.mapred.JobClient; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobID; import org.apache.hadoop.mapred.MiniMRCluster; import org.apache.hadoop.mapred.TaskReport; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.TaskInfo; import org.apache.hadoop.util.ToolRunner; import static org.apache.hadoop.mapred.Task.Counter.*; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import static org.junit.Assert.*; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.log4j.Level; public class TestGridmixSubmission { { ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.mapred.gridmix") ).getLogger().setLevel(Level.DEBUG); } + private static final Path DEST = new Path("/gridmix"); + private static FileSystem dfs = null; private static MiniDFSCluster dfsCluster = null; private static MiniMRCluster mrCluster = null; private static final int NJOBS = 2; private static final long GENDATA = 50; // in megabytes private static final int GENSLOP = 100 * 1024; // +/- 100k for logs @BeforeClass public static 
void initCluster() throws IOException { Configuration conf = new Configuration(); dfsCluster = new MiniDFSCluster(conf, 3, true, null); dfs = dfsCluster.getFileSystem(); mrCluster = new MiniMRCluster(3, dfs.getUri().toString(), 1, null, null, new JobConf(conf)); } @AfterClass public static void shutdownCluster() throws IOException { if (mrCluster != null) { mrCluster.shutdown(); } if (dfsCluster != null) { dfsCluster.shutdown(); } } static class TestMonitor extends JobMonitor { static final long SLOPBYTES = 1024; private final int expected; private final BlockingQueue<Job> retiredJobs; public TestMonitor(int expected) { super(); this.expected = expected; retiredJobs = new LinkedBlockingQueue<Job>(); } public void verify(ArrayList<JobStory> submitted) throws Exception { final ArrayList<Job> succeeded = new ArrayList<Job>(); assertEquals("Bad job count", expected, retiredJobs.drainTo(succeeded)); final HashMap<String,JobStory> sub = new HashMap<String,JobStory>(); for (JobStory spec : submitted) { sub.put(spec.getName(), spec); } final JobClient client = new JobClient(mrCluster.createJobConf()); for (Job job : succeeded) { final String jobname = job.getJobName(); if ("GRIDMIX_GENDATA".equals(jobname)) { final Path in = new Path("foo").makeQualified(dfs); final Path out = new Path("/gridmix").makeQualified(dfs); final ContentSummary generated = dfs.getContentSummary(in); assertTrue("Mismatched data gen", // +/- 100k for logs (GENDATA << 20) < generated.getLength() + GENSLOP || (GENDATA << 20) > generated.getLength() - GENSLOP); FileStatus[] outstat = dfs.listStatus(out); assertEquals("Mismatched job count", NJOBS, outstat.length); continue; } final JobStory spec = sub.get(job.getJobName().replace("GRIDMIX", "MOCKJOB")); assertNotNull("No spec for " + job.getJobName(), spec); assertNotNull("No counters for " + job.getJobName(), job.getCounters()); + final String specname = spec.getName(); + final FileStatus stat = dfs.getFileStatus(new Path(DEST, "" + + 
Integer.valueOf(specname.substring(specname.length() - 5)))); + assertEquals("Wrong owner for " + job.getJobName(), spec.getUser(), + stat.getOwner()); final int nMaps = spec.getNumberMaps(); final int nReds = spec.getNumberReduces(); // TODO Blocked by MAPREDUCE-118 if (true) return; // TODO System.out.println(jobname + ": " + nMaps + "/" + nReds); final TaskReport[] mReports = client.getMapTaskReports(JobID.downgrade(job.getJobID())); assertEquals("Mismatched map count", nMaps, mReports.length); check(TaskType.MAP, job, spec, mReports, 0, 0, SLOPBYTES, nReds); final TaskReport[] rReports = client.getReduceTaskReports(JobID.downgrade(job.getJobID())); assertEquals("Mismatched reduce count", nReds, rReports.length); check(TaskType.REDUCE, job, spec, rReports, nMaps * SLOPBYTES, 2 * nMaps, 0, 0); } } public void check(final TaskType type, Job job, JobStory spec, final TaskReport[] runTasks, long extraInputBytes, int extraInputRecords, long extraOutputBytes, int extraOutputRecords) throws Exception { long[] runInputRecords = new long[runTasks.length]; long[] runInputBytes = new long[runTasks.length]; long[] runOutputRecords = new long[runTasks.length]; long[] runOutputBytes = new long[runTasks.length]; long[] specInputRecords = new long[runTasks.length]; long[] specInputBytes = new long[runTasks.length]; long[] specOutputRecords = new long[runTasks.length]; long[] specOutputBytes = new long[runTasks.length]; for (int i = 0; i < runTasks.length; ++i) { final TaskInfo specInfo; final Counters counters = runTasks[i].getCounters(); switch (type) { case MAP: runInputBytes[i] = counters.findCounter("FileSystemCounters", "HDFS_BYTES_READ").getValue(); runInputRecords[i] = (int)counters.findCounter(MAP_INPUT_RECORDS).getValue(); runOutputBytes[i] = counters.findCounter(MAP_OUTPUT_BYTES).getValue(); runOutputRecords[i] = (int)counters.findCounter(MAP_OUTPUT_RECORDS).getValue(); specInfo = spec.getTaskInfo(TaskType.MAP, i); specInputRecords[i] = specInfo.getInputRecords(); 
specInputBytes[i] = specInfo.getInputBytes(); specOutputRecords[i] = specInfo.getOutputRecords(); specOutputBytes[i] = specInfo.getOutputBytes(); System.out.printf(type + " SPEC: %9d -> %9d :: %5d -> %5d\n", specInputBytes[i], specOutputBytes[i], specInputRecords[i], specOutputRecords[i]); System.out.printf(type + " RUN: %9d -> %9d :: %5d -> %5d\n", runInputBytes[i], runOutputBytes[i], runInputRecords[i], runOutputRecords[i]); break; case REDUCE: runInputBytes[i] = 0; runInputRecords[i] = (int)counters.findCounter(REDUCE_INPUT_RECORDS).getValue(); runOutputBytes[i] = counters.findCounter("FileSystemCounters", "HDFS_BYTES_WRITTEN").getValue(); runOutputRecords[i] = (int)counters.findCounter(REDUCE_OUTPUT_RECORDS).getValue(); specInfo = spec.getTaskInfo(TaskType.REDUCE, i); // There is no reliable counter for reduce input bytes. The // variable-length encoding of intermediate records and other noise // make this quantity difficult to estimate. The shuffle and spec // input bytes are included in debug output for reference, but are // not checked specInputBytes[i] = 0; specInputRecords[i] = specInfo.getInputRecords(); specOutputRecords[i] = specInfo.getOutputRecords(); specOutputBytes[i] = specInfo.getOutputBytes(); System.out.printf(type + " SPEC: (%9d) -> %9d :: %5d -> %5d\n", specInfo.getInputBytes(), specOutputBytes[i], specInputRecords[i], specOutputRecords[i]); System.out.printf(type + " RUN: (%9d) -> %9d :: %5d -> %5d\n", counters.findCounter(REDUCE_SHUFFLE_BYTES).getValue(), runOutputBytes[i], runInputRecords[i], runOutputRecords[i]); break; default: specInfo = null; fail("Unexpected type: " + type); } } // Check input bytes Arrays.sort(specInputBytes); Arrays.sort(runInputBytes); for (int i = 0; i < runTasks.length; ++i) { assertTrue("Mismatched " + type + " input bytes " + specInputBytes[i] + "/" + runInputBytes[i], eqPlusMinus(runInputBytes[i], specInputBytes[i], extraInputBytes)); } // Check input records Arrays.sort(specInputRecords); 
Arrays.sort(runInputRecords); for (int i = 0; i < runTasks.length; ++i) { assertTrue("Mismatched " + type + " input records " + specInputRecords[i] + "/" + runInputRecords[i], eqPlusMinus(runInputRecords[i], specInputRecords[i], extraInputRecords)); } // Check output bytes Arrays.sort(specOutputBytes); Arrays.sort(runOutputBytes); for (int i = 0; i < runTasks.length; ++i) { assertTrue("Mismatched " + type + " output bytes " + specOutputBytes[i] + "/" + runOutputBytes[i], eqPlusMinus(runOutputBytes[i], specOutputBytes[i], extraOutputBytes)); } // Check output records Arrays.sort(specOutputRecords); Arrays.sort(runOutputRecords); for (int i = 0; i < runTasks.length; ++i) { assertTrue("Mismatched " + type + " output records " + specOutputRecords[i] + "/" + runOutputRecords[i], eqPlusMinus(runOutputRecords[i], specOutputRecords[i], extraOutputRecords)); } } private static boolean eqPlusMinus(long a, long b, long x) { final long diff = Math.abs(a - b); return diff <= x; } @Override protected void onSuccess(Job job) { retiredJobs.add(job); } @Override protected void onFailure(Job job) { fail("Job failure: " + job); } } static class DebugGridmix extends Gridmix { private DebugJobFactory factory; private TestMonitor monitor; public void checkMonitor() throws Exception { monitor.verify(factory.getSubmitted()); } @Override protected JobMonitor createJobMonitor() { monitor = new TestMonitor(NJOBS + 1); // include data generation job return monitor; } @Override protected JobFactory createJobFactory(JobSubmitter submitter, String traceIn, Path scratchDir, Configuration conf, - CountDownLatch startFlag) throws IOException { + CountDownLatch startFlag, UserResolver userResolver) + throws IOException { factory = - new DebugJobFactory(submitter, scratchDir, NJOBS, conf, startFlag); + new DebugJobFactory(submitter, scratchDir, NJOBS, conf, startFlag, + userResolver); return factory; } } @Test public void testSubmit() throws Exception { final Path in = new 
Path("foo").makeQualified(dfs); - final Path out = new Path("/gridmix").makeQualified(dfs); + final Path out = DEST.makeQualified(dfs); final String[] argv = { "-D" + FilePool.GRIDMIX_MIN_FILE + "=0", "-D" + Gridmix.GRIDMIX_OUT_DIR + "=" + out, + "-D" + Gridmix.GRIDMIX_USR_RSV + "=" + EchoUserResolver.class.getName(), "-generate", String.valueOf(GENDATA) + "m", in.toString(), "-" // ignored by DebugGridmix }; DebugGridmix client = new DebugGridmix(); final Configuration conf = mrCluster.createJobConf(); - //conf.setInt(Gridmix.GRIDMIX_KEY_LEN, 2); + // allow synthetic users to create home directories + final Path root = new Path("/user"); + dfs.mkdirs(root, new FsPermission((short)0777)); + dfs.setPermission(root, new FsPermission((short)0777)); int res = ToolRunner.run(conf, client, argv); assertEquals("Client exited with nonzero status", 0, res); client.checkMonitor(); } } diff --git a/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestUserResolve.java b/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestUserResolve.java new file mode 100644 index 0000000..6e1cbf9 --- /dev/null +++ b/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestUserResolve.java @@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred.gridmix; + +import java.io.IOException; +import java.net.URI; + +import org.junit.BeforeClass; +import org.junit.Test; +import static org.junit.Assert.*; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation; + +public class TestUserResolve { + + static Path userlist; + + @BeforeClass + public static void writeUserList() throws IOException { + final Configuration conf = new Configuration(); + final FileSystem fs = FileSystem.getLocal(conf); + final Path wd = new Path(new Path( + System.getProperty("test.build.data", "/tmp")).makeQualified(fs), + "gridmixUserResolve"); + userlist = new Path(wd, "users"); + FSDataOutputStream out = null; + try { + out = fs.create(userlist, true); + out.writeBytes("user0,groupA,groupB,groupC\n"); + out.writeBytes("user1,groupA,groupC\n"); + out.writeBytes("user2,groupB\n"); + out.writeBytes("user3,groupA,groupB,groupC\n"); + } finally { + if (out != null) { + out.close(); + } + } + } + + @Test + public void testRoundRobinResolver() throws Exception { + final Configuration conf = new Configuration(); + final UserResolver rslv = new RoundRobinUserResolver(); + + boolean fail = false; + try { + rslv.setTargetUsers(null, conf); + } catch (IOException e) { + fail = true; + } + assertTrue("User list required for RoundRobinUserResolver", fail); + + rslv.setTargetUsers(new URI(userlist.toString()), conf); + assertEquals("user0", rslv.getTargetUgi("hfre0").getUserName()); + assertEquals("user1", rslv.getTargetUgi("hfre1").getUserName()); + assertEquals("user2", rslv.getTargetUgi("hfre2").getUserName()); + assertEquals("user0", 
rslv.getTargetUgi("hfre0").getUserName()); + assertEquals("user3", rslv.getTargetUgi("hfre3").getUserName()); + assertEquals("user0", rslv.getTargetUgi("hfre0").getUserName()); + assertEquals("user0", rslv.getTargetUgi("hfre4").getUserName()); + assertArrayEquals(new String[] { "groupA", "groupB", "groupC" }, + rslv.getTargetUgi("hfre0").getGroupNames()); + assertArrayEquals(new String[] { "groupB" }, + rslv.getTargetUgi("hfre2").getGroupNames()); + } + + @Test + public void testSubmitterResolver() throws Exception { + final Configuration conf = new Configuration(); + final UserResolver rslv = new SubmitterUserResolver(); + rslv.setTargetUsers(null, conf); + assertEquals(UnixUserGroupInformation.login(), + rslv.getTargetUgi((UserGroupInformation)null)); + } + +}
jaxlaw/hadoop-common
89cc1393a152a89a68957a8989a8bc0a16aa3629
MAPREDUCE:1372 from https://issues.apache.org/jira/secure/attachment/12430691/M1372-2.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 8527aef..0c6107a 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,500 +1,503 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383006 - MAPREDUCE-1361. Fix jobs' retirement from the JobTracker to prevent memory + MAPREDUCE-1372. Fixed a ConcurrentModificationException in jobtracker. + (Arun C Murthy via yhemanth) + + MAPREDUCE-1316. Fix jobs' retirement from the JobTracker to prevent memory leaks via stale references. (Amar Kamat via acmurthy) MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. (Amareshwari Sriramadasu via acmurthy) HADOOP-6460. Reinitializes buffers used for serializing responses in ipc server on exceeding maximum response size to free up Java heap. (suresh) MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. 
(Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. 
(Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. 
(Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. 
Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. 
Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. 
yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. 
http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. 
http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, tasks spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get the same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would be that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh the jobtracker's node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. 
Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. 
(Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/mapred/org/apache/hadoop/mapred/JobTracker.java b/src/mapred/org/apache/hadoop/mapred/JobTracker.java index 8d886d5..7e7bc03 100644 --- a/src/mapred/org/apache/hadoop/mapred/JobTracker.java +++ b/src/mapred/org/apache/hadoop/mapred/JobTracker.java @@ -1,671 +1,679 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.PrintWriter; import java.io.InputStreamReader; import java.io.OutputStreamWriter; import java.io.Writer; import java.net.BindException; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; import java.util.Vector; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import javax.security.auth.login.LoginException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalDirAllocator; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.http.HttpServer; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.RPC.VersionMismatch; import org.apache.hadoop.mapred.JobHistory.Keys; import org.apache.hadoop.mapred.JobHistory.Listener; import org.apache.hadoop.mapred.JobHistory.Values; import org.apache.hadoop.mapred.JobInProgress.KillInterruptedException; import 
org.apache.hadoop.mapred.JobStatusChangeEvent.EventType; import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext; import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerHealthStatus; import org.apache.hadoop.net.DNSToSwitchMapping; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.net.ScriptBasedMapping; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.PermissionChecker; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UnixUserGroupInformation; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.authorize.ConfiguredPolicy; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; import org.apache.hadoop.util.HostsFileReader; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.mapreduce.ClusterMetrics; import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; /******************************************************* * JobTracker is the central location for submitting and * tracking MR jobs in a network environment. 
* *******************************************************/ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmissionProtocol, TaskTrackerManager, RefreshAuthorizationPolicyProtocol, AdminOperationsProtocol { static{ Configuration.addDefaultResource("mapred-default.xml"); Configuration.addDefaultResource("mapred-site.xml"); } static long TASKTRACKER_EXPIRY_INTERVAL = 10 * 60 * 1000; static long RETIRE_JOB_INTERVAL; static long RETIRE_JOB_CHECK_INTERVAL; // The interval after which one fault of a tracker will be discarded, // if there are no faults during this. private static long UPDATE_FAULTY_TRACKER_INTERVAL = 24 * 60 * 60 * 1000; // The maximum percentage of trackers in cluster added // to the 'blacklist' across all the jobs. private static double MAX_BLACKLIST_PERCENT = 0.50; // A tracker is blacklisted across jobs only if number of // blacklists are X% above the average number of blacklists. // X is the blacklist threshold here. private double AVERAGE_BLACKLIST_THRESHOLD = 0.50; // The maximum number of blacklists for a tracker after which the // tracker could be blacklisted across all jobs private int MAX_BLACKLISTS_PER_TRACKER = 4; // Approximate number of heartbeats that could arrive JobTracker // in a second static final String JT_HEARTBEATS_IN_SECOND = "mapred.heartbeats.in.second"; private int NUM_HEARTBEATS_IN_SECOND; private final int DEFAULT_NUM_HEARTBEATS_IN_SECOND = 100; private final int MIN_NUM_HEARTBEATS_IN_SECOND = 1; // Scaling factor for heartbeats, used for testing only static final String JT_HEARTBEATS_SCALING_FACTOR = "mapreduce.jobtracker.heartbeats.scaling.factor"; private float HEARTBEATS_SCALING_FACTOR; private final float MIN_HEARTBEATS_SCALING_FACTOR = 0.01f; private final float DEFAULT_HEARTBEATS_SCALING_FACTOR = 1.0f; public static enum State { INITIALIZING, RUNNING } State state = State.INITIALIZING; private static final int FS_ACCESS_RETRY_PERIOD = 10000; private DNSToSwitchMapping dnsToSwitchMapping; 
private NetworkTopology clusterMap = new NetworkTopology(); private int numTaskCacheLevels; // the max level to which we cache tasks - private Set<Node> nodesAtMaxLevel = new HashSet<Node>(); + /** + * {@link #nodesAtMaxLevel} is using the keySet from {@link ConcurrentHashMap} + * so that it can be safely written to and iterated on via 2 separate threads. + * Note: It can only be iterated from a single thread which is feasible since + * the only iteration is done in {@link JobInProgress} under the + * {@link JobTracker} lock. + */ + private Set<Node> nodesAtMaxLevel = + Collections.newSetFromMap(new ConcurrentHashMap<Node, Boolean>()); private final TaskScheduler taskScheduler; private final List<JobInProgressListener> jobInProgressListeners = new CopyOnWriteArrayList<JobInProgressListener>(); private static final LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir"); // system directories are world-wide readable and owner readable final static FsPermission SYSTEM_DIR_PERMISSION = FsPermission.createImmutable((short) 0733); // rwx-wx-wx // system files should have 700 permission final static FsPermission SYSTEM_FILE_PERMISSION = FsPermission.createImmutable((short) 0700); // rwx------ /** * A client tried to submit a job before the Job Tracker was ready. */ public static class IllegalStateException extends IOException { public IllegalStateException(String msg) { super(msg); } } /** * The maximum no. of 'completed' (successful/failed/killed) * jobs kept in memory per-user. */ final int MAX_COMPLETE_USER_JOBS_IN_MEMORY; /** * The minimum time (in ms) that a job's information has to remain * in the JobTracker's memory before it is retired. */ static final int MIN_TIME_BEFORE_RETIRE = 0; private int nextJobId = 1; public static final Log LOG = LogFactory.getLog(JobTracker.class); /** * Start the JobTracker with given configuration. 
* * The conf will be modified to reflect the actual ports on which * the JobTracker is up and running if the user passes the port as * <code>zero</code>. * * @param conf configuration for the JobTracker. * @throws IOException */ public static JobTracker startTracker(JobConf conf ) throws IOException, InterruptedException { return startTracker(conf, generateNewIdentifier()); } public static JobTracker startTracker(JobConf conf, String identifier) throws IOException, InterruptedException { JobTracker result = null; while (true) { try { result = new JobTracker(conf, identifier); result.taskScheduler.setTaskTrackerManager(result); break; } catch (VersionMismatch e) { throw e; } catch (BindException e) { throw e; } catch (UnknownHostException e) { throw e; } catch (AccessControlException ace) { // in case of jobtracker not having right access // bail out throw ace; } catch (IOException e) { LOG.warn("Error starting tracker: " + StringUtils.stringifyException(e)); } Thread.sleep(1000); } if (result != null) { JobEndNotifier.startNotifier(); } return result; } public void stopTracker() throws IOException { JobEndNotifier.stopNotifier(); close(); } public long getProtocolVersion(String protocol, long clientVersion) throws IOException { if (protocol.equals(InterTrackerProtocol.class.getName())) { return InterTrackerProtocol.versionID; } else if (protocol.equals(JobSubmissionProtocol.class.getName())){ return JobSubmissionProtocol.versionID; } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){ return RefreshAuthorizationPolicyProtocol.versionID; } else if (protocol.equals(AdminOperationsProtocol.class.getName())){ return AdminOperationsProtocol.versionID; } else { throw new IOException("Unknown protocol to job tracker: " + protocol); } } /** * A thread to timeout tasks that have been assigned to task trackers, * but that haven't reported back yet. 
* Note that I included a stop() method, even though there is no place * where JobTrackers are cleaned up. */ private class ExpireLaunchingTasks implements Runnable { /** * This is a map of the tasks that have been assigned to task trackers, * but that have not yet been seen in a status report. * map: task-id -> time-assigned */ private Map<TaskAttemptID, Long> launchingTasks = new LinkedHashMap<TaskAttemptID, Long>(); public void run() { while (true) { try { // Every 3 minutes check for any tasks that are overdue Thread.sleep(TASKTRACKER_EXPIRY_INTERVAL/3); long now = System.currentTimeMillis(); LOG.debug("Starting launching task sweep"); synchronized (JobTracker.this) { synchronized (launchingTasks) { Iterator<Map.Entry<TaskAttemptID, Long>> itr = launchingTasks.entrySet().iterator(); while (itr.hasNext()) { Map.Entry<TaskAttemptID, Long> pair = itr.next(); TaskAttemptID taskId = pair.getKey(); long age = now - (pair.getValue()).longValue(); LOG.info(taskId + " is " + age + " ms debug."); if (age > TASKTRACKER_EXPIRY_INTERVAL) { LOG.info("Launching task " + taskId + " timed out."); TaskInProgress tip = null; tip = taskidToTIPMap.get(taskId); if (tip != null) { JobInProgress job = tip.getJob(); String trackerName = getAssignedTracker(taskId); TaskTrackerStatus trackerStatus = getTaskTrackerStatus(trackerName); // This might happen when the tasktracker has already // expired and this thread tries to call failedtask // again. expire tasktracker should have called failed // task! if (trackerStatus != null) job.failedTask(tip, taskId, "Error launching task", tip.isMapTask()? TaskStatus.Phase.MAP: TaskStatus.Phase.STARTING, TaskStatus.State.FAILED, trackerName); } itr.remove(); } else { // the tasks are sorted by start time, so once we find // one that we want to keep, we are done for this cycle. 
break; } } } } } catch (InterruptedException ie) { // all done break; } catch (Exception e) { LOG.error("Expire Launching Task Thread got exception: " + StringUtils.stringifyException(e)); } } } public void addNewTask(TaskAttemptID taskName) { synchronized (launchingTasks) { launchingTasks.put(taskName, System.currentTimeMillis()); } } public void removeTask(TaskAttemptID taskName) { synchronized (launchingTasks) { launchingTasks.remove(taskName); } } } /////////////////////////////////////////////////////// // Used to expire TaskTrackers that have gone down /////////////////////////////////////////////////////// class ExpireTrackers implements Runnable { public ExpireTrackers() { } /** * The run method lives for the life of the JobTracker, and removes TaskTrackers * that have not checked in for some time. */ public void run() { while (true) { try { // // Thread runs periodically to check whether trackers should be expired. // The sleep interval must be no more than half the maximum expiry time // for a task tracker. 
// Thread.sleep(TASKTRACKER_EXPIRY_INTERVAL / 3); // // Loop through all expired items in the queue // // Need to lock the JobTracker here since we are // manipulating it's data-structures via // ExpireTrackers.run -> JobTracker.lostTaskTracker -> // JobInProgress.failedTask -> JobTracker.markCompleteTaskAttempt // Also need to lock JobTracker before locking 'taskTracker' & // 'trackerExpiryQueue' to prevent deadlock: // @see {@link JobTracker.processHeartbeat(TaskTrackerStatus, boolean)} synchronized (JobTracker.this) { synchronized (taskTrackers) { synchronized (trackerExpiryQueue) { long now = System.currentTimeMillis(); TaskTrackerStatus leastRecent = null; while ((trackerExpiryQueue.size() > 0) && (leastRecent = trackerExpiryQueue.first()) != null && ((now - leastRecent.getLastSeen()) > TASKTRACKER_EXPIRY_INTERVAL)) { // Remove profile from head of queue trackerExpiryQueue.remove(leastRecent); String trackerName = leastRecent.getTrackerName(); // Figure out if last-seen time should be updated, or if tracker is dead TaskTracker current = getTaskTracker(trackerName); TaskTrackerStatus newProfile = (current == null ) ? null : current.getStatus(); // Items might leave the taskTracker set through other means; the // status stored in 'taskTrackers' might be null, which means the // tracker has already been destroyed. 
if (newProfile != null) { if ((now - newProfile.getLastSeen()) > TASKTRACKER_EXPIRY_INTERVAL) { removeTracker(current); // remove the mapping from the hosts list String hostname = newProfile.getHost(); hostnameToTaskTracker.get(hostname).remove(trackerName); } else { // Update time by inserting latest profile trackerExpiryQueue.add(newProfile); } } } } } } } catch (InterruptedException iex) { break; } catch (Exception t) { LOG.error("Tracker Expiry Thread got exception: " + StringUtils.stringifyException(t)); } } } } synchronized void historyFileCopied(JobID jobid, String historyFile) { JobInProgress job = getJob(jobid); if (job != null) { //found in main cache job.setHistoryFileCopied(); if (historyFile != null) { job.setHistoryFile(historyFile); } return; } RetireJobInfo jobInfo = retireJobs.get(jobid); if (jobInfo != null) { //found in retired cache if (historyFile != null) { jobInfo.setHistoryFile(historyFile); } } } static class RetireJobInfo { final JobStatus status; final JobProfile profile; final long finishTime; private String historyFile; RetireJobInfo(JobStatus status, JobProfile profile, long finishTime, String historyFile) { this.status = status; this.profile = profile; this.finishTime = finishTime; this.historyFile = historyFile; } void setHistoryFile(String file) { this.historyFile = file; } String getHistoryFile() { return historyFile; } } /////////////////////////////////////////////////////// // Used to remove old finished Jobs that have been around for too long /////////////////////////////////////////////////////// class RetireJobs implements Runnable { private final Map<JobID, RetireJobInfo> jobIDStatusMap = new HashMap<JobID, RetireJobInfo>(); private final LinkedList<RetireJobInfo> jobRetireInfoQ = new LinkedList<RetireJobInfo>(); public RetireJobs() { } synchronized void addToCache(JobInProgress job) { RetireJobInfo info = new RetireJobInfo(job.getStatus(), job.getProfile(), job.getFinishTime(), job.getHistoryFile()); 
jobRetireInfoQ.add(info); jobIDStatusMap.put(info.status.getJobID(), info); if (jobRetireInfoQ.size() > retiredJobsCacheSize) { RetireJobInfo removed = jobRetireInfoQ.remove(); jobIDStatusMap.remove(removed.status.getJobID()); LOG.info("Retired job removed from cache " + removed.status.getJobID()); } } synchronized RetireJobInfo get(JobID jobId) { return jobIDStatusMap.get(jobId); } @SuppressWarnings("unchecked") synchronized LinkedList<RetireJobInfo> getAll() { return (LinkedList<RetireJobInfo>) jobRetireInfoQ.clone(); } synchronized LinkedList<JobStatus> getAllJobStatus() { LinkedList<JobStatus> list = new LinkedList<JobStatus>(); for (RetireJobInfo info : jobRetireInfoQ) { list.add(info.status); } return list; } private boolean minConditionToRetire(JobInProgress job, long now) { return job.getStatus().getRunState() != JobStatus.RUNNING && job.getStatus().getRunState() != JobStatus.PREP && (job.getFinishTime() + MIN_TIME_BEFORE_RETIRE < now) && job.isHistoryFileCopied(); } /** * The run method lives for the life of the JobTracker, * and removes Jobs that are not still running, but which * finished a long time ago. 
*/ public void run() { while (true) { try { Thread.sleep(RETIRE_JOB_CHECK_INTERVAL); List<JobInProgress> retiredJobs = new ArrayList<JobInProgress>(); long now = System.currentTimeMillis(); long retireBefore = now - RETIRE_JOB_INTERVAL; synchronized (jobs) { for(JobInProgress job: jobs.values()) { if (minConditionToRetire(job, now) && (job.getFinishTime() < retireBefore)) { retiredJobs.add(job); } } } synchronized (userToJobsMap) { Iterator<Map.Entry<String, ArrayList<JobInProgress>>> userToJobsMapIt = userToJobsMap.entrySet().iterator(); while (userToJobsMapIt.hasNext()) { Map.Entry<String, ArrayList<JobInProgress>> entry = userToJobsMapIt.next(); ArrayList<JobInProgress> userJobs = entry.getValue(); Iterator<JobInProgress> it = userJobs.iterator(); while (it.hasNext() && userJobs.size() > MAX_COMPLETE_USER_JOBS_IN_MEMORY) { JobInProgress jobUser = it.next(); if (retiredJobs.contains(jobUser)) { LOG.info("Removing from userToJobsMap: " + jobUser.getJobID()); it.remove(); } else if (minConditionToRetire(jobUser, now)) { LOG.info("User limit exceeded. 
Marking job: " + jobUser.getJobID() + " for retire."); retiredJobs.add(jobUser); it.remove(); } } if (userJobs.isEmpty()) { userToJobsMapIt.remove(); } } } if (!retiredJobs.isEmpty()) { synchronized (JobTracker.this) { synchronized (jobs) { synchronized (taskScheduler) { for (JobInProgress job: retiredJobs) { removeJobTasks(job); jobs.remove(job.getProfile().getJobID()); for (JobInProgressListener l : jobInProgressListeners) { l.jobRemoved(job); } String jobUser = job.getProfile().getUser(); LOG.info("Retired job with id: '" + job.getProfile().getJobID() + "' of user '" + jobUser + "'"); // clean up job files from the local disk JobHistory.JobInfo.cleanupJob(job.getProfile().getJobID()); addToCache(job); } } } } } } catch (InterruptedException t) { break; } catch (Throwable t) { LOG.error("Error in retiring job:\n" + StringUtils.stringifyException(t)); } } } } enum ReasonForBlackListing { EXCEEDING_FAILURES, NODE_UNHEALTHY } // The FaultInfo which indicates the number of faults of a tracker // and when the last fault occurred // and whether the tracker is blacklisted across all jobs or not private static class FaultInfo { static final String FAULT_FORMAT_STRING = "%d failures on the tracker"; int numFaults = 0; long lastUpdated; boolean blacklisted; private boolean isHealthy; private HashMap<ReasonForBlackListing, String>rfbMap; FaultInfo() { numFaults = 0; lastUpdated = System.currentTimeMillis(); blacklisted = false; rfbMap = new HashMap<ReasonForBlackListing, String>(); } void setFaultCount(int num) { numFaults = num; } void setLastUpdated(long timeStamp) { lastUpdated = timeStamp; } int getFaultCount() { return numFaults; } long getLastUpdated() { return lastUpdated; } boolean isBlacklisted() { return blacklisted; } void setBlacklist(ReasonForBlackListing rfb, String trackerFaultReport) { blacklisted = true; this.rfbMap.put(rfb, trackerFaultReport); } public void setHealthy(boolean isHealthy) { this.isHealthy = isHealthy; } public boolean isHealthy() { return 
isHealthy; } public String getTrackerFaultReport() { StringBuffer sb = new StringBuffer(); for(String reasons : rfbMap.values()) { sb.append(reasons); sb.append("\n"); } return sb.toString(); } Set<ReasonForBlackListing> getReasonforblacklisting() { return this.rfbMap.keySet(); } public void unBlacklist() { this.blacklisted = false; this.rfbMap.clear(); } public boolean removeBlackListedReason(ReasonForBlackListing rfb) { String str = rfbMap.remove(rfb); return str!=null; } @@ -2298,1043 +2306,1045 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, this.retireJobsThread.interrupt(); try { this.retireJobsThread.join(); } catch (InterruptedException ex) { ex.printStackTrace(); } } if (taskScheduler != null) { taskScheduler.terminate(); } if (this.expireLaunchingTaskThread != null && this.expireLaunchingTaskThread.isAlive()) { LOG.info("Stopping expireLaunchingTasks"); this.expireLaunchingTaskThread.interrupt(); try { this.expireLaunchingTaskThread.join(); } catch (InterruptedException ex) { ex.printStackTrace(); } } if (this.completedJobsStoreThread != null && this.completedJobsStoreThread.isAlive()) { LOG.info("Stopping completedJobsStore thread"); this.completedJobsStoreThread.interrupt(); try { this.completedJobsStoreThread.join(); } catch (InterruptedException ex) { ex.printStackTrace(); } } LOG.info("stopped all jobtracker services"); return; } /////////////////////////////////////////////////////// // Maintain lookup tables; called by JobInProgress // and TaskInProgress /////////////////////////////////////////////////////// void createTaskEntry(TaskAttemptID taskid, String taskTracker, TaskInProgress tip) { LOG.info("Adding task (" + tip.getAttemptType(taskid) + ") " + "'" + taskid + "' to tip " + tip.getTIPId() + ", for tracker '" + taskTracker + "'"); // taskid --> tracker taskidToTrackerMap.put(taskid, taskTracker); // tracker --> taskid Set<TaskAttemptID> taskset = trackerToTaskMap.get(taskTracker); if (taskset == null) { taskset = 
new TreeSet<TaskAttemptID>(); trackerToTaskMap.put(taskTracker, taskset); } taskset.add(taskid); // taskid --> TIP taskidToTIPMap.put(taskid, tip); } void removeTaskEntry(TaskAttemptID taskid) { // taskid --> tracker String tracker = taskidToTrackerMap.remove(taskid); // tracker --> taskid if (tracker != null) { Set<TaskAttemptID> trackerSet = trackerToTaskMap.get(tracker); if (trackerSet != null) { trackerSet.remove(taskid); } } // taskid --> TIP if (taskidToTIPMap.remove(taskid) != null) { LOG.info("Removing task '" + taskid + "'"); } } /** * Mark a 'task' for removal later. * This function assumes that the JobTracker is locked on entry. * * @param taskTracker the tasktracker at which the 'task' was running * @param taskid completed (success/failure/killed) task */ void markCompletedTaskAttempt(String taskTracker, TaskAttemptID taskid) { // tracker --> taskid Set<TaskAttemptID> taskset = trackerToMarkedTasksMap.get(taskTracker); if (taskset == null) { taskset = new TreeSet<TaskAttemptID>(); trackerToMarkedTasksMap.put(taskTracker, taskset); } taskset.add(taskid); LOG.debug("Marked '" + taskid + "' from '" + taskTracker + "'"); } /** * Mark all 'non-running' jobs of the job for pruning. * This function assumes that the JobTracker is locked on entry. 
* * @param job the completed job */ void markCompletedJob(JobInProgress job) { for (TaskInProgress tip : job.getTasks(TaskType.JOB_SETUP)) { for (TaskStatus taskStatus : tip.getTaskStatuses()) { if (taskStatus.getRunState() != TaskStatus.State.RUNNING && taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && taskStatus.getRunState() != TaskStatus.State.UNASSIGNED) { markCompletedTaskAttempt(taskStatus.getTaskTracker(), taskStatus.getTaskID()); } } } for (TaskInProgress tip : job.getTasks(TaskType.MAP)) { for (TaskStatus taskStatus : tip.getTaskStatuses()) { if (taskStatus.getRunState() != TaskStatus.State.RUNNING && taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && taskStatus.getRunState() != TaskStatus.State.FAILED_UNCLEAN && taskStatus.getRunState() != TaskStatus.State.KILLED_UNCLEAN && taskStatus.getRunState() != TaskStatus.State.UNASSIGNED) { markCompletedTaskAttempt(taskStatus.getTaskTracker(), taskStatus.getTaskID()); } } } for (TaskInProgress tip : job.getTasks(TaskType.REDUCE)) { for (TaskStatus taskStatus : tip.getTaskStatuses()) { if (taskStatus.getRunState() != TaskStatus.State.RUNNING && taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && taskStatus.getRunState() != TaskStatus.State.FAILED_UNCLEAN && taskStatus.getRunState() != TaskStatus.State.KILLED_UNCLEAN && taskStatus.getRunState() != TaskStatus.State.UNASSIGNED) { markCompletedTaskAttempt(taskStatus.getTaskTracker(), taskStatus.getTaskID()); } } } } /** * Remove all 'marked' tasks running on a given {@link TaskTracker} * from the {@link JobTracker}'s data-structures. * This function assumes that the JobTracker is locked on entry. 
* * @param taskTracker tasktracker whose 'non-running' tasks are to be purged */ private void removeMarkedTasks(String taskTracker) { // Purge all the 'marked' tasks which were running at taskTracker Set<TaskAttemptID> markedTaskSet = trackerToMarkedTasksMap.get(taskTracker); if (markedTaskSet != null) { for (TaskAttemptID taskid : markedTaskSet) { removeTaskEntry(taskid); if (LOG.isDebugEnabled()) { LOG.debug("Removed marked completed task '" + taskid + "' from '" + taskTracker + "'"); } } // Clear trackerToMarkedTasksMap.remove(taskTracker); } } /** * Call {@link #removeTaskEntry(String)} for each of the * job's tasks. * When the JobTracker is retiring the long-completed * job, either because it has outlived {@link #RETIRE_JOB_INTERVAL} * or the limit of {@link #MAX_COMPLETE_USER_JOBS_IN_MEMORY} jobs * has been reached, we can afford to nuke all it's tasks; a little * unsafe, but practically feasible. * * @param job the job about to be 'retired' */ synchronized void removeJobTasks(JobInProgress job) { // iterate over all the task types for (TaskType type : TaskType.values()) { // iterate over all the tips of the type under consideration for (TaskInProgress tip : job.getTasks(type)) { // iterate over all the task-ids in the tip under consideration for (TaskAttemptID id : tip.getAllTaskAttemptIDs()) { // remove the task-id entry from the jobtracker removeTaskEntry(id); } } } } /** * Safe clean-up all data structures at the end of the * job (success/failure/killed). * Here we also ensure that for a given user we maintain * information for only MAX_COMPLETE_USER_JOBS_IN_MEMORY jobs * on the JobTracker. * * @param job completed job. 
*/ synchronized void finalizeJob(JobInProgress job) { // Mark the 'non-running' tasks for pruning markCompletedJob(job); JobEndNotifier.registerNotification(job.getJobConf(), job.getStatus()); // start the merge of log files JobID id = job.getStatus().getJobID(); if (job.hasRestarted()) { try { JobHistory.JobInfo.finalizeRecovery(id, job.getJobConf()); } catch (IOException ioe) { LOG.info("Failed to finalize the log file recovery for job " + id, ioe); } } // mark the job as completed try { JobHistory.JobInfo.markCompleted(id); } catch (IOException ioe) { LOG.info("Failed to mark job " + id + " as completed!", ioe); } final JobTrackerInstrumentation metrics = getInstrumentation(); metrics.finalizeJob(conf, id); long now = System.currentTimeMillis(); // mark the job for cleanup at all the trackers addJobForCleanup(id); try { File userFileForJob = new File(lDirAlloc.getLocalPathToRead(SUBDIR + "/" + id, conf).toString()); if (userFileForJob != null) { userFileForJob.delete(); } } catch (IOException ioe) { LOG.info("Failed to delete job id mapping for job " + id, ioe); } // add the blacklisted trackers to potentially faulty list if (job.getStatus().getRunState() == JobStatus.SUCCEEDED) { if (job.getNoOfBlackListedTrackers() > 0) { for (String hostName : job.getBlackListedTrackers()) { faultyTrackers.incrementFaults(hostName); } } } String jobUser = job.getProfile().getUser(); //add to the user to jobs mapping synchronized (userToJobsMap) { ArrayList<JobInProgress> userJobs = userToJobsMap.get(jobUser); if (userJobs == null) { userJobs = new ArrayList<JobInProgress>(); userToJobsMap.put(jobUser, userJobs); } userJobs.add(job); } } /////////////////////////////////////////////////////// // Accessors for objects that want info on jobs, tasks, // trackers, etc. 
/////////////////////////////////////////////////////// public int getTotalSubmissions() { return totalSubmissions; } public String getJobTrackerMachine() { return localMachine; } /** * Get the unique identifier (ie. timestamp) of this job tracker start. * @return a string with a unique identifier */ public String getTrackerIdentifier() { return trackerIdentifier; } public int getTrackerPort() { return port; } public int getInfoPort() { return infoPort; } public long getStartTime() { return startTime; } public Vector<JobInProgress> runningJobs() { Vector<JobInProgress> v = new Vector<JobInProgress>(); for (Iterator it = jobs.values().iterator(); it.hasNext();) { JobInProgress jip = (JobInProgress) it.next(); JobStatus status = jip.getStatus(); if (status.getRunState() == JobStatus.RUNNING) { v.add(jip); } } return v; } /** * Version that is called from a timer thread, and therefore needs to be * careful to synchronize. */ public synchronized List<JobInProgress> getRunningJobs() { synchronized (jobs) { return runningJobs(); } } public Vector<JobInProgress> failedJobs() { Vector<JobInProgress> v = new Vector<JobInProgress>(); for (Iterator it = jobs.values().iterator(); it.hasNext();) { JobInProgress jip = (JobInProgress) it.next(); JobStatus status = jip.getStatus(); if ((status.getRunState() == JobStatus.FAILED) || (status.getRunState() == JobStatus.KILLED)) { v.add(jip); } } return v; } public Vector<JobInProgress> completedJobs() { Vector<JobInProgress> v = new Vector<JobInProgress>(); for (Iterator it = jobs.values().iterator(); it.hasNext();) { JobInProgress jip = (JobInProgress) it.next(); JobStatus status = jip.getStatus(); if (status.getRunState() == JobStatus.SUCCEEDED) { v.add(jip); } } return v; } /** * Get all the task trackers in the cluster * * @return {@link Collection} of {@link TaskTrackerStatus} */ // lock to taskTrackers should hold JT lock first. 
public synchronized Collection<TaskTrackerStatus> taskTrackers() {
  Collection<TaskTrackerStatus> ttStatuses;
  synchronized (taskTrackers) {
    // copy out the statuses so callers never iterate the live map
    ttStatuses =
      new ArrayList<TaskTrackerStatus>(taskTrackers.values().size());
    for (TaskTracker tt : taskTrackers.values()) {
      ttStatuses.add(tt.getStatus());
    }
  }
  return ttStatuses;
}

/**
 * Get the active task tracker statuses in the cluster
 *
 * @return {@link Collection} of active {@link TaskTrackerStatus}
 */
// This method is synchronized to make sure that the locking order
// "taskTrackers lock followed by faultyTrackers.potentiallyFaultyTrackers
// lock" is under JobTracker lock to avoid deadlocks.
synchronized public Collection<TaskTrackerStatus> activeTaskTrackers() {
  Collection<TaskTrackerStatus> activeTrackers =
    new ArrayList<TaskTrackerStatus>();
  synchronized (taskTrackers) {
    for ( TaskTracker tt : taskTrackers.values()) {
      TaskTrackerStatus status = tt.getStatus();
      // active == not blacklisted at the host level
      if (!faultyTrackers.isBlacklisted(status.getHost())) {
        activeTrackers.add(status);
      }
    }
  }
  return activeTrackers;
}

/**
 * Get the active and blacklisted task tracker names in the cluster. The first
 * element in the returned list contains the list of active tracker names.
 * The second element in the returned list contains the list of blacklisted
 * tracker names.
 */
// This method is synchronized to make sure that the locking order
// "taskTrackers lock followed by faultyTrackers.potentiallyFaultyTrackers
// lock" is under JobTracker lock to avoid deadlocks.
synchronized public List<List<String>> taskTrackerNames() {
  List<String> activeTrackers = new ArrayList<String>();
  List<String> blacklistedTrackers = new ArrayList<String>();
  synchronized (taskTrackers) {
    // partition tracker names by host-level blacklist status
    for (TaskTracker tt : taskTrackers.values()) {
      TaskTrackerStatus status = tt.getStatus();
      if (!faultyTrackers.isBlacklisted(status.getHost())) {
        activeTrackers.add(status.getTrackerName());
      } else {
        blacklistedTrackers.add(status.getTrackerName());
      }
    }
  }
  // element 0: active, element 1: blacklisted (documented contract above)
  List<List<String>> result = new ArrayList<List<String>>(2);
  result.add(activeTrackers);
  result.add(blacklistedTrackers);
  return result;
}

/**
 * Get the blacklisted task tracker statuses in the cluster
 *
 * @return {@link Collection} of blacklisted {@link TaskTrackerStatus}
 */
// This method is synchronized to make sure that the locking order
// "taskTrackers lock followed by faultyTrackers.potentiallyFaultyTrackers
// lock" is under JobTracker lock to avoid deadlocks.
synchronized public Collection<TaskTrackerStatus> blacklistedTaskTrackers() {
  Collection<TaskTrackerStatus> blacklistedTrackers =
    new ArrayList<TaskTrackerStatus>();
  synchronized (taskTrackers) {
    for (TaskTracker tt : taskTrackers.values()) {
      TaskTrackerStatus status = tt.getStatus();
      if (faultyTrackers.isBlacklisted(status.getHost())) {
        blacklistedTrackers.add(status);
      }
    }
  }
  return blacklistedTrackers;
}

// Number of faults recorded against the given host.
synchronized int getFaultCount(String hostName) {
  return faultyTrackers.getFaultCount(hostName);
}

/**
 * Get the number of blacklisted trackers across all the jobs
 *
 * @return
 */
int getBlacklistedTrackerCount() {
  return faultyTrackers.numBlacklistedTrackers;
}

/**
 * Whether the tracker is blacklisted or not
 *
 * @param trackerID
 *
 * @return true if blacklisted, false otherwise
 */
synchronized public boolean isBlacklisted(String trackerID) {
  TaskTrackerStatus status = getTaskTrackerStatus(trackerID);
  if (status != null) {
    return faultyTrackers.isBlacklisted(status.getHost());
  }
  // unknown tracker: treat as not blacklisted
  return false;
}

// lock to taskTrackers should hold JT lock first.
synchronized public TaskTrackerStatus getTaskTrackerStatus(String trackerID) {
  TaskTracker taskTracker;
  synchronized (taskTrackers) {
    taskTracker = taskTrackers.get(trackerID);
  }
  // null when the tracker is unknown to this JobTracker
  return (taskTracker == null) ? null : taskTracker.getStatus();
}

// lock to taskTrackers should hold JT lock first.
synchronized public TaskTracker getTaskTracker(String trackerID) {
  synchronized (taskTrackers) {
    return taskTrackers.get(trackerID);
  }
}

// Package-private accessor for the per-tracker statistics aggregator.
JobTrackerStatistics getStatistics() {
  return statistics;
}

/**
 * Adds a new node to the jobtracker. It involves adding it to the expiry
 * thread and adding it for resolution
 *
 * Assumes JobTracker, taskTrackers and trackerExpiryQueue is locked on entry
 *
 * @param status Task Tracker's status
 */
private void addNewTracker(TaskTracker taskTracker) {
  TaskTrackerStatus status = taskTracker.getStatus();
  trackerExpiryQueue.add(status);

  // Register the tracker if its not registered
  String hostname = status.getHost();
  if (getNode(status.getTrackerName()) == null) {
    // Making the network location resolution inline ..
resolveAndAddToTopology(hostname); } // add it to the set of tracker per host Set<TaskTracker> trackers = hostnameToTaskTracker.get(hostname); if (trackers == null) { trackers = Collections.synchronizedSet(new HashSet<TaskTracker>()); hostnameToTaskTracker.put(hostname, trackers); } statistics.taskTrackerAdded(status.getTrackerName()); getInstrumentation().addTrackers(1); LOG.info("Adding tracker " + status.getTrackerName() + " to host " + hostname); trackers.add(taskTracker); } public Node resolveAndAddToTopology(String name) { List <String> tmpList = new ArrayList<String>(1); tmpList.add(name); List <String> rNameList = dnsToSwitchMapping.resolve(tmpList); String rName = rNameList.get(0); String networkLoc = NodeBase.normalize(rName); return addHostToNodeMapping(name, networkLoc); } private Node addHostToNodeMapping(String host, String networkLoc) { - Node node; - if ((node = clusterMap.getNode(networkLoc+"/"+host)) == null) { - node = new NodeBase(host, networkLoc); - clusterMap.add(node); - if (node.getLevel() < getNumTaskCacheLevels()) { - LOG.fatal("Got a host whose level is: " + node.getLevel() + "." - + " Should get at least a level of value: " - + getNumTaskCacheLevels()); - try { - stopTracker(); - } catch (IOException ie) { - LOG.warn("Exception encountered during shutdown: " - + StringUtils.stringifyException(ie)); - System.exit(-1); + Node node = null; + synchronized (nodesAtMaxLevel) { + if ((node = clusterMap.getNode(networkLoc+"/"+host)) == null) { + node = new NodeBase(host, networkLoc); + clusterMap.add(node); + if (node.getLevel() < getNumTaskCacheLevels()) { + LOG.fatal("Got a host whose level is: " + node.getLevel() + "." 
+ + " Should get at least a level of value: " + + getNumTaskCacheLevels()); + try { + stopTracker(); + } catch (IOException ie) { + LOG.warn("Exception encountered during shutdown: " + + StringUtils.stringifyException(ie)); + System.exit(-1); + } } + hostnameToNodeMap.put(host, node); + // Make an entry for the node at the max level in the cache + nodesAtMaxLevel.add(getParentNode(node, getNumTaskCacheLevels() - 1)); } - hostnameToNodeMap.put(host, node); - // Make an entry for the node at the max level in the cache - nodesAtMaxLevel.add(getParentNode(node, getNumTaskCacheLevels() - 1)); } return node; } /** * Returns a collection of nodes at the max level */ public Collection<Node> getNodesAtMaxLevel() { return nodesAtMaxLevel; } public static Node getParentNode(Node node, int level) { for (int i = 0; i < level; ++i) { node = node.getParent(); } return node; } /** * Return the Node in the network topology that corresponds to the hostname */ public Node getNode(String name) { return hostnameToNodeMap.get(name); } public int getNumTaskCacheLevels() { return numTaskCacheLevels; } public int getNumResolvedTaskTrackers() { return numResolved; } public int getNumberOfUniqueHosts() { return uniqueHostsMap.size(); } public void addJobInProgressListener(JobInProgressListener listener) { jobInProgressListeners.add(listener); } public void removeJobInProgressListener(JobInProgressListener listener) { jobInProgressListeners.remove(listener); } // Update the listeners about the job // Assuming JobTracker is locked on entry. private void updateJobInProgressListeners(JobChangeEvent event) { for (JobInProgressListener listener : jobInProgressListeners) { listener.jobUpdated(event); } } /** * Return the {@link QueueManager} associated with the JobTracker. 
*/
public QueueManager getQueueManager() {
  return queueManager;
}

////////////////////////////////////////////////////
// InterTrackerProtocol
////////////////////////////////////////////////////

// Build version of this JobTracker; trackers compare against their own.
public String getBuildVersion() throws IOException{
  return VersionInfo.getBuildVersion();
}

/**
 * The periodic heartbeat mechanism between the {@link TaskTracker} and
 * the {@link JobTracker}.
 *
 * The {@link JobTracker} processes the status information sent by the
 * {@link TaskTracker} and responds with instructions to start/stop
 * tasks or jobs, and also 'reset' instructions during contingencies.
 */
public synchronized HeartbeatResponse heartbeat(TaskTrackerStatus status,
                                                boolean restarted,
                                                boolean initialContact,
                                                boolean acceptNewTasks,
                                                short responseId)
  throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got heartbeat from: " + status.getTrackerName() +
              " (restarted: " + restarted +
              " initialContact: " + initialContact +
              " acceptNewTasks: " + acceptNewTasks + ")" +
              " with responseId: " + responseId);
  }

  // Make sure heartbeat is from a tasktracker allowed by the jobtracker.
  if (!acceptTaskTracker(status)) {
    throw new DisallowedTaskTrackerException(status);
  }

  // First check if the last heartbeat response got through
  String trackerName = status.getTrackerName();
  long now = System.currentTimeMillis();
  boolean isBlacklisted = false;
  if (restarted) {
    // a restarted tracker starts with a clean fault record
    faultyTrackers.markTrackerHealthy(status.getHost());
  } else {
    isBlacklisted =
      faultyTrackers.shouldAssignTasksToTracker(status.getHost(), now);
  }

  HeartbeatResponse prevHeartbeatResponse =
    trackerToHeartbeatResponseMap.get(trackerName);
  boolean addRestartInfo = false;

  if (initialContact != true) {
    // If this isn't the 'initial contact' from the tasktracker,
    // there is something seriously wrong if the JobTracker has
    // no record of the 'previous heartbeat'; if so, ask the
    // tasktracker to re-initialize itself.
    if (prevHeartbeatResponse == null) {
      // This is the first heartbeat from the old tracker to the newly
      // started JobTracker
      if (hasRestarted()) {
        addRestartInfo = true;
        // inform the recovery manager about this tracker joining back
        recoveryManager.unMarkTracker(trackerName);
      } else {
        // Jobtracker might have restarted but no recovery is needed
        // otherwise this code should not be reached
        LOG.warn("Serious problem, cannot find record of 'previous' " +
                 "heartbeat for '" + trackerName +
                 "'; reinitializing the tasktracker");
        return new HeartbeatResponse(responseId,
            new TaskTrackerAction[] {new ReinitTrackerAction()});
      }
    } else {
      // It is completely safe to not process a 'duplicate' heartbeat from a
      // {@link TaskTracker} since it resends the heartbeat when rpcs are
      // lost see {@link TaskTracker.transmitHeartbeat()};
      // acknowledge it by re-sending the previous response to let the
      // {@link TaskTracker} go forward.
      if (prevHeartbeatResponse.getResponseId() != responseId) {
        LOG.info("Ignoring 'duplicate' heartbeat from '" +
                 trackerName + "'; resending the previous 'lost' response");
        return prevHeartbeatResponse;
      }
    }
  }

  // Process this heartbeat
  short newResponseId = (short)(responseId + 1);
  status.setLastSeen(now);
  if (!processHeartbeat(status, initialContact)) {
    // heartbeat rejected (e.g. status from an unknown tracker): force the
    // tracker to re-initialize
    if (prevHeartbeatResponse != null) {
      trackerToHeartbeatResponseMap.remove(trackerName);
    }
    return new HeartbeatResponse(newResponseId,
                 new TaskTrackerAction[] {new ReinitTrackerAction()});
  }

  // Initialize the response to be sent for the heartbeat
  HeartbeatResponse response = new HeartbeatResponse(newResponseId, null);
  List<TaskTrackerAction> actions = new ArrayList<TaskTrackerAction>();
  // re-read the blacklist state: processHeartbeat may have changed it
  isBlacklisted = faultyTrackers.isBlacklisted(status.getHost());
  // Check for new tasks to be executed on the tasktracker
  if (recoveryManager.shouldSchedule() && acceptNewTasks && !isBlacklisted) {
    TaskTrackerStatus taskTrackerStatus = getTaskTrackerStatus(trackerName) ;
    if (taskTrackerStatus == null) {
      LOG.warn("Unknown task tracker polling; ignoring: " + trackerName);
    } else {
      // setup/cleanup tasks take priority over regular scheduling
      List<Task> tasks = getSetupAndCleanupTasks(taskTrackerStatus);
      if (tasks == null ) {
        tasks = taskScheduler.assignTasks(taskTrackers.get(trackerName));
      }
      if (tasks != null) {
        for (Task task : tasks) {
          expireLaunchingTasks.addNewTask(task.getTaskID());
          LOG.debug(trackerName + " -> LaunchTask: " + task.getTaskID());
          actions.add(new LaunchTaskAction(task));
        }
      }
    }
  }

  // Check for tasks to be killed
  List<TaskTrackerAction> killTasksList = getTasksToKill(trackerName);
  if (killTasksList != null) {
    actions.addAll(killTasksList);
  }

  // Check for jobs to be killed/cleanedup
  List<TaskTrackerAction> killJobsList = getJobsForCleanup(trackerName);
  if (killJobsList != null) {
    actions.addAll(killJobsList);
  }

  // Check for tasks whose outputs can be saved
  List<TaskTrackerAction> commitTasksList = getTasksToSave(status);
  if (commitTasksList != null) {
    actions.addAll(commitTasksList);
  }

  // calculate next heartbeat interval and put in heartbeat response
  int nextInterval = getNextHeartbeatInterval();
  response.setHeartbeatInterval(nextInterval);
  response.setActions(
                      actions.toArray(new TaskTrackerAction[actions.size()]));

  // check if the restart info is req
  if (addRestartInfo) {
    response.setRecoveredJobs(recoveryManager.getJobsToRecover());
  }

  // Update the trackerToHeartbeatResponseMap
  trackerToHeartbeatResponseMap.put(trackerName, response);

  // Done processing the hearbeat, now remove 'marked' tasks
  removeMarkedTasks(trackerName);

  return response;
}

/**
 * Calculates next heartbeat interval using cluster size.
 * Heartbeat interval is incremented by 1 second for every 100 nodes by default.
 * @return next heartbeat interval.
*/
public int getNextHeartbeatInterval() {
  // get the no of task trackers
  int clusterSize = getClusterStatus().getTaskTrackers();
  int heartbeatInterval =  Math.max(
                              (int)(1000 * HEARTBEATS_SCALING_FACTOR *
                                    Math.ceil((double)clusterSize /
                                              NUM_HEARTBEATS_IN_SECOND)),
                              HEARTBEAT_INTERVAL_MIN) ;
  return heartbeatInterval;
}

/**
 * Return if the specified tasktracker is in the hosts list,
 * if one was configured.  If none was configured, then this
 * returns true.
 */
private boolean inHostsList(TaskTrackerStatus status) {
  Set<String> hostsList = hostsReader.getHosts();
  return (hostsList.isEmpty() || hostsList.contains(status.getHost()));
}

/**
 * Return if the specified tasktracker is in the exclude list.
 */
private boolean inExcludedHostsList(TaskTrackerStatus status) {
  Set<String> excludeList = hostsReader.getExcludedHosts();
  return excludeList.contains(status.getHost());
}

/**
 * Returns true if the tasktracker is in the hosts list and
 * not in the exclude list.
 */
private boolean acceptTaskTracker(TaskTrackerStatus status) {
  return (inHostsList(status) && !inExcludedHostsList(status));
}

/**
 * Update the last recorded status for the given task tracker.
 * It assumes that the taskTrackers are locked on entry.
 * @param trackerName The name of the tracker
 * @param status The new status for the task tracker; null removes the tracker
 * @return Was an old status found?
 */
private boolean updateTaskTrackerStatus(String trackerName,
                                        TaskTrackerStatus status) {
  TaskTracker tt = getTaskTracker(trackerName);
  TaskTrackerStatus oldStatus = (tt == null) ? null : tt.getStatus();
  if (oldStatus != null) {
    // Back out all counters/metrics contributed by the previous status
    totalMaps -= oldStatus.countMapTasks();
    totalReduces -= oldStatus.countReduceTasks();
    occupiedMapSlots -= oldStatus.countOccupiedMapSlots();
    occupiedReduceSlots -= oldStatus.countOccupiedReduceSlots();
    getInstrumentation().decRunningMaps(oldStatus.countMapTasks());
    getInstrumentation().decRunningReduces(oldStatus.countReduceTasks());
    getInstrumentation().decOccupiedMapSlots(oldStatus.countOccupiedMapSlots());
    getInstrumentation().decOccupiedReduceSlots(oldStatus.countOccupiedReduceSlots());
    if (!faultyTrackers.isBlacklisted(oldStatus.getHost())) {
      // blacklisted hosts do not contribute capacity, so only non-blacklisted
      // slots were previously counted and need backing out
      int mapSlots = oldStatus.getMaxMapSlots();
      totalMapTaskCapacity -= mapSlots;
      int reduceSlots = oldStatus.getMaxReduceSlots();
      totalReduceTaskCapacity -= reduceSlots;
    }
    if (status == null) {
      // tracker is being removed: drop it and maintain the per-host count
      taskTrackers.remove(trackerName);
      Integer numTaskTrackersInHost =
        uniqueHostsMap.get(oldStatus.getHost());
      if (numTaskTrackersInHost != null) {
        numTaskTrackersInHost --;
        if (numTaskTrackersInHost > 0)  {
          uniqueHostsMap.put(oldStatus.getHost(), numTaskTrackersInHost);
        }
        else {
          uniqueHostsMap.remove(oldStatus.getHost());
        }
      }
    }
  }
  if (status != null) {
    // Apply all counters/metrics contributed by the new status
    totalMaps += status.countMapTasks();
    totalReduces += status.countReduceTasks();
    occupiedMapSlots += status.countOccupiedMapSlots();
    occupiedReduceSlots += status.countOccupiedReduceSlots();
    getInstrumentation().addRunningMaps(status.countMapTasks());
    getInstrumentation().addRunningReduces(status.countReduceTasks());
    getInstrumentation().addOccupiedMapSlots(status.countOccupiedMapSlots());
    getInstrumentation().addOccupiedReduceSlots(status.countOccupiedReduceSlots());
    if (!faultyTrackers.isBlacklisted(status.getHost())) {
      int mapSlots = status.getMaxMapSlots();
      totalMapTaskCapacity += mapSlots;
      int reduceSlots = status.getMaxReduceSlots();
      totalReduceTaskCapacity += reduceSlots;
    }
    boolean alreadyPresent = false;
    TaskTracker taskTracker = taskTrackers.get(trackerName);
    if (taskTracker != null) {
      alreadyPresent = true;
    } else {
      taskTracker = new TaskTracker(trackerName);
    }

    taskTracker.setStatus(status);
    taskTrackers.put(trackerName, taskTracker);

    if (LOG.isDebugEnabled()) {
      // Per-state tallies of the reported task statuses, for debug logging
      int runningMaps = 0, runningReduces = 0;
      int commitPendingMaps = 0, commitPendingReduces = 0;
      int unassignedMaps = 0, unassignedReduces = 0;
      int miscMaps = 0, miscReduces = 0;
      List<TaskStatus> taskReports = status.getTaskReports();
      for (Iterator<TaskStatus> it = taskReports.iterator(); it.hasNext();) {
        TaskStatus ts = (TaskStatus) it.next();
        boolean isMap = ts.getIsMap();
        TaskStatus.State state = ts.getRunState();
        if (state == TaskStatus.State.RUNNING) {
          if (isMap) { ++runningMaps; }
          else { ++runningReduces; }
        } else if (state == TaskStatus.State.UNASSIGNED) {
          if (isMap) { ++unassignedMaps; }
          else { ++unassignedReduces; }
        } else if (state == TaskStatus.State.COMMIT_PENDING) {
          if (isMap) { ++commitPendingMaps; }
          else { ++commitPendingReduces; }
        } else {
          if (isMap) { ++miscMaps; }
          else { ++miscReduces; }
        }
      }
      LOG.debug(trackerName + ": Status -" +
                " running(m) = " + runningMaps +
                " unassigned(m) = " + unassignedMaps +
                " commit_pending(m) = " + commitPendingMaps +
                " misc(m) = " + miscMaps +
                " running(r) = " + runningReduces +
                " unassigned(r) = " + unassignedReduces +
                " commit_pending(r) = " + commitPendingReduces +
                " misc(r) = " + miscReduces);
    }

    if (!alreadyPresent)  {
      // first tracker on this host: bump the per-host tracker count
      Integer numTaskTrackersInHost =
        uniqueHostsMap.get(status.getHost());
      if (numTaskTrackersInHost == null) {
        numTaskTrackersInHost = 0;
      }
      numTaskTrackersInHost ++;
      uniqueHostsMap.put(status.getHost(), numTaskTrackersInHost);
    }
  }
  getInstrumentation().setMapSlots(totalMapTaskCapacity);
  getInstrumentation().setReduceSlots(totalReduceTaskCapacity);
  return oldStatus != null;
}

// Increment the number of reserved slots in the cluster.
// This method assumes the caller has JobTracker lock.
void incrementReservations(TaskType type, int reservedSlots) {
  // reservation counters are kept separately per task type
  if (type.equals(TaskType.MAP)) {
    reservedMapSlots += reservedSlots;
  } else if (type.equals(TaskType.REDUCE)) {
    reservedReduceSlots += reservedSlots;
  }
}

// Decrement the number of reserved slots in the cluster.
// This method assumes the caller has JobTracker lock.
void decrementReservations(TaskType type, int reservedSlots) {
  if (type.equals(TaskType.MAP)) {
    reservedMapSlots -= reservedSlots;
  } else if (type.equals(TaskType.REDUCE)) {
    reservedReduceSlots -= reservedSlots;
  }
}

// Push the tracker-reported node health into the faultyTrackers state.
private void updateNodeHealthStatus(TaskTrackerStatus trackerStatus) {
  TaskTrackerHealthStatus status = trackerStatus.getHealthStatus();
  synchronized (faultyTrackers) {
    faultyTrackers.setNodeHealthStatus(trackerStatus.getHost(),
        status.isNodeHealthy(), status.getHealthReport());
  }
}

/**
 * Process incoming heartbeat messages from the task trackers.
 */
private synchronized boolean processHeartbeat(
                                 TaskTrackerStatus trackerStatus,
                                 boolean initialContact) {
  String trackerName = trackerStatus.getTrackerName();

  // lock order: JobTracker (method is synchronized) -> taskTrackers
  // -> trackerExpiryQueue
  synchronized (taskTrackers) {
    synchronized (trackerExpiryQueue) {
      boolean seenBefore = updateTaskTrackerStatus(trackerName,
                                                   trackerStatus);
      TaskTracker taskTracker = getTaskTracker(trackerName);
      if (initialContact) {
        // If it's first contact, then clear out
        // any state hanging around
        if (seenBefore) {
          lostTaskTracker(taskTracker);
        }
      } else {
        // If not first contact, there should be some record of the tracker
        if (!seenBefore) {
          LOG.warn("Status from unknown Tracker : " + trackerName);
          // remove the bogus entry updateTaskTrackerStatus just created
          updateTaskTrackerStatus(trackerName, null);
          return false;
        }
      }

      if (initialContact) {
        // if this is lost tracker that came back now, and if it blacklisted
        // increment the count of blacklisted trackers in the cluster
        if (isBlacklisted(trackerName)) {
          faultyTrackers.incrBlackListedTrackers(1);
        }
        addNewTracker(taskTracker);
      }
    }
  }

  updateTaskStatuses(trackerStatus);
  updateNodeHealthStatus(trackerStatus);

  return true;
}

/**
 * A tracker wants to know if
any of its Tasks have been
 * closed (because the job completed, whether successfully or not)
 */
private synchronized List<TaskTrackerAction> getTasksToKill(
                                                 String taskTracker) {
  Set<TaskAttemptID> taskIds = trackerToTaskMap.get(taskTracker);
  List<TaskTrackerAction> killList = new ArrayList<TaskTrackerAction>();
  if (taskIds != null) {
    for (TaskAttemptID killTaskId : taskIds) {
      TaskInProgress tip = taskidToTIPMap.get(killTaskId);
      if (tip == null) {
        continue;
      }
      if (tip.shouldClose(killTaskId)) {
        //
        // This is how the JobTracker ends a task at the TaskTracker.
        // It may be successfully completed, or may be killed in
        // mid-execution.
        //
        if (!tip.getJob().isComplete()) {
          killList.add(new KillTaskAction(killTaskId));
          LOG.debug(taskTracker + " -> KillTaskAction: " + killTaskId);
        }
      }
    }
  }

  // add the stray attempts for uninited jobs
  synchronized (trackerToTasksToCleanup) {
    Set<TaskAttemptID> set = trackerToTasksToCleanup.remove(taskTracker);
    if (set != null) {
      for (TaskAttemptID id : set) {
        killList.add(new KillTaskAction(id));
      }
    }
  }
  return killList;
}

/**
 * Add a job to cleanup for the tracker.
 */
private void addJobForCleanup(JobID id) {
  // queue the job id under every known tracker so each purges its local state
  for (String taskTracker : taskTrackers.keySet()) {
    LOG.debug("Marking job " + id + " for cleanup by tracker " + taskTracker);
    synchronized (trackerToJobsToCleanup) {
      Set<JobID> jobsToKill = trackerToJobsToCleanup.get(taskTracker);
      if (jobsToKill == null) {
        jobsToKill = new HashSet<JobID>();
        trackerToJobsToCleanup.put(taskTracker, jobsToKill);
      }
      jobsToKill.add(id);
    }
  }
}

/**
 * A tracker wants to know if any job needs cleanup because the job completed.
 */
private List<TaskTrackerAction> getJobsForCleanup(String taskTracker) {
jaxlaw/hadoop-common
f32aae3d0ceed0c728f54a347df852d353f52009
MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. Contributed by Amareshwari Sriramadasu.
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 9382d42..51be6f7 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,494 +1,497 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383006 + MAPREDUCE-1342. Fixed deadlock in global blacklisting of tasktrackers. + (Amareshwari Sriramadasu via acmurthy) + HADOOP-6460. Reinitializes buffers used for serializing responses in ipc server on exceeding maximum response size to free up Java heap. (suresh) MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. 
NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. 
Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. 
Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. 
Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. 
Improve per block message printed by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to include externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. 
yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. 
Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. 
HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. 
Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. 
(Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/mapred/org/apache/hadoop/mapred/JobTracker.java b/src/mapred/org/apache/hadoop/mapred/JobTracker.java index a6884b0..93dca57 100644 --- a/src/mapred/org/apache/hadoop/mapred/JobTracker.java +++ b/src/mapred/org/apache/hadoop/mapred/JobTracker.java @@ -181,1304 +181,1317 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, } /** * The maximum no. of 'completed' (successful/failed/killed) * jobs kept in memory per-user. */ final int MAX_COMPLETE_USER_JOBS_IN_MEMORY; /** * The minimum time (in ms) that a job's information has to remain * in the JobTracker's memory before it is retired. */ static final int MIN_TIME_BEFORE_RETIRE = 0; private int nextJobId = 1; public static final Log LOG = LogFactory.getLog(JobTracker.class); /** * Start the JobTracker with given configuration. * * The conf will be modified to reflect the actual ports on which * the JobTracker is up and running if the user passes the port as * <code>zero</code>. * * @param conf configuration for the JobTracker. 
* @throws IOException */ public static JobTracker startTracker(JobConf conf ) throws IOException, InterruptedException { return startTracker(conf, generateNewIdentifier()); } public static JobTracker startTracker(JobConf conf, String identifier) throws IOException, InterruptedException { JobTracker result = null; while (true) { try { result = new JobTracker(conf, identifier); result.taskScheduler.setTaskTrackerManager(result); break; } catch (VersionMismatch e) { throw e; } catch (BindException e) { throw e; } catch (UnknownHostException e) { throw e; } catch (AccessControlException ace) { // in case of jobtracker not having right access // bail out throw ace; } catch (IOException e) { LOG.warn("Error starting tracker: " + StringUtils.stringifyException(e)); } Thread.sleep(1000); } if (result != null) { JobEndNotifier.startNotifier(); } return result; } public void stopTracker() throws IOException { JobEndNotifier.stopNotifier(); close(); } public long getProtocolVersion(String protocol, long clientVersion) throws IOException { if (protocol.equals(InterTrackerProtocol.class.getName())) { return InterTrackerProtocol.versionID; } else if (protocol.equals(JobSubmissionProtocol.class.getName())){ return JobSubmissionProtocol.versionID; } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){ return RefreshAuthorizationPolicyProtocol.versionID; } else if (protocol.equals(AdminOperationsProtocol.class.getName())){ return AdminOperationsProtocol.versionID; } else { throw new IOException("Unknown protocol to job tracker: " + protocol); } } /** * A thread to timeout tasks that have been assigned to task trackers, * but that haven't reported back yet. * Note that I included a stop() method, even though there is no place * where JobTrackers are cleaned up. */ private class ExpireLaunchingTasks implements Runnable { /** * This is a map of the tasks that have been assigned to task trackers, * but that have not yet been seen in a status report. 
* map: task-id -> time-assigned */ private Map<TaskAttemptID, Long> launchingTasks = new LinkedHashMap<TaskAttemptID, Long>(); public void run() { while (true) { try { // Every 3 minutes check for any tasks that are overdue Thread.sleep(TASKTRACKER_EXPIRY_INTERVAL/3); long now = System.currentTimeMillis(); LOG.debug("Starting launching task sweep"); synchronized (JobTracker.this) { synchronized (launchingTasks) { Iterator<Map.Entry<TaskAttemptID, Long>> itr = launchingTasks.entrySet().iterator(); while (itr.hasNext()) { Map.Entry<TaskAttemptID, Long> pair = itr.next(); TaskAttemptID taskId = pair.getKey(); long age = now - (pair.getValue()).longValue(); LOG.info(taskId + " is " + age + " ms debug."); if (age > TASKTRACKER_EXPIRY_INTERVAL) { LOG.info("Launching task " + taskId + " timed out."); TaskInProgress tip = null; tip = taskidToTIPMap.get(taskId); if (tip != null) { JobInProgress job = tip.getJob(); String trackerName = getAssignedTracker(taskId); TaskTrackerStatus trackerStatus = getTaskTrackerStatus(trackerName); // This might happen when the tasktracker has already // expired and this thread tries to call failedtask // again. expire tasktracker should have called failed // task! if (trackerStatus != null) job.failedTask(tip, taskId, "Error launching task", tip.isMapTask()? TaskStatus.Phase.MAP: TaskStatus.Phase.STARTING, TaskStatus.State.FAILED, trackerName); } itr.remove(); } else { // the tasks are sorted by start time, so once we find // one that we want to keep, we are done for this cycle. 
break; } } } } } catch (InterruptedException ie) { // all done break; } catch (Exception e) { LOG.error("Expire Launching Task Thread got exception: " + StringUtils.stringifyException(e)); } } } public void addNewTask(TaskAttemptID taskName) { synchronized (launchingTasks) { launchingTasks.put(taskName, System.currentTimeMillis()); } } public void removeTask(TaskAttemptID taskName) { synchronized (launchingTasks) { launchingTasks.remove(taskName); } } } /////////////////////////////////////////////////////// // Used to expire TaskTrackers that have gone down /////////////////////////////////////////////////////// class ExpireTrackers implements Runnable { public ExpireTrackers() { } /** * The run method lives for the life of the JobTracker, and removes TaskTrackers * that have not checked in for some time. */ public void run() { while (true) { try { // // Thread runs periodically to check whether trackers should be expired. // The sleep interval must be no more than half the maximum expiry time // for a task tracker. 
// Thread.sleep(TASKTRACKER_EXPIRY_INTERVAL / 3); // // Loop through all expired items in the queue // // Need to lock the JobTracker here since we are // manipulating it's data-structures via // ExpireTrackers.run -> JobTracker.lostTaskTracker -> // JobInProgress.failedTask -> JobTracker.markCompleteTaskAttempt // Also need to lock JobTracker before locking 'taskTracker' & // 'trackerExpiryQueue' to prevent deadlock: // @see {@link JobTracker.processHeartbeat(TaskTrackerStatus, boolean)} synchronized (JobTracker.this) { synchronized (taskTrackers) { synchronized (trackerExpiryQueue) { long now = System.currentTimeMillis(); TaskTrackerStatus leastRecent = null; while ((trackerExpiryQueue.size() > 0) && (leastRecent = trackerExpiryQueue.first()) != null && ((now - leastRecent.getLastSeen()) > TASKTRACKER_EXPIRY_INTERVAL)) { // Remove profile from head of queue trackerExpiryQueue.remove(leastRecent); String trackerName = leastRecent.getTrackerName(); // Figure out if last-seen time should be updated, or if tracker is dead TaskTracker current = getTaskTracker(trackerName); TaskTrackerStatus newProfile = (current == null ) ? null : current.getStatus(); // Items might leave the taskTracker set through other means; the // status stored in 'taskTrackers' might be null, which means the // tracker has already been destroyed. 
if (newProfile != null) { if ((now - newProfile.getLastSeen()) > TASKTRACKER_EXPIRY_INTERVAL) { removeTracker(current); // remove the mapping from the hosts list String hostname = newProfile.getHost(); hostnameToTaskTracker.get(hostname).remove(trackerName); } else { // Update time by inserting latest profile trackerExpiryQueue.add(newProfile); } } } } } } } catch (InterruptedException iex) { break; } catch (Exception t) { LOG.error("Tracker Expiry Thread got exception: " + StringUtils.stringifyException(t)); } } } } synchronized void historyFileCopied(JobID jobid, String historyFile) { JobInProgress job = getJob(jobid); if (job != null) { //found in main cache job.setHistoryFileCopied(); if (historyFile != null) { job.setHistoryFile(historyFile); } return; } RetireJobInfo jobInfo = retireJobs.get(jobid); if (jobInfo != null) { //found in retired cache if (historyFile != null) { jobInfo.setHistoryFile(historyFile); } } } static class RetireJobInfo { final JobStatus status; final JobProfile profile; final long finishTime; private String historyFile; RetireJobInfo(JobStatus status, JobProfile profile, long finishTime, String historyFile) { this.status = status; this.profile = profile; this.finishTime = finishTime; this.historyFile = historyFile; } void setHistoryFile(String file) { this.historyFile = file; } String getHistoryFile() { return historyFile; } } /////////////////////////////////////////////////////// // Used to remove old finished Jobs that have been around for too long /////////////////////////////////////////////////////// class RetireJobs implements Runnable { private final Map<JobID, RetireJobInfo> jobIDStatusMap = new HashMap<JobID, RetireJobInfo>(); private final LinkedList<RetireJobInfo> jobRetireInfoQ = new LinkedList<RetireJobInfo>(); public RetireJobs() { } synchronized void addToCache(JobInProgress job) { RetireJobInfo info = new RetireJobInfo(job.getStatus(), job.getProfile(), job.getFinishTime(), job.getHistoryFile()); 
jobRetireInfoQ.add(info); jobIDStatusMap.put(info.status.getJobID(), info); if (jobRetireInfoQ.size() > retiredJobsCacheSize) { RetireJobInfo removed = jobRetireInfoQ.remove(); jobIDStatusMap.remove(removed.status.getJobID()); LOG.info("Retired job removed from cache " + removed.status.getJobID()); } } synchronized RetireJobInfo get(JobID jobId) { return jobIDStatusMap.get(jobId); } @SuppressWarnings("unchecked") synchronized LinkedList<RetireJobInfo> getAll() { return (LinkedList<RetireJobInfo>) jobRetireInfoQ.clone(); } synchronized LinkedList<JobStatus> getAllJobStatus() { LinkedList<JobStatus> list = new LinkedList<JobStatus>(); for (RetireJobInfo info : jobRetireInfoQ) { list.add(info.status); } return list; } private boolean minConditionToRetire(JobInProgress job, long now) { return job.getStatus().getRunState() != JobStatus.RUNNING && job.getStatus().getRunState() != JobStatus.PREP && (job.getFinishTime() + MIN_TIME_BEFORE_RETIRE < now) && job.isHistoryFileCopied(); } /** * The run method lives for the life of the JobTracker, * and removes Jobs that are not still running, but which * finished a long time ago. 
*/ public void run() { while (true) { try { Thread.sleep(RETIRE_JOB_CHECK_INTERVAL); List<JobInProgress> retiredJobs = new ArrayList<JobInProgress>(); long now = System.currentTimeMillis(); long retireBefore = now - RETIRE_JOB_INTERVAL; synchronized (jobs) { for(JobInProgress job: jobs.values()) { if (minConditionToRetire(job, now) && (job.getFinishTime() < retireBefore)) { retiredJobs.add(job); } } } synchronized (userToJobsMap) { Iterator<Map.Entry<String, ArrayList<JobInProgress>>> userToJobsMapIt = userToJobsMap.entrySet().iterator(); while (userToJobsMapIt.hasNext()) { Map.Entry<String, ArrayList<JobInProgress>> entry = userToJobsMapIt.next(); ArrayList<JobInProgress> userJobs = entry.getValue(); Iterator<JobInProgress> it = userJobs.iterator(); while (it.hasNext() && userJobs.size() > MAX_COMPLETE_USER_JOBS_IN_MEMORY) { JobInProgress jobUser = it.next(); if (retiredJobs.contains(jobUser)) { LOG.info("Removing from userToJobsMap: " + jobUser.getJobID()); it.remove(); } else if (minConditionToRetire(jobUser, now)) { LOG.info("User limit exceeded. 
Marking job: " + jobUser.getJobID() + " for retire."); retiredJobs.add(jobUser); it.remove(); } } if (userJobs.isEmpty()) { userToJobsMapIt.remove(); } } } if (!retiredJobs.isEmpty()) { synchronized (JobTracker.this) { synchronized (jobs) { synchronized (taskScheduler) { for (JobInProgress job: retiredJobs) { removeJobTasks(job); jobs.remove(job.getProfile().getJobID()); for (JobInProgressListener l : jobInProgressListeners) { l.jobRemoved(job); } String jobUser = job.getProfile().getUser(); LOG.info("Retired job with id: '" + job.getProfile().getJobID() + "' of user '" + jobUser + "'"); // clean up job files from the local disk JobHistory.JobInfo.cleanupJob(job.getProfile().getJobID()); addToCache(job); } } } } } } catch (InterruptedException t) { break; } catch (Throwable t) { LOG.error("Error in retiring job:\n" + StringUtils.stringifyException(t)); } } } } enum ReasonForBlackListing { EXCEEDING_FAILURES, NODE_UNHEALTHY } // The FaultInfo which indicates the number of faults of a tracker // and when the last fault occurred // and whether the tracker is blacklisted across all jobs or not private static class FaultInfo { static final String FAULT_FORMAT_STRING = "%d failures on the tracker"; int numFaults = 0; long lastUpdated; boolean blacklisted; private boolean isHealthy; private HashMap<ReasonForBlackListing, String>rfbMap; FaultInfo() { numFaults = 0; lastUpdated = System.currentTimeMillis(); blacklisted = false; rfbMap = new HashMap<ReasonForBlackListing, String>(); } void setFaultCount(int num) { numFaults = num; } void setLastUpdated(long timeStamp) { lastUpdated = timeStamp; } int getFaultCount() { return numFaults; } long getLastUpdated() { return lastUpdated; } boolean isBlacklisted() { return blacklisted; } void setBlacklist(ReasonForBlackListing rfb, String trackerFaultReport) { blacklisted = true; this.rfbMap.put(rfb, trackerFaultReport); } public void setHealthy(boolean isHealthy) { this.isHealthy = isHealthy; } public boolean isHealthy() { return 
isHealthy; } public String getTrackerFaultReport() { StringBuffer sb = new StringBuffer(); for(String reasons : rfbMap.values()) { sb.append(reasons); sb.append("\n"); } return sb.toString(); } Set<ReasonForBlackListing> getReasonforblacklisting() { return this.rfbMap.keySet(); } public void unBlacklist() { this.blacklisted = false; this.rfbMap.clear(); } public boolean removeBlackListedReason(ReasonForBlackListing rfb) { String str = rfbMap.remove(rfb); return str!=null; } public void addBlackListedReason(ReasonForBlackListing rfb, String reason) { this.rfbMap.put(rfb, reason); } } private class FaultyTrackersInfo { // A map from hostName to its faults private Map<String, FaultInfo> potentiallyFaultyTrackers = new HashMap<String, FaultInfo>(); // This count gives the number of blacklisted trackers in the cluster // at any time. This is maintained to avoid iteration over // the potentiallyFaultyTrackers to get blacklisted trackers. And also // this count doesn't include blacklisted trackers which are lost, // although the fault info is maintained for lost trackers. private volatile int numBlacklistedTrackers = 0; /** * Increments faults(blacklist by job) for the tracker by one. * * Adds the tracker to the potentially faulty list. + * Assumes JobTracker is locked on the entry. 
* * @param hostName */ void incrementFaults(String hostName) { synchronized (potentiallyFaultyTrackers) { FaultInfo fi = getFaultInfo(hostName, true); int numFaults = fi.getFaultCount(); ++numFaults; fi.setFaultCount(numFaults); fi.setLastUpdated(System.currentTimeMillis()); if (exceedsFaults(fi)) { LOG.info("Adding " + hostName + " to the blacklist" + " across all jobs"); String reason = String.format(FaultInfo.FAULT_FORMAT_STRING, numFaults); blackListTracker(hostName, reason, ReasonForBlackListing.EXCEEDING_FAILURES); } } } private void incrBlackListedTrackers(int count) { numBlacklistedTrackers += count; getInstrumentation().addBlackListedTrackers(count); } private void decrBlackListedTrackers(int count) { numBlacklistedTrackers -= count; getInstrumentation().decBlackListedTrackers(count); } private void blackListTracker(String hostName, String reason, ReasonForBlackListing rfb) { FaultInfo fi = getFaultInfo(hostName, true); boolean blackListed = fi.isBlacklisted(); if(blackListed) { if (LOG.isDebugEnabled()) { LOG.debug("Adding blacklisted reason for tracker : " + hostName + " Reason for blacklisting is : " + rfb); } if (!fi.getReasonforblacklisting().contains(rfb)) { LOG.info("Adding blacklisted reason for tracker : " + hostName + " Reason for blacklisting is : " + rfb); } fi.addBlackListedReason(rfb, reason); } else { LOG.info("Blacklisting tracker : " + hostName + " Reason for blacklisting is : " + rfb); Set<TaskTracker> trackers = hostnameToTaskTracker.get(hostName); synchronized (trackers) { for (TaskTracker tracker : trackers) { tracker.cancelAllReservations(); } } removeHostCapacity(hostName); fi.setBlacklist(rfb, reason); } } private boolean canUnBlackListTracker(String hostName, ReasonForBlackListing rfb) { FaultInfo fi = getFaultInfo(hostName, false); if(fi == null) { return false; } Set<ReasonForBlackListing> rfbSet = fi.getReasonforblacklisting(); return fi.isBlacklisted() && rfbSet.contains(rfb); } private void unBlackListTracker(String hostName, 
ReasonForBlackListing rfb) { // check if you can black list the tracker then call this methods FaultInfo fi = getFaultInfo(hostName, false); if(fi.removeBlackListedReason(rfb)) { if(fi.getReasonforblacklisting().isEmpty()) { addHostCapacity(hostName); LOG.info("Unblacklisting tracker : " + hostName); fi.unBlacklist(); //We have unBlackListed tracker, so tracker should //definitely be healthy. Check fault count if fault count //is zero don't keep it memory. if(fi.numFaults == 0) { potentiallyFaultyTrackers.remove(hostName); } } } } + // Assumes JobTracker is locked on the entry private FaultInfo getFaultInfo(String hostName, boolean createIfNeccessary) { - FaultInfo fi = potentiallyFaultyTrackers.get(hostName); - if (fi == null && createIfNeccessary) { - fi = new FaultInfo(); - potentiallyFaultyTrackers.put(hostName, fi); + FaultInfo fi = null; + synchronized (potentiallyFaultyTrackers) { + fi = potentiallyFaultyTrackers.get(hostName); + if (fi == null && createIfNeccessary) { + fi = new FaultInfo(); + potentiallyFaultyTrackers.put(hostName, fi); + } } return fi; } /** * Blacklists the tracker across all jobs if * <ol> * <li>#faults are more than * MAX_BLACKLISTS_PER_TRACKER (configurable) blacklists</li> * <li>#faults is 50% (configurable) above the average #faults</li> * <li>50% the cluster is not blacklisted yet </li> * </ol> */ private boolean exceedsFaults(FaultInfo fi) { int faultCount = fi.getFaultCount(); if (faultCount >= MAX_BLACKLISTS_PER_TRACKER) { // calculate avgBlackLists long clusterSize = getClusterStatus().getTaskTrackers(); long sum = 0; for (FaultInfo f : potentiallyFaultyTrackers.values()) { sum += f.getFaultCount(); } double avg = (double) sum / clusterSize; long totalCluster = clusterSize + numBlacklistedTrackers; if ((faultCount - avg) > (AVERAGE_BLACKLIST_THRESHOLD * avg) && numBlacklistedTrackers < (totalCluster * MAX_BLACKLIST_PERCENT)) { return true; } } return false; } /** * Removes the tracker from blacklist and * from potentially 
faulty list, when it is restarted. * + * Assumes JobTracker is locked on the entry. + * * @param hostName */ void markTrackerHealthy(String hostName) { synchronized (potentiallyFaultyTrackers) { FaultInfo fi = potentiallyFaultyTrackers.remove(hostName); if (fi != null && fi.isBlacklisted()) { LOG.info("Removing " + hostName + " from blacklist"); addHostCapacity(hostName); } } } /** * Check whether tasks can be assigned to the tracker. * * One fault of the tracker is discarded if there * are no faults during one day. So, the tracker will get a * chance again to run tasks of a job. + * Assumes JobTracker is locked on the entry. * * @param hostName The tracker name * @param now The current time * * @return true if the tracker is blacklisted * false otherwise */ boolean shouldAssignTasksToTracker(String hostName, long now) { synchronized (potentiallyFaultyTrackers) { FaultInfo fi = potentiallyFaultyTrackers.get(hostName); if (fi != null && (now - fi.getLastUpdated()) > UPDATE_FAULTY_TRACKER_INTERVAL) { int numFaults = fi.getFaultCount() - 1; fi.setFaultCount(numFaults); fi.setLastUpdated(now); if (canUnBlackListTracker(hostName, ReasonForBlackListing.EXCEEDING_FAILURES)) { unBlackListTracker(hostName, ReasonForBlackListing.EXCEEDING_FAILURES); } } return (fi != null && fi.isBlacklisted()); } } private void removeHostCapacity(String hostName) { synchronized (taskTrackers) { // remove the capacity of trackers on this host int numTrackersOnHost = 0; for (TaskTrackerStatus status : getStatusesOnHost(hostName)) { int mapSlots = status.getMaxMapSlots(); totalMapTaskCapacity -= mapSlots; int reduceSlots = status.getMaxReduceSlots(); totalReduceTaskCapacity -= reduceSlots; ++numTrackersOnHost; getInstrumentation().addBlackListedMapSlots( mapSlots); getInstrumentation().addBlackListedReduceSlots( reduceSlots); } uniqueHostsMap.remove(hostName); incrBlackListedTrackers(numTrackersOnHost); } } // This is called on tracker's restart or after a day of blacklist. 
private void addHostCapacity(String hostName) { synchronized (taskTrackers) { int numTrackersOnHost = 0; // add the capacity of trackers on the host for (TaskTrackerStatus status : getStatusesOnHost(hostName)) { int mapSlots = status.getMaxMapSlots(); totalMapTaskCapacity += mapSlots; int reduceSlots = status.getMaxReduceSlots(); totalReduceTaskCapacity += reduceSlots; numTrackersOnHost++; getInstrumentation().decBlackListedMapSlots(mapSlots); getInstrumentation().decBlackListedReduceSlots(reduceSlots); } uniqueHostsMap.put(hostName, numTrackersOnHost); decrBlackListedTrackers(numTrackersOnHost); } } /** * Whether a host is blacklisted across all the jobs. * + * Assumes JobTracker is locked on the entry. * @param hostName * @return */ boolean isBlacklisted(String hostName) { synchronized (potentiallyFaultyTrackers) { FaultInfo fi = null; if ((fi = potentiallyFaultyTrackers.get(hostName)) != null) { return fi.isBlacklisted(); } } return false; } + // Assumes JobTracker is locked on the entry. int getFaultCount(String hostName) { synchronized (potentiallyFaultyTrackers) { FaultInfo fi = null; if ((fi = potentiallyFaultyTrackers.get(hostName)) != null) { return fi.getFaultCount(); } } return 0; } + // Assumes JobTracker is locked on the entry. Set<ReasonForBlackListing> getReasonForBlackListing(String hostName) { synchronized (potentiallyFaultyTrackers) { FaultInfo fi = null; if ((fi = potentiallyFaultyTrackers.get(hostName)) != null) { return fi.getReasonforblacklisting(); } } return null; } + // Assumes JobTracker is locked on the entry. void setNodeHealthStatus(String hostName, boolean isHealthy, String reason) { FaultInfo fi = null; // If tracker is not healthy, create a fault info object // blacklist it. 
if (!isHealthy) { fi = getFaultInfo(hostName, true); fi.setHealthy(isHealthy); synchronized (potentiallyFaultyTrackers) { blackListTracker(hostName, reason, ReasonForBlackListing.NODE_UNHEALTHY); } } else { fi = getFaultInfo(hostName, false); if (fi == null) { return; } else { if (canUnBlackListTracker(hostName, ReasonForBlackListing.NODE_UNHEALTHY)) { unBlackListTracker(hostName, ReasonForBlackListing.NODE_UNHEALTHY); } } } } } /** * Get all task tracker statuses on given host * + * Assumes JobTracker is locked on the entry * @param hostName * @return {@link java.util.List} of {@link TaskTrackerStatus} */ private List<TaskTrackerStatus> getStatusesOnHost(String hostName) { List<TaskTrackerStatus> statuses = new ArrayList<TaskTrackerStatus>(); synchronized (taskTrackers) { for (TaskTracker tt : taskTrackers.values()) { TaskTrackerStatus status = tt.getStatus(); if (hostName.equals(status.getHost())) { statuses.add(status); } } } return statuses; } /////////////////////////////////////////////////////// // Used to recover the jobs upon restart /////////////////////////////////////////////////////// class RecoveryManager { Set<JobID> jobsToRecover; // set of jobs to be recovered private int totalEventsRecovered = 0; private int restartCount = 0; private boolean shouldRecover = false; Set<String> recoveredTrackers = Collections.synchronizedSet(new HashSet<String>()); /** A custom listener that replays the events in the order in which the * events (task attempts) occurred. 
*/ class JobRecoveryListener implements Listener { // The owner job private JobInProgress jip; private JobHistory.JobInfo job; // current job's info object // Maintain the count of the (attempt) events recovered private int numEventsRecovered = 0; // Maintains open transactions private Map<String, String> hangingAttempts = new HashMap<String, String>(); // Whether there are any updates for this job private boolean hasUpdates = false; public JobRecoveryListener(JobInProgress jip) { this.jip = jip; this.job = new JobHistory.JobInfo(jip.getJobID().toString()); } /** * Process a task. Note that a task might commit a previously pending * transaction. */ private void processTask(String taskId, JobHistory.Task task) { // Any TASK info commits the previous transaction boolean hasHanging = hangingAttempts.remove(taskId) != null; if (hasHanging) { numEventsRecovered += 2; } TaskID id = TaskID.forName(taskId); TaskInProgress tip = getTip(id); updateTip(tip, task); } /** * Adds a task-attempt in the listener */ private void processTaskAttempt(String taskAttemptId, JobHistory.TaskAttempt attempt) { TaskAttemptID id = TaskAttemptID.forName(taskAttemptId); // Check if the transaction for this attempt can be committed String taskStatus = attempt.get(Keys.TASK_STATUS); TaskAttemptID taskID = TaskAttemptID.forName(taskAttemptId); JobInProgress jip = getJob(taskID.getJobID()); JobStatus prevStatus = (JobStatus)jip.getStatus().clone(); if (taskStatus.length() > 0) { // This means this is an update event if (taskStatus.equals(Values.SUCCESS.name())) { // Mark this attempt as hanging hangingAttempts.put(id.getTaskID().toString(), taskAttemptId); addSuccessfulAttempt(jip, id, attempt); } else { addUnsuccessfulAttempt(jip, id, attempt); numEventsRecovered += 2; } } else { createTaskAttempt(jip, id, attempt); } JobStatus newStatus = (JobStatus)jip.getStatus().clone(); if (prevStatus.getRunState() != newStatus.getRunState()) { if(LOG.isDebugEnabled()) LOG.debug("Status changed hence 
informing prevStatus" + prevStatus + " currentStatus "+ newStatus); JobStatusChangeEvent event = new JobStatusChangeEvent(jip, EventType.RUN_STATE_CHANGED, prevStatus, newStatus); updateJobInProgressListeners(event); } } public void handle(JobHistory.RecordTypes recType, Map<Keys, String> values) throws IOException { if (recType == JobHistory.RecordTypes.Job) { // Update the meta-level job information job.handle(values); // Forcefully init the job as we have some updates for it checkAndInit(); } else if (recType.equals(JobHistory.RecordTypes.Task)) { String taskId = values.get(Keys.TASKID); // Create a task JobHistory.Task task = new JobHistory.Task(); task.handle(values); // Ignore if its a cleanup task if (isCleanup(task)) { return; } // Process the task i.e update the tip state processTask(taskId, task); } else if (recType.equals(JobHistory.RecordTypes.MapAttempt)) { String attemptId = values.get(Keys.TASK_ATTEMPT_ID); // Create a task attempt JobHistory.MapAttempt attempt = new JobHistory.MapAttempt(); attempt.handle(values); // Ignore if its a cleanup task if (isCleanup(attempt)) { return; } // Process the attempt i.e update the attempt state via job processTaskAttempt(attemptId, attempt); } else if (recType.equals(JobHistory.RecordTypes.ReduceAttempt)) { String attemptId = values.get(Keys.TASK_ATTEMPT_ID); // Create a task attempt JobHistory.ReduceAttempt attempt = new JobHistory.ReduceAttempt(); attempt.handle(values); // Ignore if its a cleanup task if (isCleanup(attempt)) { return; } // Process the attempt i.e update the job state via job processTaskAttempt(attemptId, attempt); } } // Check if the task is of type CLEANUP private boolean isCleanup(JobHistory.Task task) { String taskType = task.get(Keys.TASK_TYPE); return Values.CLEANUP.name().equals(taskType); } // Init the job if its ready for init. 
Also make sure that the scheduler // is updated private void checkAndInit() throws IOException { String jobStatus = this.job.get(Keys.JOB_STATUS); if (Values.PREP.name().equals(jobStatus)) { hasUpdates = true; LOG.info("Calling init from RM for job " + jip.getJobID().toString()); try { initJob(jip); } catch (Throwable t) { LOG.error("Job initialization failed : \n" + StringUtils.stringifyException(t)); failJob(jip); throw new IOException(t); } } } void close() { if (hasUpdates) { // Apply the final (job-level) updates JobStatusChangeEvent event = updateJob(jip, job); synchronized (JobTracker.this) { // Update the job listeners updateJobInProgressListeners(event); } } } public int getNumEventsRecovered() { return numEventsRecovered; } } public RecoveryManager() { jobsToRecover = new TreeSet<JobID>(); } public boolean contains(JobID id) { return jobsToRecover.contains(id); } void addJobForRecovery(JobID id) { jobsToRecover.add(id); } public boolean shouldRecover() { return shouldRecover; } public boolean shouldSchedule() { return recoveredTrackers.isEmpty(); } private void markTracker(String trackerName) { recoveredTrackers.add(trackerName); } void unMarkTracker(String trackerName) { recoveredTrackers.remove(trackerName); } Set<JobID> getJobsToRecover() { return jobsToRecover; } /** Check if the given string represents a job-id or not */ private boolean isJobNameValid(String str) { if(str == null) { return false; } String[] parts = str.split("_"); if(parts.length == 3) { if(parts[0].equals("job")) { // other 2 parts should be parseable return JobTracker.validateIdentifier(parts[1]) && JobTracker.validateJobNumber(parts[2]); } } return false; } // checks if the job dir has the required files public void checkAndAddJob(FileStatus status) throws IOException { String fileName = status.getPath().getName(); if (isJobNameValid(fileName)) { if (JobClient.isJobDirValid(status.getPath(), fs)) { recoveryManager.addJobForRecovery(JobID.forName(fileName)); shouldRecover = true; 
// enable actual recovery if num-files > 1 } else { LOG.info("Found an incomplete job directory " + fileName + "." + " Deleting it!!"); fs.delete(status.getPath(), true); } } } private JobStatusChangeEvent updateJob(JobInProgress jip, JobHistory.JobInfo job) { // Change the job priority String jobpriority = job.get(Keys.JOB_PRIORITY); JobPriority priority = JobPriority.valueOf(jobpriority); // It's important to update this via the jobtracker's api as it will // take care of updating the event listeners too setJobPriority(jip.getJobID(), priority); // Save the previous job status JobStatus oldStatus = (JobStatus)jip.getStatus().clone(); // Set the start/launch time only if there are recovered tasks // Increment the job's restart count jip.updateJobInfo(job.getLong(JobHistory.Keys.SUBMIT_TIME), job.getLong(JobHistory.Keys.LAUNCH_TIME)); // Save the new job status JobStatus newStatus = (JobStatus)jip.getStatus().clone(); return new JobStatusChangeEvent(jip, EventType.START_TIME_CHANGED, oldStatus, newStatus); } private void updateTip(TaskInProgress tip, JobHistory.Task task) { long startTime = task.getLong(Keys.START_TIME); if (startTime != 0) { tip.setExecStartTime(startTime); } long finishTime = task.getLong(Keys.FINISH_TIME); // For failed tasks finish-time will be missing if (finishTime != 0) { tip.setExecFinishTime(finishTime); } String cause = task.get(Keys.TASK_ATTEMPT_ID); if (cause.length() > 0) { // This means that the this is a FAILED events TaskAttemptID id = TaskAttemptID.forName(cause); TaskStatus status = tip.getTaskStatus(id); synchronized (JobTracker.this) { // This will add the tip failed event in the new log tip.getJob().failedTask(tip, id, status.getDiagnosticInfo(), status.getPhase(), status.getRunState(), status.getTaskTracker()); } } } private void createTaskAttempt(JobInProgress job, TaskAttemptID attemptId, JobHistory.TaskAttempt attempt) { TaskID id = attemptId.getTaskID(); String type = attempt.get(Keys.TASK_TYPE); TaskInProgress tip = 
job.getTaskInProgress(id); // I. Get the required info TaskStatus taskStatus = null; String trackerName = attempt.get(Keys.TRACKER_NAME); String trackerHostName = JobInProgress.convertTrackerNameToHostName(trackerName); // recover the port information. int port = 0; // default to 0 String hport = attempt.get(Keys.HTTP_PORT); if (hport != null && hport.length() > 0) { port = attempt.getInt(Keys.HTTP_PORT); } long attemptStartTime = attempt.getLong(Keys.START_TIME); // II. Create the (appropriate) task status if (type.equals(Values.MAP.name())) { taskStatus = new MapTaskStatus(attemptId, 0.0f, job.getNumSlotsPerTask(TaskType.MAP), TaskStatus.State.RUNNING, "", "", trackerName, TaskStatus.Phase.MAP, new Counters()); } else { taskStatus = new ReduceTaskStatus(attemptId, 0.0f, job.getNumSlotsPerTask(TaskType.REDUCE), TaskStatus.State.RUNNING, "", "", trackerName, TaskStatus.Phase.REDUCE, new Counters()); } // Set the start time taskStatus.setStartTime(attemptStartTime); List<TaskStatus> ttStatusList = new ArrayList<TaskStatus>(); ttStatusList.add(taskStatus); // III. Create the dummy tasktracker status TaskTrackerStatus ttStatus = new TaskTrackerStatus(trackerName, trackerHostName, port, ttStatusList, 0 , 0, 0); ttStatus.setLastSeen(System.currentTimeMillis()); synchronized (JobTracker.this) { synchronized (taskTrackers) { synchronized (trackerExpiryQueue) { // IV. Register a new tracker TaskTracker taskTracker = getTaskTracker(trackerName); boolean isTrackerRegistered = (taskTracker != null); if (!isTrackerRegistered) { markTracker(trackerName); // add the tracker to recovery-manager taskTracker = new TaskTracker(trackerName); taskTracker.setStatus(ttStatus); addNewTracker(taskTracker); } // V. Update the tracker status // This will update the meta info of the jobtracker and also add the // tracker status if missing i.e register it updateTaskTrackerStatus(trackerName, ttStatus); } } // Register the attempt with job and tip, under JobTracker lock. 
// Since, as of today they are atomic through heartbeat. // VI. Register the attempt // a) In the job job.addRunningTaskToTIP(tip, attemptId, ttStatus, false); // b) In the tip tip.updateStatus(taskStatus); } // VII. Make an entry in the launched tasks expireLaunchingTasks.addNewTask(attemptId); } private void addSuccessfulAttempt(JobInProgress job, TaskAttemptID attemptId, JobHistory.TaskAttempt attempt) { // I. Get the required info TaskID taskId = attemptId.getTaskID(); String type = attempt.get(Keys.TASK_TYPE); TaskInProgress tip = job.getTaskInProgress(taskId); long attemptFinishTime = attempt.getLong(Keys.FINISH_TIME); // Get the task status and the tracker name and make a copy of it TaskStatus taskStatus = (TaskStatus)tip.getTaskStatus(attemptId).clone(); taskStatus.setFinishTime(attemptFinishTime); String stateString = attempt.get(Keys.STATE_STRING); // Update the basic values taskStatus.setStateString(stateString); taskStatus.setProgress(1.0f); taskStatus.setRunState(TaskStatus.State.SUCCEEDED); // Set the shuffle/sort finished times if (type.equals(Values.REDUCE.name())) { long shuffleTime = Long.parseLong(attempt.get(Keys.SHUFFLE_FINISHED)); long sortTime = Long.parseLong(attempt.get(Keys.SORT_FINISHED)); taskStatus.setShuffleFinishTime(shuffleTime); taskStatus.setSortFinishTime(sortTime); } // Add the counters String counterString = attempt.get(Keys.COUNTERS); Counters counter = null; //TODO Check if an exception should be thrown try { counter = Counters.fromEscapedCompactString(counterString); } catch (ParseException pe) { counter = new Counters(); // Set it to empty counter } taskStatus.setCounters(counter); synchronized (JobTracker.this) { // II. Replay the status job.updateTaskStatus(tip, taskStatus); } // III. Prevent the task from expiry expireLaunchingTasks.removeTask(attemptId); } private void addUnsuccessfulAttempt(JobInProgress job, TaskAttemptID attemptId, JobHistory.TaskAttempt attempt) { // I. 
Get the required info TaskID taskId = attemptId.getTaskID(); TaskInProgress tip = job.getTaskInProgress(taskId); long attemptFinishTime = attempt.getLong(Keys.FINISH_TIME); TaskStatus taskStatus = (TaskStatus)tip.getTaskStatus(attemptId).clone(); taskStatus.setFinishTime(attemptFinishTime); // Reset the progress taskStatus.setProgress(0.0f); String stateString = attempt.get(Keys.STATE_STRING); taskStatus.setStateString(stateString); boolean hasFailed = attempt.get(Keys.TASK_STATUS).equals(Values.FAILED.name()); // Set the state failed/killed if (hasFailed) { taskStatus.setRunState(TaskStatus.State.FAILED); } else { taskStatus.setRunState(TaskStatus.State.KILLED); } // Get/Set the error msg String diagInfo = attempt.get(Keys.ERROR); taskStatus.setDiagnosticInfo(diagInfo); // diag info synchronized (JobTracker.this) { // II. Update the task status job.updateTaskStatus(tip, taskStatus); } // III. Prevent the task from expiry expireLaunchingTasks.removeTask(attemptId); } Path getRestartCountFile() { return new Path(getSystemDir(), "jobtracker.info"); } Path getTempRestartCountFile() { return new Path(getSystemDir(), "jobtracker.info.recover"); } /** * Initialize the recovery process. It simply creates a jobtracker.info file * in the jobtracker's system directory and writes its restart count in it. * For the first start, the jobtracker writes '0' in it. Upon subsequent * restarts the jobtracker replaces the count with its current count which * is (old count + 1). The whole purpose of this api is to obtain restart * counts across restarts to avoid attempt-id clashes. * * Note that in between if the jobtracker.info files goes missing then the * jobtracker will disable recovery and continue. 
* */ void updateRestartCount() throws IOException { Path restartFile = getRestartCountFile(); Path tmpRestartFile = getTempRestartCountFile(); FileSystem fs = restartFile.getFileSystem(conf); FsPermission filePerm = new FsPermission(SYSTEM_FILE_PERMISSION); // read the count from the jobtracker info file @@ -2107,1151 +2120,1163 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, if (Thread.currentThread().isInterrupted()) { throw new InterruptedException(); } // Same with 'localDir' except it's always on the local disk. if (!hasRestarted) { jobConf.deleteLocalFiles(SUBDIR); } // Initialize history DONE folder if (historyInitialized) { JobHistory.initDone(conf, fs); String historyLogDir = JobHistory.getCompletedJobHistoryLocation().toString(); infoServer.setAttribute("historyLogDir", historyLogDir); FileSystem historyFS = new Path(historyLogDir).getFileSystem(conf); infoServer.setAttribute("fileSys", historyFS); } this.dnsToSwitchMapping = ReflectionUtils.newInstance( conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class, DNSToSwitchMapping.class), conf); this.numTaskCacheLevels = conf.getInt("mapred.task.cache.levels", NetworkTopology.DEFAULT_HOST_LEVEL); //initializes the job status store completedJobStatusStore = new CompletedJobStatusStore(conf); } private static SimpleDateFormat getDateFormat() { return new SimpleDateFormat("yyyyMMddHHmm"); } private static String generateNewIdentifier() { return getDateFormat().format(new Date()); } static boolean validateIdentifier(String id) { try { // the jobtracker id should be 'date' parseable getDateFormat().parse(id); return true; } catch (ParseException pe) {} return false; } static boolean validateJobNumber(String id) { try { // the job number should be integer parseable Integer.parseInt(id); return true; } catch (IllegalArgumentException pe) {} return false; } /** * Whether the JT has restarted */ public boolean hasRestarted() { return hasRestarted; } /** * Whether the JT 
has recovered upon restart */ public boolean hasRecovered() { return hasRecovered; } /** * How long the jobtracker took to recover from restart. */ public long getRecoveryDuration() { return hasRestarted() ? recoveryDuration : 0; } public static Class<? extends JobTrackerInstrumentation> getInstrumentationClass(Configuration conf) { return conf.getClass("mapred.jobtracker.instrumentation", JobTrackerMetricsInst.class, JobTrackerInstrumentation.class); } public static void setInstrumentationClass(Configuration conf, Class<? extends JobTrackerInstrumentation> t) { conf.setClass("mapred.jobtracker.instrumentation", t, JobTrackerInstrumentation.class); } JobTrackerInstrumentation getInstrumentation() { return myInstrumentation; } public static InetSocketAddress getAddress(Configuration conf) { String jobTrackerStr = conf.get("mapred.job.tracker", "localhost:8012"); return NetUtils.createSocketAddr(jobTrackerStr); } /** * Run forever */ public void offerService() throws InterruptedException, IOException { // Prepare for recovery. This is done irrespective of the status of restart // flag. while (true) { try { recoveryManager.updateRestartCount(); break; } catch (IOException ioe) { LOG.warn("Failed to initialize recovery manager. ", ioe); // wait for some time Thread.sleep(FS_ACCESS_RETRY_PERIOD); LOG.warn("Retrying..."); } } taskScheduler.start(); // Start the recovery after starting the scheduler try { recoveryManager.recover(); } catch (Throwable t) { LOG.warn("Recovery manager crashed! 
Ignoring.", t); } // refresh the node list as the recovery manager might have added // disallowed trackers refreshHosts(); this.expireTrackersThread = new Thread(this.expireTrackers, "expireTrackers"); this.expireTrackersThread.start(); this.retireJobsThread = new Thread(this.retireJobs, "retireJobs"); this.retireJobsThread.start(); expireLaunchingTaskThread.start(); if (completedJobStatusStore.isActive()) { completedJobsStoreThread = new Thread(completedJobStatusStore, "completedjobsStore-housekeeper"); completedJobsStoreThread.start(); } // start the inter-tracker server once the jt is ready this.interTrackerServer.start(); synchronized (this) { state = State.RUNNING; } LOG.info("Starting RUNNING"); this.interTrackerServer.join(); LOG.info("Stopped interTrackerServer"); } void close() throws IOException { if (this.infoServer != null) { LOG.info("Stopping infoServer"); try { this.infoServer.stop(); } catch (Exception ex) { LOG.warn("Exception shutting down JobTracker", ex); } } if (this.interTrackerServer != null) { LOG.info("Stopping interTrackerServer"); this.interTrackerServer.stop(); } if (this.expireTrackersThread != null && this.expireTrackersThread.isAlive()) { LOG.info("Stopping expireTrackers"); this.expireTrackersThread.interrupt(); try { this.expireTrackersThread.join(); } catch (InterruptedException ex) { ex.printStackTrace(); } } if (this.retireJobsThread != null && this.retireJobsThread.isAlive()) { LOG.info("Stopping retirer"); this.retireJobsThread.interrupt(); try { this.retireJobsThread.join(); } catch (InterruptedException ex) { ex.printStackTrace(); } } if (taskScheduler != null) { taskScheduler.terminate(); } if (this.expireLaunchingTaskThread != null && this.expireLaunchingTaskThread.isAlive()) { LOG.info("Stopping expireLaunchingTasks"); this.expireLaunchingTaskThread.interrupt(); try { this.expireLaunchingTaskThread.join(); } catch (InterruptedException ex) { ex.printStackTrace(); } } if (this.completedJobsStoreThread != null && 
this.completedJobsStoreThread.isAlive()) { LOG.info("Stopping completedJobsStore thread"); this.completedJobsStoreThread.interrupt(); try { this.completedJobsStoreThread.join(); } catch (InterruptedException ex) { ex.printStackTrace(); } } LOG.info("stopped all jobtracker services"); return; } /////////////////////////////////////////////////////// // Maintain lookup tables; called by JobInProgress // and TaskInProgress /////////////////////////////////////////////////////// void createTaskEntry(TaskAttemptID taskid, String taskTracker, TaskInProgress tip) { LOG.info("Adding task " + (tip.isCleanupAttempt(taskid) ? "(cleanup)" : "") + "'" + taskid + "' to tip " + tip.getTIPId() + ", for tracker '" + taskTracker + "'"); // taskid --> tracker taskidToTrackerMap.put(taskid, taskTracker); // tracker --> taskid Set<TaskAttemptID> taskset = trackerToTaskMap.get(taskTracker); if (taskset == null) { taskset = new TreeSet<TaskAttemptID>(); trackerToTaskMap.put(taskTracker, taskset); } taskset.add(taskid); // taskid --> TIP taskidToTIPMap.put(taskid, tip); } void removeTaskEntry(TaskAttemptID taskid) { // taskid --> tracker String tracker = taskidToTrackerMap.remove(taskid); // tracker --> taskid if (tracker != null) { Set<TaskAttemptID> trackerSet = trackerToTaskMap.get(tracker); if (trackerSet != null) { trackerSet.remove(taskid); } } // taskid --> TIP taskidToTIPMap.remove(taskid); LOG.debug("Removing task '" + taskid + "'"); } /** * Mark a 'task' for removal later. * This function assumes that the JobTracker is locked on entry. 
* * @param taskTracker the tasktracker at which the 'task' was running * @param taskid completed (success/failure/killed) task */ void markCompletedTaskAttempt(String taskTracker, TaskAttemptID taskid) { // tracker --> taskid Set<TaskAttemptID> taskset = trackerToMarkedTasksMap.get(taskTracker); if (taskset == null) { taskset = new TreeSet<TaskAttemptID>(); trackerToMarkedTasksMap.put(taskTracker, taskset); } taskset.add(taskid); LOG.debug("Marked '" + taskid + "' from '" + taskTracker + "'"); } /** * Mark all 'non-running' jobs of the job for pruning. * This function assumes that the JobTracker is locked on entry. * * @param job the completed job */ void markCompletedJob(JobInProgress job) { for (TaskInProgress tip : job.getSetupTasks()) { for (TaskStatus taskStatus : tip.getTaskStatuses()) { if (taskStatus.getRunState() != TaskStatus.State.RUNNING && taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && taskStatus.getRunState() != TaskStatus.State.UNASSIGNED) { markCompletedTaskAttempt(taskStatus.getTaskTracker(), taskStatus.getTaskID()); } } } for (TaskInProgress tip : job.getMapTasks()) { for (TaskStatus taskStatus : tip.getTaskStatuses()) { if (taskStatus.getRunState() != TaskStatus.State.RUNNING && taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && taskStatus.getRunState() != TaskStatus.State.FAILED_UNCLEAN && taskStatus.getRunState() != TaskStatus.State.KILLED_UNCLEAN && taskStatus.getRunState() != TaskStatus.State.UNASSIGNED) { markCompletedTaskAttempt(taskStatus.getTaskTracker(), taskStatus.getTaskID()); } } } for (TaskInProgress tip : job.getReduceTasks()) { for (TaskStatus taskStatus : tip.getTaskStatuses()) { if (taskStatus.getRunState() != TaskStatus.State.RUNNING && taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && taskStatus.getRunState() != TaskStatus.State.FAILED_UNCLEAN && taskStatus.getRunState() != TaskStatus.State.KILLED_UNCLEAN && taskStatus.getRunState() != TaskStatus.State.UNASSIGNED) { 
markCompletedTaskAttempt(taskStatus.getTaskTracker(), taskStatus.getTaskID()); } } } } /** * Remove all 'marked' tasks running on a given {@link TaskTracker} * from the {@link JobTracker}'s data-structures. * This function assumes that the JobTracker is locked on entry. * * @param taskTracker tasktracker whose 'non-running' tasks are to be purged */ private void removeMarkedTasks(String taskTracker) { // Purge all the 'marked' tasks which were running at taskTracker Set<TaskAttemptID> markedTaskSet = trackerToMarkedTasksMap.get(taskTracker); if (markedTaskSet != null) { for (TaskAttemptID taskid : markedTaskSet) { removeTaskEntry(taskid); LOG.info("Removed completed task '" + taskid + "' from '" + taskTracker + "'"); } // Clear trackerToMarkedTasksMap.remove(taskTracker); } } /** * Call {@link #removeTaskEntry(String)} for each of the * job's tasks. * When the JobTracker is retiring the long-completed * job, either because it has outlived {@link #RETIRE_JOB_INTERVAL} * or the limit of {@link #MAX_COMPLETE_USER_JOBS_IN_MEMORY} jobs * has been reached, we can afford to nuke all it's tasks; a little * unsafe, but practically feasible. * * @param job the job about to be 'retired' */ synchronized private void removeJobTasks(JobInProgress job) { for (TaskInProgress tip : job.getMapTasks()) { for (TaskStatus taskStatus : tip.getTaskStatuses()) { removeTaskEntry(taskStatus.getTaskID()); } } for (TaskInProgress tip : job.getReduceTasks()) { for (TaskStatus taskStatus : tip.getTaskStatuses()) { removeTaskEntry(taskStatus.getTaskID()); } } } /** * Safe clean-up all data structures at the end of the * job (success/failure/killed). * Here we also ensure that for a given user we maintain * information for only MAX_COMPLETE_USER_JOBS_IN_MEMORY jobs * on the JobTracker. * * @param job completed job. 
*/ synchronized void finalizeJob(JobInProgress job) { // Mark the 'non-running' tasks for pruning markCompletedJob(job); JobEndNotifier.registerNotification(job.getJobConf(), job.getStatus()); // start the merge of log files JobID id = job.getStatus().getJobID(); if (job.hasRestarted()) { try { JobHistory.JobInfo.finalizeRecovery(id, job.getJobConf()); } catch (IOException ioe) { LOG.info("Failed to finalize the log file recovery for job " + id, ioe); } } // mark the job as completed try { JobHistory.JobInfo.markCompleted(id); } catch (IOException ioe) { LOG.info("Failed to mark job " + id + " as completed!", ioe); } final JobTrackerInstrumentation metrics = getInstrumentation(); metrics.finalizeJob(conf, id); long now = System.currentTimeMillis(); // mark the job for cleanup at all the trackers addJobForCleanup(id); try { File userFileForJob = new File(lDirAlloc.getLocalPathToRead(SUBDIR + "/" + id, conf).toString()); if (userFileForJob != null) { userFileForJob.delete(); } } catch (IOException ioe) { LOG.info("Failed to delete job id mapping for job " + id, ioe); } // add the blacklisted trackers to potentially faulty list if (job.getStatus().getRunState() == JobStatus.SUCCEEDED) { if (job.getNoOfBlackListedTrackers() > 0) { for (String hostName : job.getBlackListedTrackers()) { faultyTrackers.incrementFaults(hostName); } } } String jobUser = job.getProfile().getUser(); //add to the user to jobs mapping synchronized (userToJobsMap) { ArrayList<JobInProgress> userJobs = userToJobsMap.get(jobUser); if (userJobs == null) { userJobs = new ArrayList<JobInProgress>(); userToJobsMap.put(jobUser, userJobs); } userJobs.add(job); } } /////////////////////////////////////////////////////// // Accessors for objects that want info on jobs, tasks, // trackers, etc. 
/////////////////////////////////////////////////////// public int getTotalSubmissions() { return totalSubmissions; } public String getJobTrackerMachine() { return localMachine; } /** * Get the unique identifier (ie. timestamp) of this job tracker start. * @return a string with a unique identifier */ public String getTrackerIdentifier() { return trackerIdentifier; } public int getTrackerPort() { return port; } public int getInfoPort() { return infoPort; } public long getStartTime() { return startTime; } public Vector<JobInProgress> runningJobs() { Vector<JobInProgress> v = new Vector<JobInProgress>(); for (Iterator it = jobs.values().iterator(); it.hasNext();) { JobInProgress jip = (JobInProgress) it.next(); JobStatus status = jip.getStatus(); if (status.getRunState() == JobStatus.RUNNING) { v.add(jip); } } return v; } /** * Version that is called from a timer thread, and therefore needs to be * careful to synchronize. */ public synchronized List<JobInProgress> getRunningJobs() { synchronized (jobs) { return runningJobs(); } } public Vector<JobInProgress> failedJobs() { Vector<JobInProgress> v = new Vector<JobInProgress>(); for (Iterator it = jobs.values().iterator(); it.hasNext();) { JobInProgress jip = (JobInProgress) it.next(); JobStatus status = jip.getStatus(); if ((status.getRunState() == JobStatus.FAILED) || (status.getRunState() == JobStatus.KILLED)) { v.add(jip); } } return v; } public Vector<JobInProgress> completedJobs() { Vector<JobInProgress> v = new Vector<JobInProgress>(); for (Iterator it = jobs.values().iterator(); it.hasNext();) { JobInProgress jip = (JobInProgress) it.next(); JobStatus status = jip.getStatus(); if (status.getRunState() == JobStatus.SUCCEEDED) { v.add(jip); } } return v; } /** * Get all the task trackers in the cluster * * @return {@link Collection} of {@link TaskTrackerStatus} */ - public Collection<TaskTrackerStatus> taskTrackers() { + // lock to taskTrackers should hold JT lock first. 
+ public synchronized Collection<TaskTrackerStatus> taskTrackers() { Collection<TaskTrackerStatus> ttStatuses; synchronized (taskTrackers) { ttStatuses = new ArrayList<TaskTrackerStatus>(taskTrackers.values().size()); for (TaskTracker tt : taskTrackers.values()) { ttStatuses.add(tt.getStatus()); } } return ttStatuses; } /** * Get the active task tracker statuses in the cluster * * @return {@link Collection} of active {@link TaskTrackerStatus} */ - public Collection<TaskTrackerStatus> activeTaskTrackers() { + // This method is synchronized to make sure that the locking order + // "taskTrackers lock followed by faultyTrackers.potentiallyFaultyTrackers + // lock" is under JobTracker lock to avoid deadlocks. + synchronized public Collection<TaskTrackerStatus> activeTaskTrackers() { Collection<TaskTrackerStatus> activeTrackers = new ArrayList<TaskTrackerStatus>(); synchronized (taskTrackers) { for ( TaskTracker tt : taskTrackers.values()) { TaskTrackerStatus status = tt.getStatus(); if (!faultyTrackers.isBlacklisted(status.getHost())) { activeTrackers.add(status); } } } return activeTrackers; } /** * Get the active and blacklisted task tracker names in the cluster. The first * element in the returned list contains the list of active tracker names. * The second element in the returned list contains the list of blacklisted * tracker names. */ - public List<List<String>> taskTrackerNames() { + // This method is synchronized to make sure that the locking order + // "taskTrackers lock followed by faultyTrackers.potentiallyFaultyTrackers + // lock" is under JobTracker lock to avoid deadlocks. 
+ synchronized public List<List<String>> taskTrackerNames() { List<String> activeTrackers = new ArrayList<String>(); List<String> blacklistedTrackers = new ArrayList<String>(); synchronized (taskTrackers) { for (TaskTracker tt : taskTrackers.values()) { TaskTrackerStatus status = tt.getStatus(); if (!faultyTrackers.isBlacklisted(status.getHost())) { activeTrackers.add(status.getTrackerName()); } else { blacklistedTrackers.add(status.getTrackerName()); } } } List<List<String>> result = new ArrayList<List<String>>(2); result.add(activeTrackers); result.add(blacklistedTrackers); return result; } /** * Get the blacklisted task tracker statuses in the cluster * * @return {@link Collection} of blacklisted {@link TaskTrackerStatus} */ - public Collection<TaskTrackerStatus> blacklistedTaskTrackers() { + // This method is synchronized to make sure that the locking order + // "taskTrackers lock followed by faultyTrackers.potentiallyFaultyTrackers + // lock" is under JobTracker lock to avoid deadlocks. 
+ synchronized public Collection<TaskTrackerStatus> blacklistedTaskTrackers() { Collection<TaskTrackerStatus> blacklistedTrackers = new ArrayList<TaskTrackerStatus>(); synchronized (taskTrackers) { for (TaskTracker tt : taskTrackers.values()) { TaskTrackerStatus status = tt.getStatus(); if (faultyTrackers.isBlacklisted(status.getHost())) { blacklistedTrackers.add(status); } } } return blacklistedTrackers; } - int getFaultCount(String hostName) { + synchronized int getFaultCount(String hostName) { return faultyTrackers.getFaultCount(hostName); } /** * Get the number of blacklisted trackers across all the jobs * * @return */ int getBlacklistedTrackerCount() { return faultyTrackers.numBlacklistedTrackers; } /** * Whether the tracker is blacklisted or not * * @param trackerID * * @return true if blacklisted, false otherwise */ - public boolean isBlacklisted(String trackerID) { + synchronized public boolean isBlacklisted(String trackerID) { TaskTrackerStatus status = getTaskTrackerStatus(trackerID); if (status != null) { return faultyTrackers.isBlacklisted(status.getHost()); } return false; } - public TaskTrackerStatus getTaskTrackerStatus(String trackerID) { + // lock to taskTrackers should hold JT lock first. + synchronized public TaskTrackerStatus getTaskTrackerStatus(String trackerID) { TaskTracker taskTracker; synchronized (taskTrackers) { taskTracker = taskTrackers.get(trackerID); } return (taskTracker == null) ? null : taskTracker.getStatus(); } - public TaskTracker getTaskTracker(String trackerID) { + // lock to taskTrackers should hold JT lock first. + synchronized public TaskTracker getTaskTracker(String trackerID) { synchronized (taskTrackers) { return taskTrackers.get(trackerID); } } JobTrackerStatistics getStatistics() { return statistics; } /** * Adds a new node to the jobtracker. 
It involves adding it to the expiry * thread and adding it for resolution * - * Assuming trackerExpiryQueue is locked on entry + * Assumes JobTracker, taskTrackers and trackerExpiryQueue is locked on entry * * @param status Task Tracker's status */ private void addNewTracker(TaskTracker taskTracker) { TaskTrackerStatus status = taskTracker.getStatus(); trackerExpiryQueue.add(status); // Register the tracker if its not registered String hostname = status.getHost(); if (getNode(status.getTrackerName()) == null) { // Making the network location resolution inline .. resolveAndAddToTopology(hostname); } // add it to the set of tracker per host Set<TaskTracker> trackers = hostnameToTaskTracker.get(hostname); if (trackers == null) { trackers = Collections.synchronizedSet(new HashSet<TaskTracker>()); hostnameToTaskTracker.put(hostname, trackers); } statistics.taskTrackerAdded(status.getTrackerName()); getInstrumentation().addTrackers(1); LOG.info("Adding tracker " + status.getTrackerName() + " to host " + hostname); trackers.add(taskTracker); } public Node resolveAndAddToTopology(String name) { List <String> tmpList = new ArrayList<String>(1); tmpList.add(name); List <String> rNameList = dnsToSwitchMapping.resolve(tmpList); String rName = rNameList.get(0); String networkLoc = NodeBase.normalize(rName); return addHostToNodeMapping(name, networkLoc); } private Node addHostToNodeMapping(String host, String networkLoc) { Node node; if ((node = clusterMap.getNode(networkLoc+"/"+host)) == null) { node = new NodeBase(host, networkLoc); clusterMap.add(node); if (node.getLevel() < getNumTaskCacheLevels()) { LOG.fatal("Got a host whose level is: " + node.getLevel() + "." 
+ " Should get at least a level of value: " + getNumTaskCacheLevels()); try { stopTracker(); } catch (IOException ie) { LOG.warn("Exception encountered during shutdown: " + StringUtils.stringifyException(ie)); System.exit(-1); } } hostnameToNodeMap.put(host, node); // Make an entry for the node at the max level in the cache nodesAtMaxLevel.add(getParentNode(node, getNumTaskCacheLevels() - 1)); } return node; } /** * Returns a collection of nodes at the max level */ public Collection<Node> getNodesAtMaxLevel() { return nodesAtMaxLevel; } public static Node getParentNode(Node node, int level) { for (int i = 0; i < level; ++i) { node = node.getParent(); } return node; } /** * Return the Node in the network topology that corresponds to the hostname */ public Node getNode(String name) { return hostnameToNodeMap.get(name); } public int getNumTaskCacheLevels() { return numTaskCacheLevels; } public int getNumResolvedTaskTrackers() { return numResolved; } public int getNumberOfUniqueHosts() { return uniqueHostsMap.size(); } public void addJobInProgressListener(JobInProgressListener listener) { jobInProgressListeners.add(listener); } public void removeJobInProgressListener(JobInProgressListener listener) { jobInProgressListeners.remove(listener); } // Update the listeners about the job // Assuming JobTracker is locked on entry. private void updateJobInProgressListeners(JobChangeEvent event) { for (JobInProgressListener listener : jobInProgressListeners) { listener.jobUpdated(event); } } /** * Return the {@link QueueManager} associated with the JobTracker. */ public QueueManager getQueueManager() { return queueManager; } //////////////////////////////////////////////////// // InterTrackerProtocol //////////////////////////////////////////////////// public String getBuildVersion() throws IOException{ return VersionInfo.getBuildVersion(); } /** * The periodic heartbeat mechanism between the {@link TaskTracker} and * the {@link JobTracker}. 
*
 * The {@link JobTracker} processes the status information sent by the
 * {@link TaskTracker} and responds with instructions to start/stop
 * tasks or jobs, and also 'reset' instructions during contingencies.
 */
public synchronized HeartbeatResponse heartbeat(TaskTrackerStatus status,
                                                boolean restarted,
                                                boolean initialContact,
                                                boolean acceptNewTasks,
                                                short responseId)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got heartbeat from: " + status.getTrackerName() +
              " (restarted: " + restarted +
              " initialContact: " + initialContact +
              " acceptNewTasks: " + acceptNewTasks + ")" +
              " with responseId: " + responseId);
  }

  // Make sure heartbeat is from a tasktracker allowed by the jobtracker.
  if (!acceptTaskTracker(status)) {
    throw new DisallowedTaskTrackerException(status);
  }

  // First check if the last heartbeat response got through
  String trackerName = status.getTrackerName();
  long now = System.currentTimeMillis();
  boolean isBlacklisted = false;
  if (restarted) {
    // A restarted tracker gets a clean fault slate.
    faultyTrackers.markTrackerHealthy(status.getHost());
  } else {
    isBlacklisted =
      faultyTrackers.shouldAssignTasksToTracker(status.getHost(), now);
  }

  HeartbeatResponse prevHeartbeatResponse =
    trackerToHeartbeatResponseMap.get(trackerName);
  boolean addRestartInfo = false;

  if (initialContact != true) {
    // If this isn't the 'initial contact' from the tasktracker,
    // there is something seriously wrong if the JobTracker has
    // no record of the 'previous heartbeat'; if so, ask the
    // tasktracker to re-initialize itself.
    if (prevHeartbeatResponse == null) {
      // This is the first heartbeat from the old tracker to the newly
      // started JobTracker
      if (hasRestarted()) {
        addRestartInfo = true;
        // inform the recovery manager about this tracker joining back
        recoveryManager.unMarkTracker(trackerName);
      } else {
        // Jobtracker might have restarted but no recovery is needed
        // otherwise this code should not be reached
        LOG.warn("Serious problem, cannot find record of 'previous' " +
                 "heartbeat for '" + trackerName +
                 "'; reinitializing the tasktracker");
        return new HeartbeatResponse(responseId,
            new TaskTrackerAction[] {new ReinitTrackerAction()});
      }
    } else {
      // It is completely safe to not process a 'duplicate' heartbeat from a
      // {@link TaskTracker} since it resends the heartbeat when rpcs are
      // lost see {@link TaskTracker.transmitHeartbeat()};
      // acknowledge it by re-sending the previous response to let the
      // {@link TaskTracker} go forward.
      if (prevHeartbeatResponse.getResponseId() != responseId) {
        LOG.info("Ignoring 'duplicate' heartbeat from '" + trackerName +
                 "'; resending the previous 'lost' response");
        return prevHeartbeatResponse;
      }
    }
  }

  // Process this heartbeat
  short newResponseId = (short)(responseId + 1);
  status.setLastSeen(now);
  if (!processHeartbeat(status, initialContact)) {
    // Heartbeat rejected (unknown tracker): forget it and ask the
    // tracker to re-initialize.
    if (prevHeartbeatResponse != null) {
      trackerToHeartbeatResponseMap.remove(trackerName);
    }
    return new HeartbeatResponse(newResponseId,
        new TaskTrackerAction[] {new ReinitTrackerAction()});
  }

  // Initialize the response to be sent for the heartbeat
  HeartbeatResponse response = new HeartbeatResponse(newResponseId, null);
  List<TaskTrackerAction> actions = new ArrayList<TaskTrackerAction>();
  // Re-read blacklist state: processHeartbeat may have changed it.
  isBlacklisted = faultyTrackers.isBlacklisted(status.getHost());
  // Check for new tasks to be executed on the tasktracker
  if (recoveryManager.shouldSchedule() && acceptNewTasks && !isBlacklisted) {
    TaskTrackerStatus taskTrackerStatus = getTaskTrackerStatus(trackerName);
    if (taskTrackerStatus == null) {
      LOG.warn("Unknown task tracker polling; ignoring: " + trackerName);
    } else {
      // Setup/cleanup tasks take priority; otherwise defer to the scheduler.
      List<Task> tasks = getSetupAndCleanupTasks(taskTrackerStatus);
      if (tasks == null) {
        tasks = taskScheduler.assignTasks(taskTrackers.get(trackerName));
      }
      if (tasks != null) {
        for (Task task : tasks) {
          expireLaunchingTasks.addNewTask(task.getTaskID());
          LOG.debug(trackerName + " -> LaunchTask: " + task.getTaskID());
          actions.add(new LaunchTaskAction(task));
        }
      }
    }
  }

  // Check for tasks to be killed
  List<TaskTrackerAction> killTasksList = getTasksToKill(trackerName);
  if (killTasksList != null) {
    actions.addAll(killTasksList);
  }

  // Check for jobs to be killed/cleanedup
  List<TaskTrackerAction> killJobsList = getJobsForCleanup(trackerName);
  if (killJobsList != null) {
    actions.addAll(killJobsList);
  }

  // Check for tasks whose outputs can be saved
  List<TaskTrackerAction> commitTasksList = getTasksToSave(status);
  if (commitTasksList != null) {
    actions.addAll(commitTasksList);
  }

  // calculate next heartbeat interval and put in heartbeat response
  int nextInterval = getNextHeartbeatInterval();
  response.setHeartbeatInterval(nextInterval);
  response.setActions(
      actions.toArray(new TaskTrackerAction[actions.size()]));

  // check if the restart info is req
  if (addRestartInfo) {
    response.setRecoveredJobs(recoveryManager.getJobsToRecover());
  }

  // Update the trackerToHeartbeatResponseMap
  trackerToHeartbeatResponseMap.put(trackerName, response);

  // Done processing the heartbeat, now remove 'marked' tasks
  removeMarkedTasks(trackerName);

  return response;
}

/**
 * Calculates next heartbeat interval using cluster size.
 * Heartbeat interval is incremented by 1 second for every 100 nodes by default.
 * @return next heartbeat interval.
 */
public int getNextHeartbeatInterval() {
  // get the no of task trackers
  int clusterSize = getClusterStatus().getTaskTrackers();
  int heartbeatInterval = Math.max(
      (int)(1000 * HEARTBEATS_SCALING_FACTOR *
            Math.ceil((double)clusterSize / NUM_HEARTBEATS_IN_SECOND)),
      HEARTBEAT_INTERVAL_MIN);
  return heartbeatInterval;
}

/**
 * Return if the specified tasktracker is in the hosts list,
 * if one was configured. If none was configured, then this
 * returns true.
 */
private boolean inHostsList(TaskTrackerStatus status) {
  Set<String> hostsList = hostsReader.getHosts();
  return (hostsList.isEmpty() || hostsList.contains(status.getHost()));
}

/**
 * Return if the specified tasktracker is in the exclude list.
 */
private boolean inExcludedHostsList(TaskTrackerStatus status) {
  Set<String> excludeList = hostsReader.getExcludedHosts();
  return excludeList.contains(status.getHost());
}

/**
 * Returns true if the tasktracker is in the hosts list and
 * not in the exclude list.
 */
private boolean acceptTaskTracker(TaskTrackerStatus status) {
  return (inHostsList(status) && !inExcludedHostsList(status));
}

/**
 * Update the last recorded status for the given task tracker.
 * It assumes that the taskTrackers are locked on entry.
 * Passing a null {@code status} removes the tracker's entry from
 * {@code taskTrackers} and {@code uniqueHostsMap}.
 * @param trackerName The name of the tracker
 * @param status The new status for the task tracker
 * @return Was an old status found?
 */
private boolean updateTaskTrackerStatus(String trackerName,
                                        TaskTrackerStatus status) {
  TaskTracker tt = getTaskTracker(trackerName);
  TaskTrackerStatus oldStatus = (tt == null) ? null : tt.getStatus();
  if (oldStatus != null) {
    // Subtract the old status' contribution from the cluster-wide
    // counters before adding the new one below.
    totalMaps -= oldStatus.countMapTasks();
    totalReduces -= oldStatus.countReduceTasks();
    occupiedMapSlots -= oldStatus.countOccupiedMapSlots();
    occupiedReduceSlots -= oldStatus.countOccupiedReduceSlots();
    getInstrumentation().decRunningMaps(oldStatus.countMapTasks());
    getInstrumentation().decRunningReduces(oldStatus.countReduceTasks());
    getInstrumentation().decOccupiedMapSlots(
        oldStatus.countOccupiedMapSlots());
    getInstrumentation().decOccupiedReduceSlots(
        oldStatus.countOccupiedReduceSlots());
    // Blacklisted trackers do not count towards cluster capacity.
    if (!faultyTrackers.isBlacklisted(oldStatus.getHost())) {
      int mapSlots = oldStatus.getMaxMapSlots();
      totalMapTaskCapacity -= mapSlots;
      int reduceSlots = oldStatus.getMaxReduceSlots();
      totalReduceTaskCapacity -= reduceSlots;
    }
    if (status == null) {
      // Tracker removal: drop it and decrement the per-host count.
      taskTrackers.remove(trackerName);
      Integer numTaskTrackersInHost =
        uniqueHostsMap.get(oldStatus.getHost());
      if (numTaskTrackersInHost != null) {
        numTaskTrackersInHost--;
        if (numTaskTrackersInHost > 0) {
          uniqueHostsMap.put(oldStatus.getHost(), numTaskTrackersInHost);
        } else {
          uniqueHostsMap.remove(oldStatus.getHost());
        }
      }
    }
  }
  if (status != null) {
    // Add the new status' contribution to the cluster-wide counters.
    totalMaps += status.countMapTasks();
    totalReduces += status.countReduceTasks();
    occupiedMapSlots += status.countOccupiedMapSlots();
    occupiedReduceSlots += status.countOccupiedReduceSlots();
    getInstrumentation().addRunningMaps(status.countMapTasks());
    getInstrumentation().addRunningReduces(status.countReduceTasks());
    getInstrumentation().addOccupiedMapSlots(
        status.countOccupiedMapSlots());
    getInstrumentation().addOccupiedReduceSlots(
        status.countOccupiedReduceSlots());
    if (!faultyTrackers.isBlacklisted(status.getHost())) {
      int mapSlots = status.getMaxMapSlots();
      totalMapTaskCapacity += mapSlots;
      int reduceSlots = status.getMaxReduceSlots();
      totalReduceTaskCapacity += reduceSlots;
    }
    boolean alreadyPresent = false;
    TaskTracker taskTracker = taskTrackers.get(trackerName);
    if (taskTracker != null) {
      alreadyPresent = true;
    } else {
      taskTracker = new TaskTracker(trackerName);
    }
    taskTracker.setStatus(status);
    taskTrackers.put(trackerName, taskTracker);

    if (LOG.isDebugEnabled()) {
      // Debug-only tally of task states reported by this tracker.
      int runningMaps = 0, runningReduces = 0;
      int commitPendingMaps = 0, commitPendingReduces = 0;
      int unassignedMaps = 0, unassignedReduces = 0;
      int miscMaps = 0, miscReduces = 0;
      List<TaskStatus> taskReports = status.getTaskReports();
      for (Iterator<TaskStatus> it = taskReports.iterator(); it.hasNext();) {
        TaskStatus ts = (TaskStatus) it.next();
        boolean isMap = ts.getIsMap();
        TaskStatus.State state = ts.getRunState();
        if (state == TaskStatus.State.RUNNING) {
          if (isMap) { ++runningMaps; } else { ++runningReduces; }
        } else if (state == TaskStatus.State.UNASSIGNED) {
          if (isMap) { ++unassignedMaps; } else { ++unassignedReduces; }
        } else if (state == TaskStatus.State.COMMIT_PENDING) {
          if (isMap) { ++commitPendingMaps; } else { ++commitPendingReduces; }
        } else {
          if (isMap) { ++miscMaps; } else { ++miscReduces; }
        }
      }
      LOG.debug(trackerName + ": Status -" +
                " running(m) = " + runningMaps +
                " unassigned(m) = " + unassignedMaps +
                " commit_pending(m) = " + commitPendingMaps +
                " misc(m) = " + miscMaps +
                " running(r) = " + runningReduces +
                " unassigned(r) = " + unassignedReduces +
                " commit_pending(r) = " + commitPendingReduces +
                " misc(r) = " + miscReduces);
    }
    if (!alreadyPresent) {
      Integer numTaskTrackersInHost = uniqueHostsMap.get(status.getHost());
      if (numTaskTrackersInHost == null) {
        numTaskTrackersInHost = 0;
      }
      numTaskTrackersInHost++;
      uniqueHostsMap.put(status.getHost(), numTaskTrackersInHost);
    }
  }
  getInstrumentation().setMapSlots(totalMapTaskCapacity);
  getInstrumentation().setReduceSlots(totalReduceTaskCapacity);
  return oldStatus != null;
}

// Increment the number of reserved slots in the cluster.
// This method assumes the caller has JobTracker lock.
void incrementReservations(TaskType type, int reservedSlots) {
  if (type.equals(TaskType.MAP)) {
    reservedMapSlots += reservedSlots;
  } else if (type.equals(TaskType.REDUCE)) {
    reservedReduceSlots += reservedSlots;
  }
}

// Decrement the number of reserved slots in the cluster.
// This method assumes the caller has JobTracker lock.
void decrementReservations(TaskType type, int reservedSlots) {
  if (type.equals(TaskType.MAP)) {
    reservedMapSlots -= reservedSlots;
  } else if (type.equals(TaskType.REDUCE)) {
    reservedReduceSlots -= reservedSlots;
  }
}

// Record the latest node-health report for this tracker's host,
// under the faultyTrackers lock.
private void updateNodeHealthStatus(TaskTrackerStatus trackerStatus) {
  TaskTrackerHealthStatus status = trackerStatus.getHealthStatus();
  synchronized (faultyTrackers) {
    faultyTrackers.setNodeHealthStatus(trackerStatus.getHost(),
        status.isNodeHealthy(), status.getHealthReport());
  }
}

/**
 * Process incoming heartbeat messages from the task trackers.
 * Lock order visible here: JobTracker monitor (method is synchronized),
 * then taskTrackers, then trackerExpiryQueue.
 * @return false if the tracker is unknown (not 'initial contact' and no
 *         prior record), true otherwise.
 */
private synchronized boolean processHeartbeat(
    TaskTrackerStatus trackerStatus, boolean initialContact) {
  String trackerName = trackerStatus.getTrackerName();
  synchronized (taskTrackers) {
    synchronized (trackerExpiryQueue) {
      boolean seenBefore = updateTaskTrackerStatus(trackerName,
                                                   trackerStatus);
      TaskTracker taskTracker = getTaskTracker(trackerName);
      if (initialContact) {
        // If it's first contact, then clear out
        // any state hanging around
        if (seenBefore) {
          lostTaskTracker(taskTracker);
        }
      } else {
        // If not first contact, there should be some record of the tracker
        if (!seenBefore) {
          LOG.warn("Status from unknown Tracker : " + trackerName);
          // Roll back the status recorded above for the unknown tracker.
          updateTaskTrackerStatus(trackerName, null);
          return false;
        }
      }
      if (initialContact) {
        // if this is lost tracker that came back now, and if it blacklisted
        // increment the count of blacklisted trackers in the cluster
        if (isBlacklisted(trackerName)) {
          faultyTrackers.incrBlackListedTrackers(1);
        }
        addNewTracker(taskTracker);
      }
    }
  }

  updateTaskStatuses(trackerStatus);
  updateNodeHealthStatus(trackerStatus);

  return true;
}

/** * A tracker wants to know if
any of its Tasks have been * closed (because the job completed, whether successfully or not) */ private synchronized List<TaskTrackerAction> getTasksToKill( String taskTracker) { @@ -3784,803 +3809,809 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, void storeCompletedJob(JobInProgress job) { //persists the job info in DFS completedJobStatusStore.store(job); } public JobProfile getJobProfile(JobID jobid) { synchronized (this) { JobInProgress job = jobs.get(jobid); if (job != null) { return job.getProfile(); } else { RetireJobInfo info = retireJobs.get(jobid); if (info != null) { return info.profile; } } } return completedJobStatusStore.readJobProfile(jobid); } public JobStatus getJobStatus(JobID jobid) { if (null == jobid) { LOG.warn("JobTracker.getJobStatus() cannot get status for null jobid"); return null; } synchronized (this) { JobInProgress job = jobs.get(jobid); if (job != null) { return job.getStatus(); } else { RetireJobInfo info = retireJobs.get(jobid); if (info != null) { return info.status; } } } return completedJobStatusStore.readJobStatus(jobid); } public Counters getJobCounters(JobID jobid) { synchronized (this) { JobInProgress job = jobs.get(jobid); if (job != null) { return job.getCounters(); } } return completedJobStatusStore.readCounters(jobid); } public synchronized TaskReport[] getMapTaskReports(JobID jobid) { JobInProgress job = jobs.get(jobid); if (job == null) { return new TaskReport[0]; } else { Vector<TaskReport> reports = new Vector<TaskReport>(); Vector<TaskInProgress> completeMapTasks = job.reportTasksInProgress(true, true); for (Iterator it = completeMapTasks.iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); reports.add(tip.generateSingleReport()); } Vector<TaskInProgress> incompleteMapTasks = job.reportTasksInProgress(true, false); for (Iterator it = incompleteMapTasks.iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); 
reports.add(tip.generateSingleReport()); } return reports.toArray(new TaskReport[reports.size()]); } }

/**
 * Get the reports (complete followed by incomplete) for the reduce
 * tasks of a job; empty array if the job is unknown.
 */
public synchronized TaskReport[] getReduceTaskReports(JobID jobid) {
  JobInProgress job = jobs.get(jobid);
  if (job == null) {
    return new TaskReport[0];
  } else {
    Vector<TaskReport> reports = new Vector<TaskReport>();
    Vector completeReduceTasks = job.reportTasksInProgress(false, true);
    for (Iterator it = completeReduceTasks.iterator(); it.hasNext();) {
      TaskInProgress tip = (TaskInProgress) it.next();
      reports.add(tip.generateSingleReport());
    }
    Vector incompleteReduceTasks = job.reportTasksInProgress(false, false);
    for (Iterator it = incompleteReduceTasks.iterator(); it.hasNext();) {
      TaskInProgress tip = (TaskInProgress) it.next();
      reports.add(tip.generateSingleReport());
    }
    return reports.toArray(new TaskReport[reports.size()]);
  }
}

/**
 * Get the reports (complete followed by incomplete) for the cleanup
 * tasks of a job; empty array if the job is unknown.
 */
public synchronized TaskReport[] getCleanupTaskReports(JobID jobid) {
  JobInProgress job = jobs.get(jobid);
  if (job == null) {
    return new TaskReport[0];
  } else {
    Vector<TaskReport> reports = new Vector<TaskReport>();
    Vector<TaskInProgress> completeTasks = job.reportCleanupTIPs(true);
    for (Iterator<TaskInProgress> it = completeTasks.iterator();
         it.hasNext();) {
      TaskInProgress tip = (TaskInProgress) it.next();
      reports.add(tip.generateSingleReport());
    }
    Vector<TaskInProgress> incompleteTasks = job.reportCleanupTIPs(false);
    for (Iterator<TaskInProgress> it = incompleteTasks.iterator();
         it.hasNext();) {
      TaskInProgress tip = (TaskInProgress) it.next();
      reports.add(tip.generateSingleReport());
    }
    return reports.toArray(new TaskReport[reports.size()]);
  }
}

/**
 * Get the reports (complete followed by incomplete) for the setup
 * tasks of a job; empty array if the job is unknown.
 */
public synchronized TaskReport[] getSetupTaskReports(JobID jobid) {
  JobInProgress job = jobs.get(jobid);
  if (job == null) {
    return new TaskReport[0];
  } else {
    Vector<TaskReport> reports = new Vector<TaskReport>();
    Vector<TaskInProgress> completeTasks = job.reportSetupTIPs(true);
    for (Iterator<TaskInProgress> it = completeTasks.iterator();
         it.hasNext();) {
      TaskInProgress tip = (TaskInProgress) it.next();
      reports.add(tip.generateSingleReport());
    }
    Vector<TaskInProgress> incompleteTasks = job.reportSetupTIPs(false);
    for (Iterator<TaskInProgress> it = incompleteTasks.iterator();
         it.hasNext();) {
      TaskInProgress tip = (TaskInProgress) it.next();
      reports.add(tip.generateSingleReport());
    }
    return reports.toArray(new TaskReport[reports.size()]);
  }
}

// Shared empty result for jobs that are not yet initialized.
TaskCompletionEvent[] EMPTY_EVENTS = new TaskCompletionEvent[0];

static final String MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY =
  "mapred.cluster.map.memory.mb";
static final String MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY =
  "mapred.cluster.reduce.memory.mb";
static final String MAPRED_CLUSTER_MAX_MAP_MEMORY_MB_PROPERTY =
  "mapred.cluster.max.map.memory.mb";
static final String MAPRED_CLUSTER_MAX_REDUCE_MEMORY_MB_PROPERTY =
  "mapred.cluster.max.reduce.memory.mb";

/*
 * Returns a list of TaskCompletionEvent for the given job,
 * starting from fromEventId.
 * @see org.apache.hadoop.mapred.JobSubmissionProtocol#getTaskCompletionEvents(java.lang.String, int, int)
 */
public synchronized TaskCompletionEvent[] getTaskCompletionEvents(
    JobID jobid, int fromEventId, int maxEvents) throws IOException {
  synchronized (this) {
    JobInProgress job = this.jobs.get(jobid);
    if (null != job) {
      if (job.inited()) {
        return job.getTaskCompletionEvents(fromEventId, maxEvents);
      } else {
        return EMPTY_EVENTS;
      }
    }
  }
  // Job no longer in memory: fall back to the persisted status store.
  return completedJobStatusStore.readJobTaskCompletionEvents(jobid,
                                                             fromEventId,
                                                             maxEvents);
}

/**
 * Get the diagnostics for a given task
 * @param taskId the id of the task
 * @return an array of the diagnostic messages
 */
public synchronized String[] getTaskDiagnostics(TaskAttemptID taskId)
    throws IOException {
  List<String> taskDiagnosticInfo = null;
  JobID jobId = taskId.getJobID();
  TaskID tipId = taskId.getTaskID();
  JobInProgress job = jobs.get(jobId);
  if (job != null) {
    TaskInProgress tip = job.getTaskInProgress(tipId);
    if (tip != null) {
      taskDiagnosticInfo = tip.getDiagnosticInfo(taskId);
    }
  }
  return ((taskDiagnosticInfo == null) ?
new String[0] : taskDiagnosticInfo.toArray(new String[0]));
}

/** Get all the TaskStatuses from the tipid. */
TaskStatus[] getTaskStatuses(TaskID tipid) {
  TaskInProgress tip = getTip(tipid);
  return (tip == null ? new TaskStatus[0] : tip.getTaskStatuses());
}

/** Returns the TaskStatus for a particular taskid. */
TaskStatus getTaskStatus(TaskAttemptID taskid) {
  TaskInProgress tip = getTip(taskid.getTaskID());
  return (tip == null ? null : tip.getTaskStatus(taskid));
}

/**
 * Returns the counters for the specified task in progress.
 */
Counters getTipCounters(TaskID tipid) {
  TaskInProgress tip = getTip(tipid);
  return (tip == null ? null : tip.getCounters());
}

/**
 * Returns the configured task scheduler for this job tracker.
 * @return the configured task scheduler
 */
TaskScheduler getTaskScheduler() {
  return taskScheduler;
}

/**
 * Returns specified TaskInProgress, or null.
 */
public TaskInProgress getTip(TaskID tipid) {
  JobInProgress job = jobs.get(tipid.getJobID());
  return (job == null ? null : job.getTaskInProgress(tipid));
}

/**
 * Mark a Task to be killed.
 * @return false if the task attempt is unknown to the JobTracker.
 */
public synchronized boolean killTask(TaskAttemptID taskid,
                                     boolean shouldFail)
    throws IOException {
  TaskInProgress tip = taskidToTIPMap.get(taskid);
  if (tip != null) {
    // Caller must be allowed to administer the owning job's queue.
    checkAccess(tip.getJob(), QueueManager.QueueOperation.ADMINISTER_JOBS);
    return tip.killTask(taskid, shouldFail);
  } else {
    LOG.info("Kill task attempt failed since task " + taskid +
             " was not found");
    return false;
  }
}

/**
 * Get tracker name for a given task id.
 * @param taskId the name of the task
 * @return The name of the task tracker
 */
public synchronized String getAssignedTracker(TaskAttemptID taskId) {
  return taskidToTrackerMap.get(taskId);
}

public JobStatus[] jobsToComplete() {
  return getJobStatus(jobs.values(), true);
}

// All in-memory job statuses plus retired jobs' statuses.
public JobStatus[] getAllJobs() {
  List<JobStatus> list = new ArrayList<JobStatus>();
  list.addAll(Arrays.asList(getJobStatus(jobs.values(), false)));
  list.addAll(retireJobs.getAllJobStatus());
  return list.toArray(new JobStatus[list.size()]);
}

/**
 * @see org.apache.hadoop.mapred.JobSubmissionProtocol#getSystemDir()
 */
public String getSystemDir() {
  Path sysDir =
    new Path(conf.get("mapred.system.dir", "/tmp/hadoop/mapred/system"));
  return fs.makeQualified(sysDir).toString();
}

///////////////////////////////////////////////////////////////
// JobTracker methods
///////////////////////////////////////////////////////////////
public JobInProgress getJob(JobID jobid) {
  return jobs.get(jobid);
}

// Get the job directory in system directory
Path getSystemDirectoryForJob(JobID id) {
  return new Path(getSystemDir(), id.toString());
}

/**
 * Change the run-time priority of the given job.
 * @param jobId job id
 * @param priority new {@link JobPriority} for the job
 */
synchronized void setJobPriority(JobID jobId, JobPriority priority) {
  JobInProgress job = jobs.get(jobId);
  if (job != null) {
    synchronized (taskScheduler) {
      // Snapshot old/new status so listeners see the transition.
      JobStatus oldStatus = (JobStatus)job.getStatus().clone();
      job.setPriority(priority);
      JobStatus newStatus = (JobStatus)job.getStatus().clone();
      JobStatusChangeEvent event =
        new JobStatusChangeEvent(job, EventType.PRIORITY_CHANGED,
                                 oldStatus, newStatus);
      updateJobInProgressListeners(event);
    }
  } else {
    LOG.warn("Trying to change the priority of an unknown job: " + jobId);
  }
}

////////////////////////////////////////////////////
// Methods to track all the TaskTrackers
////////////////////////////////////////////////////
/**
 * Accept and process a new TaskTracker profile.
We might
 * have known about the TaskTracker previously, or it might
 * be brand-new. All task-tracker structures have already
 * been updated. Just process the contained tasks and any
 * jobs that might be affected.
 */
void updateTaskStatuses(TaskTrackerStatus status) {
  String trackerName = status.getTrackerName();
  for (TaskStatus report : status.getTaskReports()) {
    report.setTaskTracker(trackerName);
    TaskAttemptID taskId = report.getTaskID();

    // expire it
    expireLaunchingTasks.removeTask(taskId);

    JobInProgress job = getJob(taskId.getJobID());
    if (job == null) {
      // if job is not there in the cleanup list ... add it
      synchronized (trackerToJobsToCleanup) {
        Set<JobID> jobs = trackerToJobsToCleanup.get(trackerName);
        if (jobs == null) {
          jobs = new HashSet<JobID>();
          trackerToJobsToCleanup.put(trackerName, jobs);
        }
        jobs.add(taskId.getJobID());
      }
      continue;
    }

    if (!job.inited()) {
      // if job is not yet initialized ... kill the attempt
      synchronized (trackerToTasksToCleanup) {
        Set<TaskAttemptID> tasks = trackerToTasksToCleanup.get(trackerName);
        if (tasks == null) {
          tasks = new HashSet<TaskAttemptID>();
          trackerToTasksToCleanup.put(trackerName, tasks);
        }
        tasks.add(taskId);
      }
      continue;
    }

    TaskInProgress tip = taskidToTIPMap.get(taskId);
    // Check if the tip is known to the jobtracker. In case of a restarted
    // jt, some tasks might join in later
    if (tip != null || hasRestarted()) {
      if (tip == null) {
        // Late-joining attempt after a JT restart: attach it to its TIP.
        tip = job.getTaskInProgress(taskId.getTaskID());
        job.addRunningTaskToTIP(tip, taskId, status, false);
      }

      // Update the job and inform the listeners if necessary
      JobStatus prevStatus = (JobStatus)job.getStatus().clone();

      // Clone TaskStatus object here, because JobInProgress
      // or TaskInProgress can modify this object and
      // the changes should not get reflected in TaskTrackerStatus.
      // An old TaskTrackerStatus is used later in countMapTasks, etc.
      job.updateTaskStatus(tip, (TaskStatus)report.clone());

      JobStatus newStatus = (JobStatus)job.getStatus().clone();

      // Update the listeners if an incomplete job completes
      if (prevStatus.getRunState() != newStatus.getRunState()) {
        JobStatusChangeEvent event =
          new JobStatusChangeEvent(job, EventType.RUN_STATE_CHANGED,
                                   prevStatus, newStatus);
        updateJobInProgressListeners(event);
      }
    } else {
      LOG.info("Serious problem.  While updating status, cannot find taskid "
               + report.getTaskID());
    }

    // Process 'failed fetch' notifications
    List<TaskAttemptID> failedFetchMaps = report.getFetchFailedMaps();
    if (failedFetchMaps != null) {
      for (TaskAttemptID mapTaskId : failedFetchMaps) {
        TaskInProgress failedFetchMap = taskidToTIPMap.get(mapTaskId);

        if (failedFetchMap != null) {
          // Gather information about the map which has to be failed, if need be
          String failedFetchTrackerName = getAssignedTracker(mapTaskId);
          if (failedFetchTrackerName == null) {
            failedFetchTrackerName = "Lost task tracker";
          }
          failedFetchMap.getJob().fetchFailureNotification(failedFetchMap,
                                                           mapTaskId,
                                                           failedFetchTrackerName);
        }
      }
    }
  }
}

/**
 * We lost the task tracker!  All task-tracker structures have
 * already been updated. Just process the contained tasks and any
 * jobs that might be affected.
*/ void lostTaskTracker(TaskTracker taskTracker) { String trackerName = taskTracker.getTrackerName(); LOG.info("Lost tracker '" + trackerName + "'"); // remove the tracker from the local structures synchronized (trackerToJobsToCleanup) { trackerToJobsToCleanup.remove(trackerName); } synchronized (trackerToTasksToCleanup) { trackerToTasksToCleanup.remove(trackerName); } // Inform the recovery manager recoveryManager.unMarkTracker(trackerName); Set<TaskAttemptID> lostTasks = trackerToTaskMap.get(trackerName); trackerToTaskMap.remove(trackerName); if (lostTasks != null) { // List of jobs which had any of their tasks fail on this tracker Set<JobInProgress> jobsWithFailures = new HashSet<JobInProgress>(); for (TaskAttemptID taskId : lostTasks) { TaskInProgress tip = taskidToTIPMap.get(taskId); JobInProgress job = tip.getJob(); // Completed reduce tasks never need to be failed, because // their outputs go to dfs // And completed maps with zero reducers of the job // never need to be failed. if (!tip.isComplete() || (tip.isMapTask() && !tip.isJobSetupTask() && job.desiredReduces() != 0)) { // if the job is done, we don't want to change anything if (job.getStatus().getRunState() == JobStatus.RUNNING || job.getStatus().getRunState() == JobStatus.PREP) { // the state will be KILLED_UNCLEAN, if the task(map or reduce) // was RUNNING on the tracker TaskStatus.State killState = (tip.isRunningTask(taskId) && !tip.isJobSetupTask() && !tip.isJobCleanupTask()) ? TaskStatus.State.KILLED_UNCLEAN : TaskStatus.State.KILLED; job.failedTask(tip, taskId, ("Lost task tracker: " + trackerName), (tip.isMapTask() ? TaskStatus.Phase.MAP : TaskStatus.Phase.REDUCE), killState, trackerName); jobsWithFailures.add(job); } } else { // Completed 'reduce' task and completed 'maps' with zero // reducers of the job, not failed; // only removed from data-structures. 
markCompletedTaskAttempt(trackerName, taskId); } } // Penalize this tracker for each of the jobs which // had any tasks running on it when it was 'lost' // Also, remove any reserved slots on this tasktracker for (JobInProgress job : jobsWithFailures) { job.addTrackerTaskFailure(trackerName, taskTracker); } // Cleanup taskTracker.cancelAllReservations(); // Purge 'marked' tasks, needs to be done // here to prevent hanging references! removeMarkedTasks(trackerName); } } /** * Rereads the config to get hosts and exclude list file names. * Rereads the files to update the hosts and exclude lists. */ public synchronized void refreshNodes() throws IOException { // check access PermissionChecker.checkSuperuserPrivilege(mrOwner, supergroup); // call the actual api refreshHosts(); } private synchronized void refreshHosts() throws IOException { // Reread the config to get mapred.hosts and mapred.hosts.exclude filenames. // Update the file names and refresh internal includes and excludes list LOG.info("Refreshing hosts information"); Configuration conf = new Configuration(); hostsReader.updateFileNames(conf.get("mapred.hosts",""), conf.get("mapred.hosts.exclude", "")); hostsReader.refresh(); Set<String> excludeSet = new HashSet<String>(); for(Map.Entry<String, TaskTracker> eSet : taskTrackers.entrySet()) { String trackerName = eSet.getKey(); TaskTrackerStatus status = eSet.getValue().getStatus(); // Check if not include i.e not in host list or in hosts list but excluded if (!inHostsList(status) || inExcludedHostsList(status)) { excludeSet.add(status.getHost()); // add to rejected trackers } } decommissionNodes(excludeSet); } + // Assumes JobTracker, taskTrackers and trackerExpiryQueue is locked on entry // Remove a tracker from the system private void removeTracker(TaskTracker tracker) { String trackerName = tracker.getTrackerName(); // Remove completely after marking the tasks as 'KILLED' lostTaskTracker(tracker); // tracker is lost, and if it is blacklisted, remove // it 
from the count of blacklisted trackers in the cluster if (isBlacklisted(trackerName)) { faultyTrackers.decrBlackListedTrackers(1); } updateTaskTrackerStatus(trackerName, null); statistics.taskTrackerRemoved(trackerName); getInstrumentation().decTrackers(1); } // main decommission synchronized void decommissionNodes(Set<String> hosts) throws IOException { LOG.info("Decommissioning " + hosts.size() + " nodes"); // create a list of tracker hostnames synchronized (taskTrackers) { synchronized (trackerExpiryQueue) { int trackersDecommissioned = 0; for (String host : hosts) { LOG.info("Decommissioning host " + host); Set<TaskTracker> trackers = hostnameToTaskTracker.remove(host); if (trackers != null) { for (TaskTracker tracker : trackers) { LOG.info("Decommission: Losing tracker " + tracker.getTrackerName() + " on host " + host); removeTracker(tracker); } trackersDecommissioned += trackers.size(); } LOG.info("Host " + host + " is ready for decommissioning"); } getInstrumentation().setDecommissionedTrackers(trackersDecommissioned); } } } /** * Returns a set of excluded nodes. */ Collection<String> getExcludedNodes() { return hostsReader.getExcludedHosts(); } /** * Get the localized job file path on the job trackers local file system * @param jobId id of the job * @return the path of the job conf file on the local file system */ public static String getLocalJobFilePath(JobID jobId){ return JobHistory.JobInfo.getLocalJobFilePath(jobId); } //////////////////////////////////////////////////////////// // main() //////////////////////////////////////////////////////////// /** * Start the JobTracker process. This is used only for debugging. As a rule, * JobTracker should be run as part of the DFS Namenode process. 
*/
public static void main(String argv[]) throws IOException, InterruptedException {
  StringUtils.startupShutdownMessage(JobTracker.class, argv, LOG);
  try {
    if (argv.length == 0) {
      // No arguments: run the tracker service until shut down.
      JobTracker tracker = startTracker(new JobConf());
      tracker.offerService();
    } else {
      if ("-dumpConfiguration".equals(argv[0]) && argv.length == 1) {
        dumpConfiguration(new PrintWriter(System.out));
      } else {
        System.out.println("usage: JobTracker [-dumpConfiguration]");
        System.exit(-1);
      }
    }
  } catch (Throwable e) {
    LOG.fatal(StringUtils.stringifyException(e));
    System.exit(-1);
  }
}

/**
 * Dumps the configuration properties in Json format
 * @param writer {@link Writer} object to which the output is written
 * @throws IOException
 */
private static void dumpConfiguration(Writer writer) throws IOException {
  Configuration.dumpConfiguration(new JobConf(), writer);
  writer.write("\n");
  // get the QueueManager configuration properties
  QueueManager.dumpConfiguration(writer);
  writer.write("\n");
}

@Override
public JobQueueInfo[] getQueues() throws IOException {
  return queueManager.getJobQueueInfos();
}

@Override
public JobQueueInfo getQueueInfo(String queue) throws IOException {
  return queueManager.getJobQueueInfo(queue);
}

@Override
public JobStatus[] getJobsFromQueue(String queue) throws IOException {
  Collection<JobInProgress> jips = taskScheduler.getJobs(queue);
  return getJobStatus(jips,false);
}

@Override
public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException{
  return queueManager.getQueueAcls(
          UserGroupInformation.getCurrentUGI());
}

/**
 * Snapshot the statuses of the given jobs.
 * @param jips jobs to report on; may be null or empty
 * @param toComplete if true, only include RUNNING and PREP jobs
 */
private synchronized JobStatus[] getJobStatus(Collection<JobInProgress> jips,
                                              boolean toComplete) {
  if(jips == null || jips.isEmpty()) {
    return new JobStatus[]{};
  }
  ArrayList<JobStatus> jobStatusList = new ArrayList<JobStatus>();
  for(JobInProgress jip : jips) {
    JobStatus status = jip.getStatus();
    status.setStartTime(jip.getStartTime());
    status.setUsername(jip.getProfile().getUser());
    if(toComplete) {
      if(status.getRunState() == JobStatus.RUNNING ||
         status.getRunState() == JobStatus.PREP) {
        jobStatusList.add(status);
      }
    }else {
      jobStatusList.add(status);
    }
  }
  return (JobStatus[]) jobStatusList.toArray(
      new JobStatus[jobStatusList.size()]);
}

/**
 * Returns the configured maximum number of tasks for a single job
 */
int getMaxTasksPerJob() {
  return conf.getInt("mapred.jobtracker.maxtasks.per.job", -1);
}

@Override
public void refreshServiceAcl() throws IOException {
  if (!conf.getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    throw new AuthorizationException("Service Level Authorization not enabled!");
  }
  SecurityUtil.getPolicy().refresh();
}

/** Reads the cluster-wide slot and per-task memory limits from the config. */
private void initializeTaskMemoryRelatedConfig() {
  memSizeForMapSlotOnJT =
      JobConf.normalizeMemoryConfigValue(conf.getLong(
          JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY,
          JobConf.DISABLED_MEMORY_LIMIT));
  memSizeForReduceSlotOnJT =
      JobConf.normalizeMemoryConfigValue(conf.getLong(
          JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY,
          JobConf.DISABLED_MEMORY_LIMIT));

  if (conf.get(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY) != null) {
    // Deprecated property: one byte-denominated limit applied to both
    // map and reduce tasks.
    LOG.warn(
      JobConf.deprecatedString(
        JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY)+
        " instead use "+JobTracker.MAPRED_CLUSTER_MAX_MAP_MEMORY_MB_PROPERTY+
        " and " + JobTracker.MAPRED_CLUSTER_MAX_REDUCE_MEMORY_MB_PROPERTY
    );

    limitMaxMemForMapTasks = limitMaxMemForReduceTasks =
      JobConf.normalizeMemoryConfigValue(
        conf.getLong(
          JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY,
          JobConf.DISABLED_MEMORY_LIMIT));
    if (limitMaxMemForMapTasks != JobConf.DISABLED_MEMORY_LIMIT &&
        limitMaxMemForMapTasks >= 0) {
      limitMaxMemForMapTasks = limitMaxMemForReduceTasks =
        limitMaxMemForMapTasks / (1024 * 1024); //Converting old values in bytes to MB
    }
  } else {
    limitMaxMemForMapTasks =
      JobConf.normalizeMemoryConfigValue(
        conf.getLong(
          JobTracker.MAPRED_CLUSTER_MAX_MAP_MEMORY_MB_PROPERTY,
          JobConf.DISABLED_MEMORY_LIMIT));
    limitMaxMemForReduceTasks =
      JobConf.normalizeMemoryConfigValue(
        conf.getLong(
          JobTracker.MAPRED_CLUSTER_MAX_REDUCE_MEMORY_MB_PROPERTY,
          JobConf.DISABLED_MEMORY_LIMIT));
  }

  LOG.info(new StringBuilder().append("Scheduler configured with ").append(
      "(memSizeForMapSlotOnJT, memSizeForReduceSlotOnJT,").append(
      " limitMaxMemForMapTasks, limitMaxMemForReduceTasks) (").append(
      memSizeForMapSlotOnJT).append(", ").append(memSizeForReduceSlotOnJT)
      .append(", ").append(limitMaxMemForMapTasks).append(", ").append(
      limitMaxMemForReduceTasks).append(")"));
}

/** True only when every slot-size and per-task memory limit is configured. */
private boolean perTaskMemoryConfigurationSetOnJT() {
  if (limitMaxMemForMapTasks == JobConf.DISABLED_MEMORY_LIMIT ||
      limitMaxMemForReduceTasks == JobConf.DISABLED_MEMORY_LIMIT ||
      memSizeForMapSlotOnJT == JobConf.DISABLED_MEMORY_LIMIT ||
      memSizeForReduceSlotOnJT == JobConf.DISABLED_MEMORY_LIMIT) {
    return false;
  }
  return true;
}

/**
 * Check the job for invalid memory requirements and throw an IOException
 * if it has any.
 *
 * @param job
 * @throws IOException
 */
private void checkMemoryRequirements(JobInProgress job) throws IOException {
  if (!perTaskMemoryConfigurationSetOnJT()) {
    LOG.debug("Per-Task memory configuration is not set on JT. " +
              "Not checking the job for invalid memory requirements.");
    return;
  }

  boolean invalidJob = false;
  String msg = "";
  long maxMemForMapTask = job.getJobConf().getMemoryForMapTask();
  long maxMemForReduceTask = job.getJobConf().getMemoryForReduceTask();

  if (maxMemForMapTask == JobConf.DISABLED_MEMORY_LIMIT ||
      maxMemForReduceTask == JobConf.DISABLED_MEMORY_LIMIT) {
    invalidJob = true;
    msg = "Invalid job requirements.";
  }

  if (maxMemForMapTask > limitMaxMemForMapTasks ||
      maxMemForReduceTask > limitMaxMemForReduceTasks) {
    invalidJob = true;
    msg = "Exceeds the cluster's max-memory-limit.";
  }

  if (invalidJob) {
    StringBuilder jobStr =
      new StringBuilder().append(job.getJobID().toString()).append("(")
        .append(maxMemForMapTask).append(" memForMapTasks ").append(
          maxMemForReduceTask).append(" memForReduceTasks): ");
    LOG.warn(jobStr.toString() + msg);
    throw new IOException(jobStr.toString() + msg);
  }
}

@Override
public void refreshQueueAcls() throws IOException{
  LOG.info("Refreshing queue acls. requested by : " +
           UserGroupInformation.getCurrentUGI().getUserName());
  this.queueManager.refreshAcls(new Configuration(this.conf));
}

synchronized String getReasonsForBlacklisting(String host) {
  FaultInfo fi = faultyTrackers.getFaultInfo(host, false);
  if (fi == null) {
    return "";
  }
  return fi.getTrackerFaultReport();
}

/** Test Methods */
synchronized Set<ReasonForBlackListing> getReasonForBlackList(String host) {
  FaultInfo fi = faultyTrackers.getFaultInfo(host, false);
  if (fi == null) {
    return new HashSet<ReasonForBlackListing>();
  }
  return fi.getReasonforblacklisting();
}

/*
 * This method is synchronized to make sure that the locking order
 * "faultyTrackers.potentiallyFaultyTrackers lock followed by taskTrackers
 * lock" is under JobTracker lock to avoid deadlocks.
 */
synchronized void incrementFaults(String hostName) {
  faultyTrackers.incrementFaults(hostName);
}
}
jaxlaw/hadoop-common
ba474785b19cea97259c6a530f585832b1a4b57b
HADOOP-6460 from https://issues.apache.org/jira/secure/attachment/12428898/hadoop-6460.rel20.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 54fb22d..971b5b0 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,496 +1,498 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383003 + HADOOP-6460. Reinitializes buffers used for serializing responses in ipc + server on exceeding maximum response size to free up Java heap. (suresh) MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) yahoo-hadoop-0.20.1-3195383002 MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. 
NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. 
Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. 
(Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. 
(Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. 
Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. 
Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. 
Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. 
HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. 
Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. 
(Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/core/org/apache/hadoop/ipc/Server.java b/src/core/org/apache/hadoop/ipc/Server.java index 3b7b192..a7fbcd1 100644 --- a/src/core/org/apache/hadoop/ipc/Server.java +++ b/src/core/org/apache/hadoop/ipc/Server.java @@ -1,1250 +1,1263 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.ipc; import java.io.IOException; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.nio.ByteBuffer; import java.nio.channels.CancelledKeyException; import java.nio.channels.ClosedChannelException; import java.nio.channels.ReadableByteChannel; import java.nio.channels.SelectionKey; import java.nio.channels.Selector; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.nio.channels.WritableByteChannel; import java.net.BindException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; import java.net.SocketException; import java.net.UnknownHostException; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Iterator; import java.util.Map; import java.util.Random; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.LinkedBlockingQueue; import javax.security.auth.Subject; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.ipc.metrics.RpcMetrics; import org.apache.hadoop.security.authorize.AuthorizationException; /** An abstract IPC service. IPC calls take a single {@link Writable} as a * parameter, and return a {@link Writable} as their value. A service runs on * a port and is defined by a parameter class and a value class. 
* * @see Client */ public abstract class Server { /** * The first four bytes of Hadoop RPC connections */ public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes()); // 1 : Introduce ping and server does not throw away RPCs // 3 : Introduce the protocol into the RPC connection header public static final byte CURRENT_VERSION = 3; /** * How many calls/handler are allowed in the queue. */ private static final int MAX_QUEUE_SIZE_PER_HANDLER = 100; + /** + * Initial and max size of response buffer + */ + static int INITIAL_RESP_BUF_SIZE = 10240; + static int MAX_RESP_BUF_SIZE = 1024*1024; + public static final Log LOG = LogFactory.getLog(Server.class); private static final ThreadLocal<Server> SERVER = new ThreadLocal<Server>(); private static final Map<String, Class<?>> PROTOCOL_CACHE = new ConcurrentHashMap<String, Class<?>>(); static Class<?> getProtocolClass(String protocolName, Configuration conf) throws ClassNotFoundException { Class<?> protocol = PROTOCOL_CACHE.get(protocolName); if (protocol == null) { protocol = conf.getClassByName(protocolName); PROTOCOL_CACHE.put(protocolName, protocol); } return protocol; } /** Returns the server instance called under or null. May be called under * {@link #call(Writable, long)} implementations, and under {@link Writable} * methods of paramters and return values. Permits applications to access * the server context.*/ public static Server get() { return SERVER.get(); } /** This is set to Call object before Handler invokes an RPC and reset * after the call returns. */ private static final ThreadLocal<Call> CurCall = new ThreadLocal<Call>(); /** Returns the remote side ip address when invoked inside an RPC * Returns null incase of an error. */ public static InetAddress getRemoteIp() { Call call = CurCall.get(); if (call != null) { return call.connection.socket.getInetAddress(); } return null; } /** Returns remote address as a string when invoked inside an RPC. * Returns null in case of an error. 
*/ public static String getRemoteAddress() { InetAddress addr = getRemoteIp(); return (addr == null) ? null : addr.getHostAddress(); } private String bindAddress; private int port; // port we listen on private int handlerCount; // number of handler threads private Class<? extends Writable> paramClass; // class of call parameters private int maxIdleTime; // the maximum idle time after // which a client may be disconnected private int thresholdIdleConnections; // the number of idle connections // after which we will start // cleaning up idle // connections int maxConnectionsToNuke; // the max number of // connections to nuke //during a cleanup protected RpcMetrics rpcMetrics; private Configuration conf; private int maxQueueSize; private int socketSendBufferSize; private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm volatile private boolean running = true; // true while server runs private BlockingQueue<Call> callQueue; // queued calls private List<Connection> connectionList = Collections.synchronizedList(new LinkedList<Connection>()); //maintain a list //of client connections private Listener listener = null; private Responder responder = null; private int numConnections = 0; private Handler[] handlers = null; /** * A convenience method to bind to a given address and report * better exceptions if the address is not a valid host. 
* @param socket the socket to bind * @param address the address to bind to * @param backlog the number of connections allowed in the queue * @throws BindException if the address can't be bound * @throws UnknownHostException if the address isn't a valid host name * @throws IOException other random errors from bind */ public static void bind(ServerSocket socket, InetSocketAddress address, int backlog) throws IOException { try { socket.bind(address, backlog); } catch (BindException e) { BindException bindException = new BindException("Problem binding to " + address + " : " + e.getMessage()); bindException.initCause(e); throw bindException; } catch (SocketException e) { // If they try to bind to a different host's address, give a better // error message. if ("Unresolved address".equals(e.getMessage())) { throw new UnknownHostException("Invalid hostname for server: " + address.getHostName()); } else { throw e; } } } /** A call queued for handling. */ private static class Call { private int id; // the client's call id private Writable param; // the parameter passed private Connection connection; // connection to client private long timestamp; // the time received when response is null // the time served when response is not null private ByteBuffer response; // the response for this call public Call(int id, Writable param, Connection connection) { this.id = id; this.param = param; this.connection = connection; this.timestamp = System.currentTimeMillis(); this.response = null; } @Override public String toString() { return param.toString() + " from " + connection.toString(); } public void setResponse(ByteBuffer response) { this.response = response; } } /** Listens on the socket. 
Creates jobs for the handler threads*/ private class Listener extends Thread { private ServerSocketChannel acceptChannel = null; //the accept channel private Selector selector = null; //the selector that we use for the server private InetSocketAddress address; //the address we bind at private Random rand = new Random(); private long lastCleanupRunTime = 0; //the last time when a cleanup connec- //-tion (for idle connections) ran private long cleanupInterval = 10000; //the minimum interval between //two cleanup runs private int backlogLength = conf.getInt("ipc.server.listen.queue.size", 128); public Listener() throws IOException { address = new InetSocketAddress(bindAddress, port); // Create a new server socket and set to non blocking mode acceptChannel = ServerSocketChannel.open(); acceptChannel.configureBlocking(false); // Bind the server socket to the local host and port bind(acceptChannel.socket(), address, backlogLength); port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port // create a selector; selector= Selector.open(); // Register accepts on the server socket with the selector. acceptChannel.register(selector, SelectionKey.OP_ACCEPT); this.setName("IPC Server listener on " + port); this.setDaemon(true); } /** cleanup connections from connectionList. Choose a random range * to scan and also have a limit on the number of the connections * that will be cleanedup per run. The criteria for cleanup is the time * for which the connection was idle. If 'force' is true then all * connections will be looked at for the cleanup. 
*/ private void cleanupConnections(boolean force) { if (force || numConnections > thresholdIdleConnections) { long currentTime = System.currentTimeMillis(); if (!force && (currentTime - lastCleanupRunTime) < cleanupInterval) { return; } int start = 0; int end = numConnections - 1; if (!force) { start = rand.nextInt() % numConnections; end = rand.nextInt() % numConnections; int temp; if (end < start) { temp = start; start = end; end = temp; } } int i = start; int numNuked = 0; while (i <= end) { Connection c; synchronized (connectionList) { try { c = connectionList.get(i); } catch (Exception e) {return;} } if (c.timedOut(currentTime)) { if (LOG.isDebugEnabled()) LOG.debug(getName() + ": disconnecting client " + c.getHostAddress()); closeConnection(c); numNuked++; end--; c = null; if (!force && numNuked == maxConnectionsToNuke) break; } else i++; } lastCleanupRunTime = System.currentTimeMillis(); } } @Override public void run() { LOG.info(getName() + ": starting"); SERVER.set(Server.this); while (running) { SelectionKey key = null; try { selector.select(); Iterator<SelectionKey> iter = selector.selectedKeys().iterator(); while (iter.hasNext()) { key = iter.next(); iter.remove(); try { if (key.isValid()) { if (key.isAcceptable()) doAccept(key); else if (key.isReadable()) doRead(key); } } catch (IOException e) { } key = null; } } catch (OutOfMemoryError e) { // we can run out of memory if we have too many threads // log the event and sleep for a minute and give // some thread(s) a chance to finish LOG.warn("Out of Memory in server select", e); closeCurrentConnection(key, e); cleanupConnections(true); try { Thread.sleep(60000); } catch (Exception ie) {} } catch (InterruptedException e) { if (running) { // unexpected -- log it LOG.info(getName() + " caught: " + StringUtils.stringifyException(e)); } } catch (Exception e) { closeCurrentConnection(key, e); } cleanupConnections(false); } LOG.info("Stopping " + this.getName()); synchronized (this) { try { 
acceptChannel.close(); selector.close(); } catch (IOException e) { } selector= null; acceptChannel= null; // clean up all connections while (!connectionList.isEmpty()) { closeConnection(connectionList.remove(0)); } } } private void closeCurrentConnection(SelectionKey key, Throwable e) { if (key != null) { Connection c = (Connection)key.attachment(); if (c != null) { if (LOG.isDebugEnabled()) LOG.debug(getName() + ": disconnecting client " + c.getHostAddress()); closeConnection(c); c = null; } } } InetSocketAddress getAddress() { return (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress(); } void doAccept(SelectionKey key) throws IOException, OutOfMemoryError { Connection c = null; ServerSocketChannel server = (ServerSocketChannel) key.channel(); // accept up to 10 connections for (int i=0; i<10; i++) { SocketChannel channel = server.accept(); if (channel==null) return; channel.configureBlocking(false); channel.socket().setTcpNoDelay(tcpNoDelay); SelectionKey readKey = channel.register(selector, SelectionKey.OP_READ); c = new Connection(readKey, channel, System.currentTimeMillis()); readKey.attach(c); synchronized (connectionList) { connectionList.add(numConnections, c); numConnections++; } if (LOG.isDebugEnabled()) LOG.debug("Server connection from " + c.toString() + "; # active connections: " + numConnections + "; # queued calls: " + callQueue.size()); } } void doRead(SelectionKey key) throws InterruptedException { int count = 0; Connection c = (Connection)key.attachment(); if (c == null) { return; } c.setLastContact(System.currentTimeMillis()); try { count = c.readAndProcess(); } catch (InterruptedException ieo) { LOG.info(getName() + ": readAndProcess caught InterruptedException", ieo); throw ieo; } catch (Exception e) { LOG.info(getName() + ": readAndProcess threw exception " + e + ". 
Count of bytes read: " + count, e); count = -1; //so that the (count < 0) block is executed } if (count < 0) { if (LOG.isDebugEnabled()) LOG.debug(getName() + ": disconnecting client " + c.getHostAddress() + ". Number of active connections: "+ numConnections); closeConnection(c); c = null; } else { c.setLastContact(System.currentTimeMillis()); } } synchronized void doStop() { if (selector != null) { selector.wakeup(); Thread.yield(); } if (acceptChannel != null) { try { acceptChannel.socket().close(); } catch (IOException e) { LOG.info(getName() + ":Exception in closing listener socket. " + e); } } } } // Sends responses of RPC back to clients. private class Responder extends Thread { private Selector writeSelector; private int pending; // connections waiting to register final static int PURGE_INTERVAL = 900000; // 15mins Responder() throws IOException { this.setName("IPC Server Responder"); this.setDaemon(true); writeSelector = Selector.open(); // create a selector pending = 0; } @Override public void run() { LOG.info(getName() + ": starting"); SERVER.set(Server.this); long lastPurgeTime = 0; // last check for old calls. while (running) { try { waitPending(); // If a channel is being registered, wait. writeSelector.select(PURGE_INTERVAL); Iterator<SelectionKey> iter = writeSelector.selectedKeys().iterator(); while (iter.hasNext()) { SelectionKey key = iter.next(); iter.remove(); try { if (key.isValid() && key.isWritable()) { doAsyncWrite(key); } } catch (IOException e) { LOG.info(getName() + ": doAsyncWrite threw exception " + e); } } long now = System.currentTimeMillis(); if (now < lastPurgeTime + PURGE_INTERVAL) { continue; } lastPurgeTime = now; // // If there were some calls that have not been sent out for a // long time, discard them. // LOG.debug("Checking for old call responses."); ArrayList<Call> calls; // get the list of channels from list of keys. 
synchronized (writeSelector.keys()) { calls = new ArrayList<Call>(writeSelector.keys().size()); iter = writeSelector.keys().iterator(); while (iter.hasNext()) { SelectionKey key = iter.next(); Call call = (Call)key.attachment(); if (call != null && key.channel() == call.connection.channel) { calls.add(call); } } } for(Call call : calls) { try { doPurge(call, now); } catch (IOException e) { LOG.warn("Error in purging old calls " + e); } } } catch (OutOfMemoryError e) { // // we can run out of memory if we have too many threads // log the event and sleep for a minute and give // some thread(s) a chance to finish // LOG.warn("Out of Memory in server select", e); try { Thread.sleep(60000); } catch (Exception ie) {} } catch (Exception e) { LOG.warn("Exception in Responder " + StringUtils.stringifyException(e)); } } LOG.info("Stopping " + this.getName()); } private void doAsyncWrite(SelectionKey key) throws IOException { Call call = (Call)key.attachment(); if (call == null) { return; } if (key.channel() != call.connection.channel) { throw new IOException("doAsyncWrite: bad channel"); } synchronized(call.connection.responseQueue) { if (processResponse(call.connection.responseQueue, false)) { try { key.interestOps(0); } catch (CancelledKeyException e) { /* The Listener/reader might have closed the socket. * We don't explicitly cancel the key, so not sure if this will * ever fire. * This warning could be removed. */ LOG.warn("Exception while changing ops : " + e); } } } } // // Remove calls that have been pending in the responseQueue // for a long time. // private void doPurge(Call call, long now) throws IOException { LinkedList<Call> responseQueue = call.connection.responseQueue; synchronized (responseQueue) { Iterator<Call> iter = responseQueue.listIterator(0); while (iter.hasNext()) { call = iter.next(); if (now > call.timestamp + PURGE_INTERVAL) { closeConnection(call.connection); break; } } } } // Processes one response. 
Returns true if there are no more pending // data for this channel. // private boolean processResponse(LinkedList<Call> responseQueue, boolean inHandler) throws IOException { boolean error = true; boolean done = false; // there is more data for this channel. int numElements = 0; Call call = null; try { synchronized (responseQueue) { // // If there are no items for this channel, then we are done // numElements = responseQueue.size(); if (numElements == 0) { error = false; return true; // no more data for this channel. } // // Extract the first call // call = responseQueue.removeFirst(); SocketChannel channel = call.connection.channel; if (LOG.isDebugEnabled()) { LOG.debug(getName() + ": responding to #" + call.id + " from " + call.connection); } // // Send as much data as we can in the non-blocking fashion // int numBytes = channelWrite(channel, call.response); if (numBytes < 0) { return true; } if (!call.response.hasRemaining()) { call.connection.decRpcCount(); if (numElements == 1) { // last call fully processes. done = true; // no more data for this channel. } else { done = false; // more calls pending to be sent. } if (LOG.isDebugEnabled()) { LOG.debug(getName() + ": responding to #" + call.id + " from " + call.connection + " Wrote " + numBytes + " bytes."); } } else { // // If we were unable to write the entire response out, then // insert in Selector queue. // call.connection.responseQueue.addFirst(call); if (inHandler) { // set the serve time when the response has to be sent later call.timestamp = System.currentTimeMillis(); incPending(); try { // Wakeup the thread blocked on select, only then can the call // to channel.register() complete. writeSelector.wakeup(); channel.register(writeSelector, SelectionKey.OP_WRITE, call); } catch (ClosedChannelException e) { //Its ok. channel might be closed else where. 
done = true; } finally { decPending(); } } if (LOG.isDebugEnabled()) { LOG.debug(getName() + ": responding to #" + call.id + " from " + call.connection + " Wrote partial " + numBytes + " bytes."); } } error = false; // everything went off well } } finally { if (error && call != null) { LOG.warn(getName()+", call " + call + ": output error"); done = true; // error. no more data for this channel. closeConnection(call.connection); } } return done; } // // Enqueue a response from the application. // void doRespond(Call call) throws IOException { synchronized (call.connection.responseQueue) { call.connection.responseQueue.addLast(call); if (call.connection.responseQueue.size() == 1) { processResponse(call.connection.responseQueue, true); } } } private synchronized void incPending() { // call waiting to be enqueued. pending++; } private synchronized void decPending() { // call done enqueueing. pending--; notify(); } private synchronized void waitPending() throws InterruptedException { while (pending > 0) { wait(); } } } /** Reads calls from a connection and queues them for handling. */ private class Connection { private boolean versionRead = false; //if initial signature and //version are read private boolean headerRead = false; //if the connection header that //follows version is read. private SocketChannel channel; private ByteBuffer data; private ByteBuffer dataLengthBuffer; private LinkedList<Call> responseQueue; private volatile int rpcCount = 0; // number of outstanding rpcs private long lastContact; private int dataLength; private Socket socket; // Cache the remote host & port info so that even if the socket is // disconnected, we can say where it used to connect to. 
private String hostAddress; private int remotePort; ConnectionHeader header = new ConnectionHeader(); Class<?> protocol; Subject user = null; // Fake 'call' for failed authorization response private final int AUTHROIZATION_FAILED_CALLID = -1; private final Call authFailedCall = new Call(AUTHROIZATION_FAILED_CALLID, null, null); private ByteArrayOutputStream authFailedResponse = new ByteArrayOutputStream(); public Connection(SelectionKey key, SocketChannel channel, long lastContact) { this.channel = channel; this.lastContact = lastContact; this.data = null; this.dataLengthBuffer = ByteBuffer.allocate(4); this.socket = channel.socket(); InetAddress addr = socket.getInetAddress(); if (addr == null) { this.hostAddress = "*Unknown*"; } else { this.hostAddress = addr.getHostAddress(); } this.remotePort = socket.getPort(); this.responseQueue = new LinkedList<Call>(); if (socketSendBufferSize != 0) { try { socket.setSendBufferSize(socketSendBufferSize); } catch (IOException e) { LOG.warn("Connection: unable to set socket send buffer size to " + socketSendBufferSize); } } } @Override public String toString() { return getHostAddress() + ":" + remotePort; } public String getHostAddress() { return hostAddress; } public void setLastContact(long lastContact) { this.lastContact = lastContact; } public long getLastContact() { return lastContact; } /* Return true if the connection has no outstanding rpc */ private boolean isIdle() { return rpcCount == 0; } /* Decrement the outstanding RPC count */ private void decRpcCount() { rpcCount--; } /* Increment the outstanding RPC count */ private void incRpcCount() { rpcCount++; } private boolean timedOut(long currentTime) { if (isIdle() && currentTime - lastContact > maxIdleTime) return true; return false; } public int readAndProcess() throws IOException, InterruptedException { while (true) { /* Read at most one RPC. If the header is not read completely yet * then iterate until we read first RPC or until there is no data left. 
*/ int count = -1; if (dataLengthBuffer.remaining() > 0) { count = channelRead(channel, dataLengthBuffer); if (count < 0 || dataLengthBuffer.remaining() > 0) return count; } if (!versionRead) { //Every connection is expected to send the header. ByteBuffer versionBuffer = ByteBuffer.allocate(1); count = channelRead(channel, versionBuffer); if (count <= 0) { return count; } int version = versionBuffer.get(0); dataLengthBuffer.flip(); if (!HEADER.equals(dataLengthBuffer) || version != CURRENT_VERSION) { //Warning is ok since this is not supposed to happen. LOG.warn("Incorrect header or version mismatch from " + hostAddress + ":" + remotePort + " got version " + version + " expected version " + CURRENT_VERSION); return -1; } dataLengthBuffer.clear(); versionRead = true; continue; } if (data == null) { dataLengthBuffer.flip(); dataLength = dataLengthBuffer.getInt(); if (dataLength == Client.PING_CALL_ID) { dataLengthBuffer.clear(); return 0; //ping message } data = ByteBuffer.allocate(dataLength); incRpcCount(); // Increment the rpc count } count = channelRead(channel, data); if (data.remaining() == 0) { dataLengthBuffer.clear(); data.flip(); if (headerRead) { processData(); data = null; return count; } else { processHeader(); headerRead = true; data = null; // Authorize the connection try { authorize(user, header); if (LOG.isDebugEnabled()) { LOG.debug("Successfully authorized " + header); } } catch (AuthorizationException ae) { authFailedCall.connection = this; setupResponse(authFailedResponse, authFailedCall, Status.FATAL, null, ae.getClass().getName(), ae.getMessage()); responder.doRespond(authFailedCall); // Close this connection return -1; } continue; } } return count; } } /// Reads the connection header following version private void processHeader() throws IOException { DataInputStream in = new DataInputStream(new ByteArrayInputStream(data.array())); header.readFields(in); try { String protocolClassName = header.getProtocol(); if (protocolClassName != null) { 
protocol = getProtocolClass(header.getProtocol(), conf); } } catch (ClassNotFoundException cnfe) { throw new IOException("Unknown protocol: " + header.getProtocol()); } // TODO: Get the user name from the GSS API for Kerberbos-based security // Create the user subject user = SecurityUtil.getSubject(header.getUgi()); } private void processData() throws IOException, InterruptedException { DataInputStream dis = new DataInputStream(new ByteArrayInputStream(data.array())); int id = dis.readInt(); // try to read an id if (LOG.isDebugEnabled()) LOG.debug(" got #" + id); Writable param = ReflectionUtils.newInstance(paramClass, conf); // read param param.readFields(dis); Call call = new Call(id, param, this); callQueue.put(call); // queue the call; maybe blocked here } private synchronized void close() throws IOException { data = null; dataLengthBuffer = null; if (!channel.isOpen()) return; try {socket.shutdownOutput();} catch(Exception e) {} if (channel.isOpen()) { try {channel.close();} catch(Exception e) {} } try {socket.close();} catch(Exception e) {} } } /** Handles queued calls . 
*/ private class Handler extends Thread { public Handler(int instanceNumber) { this.setDaemon(true); this.setName("IPC Server handler "+ instanceNumber + " on " + port); } @Override public void run() { LOG.info(getName() + ": starting"); SERVER.set(Server.this); - ByteArrayOutputStream buf = new ByteArrayOutputStream(10240); + ByteArrayOutputStream buf = + new ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE); while (running) { try { final Call call = callQueue.take(); // pop the queue; maybe blocked here if (LOG.isDebugEnabled()) LOG.debug(getName() + ": has #" + call.id + " from " + call.connection); String errorClass = null; String error = null; Writable value = null; CurCall.set(call); try { // Make the call as the user via Subject.doAs, thus associating // the call with the Subject value = Subject.doAs(call.connection.user, new PrivilegedExceptionAction<Writable>() { @Override public Writable run() throws Exception { // make the call return call(call.connection.protocol, call.param, call.timestamp); } } ); } catch (PrivilegedActionException pae) { Exception e = pae.getException(); LOG.info(getName()+", call "+call+": error: " + e, e); errorClass = e.getClass().getName(); error = StringUtils.stringifyException(e); } catch (Throwable e) { LOG.info(getName()+", call "+call+": error: " + e, e); errorClass = e.getClass().getName(); error = StringUtils.stringifyException(e); } CurCall.set(null); - setupResponse(buf, call, (error == null) ? 
Status.SUCCESS : Status.ERROR, value, errorClass, error); + // Discard the large buf and reset it back to + // smaller size to freeup heap + if (buf.size() > MAX_RESP_BUF_SIZE) { + LOG.warn("Large response size " + buf.size() + " for call " + + call.toString()); + buf = new ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE); + } responder.doRespond(call); } catch (InterruptedException e) { if (running) { // unexpected -- log it LOG.info(getName() + " caught: " + StringUtils.stringifyException(e)); } } catch (Exception e) { LOG.info(getName() + " caught: " + StringUtils.stringifyException(e)); } } LOG.info(getName() + ": exiting"); } } protected Server(String bindAddress, int port, Class<? extends Writable> paramClass, int handlerCount, Configuration conf) throws IOException { this(bindAddress, port, paramClass, handlerCount, conf, Integer.toString(port)); } /** Constructs a server listening on the named port and address. Parameters passed must * be of the named class. The <code>handlerCount</handlerCount> determines * the number of handler threads that will be used to process calls. * */ protected Server(String bindAddress, int port, Class<? 
extends Writable> paramClass, int handlerCount, Configuration conf, String serverName) throws IOException { this.bindAddress = bindAddress; this.conf = conf; this.port = port; this.paramClass = paramClass; this.handlerCount = handlerCount; this.socketSendBufferSize = 0; this.maxQueueSize = handlerCount * MAX_QUEUE_SIZE_PER_HANDLER; this.callQueue = new LinkedBlockingQueue<Call>(maxQueueSize); this.maxIdleTime = 2*conf.getInt("ipc.client.connection.maxidletime", 1000); this.maxConnectionsToNuke = conf.getInt("ipc.client.kill.max", 10); this.thresholdIdleConnections = conf.getInt("ipc.client.idlethreshold", 4000); // Start the listener here and let it bind to the port listener = new Listener(); this.port = listener.getAddress().getPort(); this.rpcMetrics = new RpcMetrics(serverName, Integer.toString(this.port), this); this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", false); // Create the responder here responder = new Responder(); } private void closeConnection(Connection connection) { synchronized (connectionList) { if (connectionList.remove(connection)) numConnections--; } try { connection.close(); } catch (IOException e) { } } /** * Setup response for the IPC Call. 
* * @param response buffer to serialize the response into * @param call {@link Call} to which we are setting up the response * @param status {@link Status} of the IPC call * @param rv return value for the IPC Call, if the call was successful * @param errorClass error class, if the the call failed * @param error error message, if the call failed * @throws IOException */ private void setupResponse(ByteArrayOutputStream response, Call call, Status status, Writable rv, String errorClass, String error) throws IOException { response.reset(); DataOutputStream out = new DataOutputStream(response); out.writeInt(call.id); // write call id out.writeInt(status.state); // write status if (status == Status.SUCCESS) { rv.write(out); } else { WritableUtils.writeString(out, errorClass); WritableUtils.writeString(out, error); } call.setResponse(ByteBuffer.wrap(response.toByteArray())); } Configuration getConf() { return conf; } /** Sets the socket buffer size used for responding to RPCs */ public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; } /** Starts the service. Must be called before any calls will be handled. */ public synchronized void start() throws IOException { responder.start(); listener.start(); handlers = new Handler[handlerCount]; for (int i = 0; i < handlerCount; i++) { handlers[i] = new Handler(i); handlers[i].start(); } } /** Stops the service. No new calls will be handled after this is called. */ public synchronized void stop() { LOG.info("Stopping server on " + port); running = false; if (handlers != null) { for (int i = 0; i < handlerCount; i++) { if (handlers[i] != null) { handlers[i].interrupt(); } } } listener.interrupt(); listener.doStop(); responder.interrupt(); notifyAll(); if (this.rpcMetrics != null) { this.rpcMetrics.shutdown(); } } /** Wait for the server to be stopped. * Does not wait for all subthreads to finish. * See {@link #stop()}. 
*/ public synchronized void join() throws InterruptedException { while (running) { wait(); } } /** * Return the socket (ip+port) on which the RPC server is listening to. * @return the socket (ip+port) on which the RPC server is listening to. */ public synchronized InetSocketAddress getListenerAddress() { return listener.getAddress(); } /** * Called for each call. * @deprecated Use {@link #call(Class, Writable, long)} instead */ @Deprecated public Writable call(Writable param, long receiveTime) throws IOException { return call(null, param, receiveTime); } /** Called for each call. */ public abstract Writable call(Class<?> protocol, Writable param, long receiveTime) throws IOException; /** * Authorize the incoming client connection. * * @param user client user * @param connection incoming connection * @throws AuthorizationException when the client isn't authorized to talk the protocol */ public void authorize(Subject user, ConnectionHeader connection) throws AuthorizationException {} /** * The number of open RPC conections * @return the number of open rpc connections */ public int getNumOpenConnections() { return numConnections; } /** * The number of rpc calls in the queue. * @return The number of rpc calls in the queue. */ public int getCallQueueLen() { return callQueue.size(); } /** * When the read or write buffer size is larger than this limit, i/o will be * done in chunks of this size. Most RPC requests and responses would be * be smaller. */ private static int NIO_BUFFER_LIMIT = 8*1024; //should not be more than 64KB. /** * This is a wrapper around {@link WritableByteChannel#write(ByteBuffer)}. * If the amount of data is large, it writes to channel in smaller chunks. * This is to avoid jdk from creating many direct buffers as the size of * buffer increases. This also minimizes extra copies in NIO layer * as a result of multiple write operations required to write a large * buffer. 
* * @see WritableByteChannel#write(ByteBuffer) */ private static int channelWrite(WritableByteChannel channel, ByteBuffer buffer) throws IOException { return (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.write(buffer) : channelIO(null, channel, buffer); } /** * This is a wrapper around {@link ReadableByteChannel#read(ByteBuffer)}. * If the amount of data is large, it writes to channel in smaller chunks. * This is to avoid jdk from creating many direct buffers as the size of * ByteBuffer increases. There should not be any performance degredation. * * @see ReadableByteChannel#read(ByteBuffer) */ private static int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException { return (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.read(buffer) : channelIO(channel, null, buffer); } /** * Helper for {@link #channelRead(ReadableByteChannel, ByteBuffer)} * and {@link #channelWrite(WritableByteChannel, ByteBuffer)}. Only * one of readCh or writeCh should be non-null. * * @see #channelRead(ReadableByteChannel, ByteBuffer) * @see #channelWrite(WritableByteChannel, ByteBuffer) */ private static int channelIO(ReadableByteChannel readCh, WritableByteChannel writeCh, ByteBuffer buf) throws IOException { int originalLimit = buf.limit(); int initialRemaining = buf.remaining(); int ret = 0; while (buf.remaining() > 0) { try { int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT); buf.limit(buf.position() + ioSize); ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf); if (ret < ioSize) { break; } } finally { buf.limit(originalLimit); } } int nBytes = initialRemaining - buf.remaining(); return (nBytes > 0) ? 
nBytes : ret; } } diff --git a/src/test/org/apache/hadoop/ipc/TestIPCServerResponder.java b/src/test/org/apache/hadoop/ipc/TestIPCServerResponder.java index 2591da0..e1370fe 100644 --- a/src/test/org/apache/hadoop/ipc/TestIPCServerResponder.java +++ b/src/test/org/apache/hadoop/ipc/TestIPCServerResponder.java @@ -1,150 +1,156 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import java.io.IOException; import java.net.InetSocketAddress; import java.util.Random; import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.Writable; import org.apache.hadoop.net.NetUtils; /** * This test provokes partial writes in the server, which is * serving multiple clients. 
*/ public class TestIPCServerResponder extends TestCase { public static final Log LOG = LogFactory.getLog(TestIPCServerResponder.class); private static Configuration conf = new Configuration(); public TestIPCServerResponder(final String name) { super(name); } private static final Random RANDOM = new Random(); private static final String ADDRESS = "0.0.0.0"; private static final int BYTE_COUNT = 1024; private static final byte[] BYTES = new byte[BYTE_COUNT]; static { for (int i = 0; i < BYTE_COUNT; i++) BYTES[i] = (byte) ('a' + (i % 26)); } private static class TestServer extends Server { private boolean sleep; public TestServer(final int handlerCount, final boolean sleep) throws IOException { super(ADDRESS, 0, BytesWritable.class, handlerCount, conf); // Set the buffer size to half of the maximum parameter/result size // to force the socket to block this.setSocketSendBufSize(BYTE_COUNT / 2); this.sleep = sleep; } @Override public Writable call(Class<?> protocol, Writable param, long receiveTime) throws IOException { if (sleep) { try { Thread.sleep(RANDOM.nextInt(20)); // sleep a bit } catch (InterruptedException e) {} } return param; } } private static class Caller extends Thread { private Client client; private int count; private InetSocketAddress address; private boolean failed; public Caller(final Client client, final InetSocketAddress address, final int count) { this.client = client; this.address = address; this.count = count; } @Override public void run() { for (int i = 0; i < count; i++) { try { int byteSize = RANDOM.nextInt(BYTE_COUNT); byte[] bytes = new byte[byteSize]; System.arraycopy(BYTES, 0, bytes, 0, byteSize); Writable param = new BytesWritable(bytes); Writable value = client.call(param, address); Thread.sleep(RANDOM.nextInt(20)); } catch (Exception e) { LOG.fatal("Caught: " + e); failed = true; } } } } + public void testResponseBuffer() throws Exception { + Server.INITIAL_RESP_BUF_SIZE = 1; + Server.MAX_RESP_BUF_SIZE = 1; + testServerResponder(1, 
true, 1, 1, 5); + } + public void testServerResponder() throws Exception { testServerResponder(10, true, 1, 10, 200); } public void testServerResponder(final int handlerCount, final boolean handlerSleep, final int clientCount, final int callerCount, final int callCount) throws Exception { Server server = new TestServer(handlerCount, handlerSleep); server.start(); InetSocketAddress address = NetUtils.getConnectAddress(server); Client[] clients = new Client[clientCount]; for (int i = 0; i < clientCount; i++) { clients[i] = new Client(BytesWritable.class, conf); } Caller[] callers = new Caller[callerCount]; for (int i = 0; i < callerCount; i++) { callers[i] = new Caller(clients[i % clientCount], address, callCount); callers[i].start(); } for (int i = 0; i < callerCount; i++) { callers[i].join(); assertFalse(callers[i].failed); } for (int i = 0; i < clientCount; i++) { clients[i].stop(); } server.stop(); } }
jaxlaw/hadoop-common
ef177d447d9e6553fd21adda732b06db5d10a184
Add the next version string to YAHOO-CHANGES.txt
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index ec5a113..54fb22d 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,494 +1,496 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. +yahoo-hadoop-0.20.1-3195383003 + MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) MAPREDUCE-896. Fix bug in earlier implementation to prevent spurious logging in tasktracker logs for absent file paths. (Ravi Gummadi via yhemanth) yahoo-hadoop-0.20.1-3195383002 MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. 
Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. 
Contributed by Jitendra Nath Pandey yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. 
(Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. 
(Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. 
(Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. 
yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. 
Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. 
HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. 
Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. 
(Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch
jaxlaw/hadoop-common
da60c4058e89f1336f022d4b16e032d610630b27
MAPREDUCE:896 Additional patch to fix spurious logging in tasktracker logs from https://issues.apache.org/jira/secure/attachment/12428180/y896.v2.1.fix.v2.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index e1a9d55..ec5a113 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,490 +1,494 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. (Vinod Kumar Vavilapalli via acmurthy) MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) + MAPREDUCE-896. Fix bug in earlier implementation to prevent + spurious logging in tasktracker logs for absent file paths. + (Ravi Gummadi via yhemanth) + yahoo-hadoop-0.20.1-3195383002 MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. 
Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. 
Contributed by Jitendra Nath Pandey yahoo-hadoop-0.20.1-3092118007: MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to ignore checksums when used with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118006: MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) yahoo-hadoop-0.20.1-3092118005: MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. 
(Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. 
(Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. 
(Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. 
yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. 
Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. 
HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. 
Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. 
(Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/mapred/org/apache/hadoop/mapred/CleanupQueue.java b/src/mapred/org/apache/hadoop/mapred/CleanupQueue.java index 68f90f4..42da577 100644 --- a/src/mapred/org/apache/hadoop/mapred/CleanupQueue.java +++ b/src/mapred/org/apache/hadoop/mapred/CleanupQueue.java @@ -1,138 +1,141 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapred; import java.io.IOException; import java.util.concurrent.LinkedBlockingQueue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; class CleanupQueue { public static final Log LOG = LogFactory.getLog(CleanupQueue.class); private static PathCleanupThread cleanupThread; /** * Create a singleton path-clean-up queue. It can be used to delete * paths(directories/files) in a separate thread. This constructor creates a * clean-up thread and also starts it as a daemon. 
Callers can instantiate one * CleanupQueue per JVM and can use it for deleting paths. Use * {@link CleanupQueue#addToQueue(PathDeletionContext...)} to add paths for * deletion. */ public CleanupQueue() { synchronized (PathCleanupThread.class) { if (cleanupThread == null) { cleanupThread = new PathCleanupThread(); } } } /** * Contains info related to the path of the file/dir to be deleted */ static class PathDeletionContext { String fullPath;// full path of file or dir FileSystem fs; public PathDeletionContext(FileSystem fs, String fullPath) { this.fs = fs; this.fullPath = fullPath; } protected String getPathForCleanup() { return fullPath; } /** * Makes the path(and its subdirectories recursively) fully deletable */ protected void enablePathForCleanup() throws IOException { // do nothing } } /** * Adds the paths to the queue of paths to be deleted by cleanupThread. */ void addToQueue(PathDeletionContext... contexts) { cleanupThread.addToQueue(contexts); } protected static boolean deletePath(PathDeletionContext context) throws IOException { context.enablePathForCleanup(); if (LOG.isDebugEnabled()) { LOG.debug("Trying to delete " + context.fullPath); } - return context.fs.delete(new Path(context.fullPath), true); + if (context.fs.exists(new Path(context.fullPath))) { + return context.fs.delete(new Path(context.fullPath), true); + } + return true; } private static class PathCleanupThread extends Thread { // cleanup queue which deletes files/directories of the paths queued up. 
private LinkedBlockingQueue<PathDeletionContext> queue = new LinkedBlockingQueue<PathDeletionContext>(); public PathCleanupThread() { setName("Directory/File cleanup thread"); setDaemon(true); start(); } void addToQueue(PathDeletionContext[] contexts) { for (PathDeletionContext context : contexts) { try { queue.put(context); } catch(InterruptedException ie) {} } } public void run() { if (LOG.isDebugEnabled()) { LOG.debug(getName() + " started."); } PathDeletionContext context = null; while (true) { try { context = queue.take(); // delete the path. if (!deletePath(context)) { LOG.warn("CleanupThread:Unable to delete path " + context.fullPath); } else if (LOG.isDebugEnabled()) { LOG.debug("DELETED " + context.fullPath); } } catch (InterruptedException t) { LOG.warn("Interrupted deletion of " + context.fullPath); return; } catch (Exception e) { LOG.warn("Error deleting path " + context.fullPath + ": " + e); } } } } } diff --git a/src/mapred/org/apache/hadoop/mapred/DefaultTaskController.java b/src/mapred/org/apache/hadoop/mapred/DefaultTaskController.java index fae50d6..73b3bb7 100644 --- a/src/mapred/org/apache/hadoop/mapred/DefaultTaskController.java +++ b/src/mapred/org/apache/hadoop/mapred/DefaultTaskController.java @@ -1,152 +1,154 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapred; import java.io.IOException; import java.util.List; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.mapred.JvmManager.JvmEnv; import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext; import org.apache.hadoop.util.ProcessTree; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; /** * The default implementation for controlling tasks. * * This class provides an implementation for launching and killing * tasks that need to be run as the tasktracker itself. Hence, * many of the initializing or cleanup methods are not required here. */ class DefaultTaskController extends TaskController { private static final Log LOG = LogFactory.getLog(DefaultTaskController.class); /** * Launch a new JVM for the task. * * This method launches the new JVM for the task by executing the * the JVM command using the {@link Shell.ShellCommandExecutor} */ void launchTaskJVM(TaskController.TaskControllerContext context) throws IOException { JvmEnv env = context.env; List<String> wrappedCommand = TaskLog.captureOutAndError(env.setup, env.vargs, env.stdout, env.stderr, env.logSize, true); ShellCommandExecutor shexec = new ShellCommandExecutor(wrappedCommand.toArray(new String[0]), env.workDir, env.env); // set the ShellCommandExecutor for later use. context.shExec = shexec; shexec.execute(); } /** * Initialize the task environment. * * Since tasks are launched as the tasktracker user itself, this * method has no action to perform. */ void initializeTask(TaskController.TaskControllerContext context) { // The default task controller does not need to set up // any permissions for proper execution. // So this is a dummy method. 
return; } @Override void setup() { // nothing to setup return; } /* * No need to do anything as we don't need to do as we dont need anything * extra from what TaskTracker has done. */ @Override void initializeJob(JobID jobId) { } @Override void terminateTask(TaskControllerContext context) { ShellCommandExecutor shexec = context.shExec; if (shexec != null) { Process process = shexec.getProcess(); if (Shell.WINDOWS) { // Currently we don't use setsid on WINDOWS. //So kill the process alone. if (process != null) { process.destroy(); } } else { // In addition to the task JVM, kill its subprocesses also. String pid = context.pid; if (pid != null) { if(ProcessTree.isSetsidAvailable) { ProcessTree.terminateProcessGroup(pid); }else { ProcessTree.terminateProcess(pid); } } } } } @Override void killTask(TaskControllerContext context) { ShellCommandExecutor shexec = context.shExec; if (shexec != null) { if (Shell.WINDOWS) { //We don't do send kill process signal in case of windows as //already we have done a process.destroy() in termintateTaskJVM() return; } String pid = context.pid; if (pid != null) { if(ProcessTree.isSetsidAvailable) { ProcessTree.killProcessGroup(pid); }else { ProcessTree.killProcess(pid); } } } } /** * Enables the task for cleanup by changing permissions of the specified path * in the local filesystem */ @Override void enableTaskForCleanup(PathDeletionContext context) throws IOException { try { FileUtil.chmod(context.fullPath, "a+rwx", true); } catch(InterruptedException e) { LOG.warn("Interrupted while setting permissions for " + context.fullPath + " for deletion."); + } catch(IOException ioe) { + LOG.warn("Unable to change permissions of " + context.fullPath); } } } diff --git a/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java b/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java index 4da98a9..c0e938f 100644 --- a/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java +++ 
b/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java @@ -1,602 +1,607 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapred; import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.io.PrintWriter; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Map.Entry; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.mapred.JvmManager.JvmEnv; import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Shell.ShellCommandExecutor; /** * A {@link TaskController} that runs the task JVMs as the user * who submits the job. * * This class executes a setuid executable to implement methods * of the {@link TaskController}, including launching the task * JVM and killing it when needed, and also initializing and * finalizing the task environment. 
* <p> The setuid executable is launched using the command line:</p> * <p>task-controller user-name command command-args, where</p> * <p>user-name is the name of the owner who submits the job</p> * <p>command is one of the cardinal value of the * {@link LinuxTaskController.TaskCommands} enumeration</p> * <p>command-args depends on the command being launched.</p> * * In addition to running and killing tasks, the class also * sets up appropriate access for the directories and files * that will be used by the tasks. */ class LinuxTaskController extends TaskController { private static final Log LOG = LogFactory.getLog(LinuxTaskController.class); // Name of the executable script that will contain the child // JVM command line. See writeCommand for details. private static final String COMMAND_FILE = "taskjvm.sh"; // Path to the setuid executable. private static String taskControllerExe; static { // the task-controller is expected to be under the $HADOOP_HOME/bin // directory. File hadoopBin = new File(System.getenv("HADOOP_HOME"), "bin"); taskControllerExe = new File(hadoopBin, "task-controller").getAbsolutePath(); } // The list of directory paths specified in the // variable mapred.local.dir. This is used to determine // which among the list of directories is picked up // for storing data for a particular task. private String[] mapredLocalDirs; // permissions to set on files and directories created. // When localized files are handled securely, this string // will change to something more restrictive. Until then, // it opens up the permissions for all, so that the tasktracker // and job owners can access files together. private static final String FILE_PERMISSIONS = "ugo+rwx"; // permissions to set on components of the path leading to // localized files and directories. Read and execute permissions // are required for different users to be able to access the // files. 
private static final String PATH_PERMISSIONS = "go+rx"; public LinuxTaskController() { super(); } @Override public void setConf(Configuration conf) { super.setConf(conf); mapredLocalDirs = conf.getStrings("mapred.local.dir"); //Setting of the permissions of the local directory is done in //setup() } /** * List of commands that the setuid script will execute. */ enum TaskCommands { LAUNCH_TASK_JVM, TERMINATE_TASK_JVM, KILL_TASK_JVM, ENABLE_TASK_FOR_CLEANUP } /** * Launch a task JVM that will run as the owner of the job. * * This method launches a task JVM by executing a setuid * executable that will switch to the user and run the * task. */ @Override void launchTaskJVM(TaskController.TaskControllerContext context) throws IOException { JvmEnv env = context.env; // get the JVM command line. String cmdLine = TaskLog.buildCommandLine(env.setup, env.vargs, env.stdout, env.stderr, env.logSize, true); StringBuffer sb = new StringBuffer(); //export out all the environment variable before child command as //the setuid/setgid binaries would not be getting, any environmental //variables which begin with LD_*. for(Entry<String, String> entry : env.env.entrySet()) { sb.append("export "); sb.append(entry.getKey()); sb.append("="); sb.append(entry.getValue()); sb.append("\n"); } sb.append(cmdLine); // write the command to a file in the // task specific cache directory writeCommand(sb.toString(), getTaskCacheDirectory(context)); // Call the taskcontroller with the right parameters. 
List<String> launchTaskJVMArgs = buildLaunchTaskArgs(context); ShellCommandExecutor shExec = buildTaskControllerExecutor( TaskCommands.LAUNCH_TASK_JVM, env.conf.getUser(), launchTaskJVMArgs, env.workDir, env.env); context.shExec = shExec; try { shExec.execute(); } catch (Exception e) { LOG.warn("Exception thrown while launching task JVM : " + StringUtils.stringifyException(e)); LOG.warn("Exit code from task is : " + shExec.getExitCode()); LOG.warn("Output from task-contoller is : " + shExec.getOutput()); throw new IOException(e); } if(LOG.isDebugEnabled()) { LOG.debug("output after executing task jvm = " + shExec.getOutput()); } } /** * Helper method that runs a LinuxTaskController command * * @param taskCommand * @param user * @param cmdArgs * @param env * @throws IOException */ private void runCommand(TaskCommands taskCommand, String user, List<String> cmdArgs, File workDir, Map<String, String> env) throws IOException { ShellCommandExecutor shExec = buildTaskControllerExecutor(taskCommand, user, cmdArgs, workDir, env); try { shExec.execute(); } catch (Exception e) { LOG.warn("Exit code from " + taskCommand.toString() + " is : " + shExec.getExitCode()); LOG.warn("Exception thrown by " + taskCommand.toString() + " : " + StringUtils.stringifyException(e)); LOG.info("Output from LinuxTaskController's " + taskCommand.toString() + " follows:"); logOutput(shExec.getOutput()); throw new IOException(e); } if (LOG.isDebugEnabled()) { LOG.info("Output from LinuxTaskController's " + taskCommand.toString() + " follows:"); logOutput(shExec.getOutput()); } } /** * Returns list of arguments to be passed while launching task VM. * See {@code buildTaskControllerExecutor(TaskCommands, * String, List<String>, JvmEnv)} documentation. 
* @param context * @return Argument to be used while launching Task VM */ private List<String> buildLaunchTaskArgs(TaskControllerContext context) { List<String> commandArgs = new ArrayList<String>(3); String taskId = context.task.getTaskID().toString(); String jobId = getJobId(context); LOG.debug("getting the task directory as: " + getTaskCacheDirectory(context)); commandArgs.add(getDirectoryChosenForTask( new File(getTaskCacheDirectory(context)), context)); commandArgs.add(jobId); if(!context.task.isTaskCleanupTask()) { commandArgs.add(taskId); }else { commandArgs.add(taskId + TaskTracker.TASK_CLEANUP_SUFFIX); } return commandArgs; } private List<String> buildTaskCleanupArgs( TaskControllerPathDeletionContext context) { List<String> commandArgs = new ArrayList<String>(3); commandArgs.add(context.mapredLocalDir.toUri().getPath()); commandArgs.add(context.task.getJobID().toString()); String workDir = ""; if (context.isWorkDir) { workDir = "/work"; } if (context.task.isTaskCleanupTask()) { commandArgs.add(context.task.getTaskID() + TaskTracker.TASK_CLEANUP_SUFFIX + workDir); } else { commandArgs.add(context.task.getTaskID() + workDir); } return commandArgs; } /** * Enables the task for cleanup by changing permissions of the specified path * in the local filesystem */ @Override void enableTaskForCleanup(PathDeletionContext context) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Going to do " + TaskCommands.ENABLE_TASK_FOR_CLEANUP.toString() + " for " + context.fullPath); } if (context instanceof TaskControllerPathDeletionContext) { TaskControllerPathDeletionContext tContext = (TaskControllerPathDeletionContext) context; - if (tContext.task.getUser() != null && tContext.fs instanceof LocalFileSystem) { - runCommand(TaskCommands.ENABLE_TASK_FOR_CLEANUP, + if (tContext.task.getUser() != null && + tContext.fs instanceof LocalFileSystem) { + try { + runCommand(TaskCommands.ENABLE_TASK_FOR_CLEANUP, tContext.task.getUser(), buildTaskCleanupArgs(tContext), null, 
null); + } catch(IOException e) { + LOG.warn("Uanble to change permissions for " + tContext.fullPath); + } } else { throw new IllegalArgumentException("Either user is null or the " + "file system is not local file system."); } } else { throw new IllegalArgumentException("PathDeletionContext provided is not " + "TaskControllerPathDeletionContext."); } } private void logOutput(String output) { String shExecOutput = output; if (shExecOutput != null) { for (String str : shExecOutput.split("\n")) { LOG.info(str); } } } // get the Job ID from the information in the TaskControllerContext private String getJobId(TaskControllerContext context) { String taskId = context.task.getTaskID().toString(); TaskAttemptID tId = TaskAttemptID.forName(taskId); String jobId = tId.getJobID().toString(); return jobId; } // Get the directory from the list of directories configured // in mapred.local.dir chosen for storing data pertaining to // this task. private String getDirectoryChosenForTask(File directory, TaskControllerContext context) { String jobId = getJobId(context); String taskId = context.task.getTaskID().toString(); for (String dir : mapredLocalDirs) { File mapredDir = new File(dir); File taskDir = new File(mapredDir, TaskTracker.getLocalTaskDir( jobId, taskId, context.task.isTaskCleanupTask())); if (directory.equals(taskDir)) { return dir; } } LOG.error("Couldn't parse task cache directory correctly"); throw new IllegalArgumentException("invalid task cache directory " + directory.getAbsolutePath()); } /** * Setup appropriate permissions for directories and files that * are used by the task. * * As the LinuxTaskController launches tasks as a user, different * from the daemon, all directories and files that are potentially * used by the tasks are setup with appropriate permissions that * will allow access. * * Until secure data handling is implemented (see HADOOP-4491 and * HADOOP-4493, for e.g.), the permissions are set up to allow * read, write and execute access for everyone. 
This will be * changed to restricted access as data is handled securely. */ void initializeTask(TaskControllerContext context) { // Setup permissions for the job and task cache directories. setupTaskCacheFileAccess(context); // setup permissions for task log directory setupTaskLogFileAccess(context); } // Allows access for the task to create log files under // the task log directory private void setupTaskLogFileAccess(TaskControllerContext context) { TaskAttemptID taskId = context.task.getTaskID(); File f = TaskLog.getTaskLogFile(taskId, TaskLog.LogName.SYSLOG); String taskAttemptLogDir = f.getParentFile().getAbsolutePath(); changeDirectoryPermissions(taskAttemptLogDir, FILE_PERMISSIONS, false); } // Allows access for the task to read, write and execute // the files under the job and task cache directories private void setupTaskCacheFileAccess(TaskControllerContext context) { String taskId = context.task.getTaskID().toString(); JobID jobId = JobID.forName(getJobId(context)); //Change permission for the task across all the disks for(String localDir : mapredLocalDirs) { File f = new File(localDir); File taskCacheDir = new File(f,TaskTracker.getLocalTaskDir( jobId.toString(), taskId, context.task.isTaskCleanupTask())); if(taskCacheDir.exists()) { changeDirectoryPermissions(taskCacheDir.getPath(), FILE_PERMISSIONS, true); } }//end of local directory Iteration } // convenience method to execute chmod. private void changeDirectoryPermissions(String dir, String mode, boolean isRecursive) { int ret = 0; try { ret = FileUtil.chmod(dir, mode, isRecursive); } catch (Exception e) { LOG.warn("Exception in changing permissions for directory " + dir + ". Exception: " + e.getMessage()); } if (ret != 0) { LOG.warn("Could not change permissions for directory " + dir); } } /** * Builds the command line for launching/terminating/killing task JVM. 
* Following is the format for launching/terminating/killing task JVM * <br/> * For launching following is command line argument: * <br/> * {@code user-name command tt-root job_id task_id} * <br/> * For terminating/killing task jvm. * {@code user-name command tt-root task-pid} * * @param command command to be executed. * @param userName user name * @param cmdArgs list of extra arguments * @param env JVM environment variables. * @return {@link ShellCommandExecutor} * @throws IOException */ private ShellCommandExecutor buildTaskControllerExecutor( TaskCommands command, String userName, List<String> cmdArgs, File workDir, Map<String, String> env) throws IOException { String[] taskControllerCmd = new String[3 + cmdArgs.size()]; taskControllerCmd[0] = getTaskControllerExecutablePath(); taskControllerCmd[1] = userName; taskControllerCmd[2] = String.valueOf(command.ordinal()); int i = 3; for (String cmdArg : cmdArgs) { taskControllerCmd[i++] = cmdArg; } if (LOG.isDebugEnabled()) { for (String cmd : taskControllerCmd) { LOG.debug("taskctrl command = " + cmd); } } ShellCommandExecutor shExec = null; if(workDir != null && workDir.exists()) { shExec = new ShellCommandExecutor(taskControllerCmd, workDir, env); } else { shExec = new ShellCommandExecutor(taskControllerCmd); } return shExec; } // Return the task specific directory under the cache. private String getTaskCacheDirectory(TaskControllerContext context) { // In the case of JVM reuse, the task specific directory // is different from what is set with respect with // env.workDir. Hence building this from the taskId everytime. 
String taskId = context.task.getTaskID().toString(); File cacheDirForJob = context.env.workDir.getParentFile().getParentFile(); if(context.task.isTaskCleanupTask()) { taskId = taskId + TaskTracker.TASK_CLEANUP_SUFFIX; } return new File(cacheDirForJob, taskId).getAbsolutePath(); } // Write the JVM command line to a file under the specified directory // Note that the JVM will be launched using a setuid executable, and // could potentially contain strings defined by a user. Hence, to // prevent special character attacks, we write the command line to // a file and execute it. private void writeCommand(String cmdLine, String directory) throws IOException { PrintWriter pw = null; String commandFile = directory + File.separator + COMMAND_FILE; LOG.info("Writing commands to " + commandFile); try { FileWriter fw = new FileWriter(commandFile); BufferedWriter bw = new BufferedWriter(fw); pw = new PrintWriter(bw); pw.write(cmdLine); } catch (IOException ioe) { LOG.error("Caught IOException while writing JVM command line to file. " + ioe.getMessage()); } finally { if (pw != null) { pw.close(); } // set execute permissions for all on the file. 
File f = new File(commandFile); if (f.exists()) { f.setReadable(true, false); f.setExecutable(true, false); } } } protected String getTaskControllerExecutablePath() { return taskControllerExe; } /** * Sets up the permissions of the following directories: * * Job cache directory * Archive directory * Hadoop log directories * */ @Override void setup() { //set up job cache directory and associated permissions String localDirs[] = this.mapredLocalDirs; for(String localDir : localDirs) { //Cache root File cacheDirectory = new File(localDir,TaskTracker.getCacheSubdir()); File jobCacheDirectory = new File(localDir,TaskTracker.getJobCacheSubdir()); if(!cacheDirectory.exists()) { if(!cacheDirectory.mkdirs()) { LOG.warn("Unable to create cache directory : " + cacheDirectory.getPath()); } } if(!jobCacheDirectory.exists()) { if(!jobCacheDirectory.mkdirs()) { LOG.warn("Unable to create job cache directory : " + jobCacheDirectory.getPath()); } } //Give world writable permission for every directory under //mapred-local-dir. //Child tries to write files under it when executing. changeDirectoryPermissions(localDir, FILE_PERMISSIONS, true); }//end of local directory manipulations //setting up perms for user logs File taskLog = TaskLog.getUserLogDir(); changeDirectoryPermissions(taskLog.getPath(), FILE_PERMISSIONS,false); } /* * Create Job directories across disks and set their permissions to 777 * This way when tasks are run we just need to setup permissions for * task folder. 
*/ @Override void initializeJob(JobID jobid) { for(String localDir : this.mapredLocalDirs) { File jobDirectory = new File(localDir, TaskTracker.getLocalJobDir(jobid.toString())); if(!jobDirectory.exists()) { if(!jobDirectory.mkdir()) { LOG.warn("Unable to create job cache directory : " + jobDirectory.getPath()); continue; } } //Should be recursive because the jar and work folders might be //present under the job cache directory changeDirectoryPermissions( jobDirectory.getPath(), FILE_PERMISSIONS, true); } } /** * API which builds the command line to be pass to LinuxTaskController * binary to terminate/kill the task. See * {@code buildTaskControllerExecutor(TaskCommands, * String, List<String>, JvmEnv)} documentation. * * * @param context context of task which has to be passed kill signal. * */ private List<String> buildKillTaskCommandArgs(TaskControllerContext context){ List<String> killTaskJVMArgs = new ArrayList<String>(); killTaskJVMArgs.add(context.pid); return killTaskJVMArgs; } /** * Convenience method used to sending appropriate Kill signal to the task * VM * @param context * @param command * @throws IOException */ private void finishTask(TaskControllerContext context, TaskCommands command) throws IOException{ if(context.task == null) { LOG.info("Context task null not killing the JVM"); return; } ShellCommandExecutor shExec = buildTaskControllerExecutor( command, context.env.conf.getUser(), buildKillTaskCommandArgs(context), context.env.workDir, context.env.env); try { shExec.execute(); } catch (Exception e) { LOG.warn("Output from task-contoller is : " + shExec.getOutput()); throw new IOException(e); } } @Override void terminateTask(TaskControllerContext context) { try { finishTask(context, TaskCommands.TERMINATE_TASK_JVM); } catch (Exception e) { LOG.warn("Exception thrown while sending kill to the Task VM " + StringUtils.stringifyException(e)); } } @Override void killTask(TaskControllerContext context) { try { finishTask(context, 
TaskCommands.KILL_TASK_JVM); } catch (Exception e) { LOG.warn("Exception thrown while sending destroy to the Task VM " + StringUtils.stringifyException(e)); } } } diff --git a/src/mapred/org/apache/hadoop/mapred/TaskController.java b/src/mapred/org/apache/hadoop/mapred/TaskController.java index 702d3c5..15a0c6e 100644 --- a/src/mapred/org/apache/hadoop/mapred/TaskController.java +++ b/src/mapred/org/apache/hadoop/mapred/TaskController.java @@ -1,217 +1,219 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapred; import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext; import org.apache.hadoop.mapred.JvmManager.JvmEnv; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Shell.ShellCommandExecutor; /** * Controls initialization, finalization and clean up of tasks, and * also the launching and killing of task JVMs. 
* * This class defines the API for initializing, finalizing and cleaning * up of tasks, as also the launching and killing task JVMs. * Subclasses of this class will implement the logic required for * performing the actual actions. */ abstract class TaskController implements Configurable { private Configuration conf; public static final Log LOG = LogFactory.getLog(TaskController.class); public Configuration getConf() { return conf; } public void setConf(Configuration conf) { this.conf = conf; } /** * Setup task controller component. * */ abstract void setup(); /** * Launch a task JVM * * This method defines how a JVM will be launched to run a task. * @param context the context associated to the task */ abstract void launchTaskJVM(TaskControllerContext context) throws IOException; /** * Top level cleanup a task JVM method. * * The current implementation does the following. * <ol> * <li>Sends a graceful terminate signal to task JVM allowing its sub-process * to cleanup.</li> * <li>Waits for stipulated period</li> * <li>Sends a forceful kill signal to task JVM, terminating all its * sub-process forcefully.</li> * </ol> * * @param context the task for which kill signal has to be sent. */ final void destroyTaskJVM(TaskControllerContext context) { terminateTask(context); try { Thread.sleep(context.sleeptimeBeforeSigkill); } catch (InterruptedException e) { LOG.warn("Sleep interrupted : " + StringUtils.stringifyException(e)); } killTask(context); } /** * Perform initializing actions required before a task can run. * * For instance, this method can be used to setup appropriate * access permissions for files and directories that will be * used by tasks. Tasks use the job cache, log, PID and distributed cache * directories and files as part of their functioning. Typically, * these files are shared between the daemon and the tasks * themselves. 
So, a TaskController that is launching tasks * as different users can implement this method to setup * appropriate ownership and permissions for these directories * and files. */ abstract void initializeTask(TaskControllerContext context); /** * Contains task information required for the task controller. */ static class TaskControllerContext { // task being executed Task task; // the JVM environment for the task JvmEnv env; // the Shell executor executing the JVM for this task ShellCommandExecutor shExec; // process handle of task JVM String pid; // waiting time before sending SIGKILL to task JVM after sending SIGTERM long sleeptimeBeforeSigkill; } /** * Contains info related to the path of the file/dir to be deleted. This info * is needed by task-controller to build the full path of the file/dir */ static class TaskControllerPathDeletionContext extends PathDeletionContext { Task task; boolean isWorkDir; TaskController taskController; /** * mapredLocalDir is the base dir under which to-be-deleted taskWorkDir or * taskAttemptDir exists. fullPath of taskAttemptDir or taskWorkDir * is built using mapredLocalDir, jobId, taskId, etc. 
*/ Path mapredLocalDir; public TaskControllerPathDeletionContext(FileSystem fs, Path mapredLocalDir, Task task, boolean isWorkDir, TaskController taskController) { super(fs, null); this.task = task; this.isWorkDir = isWorkDir; this.taskController = taskController; this.mapredLocalDir = mapredLocalDir; } @Override protected String getPathForCleanup() { if (fullPath == null) { fullPath = buildPathForDeletion(); } return fullPath; } /** * Builds the path of taskAttemptDir OR taskWorkDir based on * mapredLocalDir, jobId, taskId, etc */ String buildPathForDeletion() { String subDir = TaskTracker.getLocalTaskDir(task.getJobID().toString(), task.getTaskID().toString(), task.isTaskCleanupTask()); if (isWorkDir) { subDir = subDir + Path.SEPARATOR + "work"; } return mapredLocalDir.toUri().getPath() + Path.SEPARATOR + subDir; } /** * Makes the path(and its subdirectories recursively) fully deletable by * setting proper permissions(777) by task-controller */ @Override protected void enablePathForCleanup() throws IOException { getPathForCleanup();// allow init of fullPath - taskController.enableTaskForCleanup(this); + if (fs.exists(new Path(fullPath))) { + taskController.enableTaskForCleanup(this); + } } } /** * Method which is called after the job is localized so that task controllers * can implement their own job localization logic. * * @param tip Task of job for which localization happens. */ abstract void initializeJob(JobID jobId); /** * Sends a graceful terminate signal to taskJVM and it sub-processes. * * @param context task context */ abstract void terminateTask(TaskControllerContext context); /** * Sends a KILL signal to forcefully terminate the taskJVM and its * sub-processes. 
* * @param context task context */ abstract void killTask(TaskControllerContext context); /** * Enable the task for cleanup by changing permissions of the path * @param context path deletion context * @throws IOException */ abstract void enableTaskForCleanup(PathDeletionContext context) throws IOException; } diff --git a/src/mapred/org/apache/hadoop/mapred/TaskRunner.java b/src/mapred/org/apache/hadoop/mapred/TaskRunner.java index 984a209..5dc4441 100644 --- a/src/mapred/org/apache/hadoop/mapred/TaskRunner.java +++ b/src/mapred/org/apache/hadoop/mapred/TaskRunner.java @@ -43,637 +43,624 @@ abstract class TaskRunner extends Thread { volatile boolean killed = false; private TaskTracker.TaskInProgress tip; private Task t; private Object lock = new Object(); private volatile boolean done = false; private int exitCode = -1; private boolean exitCodeSet = false; private TaskTracker tracker; protected JobConf conf; JvmManager jvmManager; /** * for cleaning up old map outputs */ protected MapOutputFile mapOutputFile; public TaskRunner(TaskTracker.TaskInProgress tip, TaskTracker tracker, JobConf conf) { this.tip = tip; this.t = tip.getTask(); this.tracker = tracker; this.conf = conf; this.mapOutputFile = new MapOutputFile(t.getJobID()); this.mapOutputFile.setConf(conf); this.jvmManager = tracker.getJvmManagerInstance(); } public Task getTask() { return t; } public TaskTracker.TaskInProgress getTaskInProgress() { return tip; } public TaskTracker getTracker() { return tracker; } /** Called to assemble this task's input. This method is run in the parent * process before the child is spawned. It should not execute user code, * only system code. */ public boolean prepare() throws IOException { return true; } /** Called when this task's output is no longer needed. * This method is run in the parent process after the child exits. It should * not execute user code, only system code. 
*/ public void close() throws IOException {} private static String stringifyPathArray(Path[] p){ if (p == null){ return null; } StringBuffer str = new StringBuffer(p[0].toString()); for (int i = 1; i < p.length; i++){ str.append(","); str.append(p[i].toString()); } return str.toString(); } /** * Get the java command line options for the child map/reduce tasks. * @param jobConf job configuration * @param defaultValue default value * @return the java command line options for child map/reduce tasks * @deprecated Use command line options specific to map or reduce tasks set * via {@link JobConf#MAPRED_MAP_TASK_JAVA_OPTS} or * {@link JobConf#MAPRED_REDUCE_TASK_JAVA_OPTS} */ @Deprecated public String getChildJavaOpts(JobConf jobConf, String defaultValue) { return jobConf.get(JobConf.MAPRED_TASK_JAVA_OPTS, defaultValue); } /** * Get the maximum virtual memory of the child map/reduce tasks. * @param jobConf job configuration * @return the maximum virtual memory of the child task or <code>-1</code> if * none is specified * @deprecated Use limits specific to the map or reduce tasks set via * {@link JobConf#MAPRED_MAP_TASK_ULIMIT} or * {@link JobConf#MAPRED_REDUCE_TASK_ULIMIT} */ @Deprecated public int getChildUlimit(JobConf jobConf) { return jobConf.getInt(JobConf.MAPRED_TASK_ULIMIT, -1); } /** * Get the environment variables for the child map/reduce tasks. 
* @param jobConf job configuration * @return the environment variables for the child map/reduce tasks or * <code>null</code> if unspecified * @deprecated Use environment variables specific to the map or reduce tasks * set via {@link JobConf#MAPRED_MAP_TASK_ENV} or * {@link JobConf#MAPRED_REDUCE_TASK_ENV} */ public String getChildEnv(JobConf jobConf) { return jobConf.get(JobConf.MAPRED_TASK_ENV); } private static class CacheFile { URI uri; long timeStamp; CacheFile (URI uri, long timeStamp) { this.uri = uri; this.timeStamp = timeStamp; } } @Override public final void run() { String errorInfo = "Child Error"; List<CacheFile> localizedCacheFiles = new ArrayList<CacheFile>(); try { //before preparing the job localize //all the archives TaskAttemptID taskid = t.getTaskID(); LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir"); File jobCacheDir = null; if (conf.getJar() != null) { jobCacheDir = new File( new Path(conf.getJar()).getParent().toString()); } File workDir = new File(lDirAlloc.getLocalPathToRead( TaskTracker.getLocalTaskDir( t.getJobID().toString(), t.getTaskID().toString(), t.isTaskCleanupTask()) + Path.SEPARATOR + MRConstants.WORKDIR, conf). toString()); URI[] archives = DistributedCache.getCacheArchives(conf); URI[] files = DistributedCache.getCacheFiles(conf); FileStatus fileStatus; FileSystem fileSystem; Path localPath; String baseDir; if ((archives != null) || (files != null)) { if (archives != null) { String[] archivesTimestamps = DistributedCache.getArchiveTimestamps(conf); Path[] p = new Path[archives.length]; for (int i = 0; i < archives.length;i++){ fileSystem = FileSystem.get(archives[i], conf); fileStatus = fileSystem.getFileStatus( new Path(archives[i].getPath())); p[i] = DistributedCache.getLocalCache(archives[i], conf, new Path(TaskTracker.getCacheSubdir()), fileStatus, true, Long.parseLong( archivesTimestamps[i]), new Path(workDir. 
getAbsolutePath()), false, lDirAlloc); localizedCacheFiles.add(new CacheFile(archives[i], Long .parseLong(archivesTimestamps[i]))); } DistributedCache.setLocalArchives(conf, stringifyPathArray(p)); } if ((files != null)) { String[] fileTimestamps = DistributedCache.getFileTimestamps(conf); Path[] p = new Path[files.length]; for (int i = 0; i < files.length;i++){ fileSystem = FileSystem.get(files[i], conf); fileStatus = fileSystem.getFileStatus( new Path(files[i].getPath())); p[i] = DistributedCache.getLocalCache(files[i], conf, new Path(TaskTracker.getCacheSubdir()), fileStatus, false, Long.parseLong( fileTimestamps[i]), new Path(workDir. getAbsolutePath()), false, lDirAlloc); localizedCacheFiles.add(new CacheFile(files[i], Long .parseLong(fileTimestamps[i]))); } DistributedCache.setLocalFiles(conf, stringifyPathArray(p)); } Path localTaskFile = new Path(t.getJobFile()); FileSystem localFs = FileSystem.getLocal(conf); localFs.delete(localTaskFile, true); OutputStream out = localFs.create(localTaskFile); try { conf.writeXml(out); } finally { out.close(); } } if (!prepare()) { return; } String sep = System.getProperty("path.separator"); StringBuffer classPath = new StringBuffer(); // start with same classpath as parent process classPath.append(System.getProperty("java.class.path")); classPath.append(sep); if (!workDir.mkdirs()) { if (!workDir.isDirectory()) { LOG.fatal("Mkdirs failed to create " + workDir.toString()); } } String jar = conf.getJar(); if (jar != null) { // if jar exists, it into workDir File[] libs = new File(jobCacheDir, "lib").listFiles(); if (libs != null) { for (int i = 0; i < libs.length; i++) { classPath.append(sep); // add libs from jar to classpath classPath.append(libs[i]); } } classPath.append(sep); classPath.append(new File(jobCacheDir, "classes")); classPath.append(sep); classPath.append(jobCacheDir); } // include the user specified classpath //archive paths Path[] archiveClasspaths = DistributedCache.getArchiveClassPaths(conf); if 
(archiveClasspaths != null && archives != null) { Path[] localArchives = DistributedCache .getLocalCacheArchives(conf); if (localArchives != null){ for (int i=0;i<archives.length;i++){ for(int j=0;j<archiveClasspaths.length;j++){ if (archives[i].getPath().equals( archiveClasspaths[j].toString())){ classPath.append(sep); classPath.append(localArchives[i] .toString()); } } } } } //file paths Path[] fileClasspaths = DistributedCache.getFileClassPaths(conf); if (fileClasspaths!=null && files != null) { Path[] localFiles = DistributedCache .getLocalCacheFiles(conf); if (localFiles != null) { for (int i = 0; i < files.length; i++) { for (int j = 0; j < fileClasspaths.length; j++) { if (files[i].getPath().equals( fileClasspaths[j].toString())) { classPath.append(sep); classPath.append(localFiles[i].toString()); } } } } } classPath.append(sep); classPath.append(workDir); // Build exec child jmv args. Vector<String> vargs = new Vector<String>(8); File jvm = // use same jvm as parent new File(new File(System.getProperty("java.home"), "bin"), "java"); vargs.add(jvm.toString()); // Add child (task) java-vm options. // // The following symbols if present in mapred.{map|reduce}.child.java.opts // value are replaced: // + @taskid@ is interpolated with value of TaskID. // Other occurrences of @ will not be altered. // // Example with multiple arguments and substitutions, showing // jvm GC logging, and start of a passwordless JVM JMX agent so can // connect with jconsole and the likes to watch child memory, threads // and get thread dumps. 
// // <property> // <name>mapred.map.child.java.opts</name> // <value>-Xmx 512M -verbose:gc -Xloggc:/tmp/@[email protected] \ // -Dcom.sun.management.jmxremote.authenticate=false \ // -Dcom.sun.management.jmxremote.ssl=false \ // </value> // </property> // // <property> // <name>mapred.reduce.child.java.opts</name> // <value>-Xmx 1024M -verbose:gc -Xloggc:/tmp/@[email protected] \ // -Dcom.sun.management.jmxremote.authenticate=false \ // -Dcom.sun.management.jmxremote.ssl=false \ // </value> // </property> // String javaOpts = getChildJavaOpts(conf, JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS); javaOpts = javaOpts.replace("@taskid@", taskid.toString()); String [] javaOptsSplit = javaOpts.split(" "); // Add java.library.path; necessary for loading native libraries. // // 1. To support native-hadoop library i.e. libhadoop.so, we add the // parent processes' java.library.path to the child. // 2. We also add the 'cwd' of the task to it's java.library.path to help // users distribute native libraries via the DistributedCache. // 3. The user can also specify extra paths to be added to the // java.library.path via mapred.{map|reduce}.child.java.opts. // String libraryPath = System.getProperty("java.library.path"); if (libraryPath == null) { libraryPath = workDir.getAbsolutePath(); } else { libraryPath += sep + workDir; } boolean hasUserLDPath = false; for(int i=0; i<javaOptsSplit.length ;i++) { if(javaOptsSplit[i].startsWith("-Djava.library.path=")) { javaOptsSplit[i] += sep + libraryPath; hasUserLDPath = true; break; } } if(!hasUserLDPath) { vargs.add("-Djava.library.path=" + libraryPath); } for (int i = 0; i < javaOptsSplit.length; i++) { vargs.add(javaOptsSplit[i]); } // add java.io.tmpdir given by mapred.child.tmp String tmp = conf.get("mapred.child.tmp", "./tmp"); Path tmpDir = new Path(tmp); // if temp directory path is not absolute // prepend it with workDir. 
if (!tmpDir.isAbsolute()) { tmpDir = new Path(workDir.toString(), tmp); } FileSystem localFs = FileSystem.getLocal(conf); if (!localFs.mkdirs(tmpDir) && !localFs.getFileStatus(tmpDir).isDir()) { throw new IOException("Mkdirs failed to create " + tmpDir.toString()); } vargs.add("-Djava.io.tmpdir=" + tmpDir.toString()); // Add classpath. vargs.add("-classpath"); vargs.add(classPath.toString()); // Setup the log4j prop long logSize = TaskLog.getTaskLogLength(conf); vargs.add("-Dhadoop.log.dir=" + new File(System.getProperty("hadoop.log.dir") ).getAbsolutePath()); vargs.add("-Dhadoop.root.logger=INFO,TLA"); vargs.add("-Dhadoop.tasklog.taskid=" + taskid); vargs.add("-Dhadoop.tasklog.totalLogFileSize=" + logSize); if (conf.getProfileEnabled()) { if (conf.getProfileTaskRange(t.isMapTask() ).isIncluded(t.getPartition())) { File prof = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.PROFILE); vargs.add(String.format(conf.getProfileParams(), prof.toString())); } } // Add main class and its arguments vargs.add(Child.class.getName()); // main of Child // pass umbilical address InetSocketAddress address = tracker.getTaskTrackerReportAddress(); vargs.add(address.getAddress().getHostAddress()); vargs.add(Integer.toString(address.getPort())); vargs.add(taskid.toString()); // pass task identifier tracker.addToMemoryManager(t.getTaskID(), t.isMapTask(), conf); // set memory limit using ulimit if feasible and necessary ... 
String[] ulimitCmd = Shell.getUlimitMemoryCommand(getChildUlimit(conf)); List<String> setup = null; if (ulimitCmd != null) { setup = new ArrayList<String>(); for (String arg : ulimitCmd) { setup.add(arg); } } // Set up the redirection of the task's stdout and stderr streams File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT); File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR); stdout.getParentFile().mkdirs(); tracker.getTaskTrackerInstrumentation().reportTaskLaunch(taskid, stdout, stderr); Map<String, String> env = new HashMap<String, String>(); StringBuffer ldLibraryPath = new StringBuffer(); ldLibraryPath.append(workDir.toString()); String oldLdLibraryPath = null; oldLdLibraryPath = System.getenv("LD_LIBRARY_PATH"); if (oldLdLibraryPath != null) { ldLibraryPath.append(sep); ldLibraryPath.append(oldLdLibraryPath); } env.put("LD_LIBRARY_PATH", ldLibraryPath.toString()); // for the child of task jvm, set hadoop.root.logger env.put("HADOOP_ROOT_LOGGER","INFO,TLA"); String hadoopClientOpts = System.getenv("HADOOP_CLIENT_OPTS"); if (hadoopClientOpts == null) { hadoopClientOpts = ""; } else { hadoopClientOpts = hadoopClientOpts + " "; } hadoopClientOpts = hadoopClientOpts + "-Dhadoop.tasklog.taskid=" + taskid + " -Dhadoop.tasklog.totalLogFileSize=" + logSize; env.put("HADOOP_CLIENT_OPTS", "\"" + hadoopClientOpts + "\""); // add the env variables passed by the user String mapredChildEnv = getChildEnv(conf); if (mapredChildEnv != null && mapredChildEnv.length() > 0) { String childEnvs[] = mapredChildEnv.split(","); for (String cEnv : childEnvs) { try { String[] parts = cEnv.split("="); // split on '=' String value = env.get(parts[0]); if (value != null) { // replace $env with the child's env constructed by tt's // example LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp value = parts[1].replace("$" + parts[0], value); } else { // this key is not configured by the tt for the child .. 
get it // from the tt's env // example PATH=$PATH:/tmp value = System.getenv(parts[0]); if (value != null) { // the env key is present in the tt's env value = parts[1].replace("$" + parts[0], value); } else { // the env key is note present anywhere .. simply set it // example X=$X:/tmp or X=/tmp value = parts[1].replace("$" + parts[0], ""); } } env.put(parts[0], value); } catch (Throwable t) { // set the error msg errorInfo = "Invalid User environment settings : " + mapredChildEnv + ". Failed to parse user-passed environment param." + " Expecting : env1=value1,env2=value2..."; LOG.warn(errorInfo); throw t; } } } jvmManager.launchJvm(this, jvmManager.constructJvmEnv(setup,vargs,stdout,stderr,logSize, workDir, env, conf)); synchronized (lock) { while (!done) { lock.wait(); } } tracker.getTaskTrackerInstrumentation().reportTaskEnd(t.getTaskID()); if (exitCodeSet) { if (!killed && exitCode != 0) { if (exitCode == 65) { tracker.getTaskTrackerInstrumentation().taskFailedPing(t.getTaskID()); } throw new IOException("Task process exit with nonzero status of " + exitCode + "."); } } } catch (FSError e) { LOG.fatal("FSError", e); try { tracker.fsError(t.getTaskID(), e.getMessage()); } catch (IOException ie) { LOG.fatal(t.getTaskID()+" reporting FSError", ie); } } catch (Throwable throwable) { LOG.warn(t.getTaskID() + errorInfo, throwable); Throwable causeThrowable = new Throwable(errorInfo, throwable); ByteArrayOutputStream baos = new ByteArrayOutputStream(); causeThrowable.printStackTrace(new PrintStream(baos)); try { tracker.reportDiagnosticInfo(t.getTaskID(), baos.toString()); } catch (IOException e) { LOG.warn(t.getTaskID()+" Reporting Diagnostics", e); } } finally { try{ for (CacheFile cf : localizedCacheFiles){ DistributedCache.releaseCache(cf.uri, conf, cf.timeStamp); } }catch(IOException ie){ LOG.warn("Error releasing caches : Cache files might not have been cleaned up"); } // It is safe to call TaskTracker.TaskInProgress.reportTaskFinished with // *false* since the 
task has either // a) SUCCEEDED - which means commit has been done // b) FAILED - which means we do not need to commit tip.reportTaskFinished(false); } } /** - * Sets permissions recursively and then deletes the contents of dir. * Makes dir empty directory(does not delete dir itself). */ static void deleteDirContents(JobConf conf, File dir) throws IOException { FileSystem fs = FileSystem.getLocal(conf); if (fs.exists(new Path(dir.getAbsolutePath()))) { File contents[] = dir.listFiles(); if (contents != null) { for (int i = 0; i < contents.length; i++) { - try { - int ret = 0; - if ((ret = FileUtil.chmod(contents[i].getAbsolutePath(), - "a+rwx", true)) != 0) { - LOG.warn("Unable to chmod for " + contents[i] + - "; chmod exit status = " + ret); - } - } catch(InterruptedException e) { - LOG.warn("Interrupted while setting permissions for contents of " + - "workDir. Not deleting the remaining contents of workDir."); - return; - } if (!fs.delete(new Path(contents[i].getAbsolutePath()), true)) { LOG.warn("Unable to delete "+ contents[i]); } } } } else { LOG.warn(dir + " does not exist."); } } //Mostly for setting up the symlinks. Note that when we setup the distributed //cache, we didn't create the symlinks. This is done on a per task basis //by the currently executing task. public static void setupWorkDir(JobConf conf) throws IOException { File workDir = new File(".").getAbsoluteFile(); if (LOG.isDebugEnabled()) { LOG.debug("Fully deleting contents of " + workDir); } /** delete only the contents of workDir leaving the directory empty. We * can't delete the workDir as it is the current working directory. 
*/ deleteDirContents(conf, workDir); if (DistributedCache.getSymlink(conf)) { URI[] archives = DistributedCache.getCacheArchives(conf); URI[] files = DistributedCache.getCacheFiles(conf); Path[] localArchives = DistributedCache.getLocalCacheArchives(conf); Path[] localFiles = DistributedCache.getLocalCacheFiles(conf); if (archives != null) { for (int i = 0; i < archives.length; i++) { String link = archives[i].getFragment(); if (link != null) { link = workDir.toString() + Path.SEPARATOR + link; File flink = new File(link); if (!flink.exists()) { FileUtil.symLink(localArchives[i].toString(), link); } } } } if (files != null) { for (int i = 0; i < files.length; i++) { String link = files[i].getFragment(); if (link != null) { link = workDir.toString() + Path.SEPARATOR + link; File flink = new File(link); if (!flink.exists()) { FileUtil.symLink(localFiles[i].toString(), link); } } } } } File jobCacheDir = null; if (conf.getJar() != null) { jobCacheDir = new File( new Path(conf.getJar()).getParent().toString()); } // create symlinks for all the files in job cache dir in current // workingdir for streaming try{ DistributedCache.createAllSymlink(conf, jobCacheDir, workDir); } catch(IOException ie){ // Do not exit even if symlinks have not been created. LOG.warn(StringUtils.stringifyException(ie)); } // add java.io.tmpdir given by mapred.child.tmp String tmp = conf.get("mapred.child.tmp", "./tmp"); Path tmpDir = new Path(tmp); // if temp directory path is not absolute // prepend it with workDir. 
if (!tmpDir.isAbsolute()) { tmpDir = new Path(workDir.toString(), tmp); FileSystem localFs = FileSystem.getLocal(conf); if (!localFs.mkdirs(tmpDir) && !localFs.getFileStatus(tmpDir).isDir()){ throw new IOException("Mkdirs failed to create " + tmpDir.toString()); } } } /** * Kill the child process */ public void kill() { killed = true; jvmManager.taskKilled(this); signalDone(); } public void signalDone() { synchronized (lock) { done = true; lock.notify(); } } public void setExitCode(int exitCode) { this.exitCodeSet = true; this.exitCode = exitCode; } } diff --git a/src/test/org/apache/hadoop/mapred/TestSetupWorkDir.java b/src/test/org/apache/hadoop/mapred/TestSetupWorkDir.java index bcfc66d..d21cca5 100644 --- a/src/test/org/apache/hadoop/mapred/TestSetupWorkDir.java +++ b/src/test/org/apache/hadoop/mapred/TestSetupWorkDir.java @@ -1,90 +1,84 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; import java.io.DataOutputStream; import java.io.File; import java.io.IOException; import junit.framework.TestCase; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +/** + * Validates if TaskRunner.deleteDirContents() is properly cleaning up the + * contents of workDir. + */ public class TestSetupWorkDir extends TestCase { - private static final Log LOG = - LogFactory.getLog(TestSetupWorkDir.class); + private static int NUM_SUB_DIRS = 3; /** - * Create a file in the given dir and set permissions r_xr_xr_x sothat no one - * can delete it directly(without doing chmod). - * Creates dir/subDir and dir/subDir/file + * Creates subdirectories under given dir and files under those subdirs. + * Creates dir/subDir1, dir/subDir1/file, dir/subDir2, dir/subDir2/file, etc. */ - static void createFileAndSetPermissions(JobConf jobConf, Path dir) + static void createSubDirs(JobConf jobConf, Path dir) throws IOException { - Path subDir = new Path(dir, "subDir"); - FileSystem fs = FileSystem.getLocal(jobConf); - fs.mkdirs(subDir); - Path p = new Path(subDir, "file"); - DataOutputStream out = fs.create(p); - out.writeBytes("dummy input"); - out.close(); - // no write permission for subDir and subDir/file - try { - int ret = 0; - if((ret = FileUtil.chmod(subDir.toUri().getPath(), "a=rx", true)) != 0) { - LOG.warn("chmod failed for " + subDir + ";retVal=" + ret); - } - } catch(InterruptedException e) { - LOG.warn("Interrupted while doing chmod for " + subDir); + for (int i = 1; i <= NUM_SUB_DIRS; i++) { + Path subDir = new Path(dir, "subDir" + i); + FileSystem fs = FileSystem.getLocal(jobConf); + fs.mkdirs(subDir); + Path p = new Path(subDir, "file"); + DataOutputStream out = fs.create(p); + out.writeBytes("dummy input"); + out.close(); } } /** - * Validates if setupWorkDir is properly cleaning up 
contents of workDir. - * TODO: other things of TaskRunner.setupWorkDir() related to distributed - * cache need to be validated. + * Validates if TaskRunner.deleteDirContents() is properly cleaning up the + * contents of workDir. */ public void testSetupWorkDir() throws IOException { Path rootDir = new Path(System.getProperty("test.build.data", "/tmp"), "testSetupWorkDir"); Path myWorkDir = new Path(rootDir, "./work"); JobConf jConf = new JobConf(); FileSystem fs = FileSystem.getLocal(jConf); if (fs.exists(myWorkDir)) { fs.delete(myWorkDir, true); } if (!fs.mkdirs(myWorkDir)) { throw new IOException("Unable to create workDir " + myWorkDir); } - // create {myWorkDir}/subDir/file and set 555 perms for subDir and file - createFileAndSetPermissions(jConf, myWorkDir); + // create subDirs under work dir + createSubDirs(jConf, myWorkDir); + assertTrue("createDirAndSubDirs() did not create subdirs under " + + myWorkDir, fs.listStatus(myWorkDir).length == NUM_SUB_DIRS); + TaskRunner.deleteDirContents(jConf, new File(myWorkDir.toUri().getPath())); assertTrue("Contents of " + myWorkDir + " are not cleaned up properly.", fs.listStatus(myWorkDir).length == 0); // cleanup fs.delete(rootDir, true); } }
jaxlaw/hadoop-common
2e86994c37c286b72be7ea871ad8cb7774df35bc
MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from filling up. Contributed by Vinod Kumar Vavilapalli.
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 4d4fe64..311a4ca 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,481 +1,484 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. + MAPREDUCE-1100. Truncate user logs to prevent TaskTrackers' disks from + filling up. (Vinod Kumar Vavilapalli via acmurthy) + MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all of the servlets to prevent XSS attacks. (omalley) yahoo-hadoop-0.20.1-3195383002 MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. 
(Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. 
(acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. 
Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. 
yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. 
yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. 
http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. 
http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. 
http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. 
Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/mapred/org/apache/hadoop/mapred/JvmManager.java b/src/mapred/org/apache/hadoop/mapred/JvmManager.java index 2da6bb5..6d420b8 100644 --- a/src/mapred/org/apache/hadoop/mapred/JvmManager.java +++ b/src/mapred/org/apache/hadoop/mapred/JvmManager.java @@ -1,488 +1,507 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Vector; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.mapred.TaskController.TaskControllerContext; import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; import org.apache.hadoop.util.ProcessTree; import org.apache.hadoop.util.Shell.ShellCommandExecutor; class JvmManager { public static final Log LOG = LogFactory.getLog("org.apache.hadoop.mapred.JvmManager"); JvmManagerForType mapJvmManager; JvmManagerForType reduceJvmManager; public JvmEnv constructJvmEnv(List<String> setup, Vector<String>vargs, File stdout,File stderr,long logSize, File workDir, Map<String,String> env, JobConf conf) { return new JvmEnv(setup,vargs,stdout,stderr,logSize,workDir,env,conf); } public JvmManager(TaskTracker tracker) { mapJvmManager = new JvmManagerForType(tracker.getMaxCurrentMapTasks(), true, tracker); reduceJvmManager = new JvmManagerForType(tracker.getMaxCurrentReduceTasks(), false, tracker); } /* * Saves pid of the given taskJvm */ void setPidToJvm(JVMId jvmId, String pid) { if (jvmId.isMapJVM()) { mapJvmManager.jvmIdToPid.put(jvmId, pid); } else { reduceJvmManager.jvmIdToPid.put(jvmId, pid); } } /* * Returns the pid of the task */ String getPid(TaskRunner t) { if (t != null && t.getTask() != null) { if (t.getTask().isMapTask()) { JVMId id = mapJvmManager.runningTaskToJvm.get(t); if (id != null) { return mapJvmManager.jvmIdToPid.get(id); } } else { JVMId id = reduceJvmManager.runningTaskToJvm.get(t); if (id != null) { return reduceJvmManager.jvmIdToPid.get(id); } } } return null; } public void stop() { mapJvmManager.stop(); reduceJvmManager.stop(); } public boolean isJvmKnown(JVMId jvmId) { if (jvmId.isMapJVM()) { return 
mapJvmManager.isJvmknown(jvmId); } else { return reduceJvmManager.isJvmknown(jvmId); } } public void launchJvm(TaskRunner t, JvmEnv env) { if (t.getTask().isMapTask()) { mapJvmManager.reapJvm(t, env); } else { reduceJvmManager.reapJvm(t, env); } } public TaskInProgress getTaskForJvm(JVMId jvmId) { if (jvmId.isMapJVM()) { return mapJvmManager.getTaskForJvm(jvmId); } else { return reduceJvmManager.getTaskForJvm(jvmId); } } public void taskFinished(TaskRunner tr) { if (tr.getTask().isMapTask()) { mapJvmManager.taskFinished(tr); } else { reduceJvmManager.taskFinished(tr); } } public void taskKilled(TaskRunner tr) { if (tr.getTask().isMapTask()) { mapJvmManager.taskKilled(tr); } else { reduceJvmManager.taskKilled(tr); } } public void killJvm(JVMId jvmId) { if (jvmId.isMap) { mapJvmManager.killJvm(jvmId); } else { reduceJvmManager.killJvm(jvmId); } } private static class JvmManagerForType { //Mapping from the JVM IDs to running Tasks Map <JVMId,TaskRunner> jvmToRunningTask = new HashMap<JVMId, TaskRunner>(); //Mapping from the tasks to JVM IDs Map <TaskRunner,JVMId> runningTaskToJvm = new HashMap<TaskRunner, JVMId>(); //Mapping from the JVM IDs to Reduce JVM processes Map <JVMId, JvmRunner> jvmIdToRunner = new HashMap<JVMId, JvmRunner>(); //Mapping from the JVM IDs to process IDs Map <JVMId, String> jvmIdToPid = new HashMap<JVMId, String>(); int maxJvms; boolean isMap; Random rand = new Random(System.currentTimeMillis()); private TaskTracker tracker; public JvmManagerForType(int maxJvms, boolean isMap, TaskTracker tracker) { this.maxJvms = maxJvms; this.isMap = isMap; this.tracker = tracker; } synchronized public void setRunningTaskForJvm(JVMId jvmId, TaskRunner t) { jvmToRunningTask.put(jvmId, t); runningTaskToJvm.put(t,jvmId); jvmIdToRunner.get(jvmId).setBusy(true); } synchronized public TaskInProgress getTaskForJvm(JVMId jvmId) { if (jvmToRunningTask.containsKey(jvmId)) { //Incase of JVM reuse, tasks are returned to previously launched //JVM via this method. 
However when a new task is launched //the task being returned has to be initialized. TaskRunner taskRunner = jvmToRunningTask.get(jvmId); JvmRunner jvmRunner = jvmIdToRunner.get(jvmId); Task task = taskRunner.getTaskInProgress().getTask(); TaskControllerContext context = new TaskController.TaskControllerContext(); context.env = jvmRunner.env; context.task = task; //If we are returning the same task as which the JVM was launched //we don't initialize task once again. if(!jvmRunner.env.conf.get("mapred.task.id"). equals(task.getTaskID().toString())) { tracker.getTaskController().initializeTask(context); } + + jvmRunner.taskGiven(task); return taskRunner.getTaskInProgress(); } return null; } synchronized public boolean isJvmknown(JVMId jvmId) { return jvmIdToRunner.containsKey(jvmId); } synchronized public void taskFinished(TaskRunner tr) { JVMId jvmId = runningTaskToJvm.remove(tr); if (jvmId != null) { jvmToRunningTask.remove(jvmId); JvmRunner jvmRunner; if ((jvmRunner = jvmIdToRunner.get(jvmId)) != null) { jvmRunner.taskRan(); } } } synchronized public void taskKilled(TaskRunner tr) { JVMId jvmId = runningTaskToJvm.remove(tr); if (jvmId != null) { jvmToRunningTask.remove(jvmId); killJvm(jvmId); } } synchronized public void killJvm(JVMId jvmId) { JvmRunner jvmRunner; if ((jvmRunner = jvmIdToRunner.get(jvmId)) != null) { jvmRunner.kill(); } } synchronized public void stop() { //since the kill() method invoked later on would remove //an entry from the jvmIdToRunner map, we create a //copy of the values and iterate over it (if we don't //make a copy, we will encounter concurrentModification //exception List <JvmRunner> list = new ArrayList<JvmRunner>(); list.addAll(jvmIdToRunner.values()); for (JvmRunner jvm : list) { jvm.kill(); } } synchronized private void removeJvm(JVMId jvmId) { jvmIdToRunner.remove(jvmId); jvmIdToPid.remove(jvmId); } private synchronized void reapJvm( TaskRunner t, JvmEnv env) { if (t.getTaskInProgress().wasKilled()) { //the task was killed 
in-flight //no need to do the rest of the operations return; } boolean spawnNewJvm = false; JobID jobId = t.getTask().getJobID(); //Check whether there is a free slot to start a new JVM. //,or, Kill a (idle) JVM and launch a new one //When this method is called, we *must* // (1) spawn a new JVM (if we are below the max) // (2) find an idle JVM (that belongs to the same job), or, // (3) kill an idle JVM (from a different job) // (the order of return is in the order above) int numJvmsSpawned = jvmIdToRunner.size(); JvmRunner runnerToKill = null; if (numJvmsSpawned >= maxJvms) { //go through the list of JVMs for all jobs. Iterator<Map.Entry<JVMId, JvmRunner>> jvmIter = jvmIdToRunner.entrySet().iterator(); while (jvmIter.hasNext()) { JvmRunner jvmRunner = jvmIter.next().getValue(); JobID jId = jvmRunner.jvmId.getJobId(); //look for a free JVM for this job; if one exists then just break if (jId.equals(jobId) && !jvmRunner.isBusy() && !jvmRunner.ranAll()){ setRunningTaskForJvm(jvmRunner.jvmId, t); //reserve the JVM LOG.info("No new JVM spawned for jobId/taskid: " + jobId+"/"+t.getTask().getTaskID() + ". Attempting to reuse: " + jvmRunner.jvmId); return; } //Cases when a JVM is killed: // (1) the JVM under consideration belongs to the same job // (passed in the argument). In this case, kill only when // the JVM ran all the tasks it was scheduled to run (in terms // of count). 
// (2) the JVM under consideration belongs to a different job and is // currently not busy //But in both the above cases, we see if we can assign the current //task to an idle JVM (hence we continue the loop even on a match) if ((jId.equals(jobId) && jvmRunner.ranAll()) || (!jId.equals(jobId) && !jvmRunner.isBusy())) { runnerToKill = jvmRunner; spawnNewJvm = true; } } } else { spawnNewJvm = true; } if (spawnNewJvm) { if (runnerToKill != null) { LOG.info("Killing JVM: " + runnerToKill.jvmId); runnerToKill.kill(); } spawnNewJvm(jobId, env, t); return; } //*MUST* never reach this throw new RuntimeException("Inconsistent state!!! " + "JVM Manager reached an unstable state " + "while reaping a JVM for task: " + t.getTask().getTaskID()+ " " + getDetails()); } private String getDetails() { StringBuffer details = new StringBuffer(); details.append("Number of active JVMs:"). append(jvmIdToRunner.size()); Iterator<JVMId> jvmIter = jvmIdToRunner.keySet().iterator(); while (jvmIter.hasNext()) { JVMId jvmId = jvmIter.next(); details.append("\n JVMId "). append(jvmId.toString()). append(" #Tasks ran: "). append(jvmIdToRunner.get(jvmId).numTasksRan). append(" Currently busy? "). append(jvmIdToRunner.get(jvmId).busy). append(" Currently running: "). append(jvmToRunningTask.get(jvmId).getTask().getTaskID().toString()); } return details.toString(); } private void spawnNewJvm(JobID jobId, JvmEnv env, TaskRunner t) { JvmRunner jvmRunner = new JvmRunner(env,jobId); jvmIdToRunner.put(jvmRunner.jvmId, jvmRunner); //spawn the JVM in a new thread. Note that there will be very little //extra overhead of launching the new thread for a new JVM since //most of the cost is involved in launching the process. Moreover, //since we are going to be using the JVM for running many tasks, //the thread launch cost becomes trivial when amortized over all //tasks. Doing it this way also keeps code simple. 
jvmRunner.setDaemon(true); jvmRunner.setName("JVM Runner " + jvmRunner.jvmId + " spawned."); setRunningTaskForJvm(jvmRunner.jvmId, t); LOG.info(jvmRunner.getName()); jvmRunner.start(); } synchronized private void updateOnJvmExit(JVMId jvmId, int exitCode) { removeJvm(jvmId); TaskRunner t = jvmToRunningTask.remove(jvmId); if (t != null) { runningTaskToJvm.remove(t); if (exitCode != 0) { t.setExitCode(exitCode); } t.signalDone(); } } private class JvmRunner extends Thread { JvmEnv env; volatile boolean killed = false; volatile int numTasksRan; final int numTasksToRun; JVMId jvmId; volatile boolean busy = true; private ShellCommandExecutor shexec; // shell terminal for running the task //context used for starting JVM private TaskControllerContext initalContext; + + private List<Task> tasksGiven = new ArrayList<Task>(); + + void taskGiven(Task task) { + tasksGiven.add(task); + } + public JvmRunner(JvmEnv env, JobID jobId) { this.env = env; this.jvmId = new JVMId(jobId, isMap, rand.nextInt()); this.numTasksToRun = env.conf.getNumTasksToExecutePerJvm(); LOG.info("In JvmRunner constructed JVM ID: " + jvmId); } public void run() { runChild(env); + + // Post-JVM-exit logs processing. Truncate the logs. + truncateJVMLogs(); } public void runChild(JvmEnv env) { initalContext = new TaskControllerContext(); try { env.vargs.add(Integer.toString(jvmId.getId())); //Launch the task controller to run task JVM initalContext.task = jvmToRunningTask.get(jvmId).getTask(); initalContext.env = env; tracker.getTaskController().initializeTask(initalContext); tracker.getTaskController().launchTaskJVM(initalContext); } catch (IOException ioe) { // do nothing // error and output are appropriately redirected } finally { // handle the exit code shexec = initalContext.shExec; if (shexec == null) { return; } kill(); int exitCode = shexec.getExitCode(); updateOnJvmExit(jvmId, exitCode); LOG.info("JVM : " + jvmId +" exited. 
Number of tasks it ran: " + numTasksRan); try { // In case of jvm-reuse, //the task jvm cleans up the common workdir for every //task at the beginning of each task in the task JVM. //For the last task, we do it here. if (env.conf.getNumTasksToExecutePerJvm() != 1) { tracker.directoryCleanupThread.addToQueue( TaskTracker.buildTaskControllerPathDeletionContexts( tracker.getLocalFileSystem(), tracker.getLocalDirs(), initalContext.task, true /* workDir */, tracker.getTaskController())); } } catch (IOException ie){} } } public void kill() { if (!killed) { killed = true; TaskController controller = tracker.getTaskController(); // Check inital context before issuing a kill to prevent situations // where kill is issued before task is launched. if (initalContext != null && initalContext.env != null) { initalContext.pid = jvmIdToPid.get(jvmId); initalContext.sleeptimeBeforeSigkill = tracker.getJobConf() .getLong("mapred.tasktracker.tasks.sleeptime-before-sigkill", ProcessTree.DEFAULT_SLEEPTIME_BEFORE_SIGKILL); controller.destroyTaskJVM(initalContext); } else { LOG.info(String.format("JVM Not killed %s but just removed", jvmId .toString())); } removeJvm(jvmId); } } - + + // Post-JVM-exit logs processing. Truncate the logs. 
+ private void truncateJVMLogs() { + Task firstTask = initalContext.task; + tracker.getTaskLogsMonitor().addProcessForLogTruncation( + firstTask.getTaskID(), tasksGiven); + } + public void taskRan() { busy = false; numTasksRan++; } public boolean ranAll() { return(numTasksRan == numTasksToRun); } public void setBusy(boolean busy) { this.busy = busy; } public boolean isBusy() { return busy; } } } static class JvmEnv { //Helper class List<String> vargs; List<String> setup; File stdout; File stderr; File workDir; long logSize; JobConf conf; Map<String, String> env; public JvmEnv(List<String> setup, Vector<String> vargs, File stdout, File stderr, long logSize, File workDir, Map<String,String> env, JobConf conf) { this.setup = setup; this.vargs = vargs; this.stdout = stdout; this.stderr = stderr; this.workDir = workDir; this.env = env; this.conf = conf; } } } diff --git a/src/mapred/org/apache/hadoop/mapred/TaskLog.java b/src/mapred/org/apache/hadoop/mapred/TaskLog.java index b7f6bf0..791ec81 100644 --- a/src/mapred/org/apache/hadoop/mapred/TaskLog.java +++ b/src/mapred/org/apache/hadoop/mapred/TaskLog.java @@ -1,642 +1,694 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; import java.io.BufferedOutputStream; import java.io.BufferedReader; import java.io.DataOutputStream; import java.io.File; import java.io.FileFilter; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; +import java.util.Arrays; import java.util.Enumeration; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.ProcessTree; import org.apache.hadoop.util.Shell; import org.apache.log4j.Appender; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; /** * A simple logger to handle the task-specific user logs. * This class uses the system property <code>hadoop.log.dir</code>. 
* */ public class TaskLog { private static final Log LOG = LogFactory.getLog(TaskLog.class); private static final File LOG_DIR = new File(System.getProperty("hadoop.log.dir"), "userlogs").getAbsoluteFile(); // localFS is set in (and used by) writeToIndexFile() static LocalFileSystem localFS = null; static { if (!LOG_DIR.exists()) { LOG_DIR.mkdirs(); } } public static File getTaskLogFile(TaskAttemptID taskid, LogName filter) { return new File(getBaseDir(taskid.toString()), filter.toString()); } + + /** + * @deprecated Instead use + * {@link #getAllLogsFileDetails(TaskAttemptID, boolean)} to get + * the details of all log-files and then use the particular + * log-type's detail to call getRealTaskLogFileLocation(String, + * LogName) real log-location + */ + @Deprecated public static File getRealTaskLogFileLocation(TaskAttemptID taskid, LogName filter) { LogFileDetail l; try { - l = getTaskLogFileDetail(taskid, filter); + Map<LogName, LogFileDetail> allFilesDetails = + getAllLogsFileDetails(taskid, false); + l = allFilesDetails.get(filter); } catch (IOException ie) { - LOG.error("getTaskLogFileDetail threw an exception " + ie); + LOG.error("getTaskLogFileDetailgetAllLogsFileDetails threw an exception " + + ie); return null; } return new File(getBaseDir(l.location), filter.toString()); } - private static class LogFileDetail { + + /** + * Get the real task-log file-path + * + * @param location Location of the log-file. This should point to an + * attempt-directory. 
+ * @param filter + * @return + * @throws IOException + */ + static String getRealTaskLogFilePath(String location, LogName filter) + throws IOException { + return FileUtil.makeShellPath(new File(getBaseDir(location), + filter.toString())); + } + + static class LogFileDetail { final static String LOCATION = "LOG_DIR:"; String location; long start; long length; } - - private static LogFileDetail getTaskLogFileDetail(TaskAttemptID taskid, - LogName filter) throws IOException { - return getLogFileDetail(taskid, filter, false); - } - - private static LogFileDetail getLogFileDetail(TaskAttemptID taskid, - LogName filter, - boolean isCleanup) - throws IOException { + + static Map<LogName, LogFileDetail> getAllLogsFileDetails( + TaskAttemptID taskid, boolean isCleanup) throws IOException { + + Map<LogName, LogFileDetail> allLogsFileDetails = + new HashMap<LogName, LogFileDetail>(); + File indexFile = getIndexFile(taskid.toString(), isCleanup); BufferedReader fis = new BufferedReader(new java.io.FileReader(indexFile)); //the format of the index file is //LOG_DIR: <the dir where the task logs are really stored> //stdout:<start-offset in the stdout file> <length> //stderr:<start-offset in the stderr file> <length> //syslog:<start-offset in the syslog file> <length> - LogFileDetail l = new LogFileDetail(); String str = fis.readLine(); if (str == null) { //the file doesn't have anything throw new IOException ("Index file for the log of " + taskid+" doesn't exist."); } - l.location = str.substring(str.indexOf(LogFileDetail.LOCATION)+ + String loc = str.substring(str.indexOf(LogFileDetail.LOCATION)+ LogFileDetail.LOCATION.length()); //special cases are the debugout and profile.out files. 
They are guaranteed //to be associated with each task attempt since jvm reuse is disabled //when profiling/debugging is enabled - if (filter.equals(LogName.DEBUGOUT) || filter.equals(LogName.PROFILE)) { + for (LogName filter : new LogName[] { LogName.DEBUGOUT, LogName.PROFILE }) { + LogFileDetail l = new LogFileDetail(); + l.location = loc; l.length = new File(getBaseDir(l.location), filter.toString()).length(); l.start = 0; - fis.close(); - return l; + allLogsFileDetails.put(filter, l); } str = fis.readLine(); while (str != null) { - //look for the exact line containing the logname - if (str.contains(filter.toString())) { - str = str.substring(filter.toString().length()+1); - String[] startAndLen = str.split(" "); - l.start = Long.parseLong(startAndLen[0]); - l.length = Long.parseLong(startAndLen[1]); - break; - } + LogFileDetail l = new LogFileDetail(); + l.location = loc; + int idx = str.indexOf(':'); + LogName filter = LogName.valueOf(str.substring(0, idx).toUpperCase()); + str = str.substring(idx + 1); + String[] startAndLen = str.split(" "); + l.start = Long.parseLong(startAndLen[0]); + l.length = Long.parseLong(startAndLen[1]); + allLogsFileDetails.put(filter, l); str = fis.readLine(); } fis.close(); - return l; + return allLogsFileDetails; } private static File getTmpIndexFile(String taskid) { return new File(getBaseDir(taskid), "log.tmp"); } public static File getIndexFile(String taskid) { return getIndexFile(taskid, false); } public static File getIndexFile(String taskid, boolean isCleanup) { if (isCleanup) { return new File(getBaseDir(taskid), "log.index.cleanup"); } else { return new File(getBaseDir(taskid), "log.index"); } } - private static File getBaseDir(String taskid) { + static File getBaseDir(String taskid) { return new File(LOG_DIR, taskid); } - private static long prevOutLength; - private static long prevErrLength; - private static long prevLogLength; + + static final List<LogName> LOGS_TRACKED_BY_INDEX_FILES = + Arrays.asList(LogName.STDOUT, 
LogName.STDERR, LogName.SYSLOG); + + private static TaskAttemptID currentTaskid; + + /** + * Map to store previous and current lengths. + */ + private static Map<LogName, Long[]> logLengths = + new HashMap<LogName, Long[]>(); + static { + for (LogName logName : LOGS_TRACKED_BY_INDEX_FILES) { + logLengths.put(logName, new Long[] { Long.valueOf(0L), + Long.valueOf(0L) }); + } + } - private static void writeToIndexFile(TaskAttemptID firstTaskid, - boolean isCleanup) - throws IOException { + static void writeToIndexFile(TaskAttemptID firstTaskid, + TaskAttemptID currentTaskid, boolean isCleanup, + Map<LogName, Long[]> lengths) throws IOException { // To ensure atomicity of updates to index file, write to temporary index // file first and then rename. File tmpIndexFile = getTmpIndexFile(currentTaskid.toString()); BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(tmpIndexFile,false)); DataOutputStream dos = new DataOutputStream(bos); //the format of the index file is //LOG_DIR: <the dir where the task logs are really stored> //STDOUT: <start-offset in the stdout file> <length> //STDERR: <start-offset in the stderr file> <length> //SYSLOG: <start-offset in the syslog file> <length> - dos.writeBytes(LogFileDetail.LOCATION + firstTaskid.toString()+"\n"+ - LogName.STDOUT.toString()+":"); - dos.writeBytes(Long.toString(prevOutLength)+" "); - dos.writeBytes(Long.toString(getTaskLogFile(firstTaskid, LogName.STDOUT) - .length() - prevOutLength)+"\n"+LogName.STDERR+":"); - dos.writeBytes(Long.toString(prevErrLength)+" "); - dos.writeBytes(Long.toString(getTaskLogFile(firstTaskid, LogName.STDERR) - .length() - prevErrLength)+"\n"+LogName.SYSLOG.toString()+":"); - dos.writeBytes(Long.toString(prevLogLength)+" "); - dos.writeBytes(Long.toString(getTaskLogFile(firstTaskid, LogName.SYSLOG) - .length() - prevLogLength)+"\n"); + dos.writeBytes(LogFileDetail.LOCATION + + firstTaskid.toString() + + "\n"); + for (LogName logName : LOGS_TRACKED_BY_INDEX_FILES) { + 
Long[] lens = lengths.get(logName); + dos.writeBytes(logName.toString() + ":" + + lens[0].toString() + " " + + Long.toString(lens[1].longValue() - lens[0].longValue()) + + "\n");} dos.close(); File indexFile = getIndexFile(currentTaskid.toString(), isCleanup); Path indexFilePath = new Path(indexFile.getAbsolutePath()); Path tmpIndexFilePath = new Path(tmpIndexFile.getAbsolutePath()); if (localFS == null) {// set localFS once localFS = FileSystem.getLocal(new Configuration()); } localFS.rename (tmpIndexFilePath, indexFilePath); } - private static void resetPrevLengths(TaskAttemptID firstTaskid) { - prevOutLength = getTaskLogFile(firstTaskid, LogName.STDOUT).length(); - prevErrLength = getTaskLogFile(firstTaskid, LogName.STDERR).length(); - prevLogLength = getTaskLogFile(firstTaskid, LogName.SYSLOG).length(); - } - private volatile static TaskAttemptID currentTaskid = null; public synchronized static void syncLogs(TaskAttemptID firstTaskid, TaskAttemptID taskid) throws IOException { syncLogs(firstTaskid, taskid, false); } - + @SuppressWarnings("unchecked") public synchronized static void syncLogs(TaskAttemptID firstTaskid, TaskAttemptID taskid, boolean isCleanup) throws IOException { System.out.flush(); System.err.flush(); Enumeration<Logger> allLoggers = LogManager.getCurrentLoggers(); while (allLoggers.hasMoreElements()) { Logger l = allLoggers.nextElement(); Enumeration<Appender> allAppenders = l.getAllAppenders(); while (allAppenders.hasMoreElements()) { Appender a = allAppenders.nextElement(); if (a instanceof TaskLogAppender) { ((TaskLogAppender)a).flush(); } } } + // set start and end + for (LogName logName : LOGS_TRACKED_BY_INDEX_FILES) { + if (currentTaskid != taskid) { + // Set start = current-end + logLengths.get(logName)[0] = + Long.valueOf(getTaskLogFile(firstTaskid, logName).length()); + } + // Set current end + logLengths.get(logName)[1] = + Long.valueOf(getTaskLogFile(firstTaskid, logName).length()); + } if (currentTaskid != taskid) { + if 
(currentTaskid != null) { + LOG.info("Starting logging for a new task " + taskid + + " in the same JVM as that of the first task " + firstTaskid); + } currentTaskid = taskid; - resetPrevLengths(firstTaskid); } - writeToIndexFile(firstTaskid, isCleanup); + writeToIndexFile(firstTaskid, taskid, isCleanup, logLengths); } /** * The filter for userlogs. */ public static enum LogName { /** Log on the stdout of the task. */ STDOUT ("stdout"), /** Log on the stderr of the task. */ STDERR ("stderr"), /** Log on the map-reduce system logs of the task. */ SYSLOG ("syslog"), /** The java profiler information. */ PROFILE ("profile.out"), /** Log the debug script's stdout */ DEBUGOUT ("debugout"); private String prefix; private LogName(String prefix) { this.prefix = prefix; } @Override public String toString() { return prefix; } } private static class TaskLogsPurgeFilter implements FileFilter { long purgeTimeStamp; TaskLogsPurgeFilter(long purgeTimeStamp) { this.purgeTimeStamp = purgeTimeStamp; } public boolean accept(File file) { LOG.debug("PurgeFilter - file: " + file + ", mtime: " + file.lastModified() + ", purge: " + purgeTimeStamp); return file.lastModified() < purgeTimeStamp; } } + /** * Purge old user logs. 
* * @throws IOException */ public static synchronized void cleanup(int logsRetainHours ) throws IOException { // Purge logs of tasks on this tasktracker if their // mtime has exceeded "mapred.task.log.retain" hours long purgeTimeStamp = System.currentTimeMillis() - (logsRetainHours*60L*60*1000); File[] oldTaskLogs = LOG_DIR.listFiles (new TaskLogsPurgeFilter(purgeTimeStamp)); if (oldTaskLogs != null) { for (int i=0; i < oldTaskLogs.length; ++i) { FileUtil.fullyDelete(oldTaskLogs[i]); } } } static class Reader extends InputStream { private long bytesRemaining; private FileInputStream file; public Reader(TaskAttemptID taskid, LogName kind, long start, long end) throws IOException { this(taskid, kind, start, end, false); } /** * Read a log file from start to end positions. The offsets may be negative, * in which case they are relative to the end of the file. For example, * Reader(taskid, kind, 0, -1) is the entire file and * Reader(taskid, kind, -4197, -1) is the last 4196 bytes. * @param taskid the id of the task to read the log file for * @param kind the kind of log to read * @param start the offset to read from (negative is relative to tail) * @param end the offset to read upto (negative is relative to tail) * @param isCleanup whether the attempt is cleanup attempt or not * @throws IOException */ public Reader(TaskAttemptID taskid, LogName kind, long start, long end, boolean isCleanup) throws IOException { // find the right log file - LogFileDetail fileDetail = getLogFileDetail(taskid, kind, isCleanup); + Map<LogName, LogFileDetail> allFilesDetails = + getAllLogsFileDetails(taskid, isCleanup); + LogFileDetail fileDetail = allFilesDetails.get(kind); // calculate the start and stop long size = fileDetail.length; if (start < 0) { start += size + 1; } if (end < 0) { end += size + 1; } start = Math.max(0, Math.min(start, size)); end = Math.max(0, Math.min(end, size)); start += fileDetail.start; end += fileDetail.start; bytesRemaining = end - start; file = new 
FileInputStream(new File(getBaseDir(fileDetail.location), kind.toString())); // skip upto start long pos = 0; while (pos < start) { long result = file.skip(start - pos); if (result < 0) { bytesRemaining = 0; break; } pos += result; } } @Override public int read() throws IOException { int result = -1; if (bytesRemaining > 0) { bytesRemaining -= 1; result = file.read(); } return result; } @Override public int read(byte[] buffer, int offset, int length) throws IOException { length = (int) Math.min(length, bytesRemaining); int bytes = file.read(buffer, offset, length); if (bytes > 0) { bytesRemaining -= bytes; } return bytes; } @Override public int available() throws IOException { return (int) Math.min(bytesRemaining, file.available()); } @Override public void close() throws IOException { file.close(); } } private static final String bashCommand = "bash"; private static final String tailCommand = "tail"; /** * Get the desired maximum length of task's logs. * @param conf the job to look in * @return the number of bytes to cap the log files at */ public static long getTaskLogLength(JobConf conf) { return conf.getLong("mapred.userlog.limit.kb", 100) * 1024; } /** * Wrap a command in a shell to capture stdout and stderr to files. * If the tailLength is 0, the entire output will be saved. * @param cmd The command and the arguments that should be run * @param stdoutFilename The filename that stdout should be saved to * @param stderrFilename The filename that stderr should be saved to * @param tailLength The length of the tail to be saved. * @return the modified command that should be run */ public static List<String> captureOutAndError(List<String> cmd, File stdoutFilename, File stderrFilename, long tailLength ) throws IOException { return captureOutAndError(null, cmd, stdoutFilename, stderrFilename, tailLength, false); } /** * Wrap a command in a shell to capture stdout and stderr to files. 
* Setup commands such as setting memory limit can be passed which * will be executed before exec. * If the tailLength is 0, the entire output will be saved. * @param setup The setup commands for the execed process. * @param cmd The command and the arguments that should be run * @param stdoutFilename The filename that stdout should be saved to * @param stderrFilename The filename that stderr should be saved to * @param tailLength The length of the tail to be saved. * @return the modified command that should be run */ public static List<String> captureOutAndError(List<String> setup, List<String> cmd, File stdoutFilename, File stderrFilename, long tailLength ) throws IOException { return captureOutAndError(setup, cmd, stdoutFilename, stderrFilename, tailLength, false); } /** * Wrap a command in a shell to capture stdout and stderr to files. * Setup commands such as setting memory limit can be passed which * will be executed before exec. * If the tailLength is 0, the entire output will be saved. * @param setup The setup commands for the execed process. * @param cmd The command and the arguments that should be run * @param stdoutFilename The filename that stdout should be saved to * @param stderrFilename The filename that stderr should be saved to * @param tailLength The length of the tail to be saved. * @deprecated pidFiles are no more used. Instead pid is exported to * env variable JVM_PID. * @return the modified command that should be run */ @Deprecated public static List<String> captureOutAndError(List<String> setup, List<String> cmd, File stdoutFilename, File stderrFilename, long tailLength, String pidFileName ) throws IOException { return captureOutAndError(setup, cmd, stdoutFilename, stderrFilename, tailLength, false, pidFileName); } /** * Wrap a command in a shell to capture stdout and stderr to files. * Setup commands such as setting memory limit can be passed which * will be executed before exec. * If the tailLength is 0, the entire output will be saved. 
* @param setup The setup commands for the execed process. * @param cmd The command and the arguments that should be run * @param stdoutFilename The filename that stdout should be saved to * @param stderrFilename The filename that stderr should be saved to * @param tailLength The length of the tail to be saved. * @param useSetsid Should setsid be used in the command or not. * @deprecated pidFiles are no more used. Instead pid is exported to * env variable JVM_PID. * @return the modified command that should be run * */ @Deprecated public static List<String> captureOutAndError(List<String> setup, List<String> cmd, File stdoutFilename, File stderrFilename, long tailLength, boolean useSetsid, String pidFileName ) throws IOException { return captureOutAndError(setup,cmd, stdoutFilename, stderrFilename, tailLength, useSetsid); } /** * Wrap a command in a shell to capture stdout and stderr to files. * Setup commands such as setting memory limit can be passed which * will be executed before exec. * If the tailLength is 0, the entire output will be saved. * @param setup The setup commands for the execed process. * @param cmd The command and the arguments that should be run * @param stdoutFilename The filename that stdout should be saved to * @param stderrFilename The filename that stderr should be saved to * @param tailLength The length of the tail to be saved. * @param useSetsid Should setsid be used in the command or not. 
* @return the modified command that should be run */ public static List<String> captureOutAndError(List<String> setup, List<String> cmd, File stdoutFilename, File stderrFilename, long tailLength, boolean useSetsid ) throws IOException { List<String> result = new ArrayList<String>(3); result.add(bashCommand); result.add("-c"); String mergedCmd = buildCommandLine(setup, cmd, stdoutFilename, stderrFilename, tailLength, useSetsid); result.add(mergedCmd.toString()); return result; } static String buildCommandLine(List<String> setup, List<String> cmd, File stdoutFilename, File stderrFilename, long tailLength, boolean useSetSid) throws IOException { String stdout = FileUtil.makeShellPath(stdoutFilename); String stderr = FileUtil.makeShellPath(stderrFilename); StringBuffer mergedCmd = new StringBuffer(); if (!Shell.WINDOWS) { mergedCmd.append(" export JVM_PID=`echo $$` ; "); } if (setup != null && setup.size() > 0) { mergedCmd.append(addCommand(setup, false)); mergedCmd.append(";"); } if (tailLength > 0) { mergedCmd.append("("); } else if (ProcessTree.isSetsidAvailable && useSetSid && !Shell.WINDOWS) { mergedCmd.append("exec setsid "); } else { mergedCmd.append("exec "); } mergedCmd.append(addCommand(cmd, true)); mergedCmd.append(" < /dev/null "); if (tailLength > 0) { mergedCmd.append(" | "); mergedCmd.append(tailCommand); mergedCmd.append(" -c "); mergedCmd.append(tailLength); mergedCmd.append(" >> "); mergedCmd.append(stdout); mergedCmd.append(" ; exit $PIPESTATUS ) 2>&1 | "); mergedCmd.append(tailCommand); mergedCmd.append(" -c "); mergedCmd.append(tailLength); mergedCmd.append(" >> "); mergedCmd.append(stderr); mergedCmd.append(" ; exit $PIPESTATUS"); } else { mergedCmd.append(" 1>> "); mergedCmd.append(stdout); mergedCmd.append(" 2>> "); mergedCmd.append(stderr); } return mergedCmd.toString(); } /** * Add quotes to each of the command strings and * return as a single string * @param cmd The command to be quoted * @param isExecutable makes shell path if the first * 
argument is executable * @return returns The quoted string. * @throws IOException */ public static String addCommand(List<String> cmd, boolean isExecutable) throws IOException { StringBuffer command = new StringBuffer(); for(String s: cmd) { command.append('\''); if (isExecutable) { // the executable name needs to be expressed as a shell path for the // shell to find it. command.append(FileUtil.makeShellPath(new File(s))); isExecutable = false; } else { command.append(s); } command.append('\''); command.append(" "); } return command.toString(); } /** * Wrap a command in a shell to capture debug script's * stdout and stderr to debugout. * @param cmd The command and the arguments that should be run * @param debugoutFilename The filename that stdout and stderr * should be saved to. * @return the modified command that should be run * @throws IOException */ public static List<String> captureDebugOut(List<String> cmd, File debugoutFilename ) throws IOException { String debugout = FileUtil.makeShellPath(debugoutFilename); List<String> result = new ArrayList<String>(3); result.add(bashCommand); result.add("-c"); StringBuffer mergedCmd = new StringBuffer(); mergedCmd.append("exec "); boolean isExecutable = true; for(String s: cmd) { if (isExecutable) { // the executable name needs to be expressed as a shell path for the // shell to find it. 
mergedCmd.append(FileUtil.makeShellPath(new File(s))); isExecutable = false; } else { mergedCmd.append(s); } mergedCmd.append(" "); } mergedCmd.append(" < /dev/null "); mergedCmd.append(" >"); mergedCmd.append(debugout); mergedCmd.append(" 2>&1 "); result.add(mergedCmd.toString()); return result; } public static File getUserLogDir() { return LOG_DIR; } } // TaskLog diff --git a/src/mapred/org/apache/hadoop/mapred/TaskLogsMonitor.java b/src/mapred/org/apache/hadoop/mapred/TaskLogsMonitor.java new file mode 100644 index 0000000..cf57b9d --- /dev/null +++ b/src/mapred/org/apache/hadoop/mapred/TaskLogsMonitor.java @@ -0,0 +1,449 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.mapred.TaskLog; +import org.apache.hadoop.mapred.TaskLog.LogName; +import org.apache.hadoop.mapred.TaskLog.LogFileDetail; +import org.apache.hadoop.util.StringUtils; + +class TaskLogsMonitor extends Thread { + static final Log LOG = LogFactory.getLog(TaskLogsMonitor.class); + + long mapRetainSize, reduceRetainSize; + + public TaskLogsMonitor(long mapRetSize, long reduceRetSize) { + mapRetainSize = mapRetSize; + reduceRetainSize = reduceRetSize; + LOG.info("Starting logs' monitor with mapRetainSize=" + mapRetainSize + + " and reduceRetainSize=" + reduceRetSize); + } + + /** + * The list of tasks that have finished and so need their logs to be + * truncated. + */ + private Map<TaskAttemptID, PerJVMInfo> finishedJVMs = + new HashMap<TaskAttemptID, PerJVMInfo>(); + + private static final int DEFAULT_BUFFER_SIZE = 4 * 1024; + + static final int MINIMUM_RETAIN_SIZE_FOR_TRUNCATION = 0; + + private static class PerJVMInfo { + + List<Task> allAttempts; + + public PerJVMInfo(List<Task> allAtmpts) { + this.allAttempts = allAtmpts; + } + } + + /** + * Process(JVM/debug script) has finished. Asynchronously truncate the logs of + * all the corresponding tasks to the configured limit. In case of JVM, both + * the firstAttempt as well as the list of all attempts that ran in the same + * JVM have to be passed. For debug script, the (only) attempt itself should + * be passed as both the firstAttempt as well as the list of attempts. 
+ * + * @param firstAttempt + * @param isTaskCleanup + */ + void addProcessForLogTruncation(TaskAttemptID firstAttempt, + List<Task> allAttempts) { + LOG.info("Adding the jvm with first-attempt " + firstAttempt + + " for logs' truncation"); + PerJVMInfo lInfo = new PerJVMInfo(allAttempts); + synchronized (finishedJVMs) { + finishedJVMs.put(firstAttempt, lInfo); + finishedJVMs.notify(); + } + } + + /** + * Process the removed task's logs. This involves truncating them to + * retainSize. + */ + void truncateLogs(TaskAttemptID firstAttempt, PerJVMInfo lInfo) { + + // Read the log-file details for all the attempts that ran in this JVM + Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails; + try { + taskLogFileDetails = getAllLogsFileDetails(lInfo.allAttempts); + } catch (IOException e) { + LOG.warn( + "Exception in truncateLogs while getting allLogsFileDetails()." + + " Ignoring the truncation of logs of this process.", e); + return; + } + + Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails = + new HashMap<Task, Map<LogName, LogFileDetail>>(); + + File attemptLogDir = TaskLog.getBaseDir(firstAttempt.toString()); + + FileWriter tmpFileWriter; + FileReader logFileReader; + // Now truncate file by file + logNameLoop: for (LogName logName : LogName.values()) { + + File logFile = TaskLog.getTaskLogFile(firstAttempt, logName); + + // //// Optimization: if no task is over limit, just skip truncation-code + if (logFile.exists() + && !isTruncationNeeded(lInfo, taskLogFileDetails, logName)) { + LOG.debug("Truncation is not needed for " + + logFile.getAbsolutePath()); + continue; + } + // //// End of optimization + + // Truncation is needed for this log-file. Go ahead now. + File tmpFile = new File(attemptLogDir, "truncate.tmp"); + try { + tmpFileWriter = new FileWriter(tmpFile); + } catch (IOException ioe) { + LOG.warn("Cannot open " + tmpFile.getAbsolutePath() + + " for writing truncated log-file " + + logFile.getAbsolutePath() + + ". 
Continuing with other log files. ", ioe); + continue; + } + + try { + logFileReader = new FileReader(logFile); + } catch (FileNotFoundException fe) { + LOG.warn("Cannot open " + logFile.getAbsolutePath() + + " for reading. Continuing with other log files"); + if (!tmpFile.delete()) { + LOG.warn("Cannot delete tmpFile " + tmpFile.getAbsolutePath()); + } + continue; + } + + long newCurrentOffset = 0; + // Process each attempt from the ordered list passed. + for (Task task : lInfo.allAttempts) { + + // Truncate the log files of this task-attempt so that only the last + // retainSize many bytes of this log file is retained and the log + // file is reduced in size saving disk space. + long retainSize = + (task.isMapTask() ? mapRetainSize : reduceRetainSize); + LogFileDetail newLogFileDetail = new LogFileDetail(); + try { + newLogFileDetail = + truncateALogFileOfAnAttempt(task.getTaskID(), + taskLogFileDetails.get(task).get(logName), retainSize, + tmpFileWriter, logFileReader); + } catch (IOException ioe) { + LOG.warn("Cannot truncate the log file " + + logFile.getAbsolutePath() + + ". Caught exception while handling " + task.getTaskID(), + ioe); + // revert back updatedTaskLogFileDetails + revertIndexFileInfo(lInfo, taskLogFileDetails, + updatedTaskLogFileDetails, logName); + if (!tmpFile.delete()) { + LOG.warn("Cannot delete tmpFile " + tmpFile.getAbsolutePath()); + } + continue logNameLoop; + } + + // Track information for updating the index file properly. + // Index files don't track DEBUGOUT and PROFILE logs, so skip'em. + if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) { + if (!updatedTaskLogFileDetails.containsKey(task)) { + updatedTaskLogFileDetails.put(task, + new HashMap<LogName, LogFileDetail>()); + } + // newLogFileDetail already has the location and length set, just + // set the start offset now. 
+ newLogFileDetail.start = newCurrentOffset; + updatedTaskLogFileDetails.get(task).put(logName, newLogFileDetail); + newCurrentOffset += newLogFileDetail.length; + } + } + + try { + tmpFileWriter.close(); + } catch (IOException ioe) { + LOG.warn("Couldn't close the tmp file " + tmpFile.getAbsolutePath() + + ". Deleting it.", ioe); + revertIndexFileInfo(lInfo, taskLogFileDetails, + updatedTaskLogFileDetails, logName); + if (!tmpFile.delete()) { + LOG.warn("Cannot delete tmpFile " + tmpFile.getAbsolutePath()); + } + continue; + } + + if (!tmpFile.renameTo(logFile)) { + // If the tmpFile cannot be renamed revert back + // updatedTaskLogFileDetails to maintain the consistency of the + // original log file + revertIndexFileInfo(lInfo, taskLogFileDetails, + updatedTaskLogFileDetails, logName); + if (!tmpFile.delete()) { + LOG.warn("Cannot delete tmpFile " + tmpFile.getAbsolutePath()); + } + } + } + + // Update the index files + updateIndicesAfterLogTruncation(firstAttempt, updatedTaskLogFileDetails); + } + + /** + * @param lInfo + * @param taskLogFileDetails + * @param updatedTaskLogFileDetails + * @param logName + */ + private void revertIndexFileInfo(PerJVMInfo lInfo, + Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails, + Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails, + LogName logName) { + if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) { + for (Task task : lInfo.allAttempts) { + if (!updatedTaskLogFileDetails.containsKey(task)) { + updatedTaskLogFileDetails.put(task, + new HashMap<LogName, LogFileDetail>()); + } + updatedTaskLogFileDetails.get(task).put(logName, + taskLogFileDetails.get(task).get(logName)); + } + } + } + + /** + * Get the logFileDetails of all the list of attempts passed. 
+ * + * @param lInfo + * @return a map of task to the log-file detail + * @throws IOException + */ + private Map<Task, Map<LogName, LogFileDetail>> getAllLogsFileDetails( + final List<Task> allAttempts) throws IOException { + Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails = + new HashMap<Task, Map<LogName, LogFileDetail>>(); + for (Task task : allAttempts) { + Map<LogName, LogFileDetail> allLogsFileDetails; + allLogsFileDetails = + TaskLog.getAllLogsFileDetails(task.getTaskID(), + task.isTaskCleanupTask()); + taskLogFileDetails.put(task, allLogsFileDetails); + } + return taskLogFileDetails; + } + + /** + * Check if truncation of logs is needed for the given jvmInfo. If all the + * tasks that ran in a JVM are within the log-limits, then truncation is not + * needed. Otherwise it is needed. + * + * @param lInfo + * @param taskLogFileDetails + * @param logName + * @return true if truncation is needed, false otherwise + */ + private boolean isTruncationNeeded(PerJVMInfo lInfo, + Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails, + LogName logName) { + boolean truncationNeeded = false; + LogFileDetail logFileDetail = null; + for (Task task : lInfo.allAttempts) { + long taskRetainSize = + (task.isMapTask() ? mapRetainSize : reduceRetainSize); + Map<LogName, LogFileDetail> allLogsFileDetails = + taskLogFileDetails.get(task); + logFileDetail = allLogsFileDetails.get(logName); + if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION + && logFileDetail.length > taskRetainSize) { + truncationNeeded = true; + break; + } + } + return truncationNeeded; + } + + /** + * Truncate the log file of this task-attempt so that only the last retainSize + * many bytes of each log file is retained and the log file is reduced in size + * saving disk space. 
+ * + * @param taskID Task whose logs need to be truncated + * @param oldLogFileDetail contains the original log details for the attempt + * @param taskRetainSize retain-size + * @param tmpFileWriter New log file to write to. Already opened in append + * mode. + * @param logFileReader Original log file to read from. + * @return + * @throws IOException + */ + private LogFileDetail truncateALogFileOfAnAttempt( + final TaskAttemptID taskID, final LogFileDetail oldLogFileDetail, + final long taskRetainSize, final FileWriter tmpFileWriter, + final FileReader logFileReader) throws IOException { + LogFileDetail newLogFileDetail = new LogFileDetail(); + + // ///////////// Truncate log file /////////////////////// + + // New location of log file is same as the old + newLogFileDetail.location = oldLogFileDetail.location; + if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION + && oldLogFileDetail.length > taskRetainSize) { + LOG.info("Truncating logs for " + taskID + " from " + + oldLogFileDetail.length + "bytes to " + taskRetainSize + + "bytes."); + newLogFileDetail.length = taskRetainSize; + } else { + LOG.info("No truncation needed for " + taskID + " length is " + + oldLogFileDetail.length + " retain size " + taskRetainSize + + "bytes."); + newLogFileDetail.length = oldLogFileDetail.length; + } + long charsSkipped = + logFileReader.skip(oldLogFileDetail.length + - newLogFileDetail.length); + if (charsSkipped != oldLogFileDetail.length - newLogFileDetail.length) { + throw new IOException("Erroneously skipped " + charsSkipped + + " instead of the expected " + + (oldLogFileDetail.length - newLogFileDetail.length)); + } + long alreadyRead = 0; + while (alreadyRead < newLogFileDetail.length) { + char tmpBuf[]; // Temporary buffer to read logs + if (newLogFileDetail.length - alreadyRead >= DEFAULT_BUFFER_SIZE) { + tmpBuf = new char[DEFAULT_BUFFER_SIZE]; + } else { + tmpBuf = new char[(int) (newLogFileDetail.length - alreadyRead)]; + } + int bytesRead = 
logFileReader.read(tmpBuf); + if (bytesRead < 0) { + break; + } else { + alreadyRead += bytesRead; + } + tmpFileWriter.write(tmpBuf); + } + // ////// End of truncating log file /////////////////////// + + return newLogFileDetail; + } + + /** + * Truncation of logs is done. Now sync the index files to reflect the + * truncated sizes. + * + * @param firstAttempt + * @param updatedTaskLogFileDetails + */ + private void updateIndicesAfterLogTruncation(TaskAttemptID firstAttempt, + Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails) { + for (Entry<Task, Map<LogName, LogFileDetail>> entry : + updatedTaskLogFileDetails.entrySet()) { + Task task = entry.getKey(); + Map<LogName, LogFileDetail> logFileDetails = entry.getValue(); + Map<LogName, Long[]> logLengths = new HashMap<LogName, Long[]>(); + // set current and previous lengths + for (LogName logName : TaskLog.LOGS_TRACKED_BY_INDEX_FILES) { + logLengths.put(logName, new Long[] { Long.valueOf(0L), + Long.valueOf(0L) }); + LogFileDetail lfd = logFileDetails.get(logName); + if (lfd != null) { + // Set previous lengths + logLengths.get(logName)[0] = Long.valueOf(lfd.start); + // Set current lengths + logLengths.get(logName)[1] = Long.valueOf(lfd.start + lfd.length); + } + } + try { + TaskLog.writeToIndexFile(firstAttempt, task.getTaskID(), + task.isTaskCleanupTask(), logLengths); + } catch (IOException ioe) { + LOG.warn("Exception in updateIndicesAfterLogTruncation : " + + StringUtils.stringifyException(ioe)); + LOG.warn("Exception encountered while updating index file of task " + + task.getTaskID() + + ". 
Ignoring and continuing with other tasks."); + } + } + } + + /** + * + * @throws IOException + */ + void monitorTaskLogs() throws IOException { + + Map<TaskAttemptID, PerJVMInfo> tasksBeingTruncated = + new HashMap<TaskAttemptID, PerJVMInfo>(); + + // Start monitoring newly added finishedJVMs + synchronized (finishedJVMs) { + tasksBeingTruncated.clear(); + tasksBeingTruncated.putAll(finishedJVMs); + finishedJVMs.clear(); + } + + for (Entry<TaskAttemptID, PerJVMInfo> entry : + tasksBeingTruncated.entrySet()) { + truncateLogs(entry.getKey(), entry.getValue()); + } + } + + @Override + public void run() { + + while (true) { + try { + monitorTaskLogs(); + try { + synchronized (finishedJVMs) { + while (finishedJVMs.isEmpty()) { + finishedJVMs.wait(); + } + } + } catch (InterruptedException e) { + LOG.warn(getName() + " is interrupted. Returning"); + return; + } + } catch (Throwable e) { + LOG.warn(getName() + + " encountered an exception while monitoring : " + + StringUtils.stringifyException(e)); + LOG.info("Ingoring the exception and continuing monitoring."); + } + } + } +} diff --git a/src/mapred/org/apache/hadoop/mapred/TaskTracker.java b/src/mapred/org/apache/hadoop/mapred/TaskTracker.java index 13c7a2a..c6f4fb3 100644 --- a/src/mapred/org/apache/hadoop/mapred/TaskTracker.java +++ b/src/mapred/org/apache/hadoop/mapred/TaskTracker.java @@ -1,2840 +1,2899 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.mapred; +package org.apache.hadoop.mapred; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.OutputStream; import java.io.RandomAccessFile; import java.net.InetSocketAddress; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; import java.util.TreeMap; import java.util.Vector; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; import java.util.regex.Pattern; import javax.servlet.ServletContext; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.filecache.DistributedCache; import org.apache.hadoop.fs.DF; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.LocalDirAllocator; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.http.HttpServer; import 
org.apache.hadoop.io.IntWritable; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.mapred.TaskLog.LogFileDetail; +import org.apache.hadoop.mapred.TaskLog.LogName; import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext; import org.apache.hadoop.mapred.TaskController.TaskControllerPathDeletionContext; import org.apache.hadoop.mapred.TaskStatus.Phase; import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerHealthStatus; import org.apache.hadoop.mapred.pipes.Submitter; import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.metrics.MetricsContext; import org.apache.hadoop.metrics.MetricsException; import org.apache.hadoop.metrics.MetricsRecord; import org.apache.hadoop.metrics.MetricsUtil; import org.apache.hadoop.metrics.Updater; import org.apache.hadoop.net.DNS; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.authorize.ConfiguredPolicy; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; import org.apache.hadoop.util.DiskChecker; import org.apache.hadoop.util.MemoryCalculatorPlugin; import org.apache.hadoop.util.ProcfsBasedProcessTree; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.RunJar; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; /******************************************************* * TaskTracker is a process that starts and tracks MR Tasks * in a networked environment. It contacts the JobTracker * for Task assignments and reporting results. 
* *******************************************************/ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol, Runnable { /** * @deprecated */ @Deprecated static final String MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY = "mapred.tasktracker.vmem.reserved"; /** * @deprecated */ @Deprecated static final String MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY = "mapred.tasktracker.pmem.reserved"; - + + static final String MAP_USERLOG_RETAIN_SIZE = + "mapreduce.cluster.map.userlog.retain-size"; + static final String REDUCE_USERLOG_RETAIN_SIZE = + "mapreduce.cluster.reduce.userlog.retain-size"; + static final long WAIT_FOR_DONE = 3 * 1000; private int httpPort; static enum State {NORMAL, STALE, INTERRUPTED, DENIED} static{ Configuration.addDefaultResource("mapred-default.xml"); Configuration.addDefaultResource("mapred-site.xml"); } public static final Log LOG = LogFactory.getLog(TaskTracker.class); public static final String MR_CLIENTTRACE_FORMAT = "src: %s" + // src IP ", dest: %s" + // dst IP ", bytes: %s" + // byte count ", op: %s" + // operation ", cliID: %s" + // task id ", duration: %s"; // duration public static final Log ClientTraceLog = LogFactory.getLog(TaskTracker.class.getName() + ".clienttrace"); volatile boolean running = true; private LocalDirAllocator localDirAllocator; String taskTrackerName; String localHostname; InetSocketAddress jobTrackAddr; InetSocketAddress taskReportAddress; Server taskReportServer = null; InterTrackerProtocol jobClient; // last heartbeat response recieved short heartbeatResponseId = -1; static final String TASK_CLEANUP_SUFFIX = ".cleanup"; /* * This is the last 'status' report sent by this tracker to the JobTracker. * * If the rpc call succeeds, this 'status' is cleared-out by this tracker; * indicating that a 'fresh' status report be generated; in the event the * rpc calls fails for whatever reason, the previous status report is sent * again. 
*/ TaskTrackerStatus status = null; // The system-directory on HDFS where job files are stored Path systemDirectory = null; // The filesystem where job files are stored FileSystem systemFS = null; private final HttpServer server; volatile boolean shuttingDown = false; Map<TaskAttemptID, TaskInProgress> tasks = new HashMap<TaskAttemptID, TaskInProgress>(); /** * Map from taskId -> TaskInProgress. */ Map<TaskAttemptID, TaskInProgress> runningTasks = null; Map<JobID, RunningJob> runningJobs = null; volatile int mapTotal = 0; volatile int reduceTotal = 0; boolean justStarted = true; boolean justInited = true; // Mark reduce tasks that are shuffling to rollback their events index Set<TaskAttemptID> shouldReset = new HashSet<TaskAttemptID>(); //dir -> DF Map<String, DF> localDirsDf = new HashMap<String, DF>(); long minSpaceStart = 0; //must have this much space free to start new tasks boolean acceptNewTasks = true; long minSpaceKill = 0; //if we run under this limit, kill one task //and make sure we never receive any new jobs //until all the old tasks have been cleaned up. 
//this is if a machine is so full it's only good //for serving map output to the other nodes static Random r = new Random(); private static final String SUBDIR = "taskTracker"; private static final String CACHEDIR = "archive"; private static final String JOBCACHE = "jobcache"; private static final String OUTPUT = "output"; private JobConf originalConf; private JobConf fConf; private int maxMapSlots; private int maxReduceSlots; private int failures; private FileSystem localFs; // Performance-related config knob to send an out-of-band heartbeat // on task completion static final String TT_OUTOFBAND_HEARBEAT = "mapreduce.tasktracker.outofband.heartbeat"; private volatile boolean oobHeartbeatOnTaskCompletion; // Track number of completed tasks to send an out-of-band heartbeat private IntWritable finishedCount = new IntWritable(0); private MapEventsFetcherThread mapEventsFetcher; int workerThreads; CleanupQueue directoryCleanupThread; volatile JvmManager jvmManager; private TaskMemoryManagerThread taskMemoryManager; private boolean taskMemoryManagerEnabled = true; private long totalVirtualMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; private long totalPhysicalMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; private long mapSlotMemorySizeOnTT = JobConf.DISABLED_MEMORY_LIMIT; private long reduceSlotSizeMemoryOnTT = JobConf.DISABLED_MEMORY_LIMIT; private long totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT; + private TaskLogsMonitor taskLogsMonitor; + static final String MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY = "mapred.tasktracker.memory_calculator_plugin"; /** * the minimum interval between jobtracker polls */ private volatile int heartbeatInterval = HEARTBEAT_INTERVAL_MIN; /** * Number of maptask completion events locations to poll for at one time */ private int probe_sample_size = 500; private IndexCache indexCache; /** * Handle to the specific instance of the {@link TaskController} class */ private TaskController taskController; /** * Handle to the 
specific instance of the {@link NodeHealthCheckerService} */ private NodeHealthCheckerService healthChecker; /* * A list of commitTaskActions for whom commit response has been received */ private List<TaskAttemptID> commitResponses = Collections.synchronizedList(new ArrayList<TaskAttemptID>()); private ShuffleServerMetrics shuffleServerMetrics; /** This class contains the methods that should be used for metrics-reporting * the specific metrics for shuffle. The TaskTracker is actually a server for * the shuffle and hence the name ShuffleServerMetrics. */ private class ShuffleServerMetrics implements Updater { private MetricsRecord shuffleMetricsRecord = null; private int serverHandlerBusy = 0; private long outputBytes = 0; private int failedOutputs = 0; private int successOutputs = 0; ShuffleServerMetrics(JobConf conf) { MetricsContext context = MetricsUtil.getContext("mapred"); shuffleMetricsRecord = MetricsUtil.createRecord(context, "shuffleOutput"); this.shuffleMetricsRecord.setTag("sessionId", conf.getSessionId()); context.registerUpdater(this); } synchronized void serverHandlerBusy() { ++serverHandlerBusy; } synchronized void serverHandlerFree() { --serverHandlerBusy; } synchronized void outputBytes(long bytes) { outputBytes += bytes; } synchronized void failedOutput() { ++failedOutputs; } synchronized void successOutput() { ++successOutputs; } public void doUpdates(MetricsContext unused) { synchronized (this) { if (workerThreads != 0) { shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent", 100*((float)serverHandlerBusy/workerThreads)); } else { shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent", 0); } shuffleMetricsRecord.incrMetric("shuffle_output_bytes", outputBytes); shuffleMetricsRecord.incrMetric("shuffle_failed_outputs", failedOutputs); shuffleMetricsRecord.incrMetric("shuffle_success_outputs", successOutputs); outputBytes = 0; failedOutputs = 0; successOutputs = 0; } shuffleMetricsRecord.update(); } } private 
TaskTrackerInstrumentation myInstrumentation = null; public TaskTrackerInstrumentation getTaskTrackerInstrumentation() { return myInstrumentation; } /** * A list of tips that should be cleaned up. */ private BlockingQueue<TaskTrackerAction> tasksToCleanup = new LinkedBlockingQueue<TaskTrackerAction>(); /** * A daemon-thread that pulls tips off the list of things to cleanup. */ private Thread taskCleanupThread = new Thread(new Runnable() { public void run() { while (true) { try { TaskTrackerAction action = tasksToCleanup.take(); if (action instanceof KillJobAction) { purgeJob((KillJobAction) action); } else if (action instanceof KillTaskAction) { TaskInProgress tip; KillTaskAction killAction = (KillTaskAction) action; synchronized (TaskTracker.this) { tip = tasks.get(killAction.getTaskID()); } LOG.info("Received KillTaskAction for task: " + killAction.getTaskID()); purgeTask(tip, false); } else { LOG.error("Non-delete action given to cleanup thread: " + action); } } catch (Throwable except) { LOG.warn(StringUtils.stringifyException(except)); } } } }, "taskCleanup"); TaskController getTaskController() { return taskController; } private RunningJob addTaskToJob(JobID jobId, TaskInProgress tip) { synchronized (runningJobs) { RunningJob rJob = null; if (!runningJobs.containsKey(jobId)) { rJob = new RunningJob(jobId); rJob.localized = false; rJob.tasks = new HashSet<TaskInProgress>(); runningJobs.put(jobId, rJob); } else { rJob = runningJobs.get(jobId); } synchronized (rJob) { rJob.tasks.add(tip); } runningJobs.notify(); //notify the fetcher thread return rJob; } } private void removeTaskFromJob(JobID jobId, TaskInProgress tip) { synchronized (runningJobs) { RunningJob rjob = runningJobs.get(jobId); if (rjob == null) { LOG.warn("Unknown job " + jobId + " being deleted."); } else { synchronized (rjob) { rjob.tasks.remove(tip); } } } } + TaskLogsMonitor getTaskLogsMonitor() { + return this.taskLogsMonitor; + } + + void setTaskLogsMonitor(TaskLogsMonitor t) { + 
this.taskLogsMonitor = t; + } + static String getCacheSubdir() { return TaskTracker.SUBDIR + Path.SEPARATOR + TaskTracker.CACHEDIR; } static String getJobCacheSubdir() { return TaskTracker.SUBDIR + Path.SEPARATOR + TaskTracker.JOBCACHE; } static String getLocalJobDir(String jobid) { return getJobCacheSubdir() + Path.SEPARATOR + jobid; } static String getLocalTaskDir(String jobid, String taskid) { return getLocalTaskDir(jobid, taskid, false) ; } static String getIntermediateOutputDir(String jobid, String taskid) { return getLocalTaskDir(jobid, taskid) + Path.SEPARATOR + TaskTracker.OUTPUT ; } static String getLocalTaskDir(String jobid, String taskid, boolean isCleanupAttempt) { String taskDir = getLocalJobDir(jobid) + Path.SEPARATOR + taskid; if (isCleanupAttempt) { taskDir = taskDir + TASK_CLEANUP_SUFFIX; } return taskDir; } String getPid(TaskAttemptID tid) { TaskInProgress tip = tasks.get(tid); if (tip != null) { return jvmManager.getPid(tip.getTaskRunner()); } return null; } public long getProtocolVersion(String protocol, long clientVersion) throws IOException { if (protocol.equals(TaskUmbilicalProtocol.class.getName())) { return TaskUmbilicalProtocol.versionID; } else { throw new IOException("Unknown protocol for task tracker: " + protocol); } } /** * Do the real constructor work here. It's in a separate method * so we can call it again and "recycle" the object after calling * close(). 
*/ synchronized void initialize() throws IOException { // use configured nameserver & interface to get local hostname this.fConf = new JobConf(originalConf); localFs = FileSystem.getLocal(fConf); if (fConf.get("slave.host.name") != null) { this.localHostname = fConf.get("slave.host.name"); } if (localHostname == null) { this.localHostname = DNS.getDefaultHost (fConf.get("mapred.tasktracker.dns.interface","default"), fConf.get("mapred.tasktracker.dns.nameserver","default")); } //check local disk checkLocalDirs(this.fConf.getLocalDirs()); fConf.deleteLocalFiles(SUBDIR); // Clear out state tables this.tasks.clear(); this.runningTasks = new LinkedHashMap<TaskAttemptID, TaskInProgress>(); this.runningJobs = new TreeMap<JobID, RunningJob>(); this.mapTotal = 0; this.reduceTotal = 0; this.acceptNewTasks = true; this.status = null; this.minSpaceStart = this.fConf.getLong("mapred.local.dir.minspacestart", 0L); this.minSpaceKill = this.fConf.getLong("mapred.local.dir.minspacekill", 0L); //tweak the probe sample size (make it a function of numCopiers) probe_sample_size = this.fConf.getInt("mapred.tasktracker.events.batchsize", 500); Class<? extends TaskTrackerInstrumentation> metricsInst = getInstrumentationClass(fConf); try { java.lang.reflect.Constructor<? extends TaskTrackerInstrumentation> c = metricsInst.getConstructor(new Class[] {TaskTracker.class} ); this.myInstrumentation = c.newInstance(this); } catch(Exception e) { //Reflection can throw lots of exceptions -- handle them all by //falling back on the default. 
LOG.error("failed to initialize taskTracker metrics", e); this.myInstrumentation = new TaskTrackerMetricsInst(this); } // bind address String address = NetUtils.getServerAddress(fConf, "mapred.task.tracker.report.bindAddress", "mapred.task.tracker.report.port", "mapred.task.tracker.report.address"); InetSocketAddress socAddr = NetUtils.createSocketAddr(address); String bindAddress = socAddr.getHostName(); int tmpPort = socAddr.getPort(); this.jvmManager = new JvmManager(this); // Set service-level authorization security policy if (this.fConf.getBoolean( ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) { PolicyProvider policyProvider = (PolicyProvider)(ReflectionUtils.newInstance( this.fConf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, MapReducePolicyProvider.class, PolicyProvider.class), this.fConf)); SecurityUtil.setPolicy(new ConfiguredPolicy(this.fConf, policyProvider)); } // RPC initialization int max = maxMapSlots > maxReduceSlots ? maxMapSlots : maxReduceSlots; //set the num handlers to max*2 since canCommit may wait for the duration //of a heartbeat RPC this.taskReportServer = RPC.getServer(this, bindAddress, tmpPort, 2 * max, false, this.fConf); this.taskReportServer.start(); // get the assigned address this.taskReportAddress = taskReportServer.getListenerAddress(); this.fConf.set("mapred.task.tracker.report.address", taskReportAddress.getHostName() + ":" + taskReportAddress.getPort()); LOG.info("TaskTracker up at: " + this.taskReportAddress); this.taskTrackerName = "tracker_" + localHostname + ":" + taskReportAddress; LOG.info("Starting tracker " + taskTrackerName); // Clear out temporary files that might be lying around DistributedCache.purgeCache(this.fConf); cleanupStorage(); this.jobClient = (InterTrackerProtocol) RPC.waitForProxy(InterTrackerProtocol.class, InterTrackerProtocol.versionID, jobTrackAddr, this.fConf); this.justInited = true; this.running = true; // start the thread that will fetch map task completion events 
this.mapEventsFetcher = new MapEventsFetcherThread(); mapEventsFetcher.setDaemon(true); mapEventsFetcher.setName( "Map-events fetcher for all reduce tasks " + "on " + taskTrackerName); mapEventsFetcher.start(); initializeMemoryManagement(); + setTaskLogsMonitor(new TaskLogsMonitor(getMapUserLogRetainSize(), + getReduceUserLogRetainSize())); + getTaskLogsMonitor().start(); + this.indexCache = new IndexCache(this.fConf); mapLauncher = new TaskLauncher(TaskType.MAP, maxMapSlots); reduceLauncher = new TaskLauncher(TaskType.REDUCE, maxReduceSlots); mapLauncher.start(); reduceLauncher.start(); Class<? extends TaskController> taskControllerClass = fConf.getClass("mapred.task.tracker.task-controller", DefaultTaskController.class, TaskController.class); taskController = (TaskController)ReflectionUtils.newInstance( taskControllerClass, fConf); //setup and create jobcache directory with appropriate permissions taskController.setup(); //Start up node health checker service. if (shouldStartHealthMonitor(this.fConf)) { startHealthMonitor(this.fConf); } oobHeartbeatOnTaskCompletion = fConf.getBoolean(TT_OUTOFBAND_HEARBEAT, false); } public static Class<? extends TaskTrackerInstrumentation> getInstrumentationClass( Configuration conf) { return conf.getClass("mapred.tasktracker.instrumentation", TaskTrackerMetricsInst.class, TaskTrackerInstrumentation.class); } public static void setInstrumentationClass( Configuration conf, Class<? extends TaskTrackerInstrumentation> t) { conf.setClass("mapred.tasktracker.instrumentation", t, TaskTrackerInstrumentation.class); } /** * Removes all contents of temporary storage. Called upon * startup, to remove any leftovers from previous run. */ public void cleanupStorage() throws IOException { this.fConf.deleteLocalFiles(); } // Object on wait which MapEventsFetcherThread is going to wait. 
private Object waitingOn = new Object(); private class MapEventsFetcherThread extends Thread { private List <FetchStatus> reducesInShuffle() { List <FetchStatus> fList = new ArrayList<FetchStatus>(); for (Map.Entry <JobID, RunningJob> item : runningJobs.entrySet()) { RunningJob rjob = item.getValue(); JobID jobId = item.getKey(); FetchStatus f; synchronized (rjob) { f = rjob.getFetchStatus(); for (TaskInProgress tip : rjob.tasks) { Task task = tip.getTask(); if (!task.isMapTask()) { if (((ReduceTask)task).getPhase() == TaskStatus.Phase.SHUFFLE) { if (rjob.getFetchStatus() == null) { //this is a new job; we start fetching its map events f = new FetchStatus(jobId, ((ReduceTask)task).getNumMaps()); rjob.setFetchStatus(f); } f = rjob.getFetchStatus(); fList.add(f); break; //no need to check any more tasks belonging to this } } } } } //at this point, we have information about for which of //the running jobs do we need to query the jobtracker for map //outputs (actually map events). return fList; } @Override public void run() { LOG.info("Starting thread: " + this.getName()); while (running) { try { List <FetchStatus> fList = null; synchronized (runningJobs) { while (((fList = reducesInShuffle()).size()) == 0) { try { runningJobs.wait(); } catch (InterruptedException e) { LOG.info("Shutting down: " + this.getName()); return; } } } // now fetch all the map task events for all the reduce tasks // possibly belonging to different jobs boolean fetchAgain = false; //flag signifying whether we want to fetch //immediately again. 
for (FetchStatus f : fList) { long currentTime = System.currentTimeMillis(); try { //the method below will return true when we have not //fetched all available events yet if (f.fetchMapCompletionEvents(currentTime)) { fetchAgain = true; } } catch (Exception e) { LOG.warn( "Ignoring exception that fetch for map completion" + " events threw for " + f.jobId + " threw: " + StringUtils.stringifyException(e)); } if (!running) { break; } } synchronized (waitingOn) { try { if (!fetchAgain) { waitingOn.wait(heartbeatInterval); } } catch (InterruptedException ie) { LOG.info("Shutting down: " + this.getName()); return; } } } catch (Exception e) { LOG.info("Ignoring exception " + e.getMessage()); } } } } private class FetchStatus { /** The next event ID that we will start querying the JobTracker from*/ private IntWritable fromEventId; /** This is the cache of map events for a given job */ private List<TaskCompletionEvent> allMapEvents; /** What jobid this fetchstatus object is for*/ private JobID jobId; private long lastFetchTime; private boolean fetchAgain; public FetchStatus(JobID jobId, int numMaps) { this.fromEventId = new IntWritable(0); this.jobId = jobId; this.allMapEvents = new ArrayList<TaskCompletionEvent>(numMaps); } /** * Reset the events obtained so far. */ public void reset() { // Note that the sync is first on fromEventId and then on allMapEvents synchronized (fromEventId) { synchronized (allMapEvents) { fromEventId.set(0); // set the new index for TCE allMapEvents.clear(); } } } public TaskCompletionEvent[] getMapEvents(int fromId, int max) { TaskCompletionEvent[] mapEvents = TaskCompletionEvent.EMPTY_ARRAY; boolean notifyFetcher = false; synchronized (allMapEvents) { if (allMapEvents.size() > fromId) { int actualMax = Math.min(max, (allMapEvents.size() - fromId)); List <TaskCompletionEvent> eventSublist = allMapEvents.subList(fromId, actualMax + fromId); mapEvents = eventSublist.toArray(mapEvents); } else { // Notify Fetcher thread. 
notifyFetcher = true; } } if (notifyFetcher) { synchronized (waitingOn) { waitingOn.notify(); } } return mapEvents; } public boolean fetchMapCompletionEvents(long currTime) throws IOException { if (!fetchAgain && (currTime - lastFetchTime) < heartbeatInterval) { return false; } int currFromEventId = 0; synchronized (fromEventId) { currFromEventId = fromEventId.get(); List <TaskCompletionEvent> recentMapEvents = queryJobTracker(fromEventId, jobId, jobClient); synchronized (allMapEvents) { allMapEvents.addAll(recentMapEvents); } lastFetchTime = currTime; if (fromEventId.get() - currFromEventId >= probe_sample_size) { //return true when we have fetched the full payload, indicating //that we should fetch again immediately (there might be more to //fetch fetchAgain = true; return true; } } fetchAgain = false; return false; } } private static LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir"); // intialize the job directory private void localizeJob(TaskInProgress tip) throws IOException { Path localJarFile = null; Task t = tip.getTask(); JobID jobId = t.getJobID(); Path jobFile = new Path(t.getJobFile()); // Get sizes of JobFile and JarFile // sizes are -1 if they are not present. FileStatus status = null; long jobFileSize = -1; try { status = systemFS.getFileStatus(jobFile); jobFileSize = status.getLen(); } catch(FileNotFoundException fe) { jobFileSize = -1; } Path localJobFile = lDirAlloc.getLocalPathForWrite( getLocalJobDir(jobId.toString()) + Path.SEPARATOR + "job.xml", jobFileSize, fConf); RunningJob rjob = addTaskToJob(jobId, tip); synchronized (rjob) { if (!rjob.localized) { FileSystem localFs = FileSystem.getLocal(fConf); // this will happen on a partial execution of localizeJob. 
// Sometimes the job.xml gets copied but copying job.jar // might throw out an exception // we should clean up and then try again Path jobDir = localJobFile.getParent(); if (localFs.exists(jobDir)){ localFs.delete(jobDir, true); boolean b = localFs.mkdirs(jobDir); if (!b) throw new IOException("Not able to create job directory " + jobDir.toString()); } systemFS.copyToLocalFile(jobFile, localJobFile); JobConf localJobConf = new JobConf(localJobFile); // create the 'work' directory // job-specific shared directory for use as scratch space Path workDir = lDirAlloc.getLocalPathForWrite( (getLocalJobDir(jobId.toString()) + Path.SEPARATOR + "work"), fConf); if (!localFs.mkdirs(workDir)) { throw new IOException("Mkdirs failed to create " + workDir.toString()); } System.setProperty("job.local.dir", workDir.toString()); localJobConf.set("job.local.dir", workDir.toString()); // copy Jar file to the local FS and unjar it. String jarFile = localJobConf.getJar(); long jarFileSize = -1; if (jarFile != null) { Path jarFilePath = new Path(jarFile); try { status = systemFS.getFileStatus(jarFilePath); jarFileSize = status.getLen(); } catch(FileNotFoundException fe) { jarFileSize = -1; } // Here we check for and we check five times the size of jarFileSize // to accommodate for unjarring the jar file in work directory localJarFile = new Path(lDirAlloc.getLocalPathForWrite( getLocalJobDir(jobId.toString()) + Path.SEPARATOR + "jars", 5 * jarFileSize, fConf), "job.jar"); if (!localFs.mkdirs(localJarFile.getParent())) { throw new IOException("Mkdirs failed to create jars directory "); } systemFS.copyToLocalFile(jarFilePath, localJarFile); localJobConf.setJar(localJarFile.toString()); OutputStream out = localFs.create(localJobFile); try { localJobConf.writeXml(out); } finally { out.close(); } // also unjar the job.jar files RunJar.unJar(new File(localJarFile.toString()), new File(localJarFile.getParent().toString())); } rjob.keepJobFiles = ((localJobConf.getKeepTaskFilesPattern() != null) 
|| localJobConf.getKeepFailedTaskFiles()); rjob.localized = true; rjob.jobConf = localJobConf; taskController.initializeJob(jobId); } } launchTaskForJob(tip, new JobConf(rjob.jobConf)); } private void launchTaskForJob(TaskInProgress tip, JobConf jobConf) throws IOException{ synchronized (tip) { tip.setJobConf(jobConf); tip.launchTask(); } } public synchronized void shutdown() throws IOException { shuttingDown = true; close(); if (this.server != null) { try { LOG.info("Shutting down StatusHttpServer"); this.server.stop(); } catch (Exception e) { LOG.warn("Exception shutting down TaskTracker", e); } } } /** * Close down the TaskTracker and all its components. We must also shutdown * any running tasks or threads, and cleanup disk space. A new TaskTracker * within the same process space might be restarted, so everything must be * clean. */ public synchronized void close() throws IOException { // // Kill running tasks. Do this in a 2nd vector, called 'tasksToClose', // because calling jobHasFinished() may result in an edit to 'tasks'. // TreeMap<TaskAttemptID, TaskInProgress> tasksToClose = new TreeMap<TaskAttemptID, TaskInProgress>(); tasksToClose.putAll(tasks); for (TaskInProgress tip : tasksToClose.values()) { tip.jobHasFinished(false); } this.running = false; // Clear local storage cleanupStorage(); // Shutdown the fetcher thread this.mapEventsFetcher.interrupt(); //stop the launchers this.mapLauncher.interrupt(); this.reduceLauncher.interrupt(); - + + // All tasks are killed. So, they are removed from TaskLog monitoring also. + // Interrupt the monitor. 
+ getTaskLogsMonitor().interrupt(); + jvmManager.stop(); // shutdown RPC connections RPC.stopProxy(jobClient); // wait for the fetcher thread to exit for (boolean done = false; !done; ) { try { this.mapEventsFetcher.join(); done = true; } catch (InterruptedException e) { } } if (taskReportServer != null) { taskReportServer.stop(); taskReportServer = null; } if (healthChecker != null) { //stop node health checker service healthChecker.stop(); healthChecker = null; } } /** * Start with the local machine name, and the default JobTracker */ public TaskTracker(JobConf conf) throws IOException { originalConf = conf; maxMapSlots = conf.getInt( "mapred.tasktracker.map.tasks.maximum", 2); maxReduceSlots = conf.getInt( "mapred.tasktracker.reduce.tasks.maximum", 2); this.jobTrackAddr = JobTracker.getAddress(conf); String infoAddr = NetUtils.getServerAddress(conf, "tasktracker.http.bindAddress", "tasktracker.http.port", "mapred.task.tracker.http.address"); InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr); String httpBindAddress = infoSocAddr.getHostName(); int httpPort = infoSocAddr.getPort(); this.server = new HttpServer("task", httpBindAddress, httpPort, httpPort == 0, conf); workerThreads = conf.getInt("tasktracker.http.threads", 40); this.shuffleServerMetrics = new ShuffleServerMetrics(conf); server.setThreads(1, workerThreads); // let the jsp pages get to the task tracker, config, and other relevant // objects FileSystem local = FileSystem.getLocal(conf); this.localDirAllocator = new LocalDirAllocator("mapred.local.dir"); server.setAttribute("task.tracker", this); server.setAttribute("local.file.system", local); server.setAttribute("conf", conf); server.setAttribute("log", LOG); server.setAttribute("localDirAllocator", localDirAllocator); server.setAttribute("shuffleServerMetrics", shuffleServerMetrics); server.addInternalServlet("mapOutput", "/mapOutput", MapOutputServlet.class); server.addInternalServlet("taskLog", "/tasklog", TaskLogServlet.class); 
server.start(); this.httpPort = server.getPort(); checkJettyPort(httpPort); initialize(); } + /** + * Blank constructor. Only usable by tests. + */ + TaskTracker() { + server = null; + } + private void checkJettyPort(int port) throws IOException { //See HADOOP-4744 if (port < 0) { shuttingDown = true; throw new IOException("Jetty problem. Jetty didn't bind to a " + "valid port"); } } private void startCleanupThreads() throws IOException { taskCleanupThread.setDaemon(true); taskCleanupThread.start(); directoryCleanupThread = new CleanupQueue(); } /** * The connection to the JobTracker, used by the TaskRunner * for locating remote files. */ public InterTrackerProtocol getJobClient() { return jobClient; } /** Return the port at which the tasktracker bound to */ public synchronized InetSocketAddress getTaskTrackerReportAddress() { return taskReportAddress; } /** Queries the job tracker for a set of outputs ready to be copied * @param fromEventId the first event ID we want to start from, this is * modified by the call to this method * @param jobClient the job tracker * @return a set of locations to copy outputs from * @throws IOException */ private List<TaskCompletionEvent> queryJobTracker(IntWritable fromEventId, JobID jobId, InterTrackerProtocol jobClient) throws IOException { TaskCompletionEvent t[] = jobClient.getTaskCompletionEvents( jobId, fromEventId.get(), probe_sample_size); //we are interested in map task completion events only. So store //only those List <TaskCompletionEvent> recentMapEvents = new ArrayList<TaskCompletionEvent>(); for (int i = 0; i < t.length; i++) { if (t[i].isMap) { recentMapEvents.add(t[i]); } } fromEventId.set(fromEventId.get() + t.length); return recentMapEvents; } /** * Main service loop. Will stay in this loop forever. 
*/ State offerService() throws Exception { long lastHeartbeat = 0; while (running && !shuttingDown) { try { long now = System.currentTimeMillis(); long waitTime = heartbeatInterval - (now - lastHeartbeat); if (waitTime > 0) { // sleeps for the wait time or // until there are empty slots to schedule tasks synchronized (finishedCount) { if (finishedCount.get() == 0) { finishedCount.wait(waitTime); } finishedCount.set(0); } } // If the TaskTracker is just starting up: // 1. Verify the buildVersion // 2. Get the system directory & filesystem if(justInited) { String jobTrackerBV = jobClient.getBuildVersion(); if(!VersionInfo.getBuildVersion().equals(jobTrackerBV)) { String msg = "Shutting down. Incompatible buildVersion." + "\nJobTracker's: " + jobTrackerBV + "\nTaskTracker's: "+ VersionInfo.getBuildVersion(); LOG.error(msg); try { jobClient.reportTaskTrackerError(taskTrackerName, null, msg); } catch(Exception e ) { LOG.info("Problem reporting to jobtracker: " + e); } return State.DENIED; } String dir = jobClient.getSystemDir(); if (dir == null) { throw new IOException("Failed to get system directory"); } systemDirectory = new Path(dir); systemFS = systemDirectory.getFileSystem(fConf); } // Send the heartbeat and process the jobtracker's directives HeartbeatResponse heartbeatResponse = transmitHeartBeat(now); // Note the time when the heartbeat returned, use this to decide when to send the // next heartbeat lastHeartbeat = System.currentTimeMillis(); // Check if the map-event list needs purging Set<JobID> jobs = heartbeatResponse.getRecoveredJobs(); if (jobs.size() > 0) { synchronized (this) { // purge the local map events list for (JobID job : jobs) { RunningJob rjob; synchronized (runningJobs) { rjob = runningJobs.get(job); if (rjob != null) { synchronized (rjob) { FetchStatus f = rjob.getFetchStatus(); if (f != null) { f.reset(); } } } } } // Mark the reducers in shuffle for rollback synchronized (shouldReset) { for (Map.Entry<TaskAttemptID, TaskInProgress> entry : 
runningTasks.entrySet()) { if (entry.getValue().getStatus().getPhase() == Phase.SHUFFLE) { this.shouldReset.add(entry.getKey()); } } } } } TaskTrackerAction[] actions = heartbeatResponse.getActions(); if(LOG.isDebugEnabled()) { LOG.debug("Got heartbeatResponse from JobTracker with responseId: " + heartbeatResponse.getResponseId() + " and " + ((actions != null) ? actions.length : 0) + " actions"); } if (reinitTaskTracker(actions)) { return State.STALE; } // resetting heartbeat interval from the response. heartbeatInterval = heartbeatResponse.getHeartbeatInterval(); justStarted = false; justInited = false; if (actions != null){ for(TaskTrackerAction action: actions) { if (action instanceof LaunchTaskAction) { addToTaskQueue((LaunchTaskAction)action); } else if (action instanceof CommitTaskAction) { CommitTaskAction commitAction = (CommitTaskAction)action; if (!commitResponses.contains(commitAction.getTaskID())) { LOG.info("Received commit task action for " + commitAction.getTaskID()); commitResponses.add(commitAction.getTaskID()); } } else { tasksToCleanup.put(action); } } } markUnresponsiveTasks(); killOverflowingTasks(); //we've cleaned up, resume normal operation if (!acceptNewTasks && isIdle()) { acceptNewTasks=true; } //The check below may not be required every iteration but we are //erring on the side of caution here. We have seen many cases where //the call to jetty's getLocalPort() returns different values at //different times. Being a real paranoid here. checkJettyPort(server.getPort()); } catch (InterruptedException ie) { LOG.info("Interrupted. 
Closing down."); return State.INTERRUPTED; } catch (DiskErrorException de) { String msg = "Exiting task tracker for disk error:\n" + StringUtils.stringifyException(de); LOG.error(msg); synchronized (this) { jobClient.reportTaskTrackerError(taskTrackerName, "DiskErrorException", msg); } return State.STALE; } catch (RemoteException re) { String reClass = re.getClassName(); if (DisallowedTaskTrackerException.class.getName().equals(reClass)) { LOG.info("Tasktracker disallowed by JobTracker."); return State.DENIED; } } catch (Exception except) { String msg = "Caught exception: " + StringUtils.stringifyException(except); LOG.error(msg); } } return State.NORMAL; } private long previousUpdate = 0; /** * Build and transmit the heart beat to the JobTracker * @param now current time * @return false if the tracker was unknown * @throws IOException */ private HeartbeatResponse transmitHeartBeat(long now) throws IOException { // Send Counters in the status once every COUNTER_UPDATE_INTERVAL boolean sendCounters; if (now > (previousUpdate + COUNTER_UPDATE_INTERVAL)) { sendCounters = true; previousUpdate = now; } else { sendCounters = false; } // // Check if the last heartbeat got through... // if so then build the heartbeat information for the JobTracker; // else resend the previous status information. 
// if (status == null) { synchronized (this) { status = new TaskTrackerStatus(taskTrackerName, localHostname, httpPort, cloneAndResetRunningTaskStatuses( sendCounters), failures, maxMapSlots, maxReduceSlots); } } else { LOG.info("Resending 'status' to '" + jobTrackAddr.getHostName() + "' with reponseId '" + heartbeatResponseId); } // // Check if we should ask for a new Task // boolean askForNewTask; long localMinSpaceStart; synchronized (this) { askForNewTask = ((status.countOccupiedMapSlots() < maxMapSlots || status.countOccupiedReduceSlots() < maxReduceSlots) && acceptNewTasks); localMinSpaceStart = minSpaceStart; } if (askForNewTask) { checkLocalDirs(fConf.getLocalDirs()); askForNewTask = enoughFreeSpace(localMinSpaceStart); long freeDiskSpace = getFreeSpace(); long totVmem = getTotalVirtualMemoryOnTT(); long totPmem = getTotalPhysicalMemoryOnTT(); status.getResourceStatus().setAvailableSpace(freeDiskSpace); status.getResourceStatus().setTotalVirtualMemory(totVmem); status.getResourceStatus().setTotalPhysicalMemory(totPmem); status.getResourceStatus().setMapSlotMemorySizeOnTT( mapSlotMemorySizeOnTT); status.getResourceStatus().setReduceSlotMemorySizeOnTT( reduceSlotSizeMemoryOnTT); } //add node health information TaskTrackerHealthStatus healthStatus = status.getHealthStatus(); synchronized (this) { if (healthChecker != null) { healthChecker.setHealthStatus(healthStatus); } else { healthStatus.setNodeHealthy(true); healthStatus.setLastReported(0L); healthStatus.setHealthReport(""); } } // // Xmit the heartbeat // HeartbeatResponse heartbeatResponse = jobClient.heartbeat(status, justStarted, justInited, askForNewTask, heartbeatResponseId); // // The heartbeat got through successfully! 
// heartbeatResponseId = heartbeatResponse.getResponseId(); synchronized (this) { for (TaskStatus taskStatus : status.getTaskReports()) { if (taskStatus.getRunState() != TaskStatus.State.RUNNING && taskStatus.getRunState() != TaskStatus.State.UNASSIGNED && taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && !taskStatus.inTaskCleanupPhase()) { if (taskStatus.getIsMap()) { mapTotal--; } else { reduceTotal--; } try { myInstrumentation.completeTask(taskStatus.getTaskID()); } catch (MetricsException me) { LOG.warn("Caught: " + StringUtils.stringifyException(me)); } runningTasks.remove(taskStatus.getTaskID()); } } // Clear transient status information which should only // be sent once to the JobTracker for (TaskInProgress tip: runningTasks.values()) { tip.getStatus().clearStatus(); } } // Force a rebuild of 'status' on the next iteration status = null; return heartbeatResponse; } + long getMapUserLogRetainSize() { + return fConf.getLong(MAP_USERLOG_RETAIN_SIZE, -1); + } + + void setMapUserLogRetainSize(long retainSize) { + fConf.setLong(MAP_USERLOG_RETAIN_SIZE, retainSize); + } + + long getReduceUserLogRetainSize() { + return fConf.getLong(REDUCE_USERLOG_RETAIN_SIZE, -1); + } + + void setReduceUserLogRetainSize(long retainSize) { + fConf.setLong(REDUCE_USERLOG_RETAIN_SIZE, retainSize); + } + /** * Return the total virtual memory available on this TaskTracker. * @return total size of virtual memory. */ long getTotalVirtualMemoryOnTT() { return totalVirtualMemoryOnTT; } /** * Return the total physical memory available on this TaskTracker. * @return total size of physical memory. */ long getTotalPhysicalMemoryOnTT() { return totalPhysicalMemoryOnTT; } long getTotalMemoryAllottedForTasksOnTT() { return totalMemoryAllottedForTasks; } /** * Check if the jobtracker directed a 'reset' of the tasktracker. * * @param actions the directives of the jobtracker for the tasktracker. * @return <code>true</code> if tasktracker is to be reset, * <code>false</code> otherwise. 
*/ private boolean reinitTaskTracker(TaskTrackerAction[] actions) { if (actions != null) { for (TaskTrackerAction action : actions) { if (action.getActionId() == TaskTrackerAction.ActionType.REINIT_TRACKER) { LOG.info("Recieved RenitTrackerAction from JobTracker"); return true; } } } return false; } /** * Kill any tasks that have not reported progress in the last X seconds. */ private synchronized void markUnresponsiveTasks() throws IOException { long now = System.currentTimeMillis(); for (TaskInProgress tip: runningTasks.values()) { if (tip.getRunState() == TaskStatus.State.RUNNING || tip.getRunState() == TaskStatus.State.COMMIT_PENDING || tip.isCleaningup()) { // Check the per-job timeout interval for tasks; // an interval of '0' implies it is never timed-out long jobTaskTimeout = tip.getTaskTimeout(); if (jobTaskTimeout == 0) { continue; } // Check if the task has not reported progress for a // time-period greater than the configured time-out long timeSinceLastReport = now - tip.getLastProgressReport(); if (timeSinceLastReport > jobTaskTimeout && !tip.wasKilled) { String msg = "Task " + tip.getTask().getTaskID() + " failed to report status for " + (timeSinceLastReport / 1000) + " seconds. 
Killing!"; LOG.info(tip.getTask().getTaskID() + ": " + msg); ReflectionUtils.logThreadInfo(LOG, "lost task", 30); tip.reportDiagnosticInfo(msg); myInstrumentation.timedoutTask(tip.getTask().getTaskID()); purgeTask(tip, true); } } } } private static PathDeletionContext[] buildPathDeletionContexts(FileSystem fs, Path[] paths) { int i = 0; PathDeletionContext[] contexts = new PathDeletionContext[paths.length]; for (Path p : paths) { contexts[i++] = new PathDeletionContext(fs, p.toUri().getPath()); } return contexts; } static PathDeletionContext[] buildTaskControllerPathDeletionContexts( FileSystem fs, Path[] paths, Task task, boolean isWorkDir, TaskController taskController) throws IOException { int i = 0; PathDeletionContext[] contexts = new TaskControllerPathDeletionContext[paths.length]; for (Path p : paths) { contexts[i++] = new TaskControllerPathDeletionContext(fs, p, task, isWorkDir, taskController); } return contexts; } /** * The task tracker is done with this job, so we need to clean up. 
* @param action The action with the job * @throws IOException */ private synchronized void purgeJob(KillJobAction action) throws IOException { JobID jobId = action.getJobID(); LOG.info("Received 'KillJobAction' for job: " + jobId); RunningJob rjob = null; synchronized (runningJobs) { rjob = runningJobs.get(jobId); } if (rjob == null) { LOG.warn("Unknown job " + jobId + " being deleted."); } else { synchronized (rjob) { // Add this tips of this job to queue of tasks to be purged for (TaskInProgress tip : rjob.tasks) { tip.jobHasFinished(false); Task t = tip.getTask(); if (t.isMapTask()) { indexCache.removeMap(tip.getTask().getTaskID().toString()); } } // Delete the job directory for this // task if the job is done/failed if (!rjob.keepJobFiles){ PathDeletionContext[] contexts = buildPathDeletionContexts(localFs, getLocalFiles(fConf, getLocalJobDir(rjob.getJobID().toString()))); directoryCleanupThread.addToQueue(contexts); } // Remove this job rjob.tasks.clear(); } } synchronized(runningJobs) { runningJobs.remove(jobId); } } /** * Remove the tip and update all relevant state. * * @param tip {@link TaskInProgress} to be removed. * @param wasFailure did the task fail or was it killed? 
*/ private void purgeTask(TaskInProgress tip, boolean wasFailure) throws IOException { if (tip != null) { LOG.info("About to purge task: " + tip.getTask().getTaskID()); // Remove the task from running jobs, // removing the job if it's the last task removeTaskFromJob(tip.getTask().getJobID(), tip); tip.jobHasFinished(wasFailure); if (tip.getTask().isMapTask()) { indexCache.removeMap(tip.getTask().getTaskID().toString()); } } } /** Check if we're dangerously low on disk space * If so, kill jobs to free up space and make sure * we don't accept any new tasks * Try killing the reduce jobs first, since I believe they * use up most space * Then pick the one with least progress */ private void killOverflowingTasks() throws IOException { long localMinSpaceKill; synchronized(this){ localMinSpaceKill = minSpaceKill; } if (!enoughFreeSpace(localMinSpaceKill)) { acceptNewTasks=false; //we give up! do not accept new tasks until //all the ones running have finished and they're all cleared up synchronized (this) { TaskInProgress killMe = findTaskToKill(null); if (killMe!=null) { String msg = "Tasktracker running out of space." + " Killing task."; LOG.info(killMe.getTask().getTaskID() + ": " + msg); killMe.reportDiagnosticInfo(msg); purgeTask(killMe, false); } } } } /** * Pick a task to kill to free up memory/disk-space * @param tasksToExclude tasks that are to be excluded while trying to find a * task to kill. If null, all runningTasks will be searched. 
* @return the task to kill or null, if one wasn't found */ synchronized TaskInProgress findTaskToKill(List<TaskAttemptID> tasksToExclude) { TaskInProgress killMe = null; for (Iterator it = runningTasks.values().iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); if (tasksToExclude != null && tasksToExclude.contains(tip.getTask().getTaskID())) { // exclude this task continue; } if ((tip.getRunState() == TaskStatus.State.RUNNING || tip.getRunState() == TaskStatus.State.COMMIT_PENDING) && !tip.wasKilled) { if (killMe == null) { killMe = tip; } else if (!tip.getTask().isMapTask()) { //reduce task, give priority if (killMe.getTask().isMapTask() || (tip.getTask().getProgress().get() < killMe.getTask().getProgress().get())) { killMe = tip; } } else if (killMe.getTask().isMapTask() && tip.getTask().getProgress().get() < killMe.getTask().getProgress().get()) { //map task, only add if the progress is lower killMe = tip; } } } return killMe; } /** * Check if any of the local directories has enough * free space (more than minSpace) * * If not, do not try to get a new task assigned * @return * @throws IOException */ private boolean enoughFreeSpace(long minSpace) throws IOException { if (minSpace == 0) { return true; } return minSpace < getFreeSpace(); } private long getFreeSpace() throws IOException { long biggestSeenSoFar = 0; String[] localDirs = fConf.getLocalDirs(); for (int i = 0; i < localDirs.length; i++) { DF df = null; if (localDirsDf.containsKey(localDirs[i])) { df = localDirsDf.get(localDirs[i]); } else { df = new DF(new File(localDirs[i]), fConf); localDirsDf.put(localDirs[i], df); } long availOnThisVol = df.getAvailable(); if (availOnThisVol > biggestSeenSoFar) { biggestSeenSoFar = availOnThisVol; } } //Should ultimately hold back the space we expect running tasks to use but //that estimate isn't currently being passed down to the TaskTrackers return biggestSeenSoFar; } /** * Try to get the size of output for this task. 
* Returns -1 if it can't be found. * @return */ long tryToGetOutputSize(TaskAttemptID taskId, JobConf conf) { try{ TaskInProgress tip; synchronized(this) { tip = tasks.get(taskId); } if(tip == null) return -1; if (!tip.getTask().isMapTask() || tip.getRunState() != TaskStatus.State.SUCCEEDED) { return -1; } MapOutputFile mapOutputFile = new MapOutputFile(); mapOutputFile.setJobId(taskId.getJobID()); mapOutputFile.setConf(conf); Path tmp_output = mapOutputFile.getOutputFile(taskId); if(tmp_output == null) return 0; FileSystem localFS = FileSystem.getLocal(conf); FileStatus stat = localFS.getFileStatus(tmp_output); if(stat == null) return 0; else return stat.getLen(); } catch(IOException e) { LOG.info(e); return -1; } } private TaskLauncher mapLauncher; private TaskLauncher reduceLauncher; public JvmManager getJvmManagerInstance() { return jvmManager; } private void addToTaskQueue(LaunchTaskAction action) { if (action.getTask().isMapTask()) { mapLauncher.addToTaskQueue(action); } else { reduceLauncher.addToTaskQueue(action); } } private class TaskLauncher extends Thread { private IntWritable numFreeSlots; private final int maxSlots; private List<TaskInProgress> tasksToLaunch; public TaskLauncher(TaskType taskType, int numSlots) { this.maxSlots = numSlots; this.numFreeSlots = new IntWritable(numSlots); this.tasksToLaunch = new LinkedList<TaskInProgress>(); setDaemon(true); setName("TaskLauncher for " + taskType + " tasks"); } public void addToTaskQueue(LaunchTaskAction action) { synchronized (tasksToLaunch) { TaskInProgress tip = registerTask(action, this); tasksToLaunch.add(tip); tasksToLaunch.notifyAll(); } } public void cleanTaskQueue() { tasksToLaunch.clear(); } public void addFreeSlots(int numSlots) { synchronized (numFreeSlots) { numFreeSlots.set(numFreeSlots.get() + numSlots); assert (numFreeSlots.get() <= maxSlots); LOG.info("addFreeSlot : current free slots : " + numFreeSlots.get()); numFreeSlots.notifyAll(); } } public void run() { while 
(!Thread.interrupted()) { try { TaskInProgress tip; Task task; synchronized (tasksToLaunch) { while (tasksToLaunch.isEmpty()) { tasksToLaunch.wait(); } //get the TIP tip = tasksToLaunch.remove(0); task = tip.getTask(); LOG.info("Trying to launch : " + tip.getTask().getTaskID() + " which needs " + task.getNumSlotsRequired() + " slots"); } //wait for free slots to run synchronized (numFreeSlots) { while (numFreeSlots.get() < task.getNumSlotsRequired()) { LOG.info("TaskLauncher : Waiting for " + task.getNumSlotsRequired() + " to launch " + task.getTaskID() + ", currently we have " + numFreeSlots.get() + " free slots"); numFreeSlots.wait(); } LOG.info("In TaskLauncher, current free slots : " + numFreeSlots.get()+ " and trying to launch "+tip.getTask().getTaskID() + " which needs " + task.getNumSlotsRequired() + " slots"); numFreeSlots.set(numFreeSlots.get() - task.getNumSlotsRequired()); assert (numFreeSlots.get() >= 0); } synchronized (tip) { //to make sure that there is no kill task action for this if (tip.getRunState() != TaskStatus.State.UNASSIGNED && tip.getRunState() != TaskStatus.State.FAILED_UNCLEAN && tip.getRunState() != TaskStatus.State.KILLED_UNCLEAN) { //got killed externally while still in the launcher queue addFreeSlots(task.getNumSlotsRequired()); continue; } tip.slotTaken = true; } //got a free slot. 
launch the task startNewTask(tip); } catch (InterruptedException e) { return; // ALL DONE } catch (Throwable th) { LOG.error("TaskLauncher error " + StringUtils.stringifyException(th)); } } } } private TaskInProgress registerTask(LaunchTaskAction action, TaskLauncher launcher) { Task t = action.getTask(); LOG.info("LaunchTaskAction (registerTask): " + t.getTaskID() + " task's state:" + t.getState()); TaskInProgress tip = new TaskInProgress(t, this.fConf, launcher); synchronized (this) { tasks.put(t.getTaskID(), tip); runningTasks.put(t.getTaskID(), tip); boolean isMap = t.isMapTask(); if (isMap) { mapTotal++; } else { reduceTotal++; } } return tip; } /** * Start a new task. * All exceptions are handled locally, so that we don't mess up the * task tracker. */ private void startNewTask(TaskInProgress tip) { try { localizeJob(tip); } catch (Throwable e) { String msg = ("Error initializing " + tip.getTask().getTaskID() + ":\n" + StringUtils.stringifyException(e)); LOG.warn(msg); tip.reportDiagnosticInfo(msg); try { tip.kill(true); tip.cleanup(true); } catch (IOException ie2) { LOG.info("Error cleaning up " + tip.getTask().getTaskID() + ":\n" + StringUtils.stringifyException(ie2)); } // Careful! // This might not be an 'Exception' - don't handle 'Error' here! if (e instanceof Error) { throw ((Error) e); } } } void addToMemoryManager(TaskAttemptID attemptId, boolean isMap, JobConf conf) { if (isTaskMemoryManagerEnabled()) { taskMemoryManager.addTask(attemptId, isMap ? conf .getMemoryForMapTask() * 1024 * 1024L : conf .getMemoryForReduceTask() * 1024 * 1024L); } } void removeFromMemoryManager(TaskAttemptID attemptId) { // Remove the entry from taskMemoryManagerThread's data structures. if (isTaskMemoryManagerEnabled()) { taskMemoryManager.removeTask(attemptId); } } /** * Notify the tasktracker to send an out-of-band heartbeat. 
*/ private void notifyTTAboutTaskCompletion() { if (oobHeartbeatOnTaskCompletion) { synchronized (finishedCount) { int value = finishedCount.get(); finishedCount.set(value+1); finishedCount.notify(); } } } /** * The server retry loop. * This while-loop attempts to connect to the JobTracker. It only * loops when the old TaskTracker has gone bad (its state is * stale somehow) and we need to reinitialize everything. */ public void run() { try { startCleanupThreads(); boolean denied = false; while (running && !shuttingDown && !denied) { boolean staleState = false; try { // This while-loop attempts reconnects if we get network errors while (running && !staleState && !shuttingDown && !denied) { try { State osState = offerService(); if (osState == State.STALE) { staleState = true; } else if (osState == State.DENIED) { denied = true; } } catch (Exception ex) { if (!shuttingDown) { LOG.info("Lost connection to JobTracker [" + jobTrackAddr + "]. Retrying...", ex); try { Thread.sleep(5000); } catch (InterruptedException ie) { } } } } } finally { close(); } if (shuttingDown) { return; } LOG.warn("Reinitializing local state"); initialize(); } if (denied) { shutdown(); } } catch (IOException iex) { LOG.error("Got fatal exception while reinitializing TaskTracker: " + StringUtils.stringifyException(iex)); return; } } /////////////////////////////////////////////////////// // TaskInProgress maintains all the info for a Task that // lives at this TaskTracker. It maintains the Task object, // its TaskStatus, and the TaskRunner. 
/////////////////////////////////////////////////////// class TaskInProgress { Task task; long lastProgressReport; StringBuffer diagnosticInfo = new StringBuffer(); private TaskRunner runner; volatile boolean done = false; volatile boolean wasKilled = false; private JobConf defaultJobConf; private JobConf localJobConf; private boolean keepFailedTaskFiles; private boolean alwaysKeepTaskFiles; private TaskStatus taskStatus; private long taskTimeout; private String debugCommand; private volatile boolean slotTaken = false; private TaskLauncher launcher; /** */ public TaskInProgress(Task task, JobConf conf) { this(task, conf, null); } public TaskInProgress(Task task, JobConf conf, TaskLauncher launcher) { this.task = task; this.launcher = launcher; this.lastProgressReport = System.currentTimeMillis(); this.defaultJobConf = conf; localJobConf = null; taskStatus = TaskStatus.createTaskStatus(task.isMapTask(), task.getTaskID(), 0.0f, task.getNumSlotsRequired(), task.getState(), diagnosticInfo.toString(), "initializing", getName(), task.isTaskCleanupTask() ? TaskStatus.Phase.CLEANUP : task.isMapTask()? 
TaskStatus.Phase.MAP: TaskStatus.Phase.SHUFFLE, task.getCounters()); taskTimeout = (10 * 60 * 1000); } private void localizeTask(Task task) throws IOException{ Path localTaskDir = lDirAlloc.getLocalPathForWrite( TaskTracker.getLocalTaskDir(task.getJobID().toString(), task.getTaskID().toString(), task.isTaskCleanupTask()), defaultJobConf ); FileSystem localFs = FileSystem.getLocal(fConf); if (!localFs.mkdirs(localTaskDir)) { throw new IOException("Mkdirs failed to create " + localTaskDir.toString()); } // create symlink for ../work if it already doesnt exist String workDir = lDirAlloc.getLocalPathToRead( TaskTracker.getLocalJobDir(task.getJobID().toString()) + Path.SEPARATOR + "work", defaultJobConf).toString(); String link = localTaskDir.getParent().toString() + Path.SEPARATOR + "work"; File flink = new File(link); if (!flink.exists()) FileUtil.symLink(workDir, link); // create the working-directory of the task Path cwd = lDirAlloc.getLocalPathForWrite( getLocalTaskDir(task.getJobID().toString(), task.getTaskID().toString(), task.isTaskCleanupTask()) + Path.SEPARATOR + MRConstants.WORKDIR, defaultJobConf); if (!localFs.mkdirs(cwd)) { throw new IOException("Mkdirs failed to create " + cwd.toString()); } Path localTaskFile = new Path(localTaskDir, "job.xml"); task.setJobFile(localTaskFile.toString()); localJobConf.set("mapred.local.dir", fConf.get("mapred.local.dir")); if (fConf.get("slave.host.name") != null) { localJobConf.set("slave.host.name", fConf.get("slave.host.name")); } localJobConf.set("mapred.task.id", task.getTaskID().toString()); keepFailedTaskFiles = localJobConf.getKeepFailedTaskFiles(); task.localizeConfiguration(localJobConf); List<String[]> staticResolutions = NetUtils.getAllStaticResolutions(); if (staticResolutions != null && staticResolutions.size() > 0) { StringBuffer str = new StringBuffer(); for (int i = 0; i < staticResolutions.size(); i++) { String[] hostToResolved = staticResolutions.get(i); 
str.append(hostToResolved[0]+"="+hostToResolved[1]); if (i != staticResolutions.size() - 1) { str.append(','); } } localJobConf.set("hadoop.net.static.resolutions", str.toString()); } if (task.isMapTask()) { debugCommand = localJobConf.getMapDebugScript(); } else { debugCommand = localJobConf.getReduceDebugScript(); } String keepPattern = localJobConf.getKeepTaskFilesPattern(); if (keepPattern != null) { alwaysKeepTaskFiles = Pattern.matches(keepPattern, task.getTaskID().toString()); } else { alwaysKeepTaskFiles = false; } if (debugCommand != null || localJobConf.getProfileEnabled() || alwaysKeepTaskFiles || keepFailedTaskFiles) { //disable jvm reuse localJobConf.setNumTasksToExecutePerJvm(1); } if (isTaskMemoryManagerEnabled()) { localJobConf.setBoolean("task.memory.mgmt.enabled", true); } OutputStream out = localFs.create(localTaskFile); try { localJobConf.writeXml(out); } finally { out.close(); } task.setConf(localJobConf); } /** */ public Task getTask() { return task; } public TaskRunner getTaskRunner() { return runner; } public synchronized void setJobConf(JobConf lconf){ this.localJobConf = lconf; keepFailedTaskFiles = localJobConf.getKeepFailedTaskFiles(); taskTimeout = localJobConf.getLong("mapred.task.timeout", 10 * 60 * 1000); } public synchronized JobConf getJobConf() { return localJobConf; } /** */ public synchronized TaskStatus getStatus() { taskStatus.setDiagnosticInfo(diagnosticInfo.toString()); if (diagnosticInfo.length() > 0) { diagnosticInfo = new StringBuffer(); } return taskStatus; } /** * Kick off the task execution */ public synchronized void launchTask() throws IOException { if (this.taskStatus.getRunState() == TaskStatus.State.UNASSIGNED || this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN || this.taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) { localizeTask(task); if (this.taskStatus.getRunState() == TaskStatus.State.UNASSIGNED) { this.taskStatus.setRunState(TaskStatus.State.RUNNING); } this.runner = 
task.createRunner(TaskTracker.this, this); this.runner.start(); this.taskStatus.setStartTime(System.currentTimeMillis()); } else { LOG.info("Not launching task: " + task.getTaskID() + " since it's state is " + this.taskStatus.getRunState()); } } boolean isCleaningup() { return this.taskStatus.inTaskCleanupPhase(); } /** * The task is reporting its progress */ public synchronized void reportProgress(TaskStatus taskStatus) { LOG.info(task.getTaskID() + " " + taskStatus.getProgress() + "% " + taskStatus.getStateString()); // task will report its state as // COMMIT_PENDING when it is waiting for commit response and // when it is committing. // cleanup attempt will report its state as FAILED_UNCLEAN/KILLED_UNCLEAN if (this.done || (this.taskStatus.getRunState() != TaskStatus.State.RUNNING && this.taskStatus.getRunState() != TaskStatus.State.COMMIT_PENDING && !isCleaningup()) || ((this.taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING || this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN || this.taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) && taskStatus.getRunState() == TaskStatus.State.RUNNING)) { //make sure we ignore progress messages after a task has //invoked TaskUmbilicalProtocol.done() or if the task has been //KILLED/FAILED/FAILED_UNCLEAN/KILLED_UNCLEAN //Also ignore progress update if the state change is from //COMMIT_PENDING/FAILED_UNCLEAN/KILLED_UNCLEA to RUNNING LOG.info(task.getTaskID() + " Ignoring status-update since " + ((this.done) ? "task is 'done'" : ("runState: " + this.taskStatus.getRunState())) ); return; } this.taskStatus.statusUpdate(taskStatus); this.lastProgressReport = System.currentTimeMillis(); } /** */ public long getLastProgressReport() { return lastProgressReport; } /** */ public TaskStatus.State getRunState() { return taskStatus.getRunState(); } /** * The task's configured timeout. * * @return the task's configured timeout. 
*/ public long getTaskTimeout() { return taskTimeout; } /** * The task has reported some diagnostic info about its status */ public synchronized void reportDiagnosticInfo(String info) { this.diagnosticInfo.append(info); } public synchronized void reportNextRecordRange(SortedRanges.Range range) { this.taskStatus.setNextRecordRange(range); } /** * The task is reporting that it's done running */ public synchronized void reportDone() { if (isCleaningup()) { if (this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN) { this.taskStatus.setRunState(TaskStatus.State.FAILED); } else if (this.taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) { this.taskStatus.setRunState(TaskStatus.State.KILLED); } } else { this.taskStatus.setRunState(TaskStatus.State.SUCCEEDED); } this.taskStatus.setProgress(1.0f); this.taskStatus.setFinishTime(System.currentTimeMillis()); this.done = true; jvmManager.taskFinished(runner); runner.signalDone(); LOG.info("Task " + task.getTaskID() + " is done."); LOG.info("reported output size for " + task.getTaskID() + " was " + taskStatus.getOutputSize()); } public boolean wasKilled() { return wasKilled; } /** * A task is reporting in as 'done'. * * We need to notify the tasktracker to send an out-of-band heartbeat. * If isn't <code>commitPending</code>, we need to finalize the task * and release the slot it's occupied. * * @param commitPending is the task-commit pending? 
*/ void reportTaskFinished(boolean commitPending) { if (!commitPending) { taskFinished(); releaseSlot(); } notifyTTAboutTaskCompletion(); } /* State changes: * RUNNING/COMMIT_PENDING -> FAILED_UNCLEAN/FAILED/KILLED_UNCLEAN/KILLED * FAILED_UNCLEAN -> FAILED * KILLED_UNCLEAN -> KILLED */ private void setTaskFailState(boolean wasFailure) { // go FAILED_UNCLEAN -> FAILED and KILLED_UNCLEAN -> KILLED always if (taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN) { taskStatus.setRunState(TaskStatus.State.FAILED); } else if (taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) { taskStatus.setRunState(TaskStatus.State.KILLED); } else if (task.isMapOrReduce() && taskStatus.getPhase() != TaskStatus.Phase.CLEANUP) { if (wasFailure) { taskStatus.setRunState(TaskStatus.State.FAILED_UNCLEAN); } else { taskStatus.setRunState(TaskStatus.State.KILLED_UNCLEAN); } } else { if (wasFailure) { taskStatus.setRunState(TaskStatus.State.FAILED); } else { taskStatus.setRunState(TaskStatus.State.KILLED); } } } /** * The task has actually finished running. */ public void taskFinished() { long start = System.currentTimeMillis(); // // Wait until task reports as done. If it hasn't reported in, // wait for a second and try again. // while (!done && (System.currentTimeMillis() - start < WAIT_FOR_DONE)) { try { Thread.sleep(1000); } catch (InterruptedException ie) { } } // // Change state to success or failure, depending on whether // task was 'done' before terminating // boolean needCleanup = false; synchronized (this) { // Remove the task from MemoryManager, if the task SUCCEEDED or FAILED. // KILLED tasks are removed in method kill(), because Kill // would result in launching a cleanup attempt before // TaskRunner returns; if remove happens here, it would remove // wrong task from memory manager. 
if (done || !wasKilled) { removeFromMemoryManager(task.getTaskID()); } if (!done) { if (!wasKilled) { failures += 1; setTaskFailState(true); // call the script here for the failed tasks. if (debugCommand != null) { String taskStdout =""; String taskStderr =""; String taskSyslog =""; String jobConf = task.getJobFile(); try { - // get task's stdout file - taskStdout = FileUtil.makeShellPath( - TaskLog.getRealTaskLogFileLocation - (task.getTaskID(), TaskLog.LogName.STDOUT)); - // get task's stderr file - taskStderr = FileUtil.makeShellPath( - TaskLog.getRealTaskLogFileLocation - (task.getTaskID(), TaskLog.LogName.STDERR)); - // get task's syslog file - taskSyslog = FileUtil.makeShellPath( - TaskLog.getRealTaskLogFileLocation - (task.getTaskID(), TaskLog.LogName.SYSLOG)); + Map<LogName, LogFileDetail> allFilesDetails = + TaskLog.getAllLogsFileDetails(task.getTaskID(), false); + // get task's stdout file + taskStdout = + TaskLog.getRealTaskLogFilePath( + allFilesDetails.get(LogName.STDOUT).location, + LogName.STDOUT); + // get task's stderr file + taskStderr = + TaskLog.getRealTaskLogFilePath( + allFilesDetails.get(LogName.STDERR).location, + LogName.STDERR); + // get task's syslog file + taskSyslog = + TaskLog.getRealTaskLogFilePath( + allFilesDetails.get(LogName.SYSLOG).location, + LogName.SYSLOG); } catch(IOException e){ LOG.warn("Exception finding task's stdout/err/syslog files"); } File workDir = null; try { workDir = new File(lDirAlloc.getLocalPathToRead( TaskTracker.getLocalTaskDir( task.getJobID().toString(), task.getTaskID().toString(), task.isTaskCleanupTask()) + Path.SEPARATOR + MRConstants.WORKDIR, localJobConf). toString()); } catch (IOException e) { LOG.warn("Working Directory of the task " + task.getTaskID() + "doesnt exist. Caught exception " + StringUtils.stringifyException(e)); } // Build the command File stdout = TaskLog.getRealTaskLogFileLocation( task.getTaskID(), TaskLog.LogName.DEBUGOUT); // add pipes program as argument if it exists. 
String program =""; String executable = Submitter.getExecutable(localJobConf); if ( executable != null) { try { program = new URI(executable).getFragment(); } catch (URISyntaxException ur) { LOG.warn("Problem in the URI fragment for pipes executable"); } } String [] debug = debugCommand.split(" "); Vector<String> vargs = new Vector<String>(); for (String component : debug) { vargs.add(component); } vargs.add(taskStdout); vargs.add(taskStderr); vargs.add(taskSyslog); vargs.add(jobConf); vargs.add(program); try { List<String> wrappedCommand = TaskLog.captureDebugOut (vargs, stdout); // run the script. try { runScript(wrappedCommand, workDir); } catch (IOException ioe) { LOG.warn("runScript failed with: " + StringUtils. stringifyException(ioe)); } } catch(IOException e) { LOG.warn("Error in preparing wrapped debug command"); } // add all lines of debug out to diagnostics try { int num = localJobConf.getInt("mapred.debug.out.lines", -1); addDiagnostics(FileUtil.makeShellPath(stdout),num,"DEBUG OUT"); } catch(IOException ioe) { LOG.warn("Exception in add diagnostics!"); } + + // Debug-command is run. Do the post-debug-script-exit debug-logs + // processing. Truncate the logs. + getTaskLogsMonitor().addProcessForLogTruncation( + task.getTaskID(), Arrays.asList(task)); } } taskStatus.setProgress(0.0f); } this.taskStatus.setFinishTime(System.currentTimeMillis()); needCleanup = (taskStatus.getRunState() == TaskStatus.State.FAILED || taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN || taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN || taskStatus.getRunState() == TaskStatus.State.KILLED); } // // If the task has failed, or if the task was killAndCleanup()'ed, // we should clean up right away. We only wait to cleanup // if the task succeeded, and its results might be useful // later on to downstream job processing. 
// if (needCleanup) { removeTaskFromJob(task.getJobID(), this); } try { cleanup(needCleanup); } catch (IOException ie) { } } /** * Runs the script given in args * @param args script name followed by its argumnets * @param dir current working directory. * @throws IOException */ public void runScript(List<String> args, File dir) throws IOException { ShellCommandExecutor shexec = new ShellCommandExecutor(args.toArray(new String[0]), dir); shexec.execute(); int exitCode = shexec.getExitCode(); if (exitCode != 0) { throw new IOException("Task debug script exit with nonzero status of " + exitCode + "."); } } /** * Add last 'num' lines of the given file to the diagnostics. * if num =-1, all the lines of file are added to the diagnostics. * @param file The file from which to collect diagnostics. * @param num The number of lines to be sent to diagnostics. * @param tag The tag is printed before the diagnostics are printed. */ public void addDiagnostics(String file, int num, String tag) { RandomAccessFile rafile = null; try { rafile = new RandomAccessFile(file,"r"); int no_lines =0; String line = null; StringBuffer tail = new StringBuffer(); tail.append("\n-------------------- "+tag+"---------------------\n"); String[] lines = null; if (num >0) { lines = new String[num]; } while ((line = rafile.readLine()) != null) { no_lines++; if (num >0) { if (no_lines <= num) { lines[no_lines-1] = line; } else { // shift them up for (int i=0; i<num-1; ++i) { lines[i] = lines[i+1]; } lines[num-1] = line; } } else if (num == -1) { tail.append(line); tail.append("\n"); } } int n = no_lines > num ?num:no_lines; if (num >0) { for (int i=0;i<n;i++) { tail.append(lines[i]); tail.append("\n"); } } if(n!=0) reportDiagnosticInfo(tail.toString()); } catch (FileNotFoundException fnfe){ LOG.warn("File "+file+ " not found"); } catch (IOException ioe){ LOG.warn("Error reading file "+file); } finally { try { if (rafile != null) { rafile.close(); } } catch (IOException ioe) { LOG.warn("Error closing file 
"+file); } } } /** * We no longer need anything from this task, as the job has * finished. If the task is still running, kill it and clean up. * * @param wasFailure did the task fail, as opposed to was it killed by * the framework */ public void jobHasFinished(boolean wasFailure) throws IOException { // Kill the task if it is still running synchronized(this){ if (getRunState() == TaskStatus.State.RUNNING || getRunState() == TaskStatus.State.UNASSIGNED || getRunState() == TaskStatus.State.COMMIT_PENDING || isCleaningup()) { kill(wasFailure); } } // Cleanup on the finished task cleanup(true); } /** * Something went wrong and the task must be killed. * @param wasFailure was it a failure (versus a kill request)? */ public synchronized void kill(boolean wasFailure) throws IOException { if (taskStatus.getRunState() == TaskStatus.State.RUNNING || taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING || isCleaningup()) { wasKilled = true; if (wasFailure) { failures += 1; } // runner could be null if task-cleanup attempt is not localized yet if (runner != null) { runner.kill(); } setTaskFailState(wasFailure); } else if (taskStatus.getRunState() == TaskStatus.State.UNASSIGNED) { if (wasFailure) { failures += 1; taskStatus.setRunState(TaskStatus.State.FAILED); } else { taskStatus.setRunState(TaskStatus.State.KILLED); } } taskStatus.setFinishTime(System.currentTimeMillis()); removeFromMemoryManager(task.getTaskID()); releaseSlot(); notifyTTAboutTaskCompletion(); } private synchronized void releaseSlot() { if (slotTaken) { if (launcher != null) { launcher.addFreeSlots(task.getNumSlotsRequired()); } slotTaken = false; } } /** * The map output has been lost. 
*/ private synchronized void mapOutputLost(String failure ) throws IOException { if (taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING || taskStatus.getRunState() == TaskStatus.State.SUCCEEDED) { // change status to failure LOG.info("Reporting output lost:"+task.getTaskID()); taskStatus.setRunState(TaskStatus.State.FAILED); taskStatus.setProgress(0.0f); reportDiagnosticInfo("Map output lost, rescheduling: " + failure); runningTasks.put(task.getTaskID(), this); mapTotal++; } else { LOG.warn("Output already reported lost:"+task.getTaskID()); } } /** * We no longer need anything from this task. Either the * controlling job is all done and the files have been copied * away, or the task failed and we don't need the remains. * Any calls to cleanup should not lock the tip first. * cleanup does the right thing- updates tasks in Tasktracker * by locking tasktracker first and then locks the tip. * * if needCleanup is true, the whole task directory is cleaned up. * otherwise the current working directory of the task * i.e. &lt;taskid&gt;/work is cleaned up. */ void cleanup(boolean needCleanup) throws IOException { TaskAttemptID taskId = task.getTaskID(); LOG.debug("Cleaning up " + taskId); synchronized (TaskTracker.this) { if (needCleanup) { // see if tasks data structure is holding this tip. // tasks could hold the tip for cleanup attempt, if cleanup attempt // got launched before this method. if (tasks.get(taskId) == this) { tasks.remove(taskId); } } synchronized (this){ if (alwaysKeepTaskFiles || (taskStatus.getRunState() == TaskStatus.State.FAILED && keepFailedTaskFiles)) { return; } } } synchronized (this) { try { // localJobConf could be null if localization has not happened // then no cleanup will be required. 
if (localJobConf == null) { return; } String taskDir = getLocalTaskDir(task.getJobID().toString(), taskId.toString(), task.isTaskCleanupTask()); if (needCleanup) { if (runner != null) { //cleans up the output directory of the task (where map outputs //and reduce inputs get stored) runner.close(); } //We don't delete the workdir //since some other task (running in the same JVM) //might be using the dir. The JVM running the tasks would clean //the workdir per a task in the task process itself. if (localJobConf.getNumTasksToExecutePerJvm() == 1) { PathDeletionContext[] contexts = buildTaskControllerPathDeletionContexts(localFs, getLocalDirs(), task, false/* not workDir */, taskController); directoryCleanupThread.addToQueue(contexts); } else { PathDeletionContext[] contexts = buildPathDeletionContexts( localFs, getLocalFiles(defaultJobConf, taskDir+"/job.xml")); directoryCleanupThread.addToQueue(contexts); } } else { if (localJobConf.getNumTasksToExecutePerJvm() == 1) { PathDeletionContext[] contexts = buildTaskControllerPathDeletionContexts(localFs, getLocalDirs(), task, true /* workDir */, taskController); directoryCleanupThread.addToQueue(contexts); } } } catch (Throwable ie) { LOG.info("Error cleaning up task runner: " + StringUtils.stringifyException(ie)); } } } @Override public boolean equals(Object obj) { return (obj instanceof TaskInProgress) && task.getTaskID().equals (((TaskInProgress) obj).getTask().getTaskID()); } @Override public int hashCode() { return task.getTaskID().hashCode(); } } // /////////////////////////////////////////////////////////////// // TaskUmbilicalProtocol ///////////////////////////////////////////////////////////////// /** * Called upon startup by the child process, to fetch Task data. 
*/ public synchronized JvmTask getTask(JvmContext context) throws IOException { JVMId jvmId = context.jvmId; LOG.debug("JVM with ID : " + jvmId + " asked for a task"); // save pid of task JVM sent by child jvmManager.setPidToJvm(jvmId, context.pid); if (!jvmManager.isJvmKnown(jvmId)) { LOG.info("Killing unknown JVM " + jvmId); return new JvmTask(null, true); } RunningJob rjob = runningJobs.get(jvmId.getJobId()); if (rjob == null) { //kill the JVM since the job is dead LOG.info("Killing JVM " + jvmId + " since job " + jvmId.getJobId() + " is dead"); jvmManager.killJvm(jvmId); return new JvmTask(null, true); } TaskInProgress tip = jvmManager.getTaskForJvm(jvmId); if (tip == null) { return new JvmTask(null, false); } if (tasks.get(tip.getTask().getTaskID()) != null) { //is task still present LOG.info("JVM with ID: " + jvmId + " given task: " + tip.getTask().getTaskID()); return new JvmTask(tip.getTask(), false); } else { LOG.info("Killing JVM with ID: " + jvmId + " since scheduled task: " + tip.getTask().getTaskID() + " is " + tip.taskStatus.getRunState()); return new JvmTask(null, true); } } /** * Called periodically to report Task progress, from 0.0 to 1.0. */ public synchronized boolean statusUpdate(TaskAttemptID taskid, TaskStatus taskStatus) throws IOException { TaskInProgress tip = tasks.get(taskid); if (tip != null) { tip.reportProgress(taskStatus); return true; } else { LOG.warn("Progress from unknown child task: "+taskid); return false; } } /** * Called when the task dies before completion, and we want to report back * diagnostic info */ public synchronized void reportDiagnosticInfo(TaskAttemptID taskid, String info) throws IOException { TaskInProgress tip = tasks.get(taskid); if (tip != null) { tip.reportDiagnosticInfo(info); } else { LOG.warn("Error from unknown child task: "+taskid+". 
Ignored."); } } public synchronized void reportNextRecordRange(TaskAttemptID taskid, SortedRanges.Range range) throws IOException { TaskInProgress tip = tasks.get(taskid); if (tip != null) { tip.reportNextRecordRange(range); } else { LOG.warn("reportNextRecordRange from unknown child task: "+taskid+". " + "Ignored."); } } /** Child checking to see if we're alive. Normally does nothing.*/ public synchronized boolean ping(TaskAttemptID taskid) throws IOException { return tasks.get(taskid) != null; } /** * Task is reporting that it is in commit_pending * and it is waiting for the commit Response */ public synchronized void commitPending(TaskAttemptID taskid, TaskStatus taskStatus) throws IOException { LOG.info("Task " + taskid + " is in commit-pending," +"" + " task state:" +taskStatus.getRunState()); statusUpdate(taskid, taskStatus); reportTaskFinished(taskid, true); } /** * Child checking whether it can commit */ public synchronized boolean canCommit(TaskAttemptID taskid) { return commitResponses.contains(taskid); //don't remove it now } /** * The task is done. */ public synchronized void done(TaskAttemptID taskid) throws IOException { TaskInProgress tip = tasks.get(taskid); commitResponses.remove(taskid); if (tip != null) { tip.reportDone(); } else { LOG.warn("Unknown child task done: "+taskid+". Ignored."); } } /** * A reduce-task failed to shuffle the map-outputs. Kill the task. */ public synchronized void shuffleError(TaskAttemptID taskId, String message) throws IOException { LOG.fatal("Task: " + taskId + " - Killed due to Shuffle Failure: " + message); TaskInProgress tip = runningTasks.get(taskId); tip.reportDiagnosticInfo("Shuffle Error: " + message); purgeTask(tip, true); } /** * A child task had a local filesystem error. Kill the task. 
*/ public synchronized void fsError(TaskAttemptID taskId, String message) throws IOException { LOG.fatal("Task: " + taskId + " - Killed due to FSError: " + message); TaskInProgress tip = runningTasks.get(taskId); tip.reportDiagnosticInfo("FSError: " + message); purgeTask(tip, true); } /** * A child task had a fatal error. Kill the task. */ public synchronized void fatalError(TaskAttemptID taskId, String msg) throws IOException { LOG.fatal("Task: " + taskId + " - Killed : " + msg); TaskInProgress tip = runningTasks.get(taskId); tip.reportDiagnosticInfo("Error: " + msg); purgeTask(tip, true); } public synchronized MapTaskCompletionEventsUpdate getMapCompletionEvents( JobID jobId, int fromEventId, int maxLocs, TaskAttemptID id) throws IOException { TaskCompletionEvent[]mapEvents = TaskCompletionEvent.EMPTY_ARRAY; synchronized (shouldReset) { if (shouldReset.remove(id)) { return new MapTaskCompletionEventsUpdate(mapEvents, true); } } RunningJob rjob; synchronized (runningJobs) { rjob = runningJobs.get(jobId); if (rjob != null) { synchronized (rjob) { FetchStatus f = rjob.getFetchStatus(); if (f != null) { mapEvents = f.getMapEvents(fromEventId, maxLocs); } } } } return new MapTaskCompletionEventsUpdate(mapEvents, false); } ///////////////////////////////////////////////////// // Called by TaskTracker thread after task process ends ///////////////////////////////////////////////////// /** * The task is no longer running. It may not have completed successfully */ void reportTaskFinished(TaskAttemptID taskid, boolean commitPending) { TaskInProgress tip; synchronized (this) { tip = tasks.get(taskid); } if (tip != null) { tip.reportTaskFinished(commitPending); } else { LOG.warn("Unknown child task finished: "+taskid+". Ignored."); } } /** * A completed map task's output has been lost. 
*/ public synchronized void mapOutputLost(TaskAttemptID taskid, String errorMsg) throws IOException { TaskInProgress tip = tasks.get(taskid); if (tip != null) { tip.mapOutputLost(errorMsg); } else { LOG.warn("Unknown child with bad map output: "+taskid+". Ignored."); } } /** * The datastructure for initializing a job */ static class RunningJob{ private JobID jobid; private JobConf jobConf; // keep this for later use volatile Set<TaskInProgress> tasks; boolean localized; boolean keepJobFiles; FetchStatus f; RunningJob(JobID jobid) { this.jobid = jobid; localized = false; tasks = new HashSet<TaskInProgress>(); keepJobFiles = false; } JobID getJobID() { return jobid; } diff --git a/src/test/org/apache/hadoop/mapred/TestTaskLogsMonitor.java b/src/test/org/apache/hadoop/mapred/TestTaskLogsMonitor.java new file mode 100644 index 0000000..d1c2c2c --- /dev/null +++ b/src/test/org/apache/hadoop/mapred/TestTaskLogsMonitor.java @@ -0,0 +1,500 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.net.URI; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.Log; + +import org.apache.hadoop.filecache.DistributedCache; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.TaskLog.LogFileDetail; +import org.apache.hadoop.mapred.TaskLog.LogName; +import org.apache.hadoop.mapred.lib.IdentityMapper; + +import org.junit.After; +import org.junit.Test; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; + +/** + * Verify the logs' monitoring functionality. + */ +public class TestTaskLogsMonitor { + + static final Log LOG = LogFactory.getLog(TestTaskLogsMonitor.class); + + /** + * clean-up any stale directories after enabling writable permissions for all + * attempt-dirs. 
+ * + * @throws IOException + */ + @After + public void tearDown() throws IOException { + File logDir = TaskLog.getUserLogDir(); + for (File attemptDir : logDir.listFiles()) { + attemptDir.setWritable(true); + FileUtil.fullyDelete(attemptDir); + } + } + + void writeRealBytes(TaskAttemptID firstAttemptID, + TaskAttemptID attemptID, LogName logName, long numBytes, char data) + throws IOException { + + File logFile = TaskLog.getTaskLogFile(firstAttemptID, logName); + + LOG.info("Going to write " + numBytes + " real bytes to the log file " + + logFile); + + if (!logFile.getParentFile().exists() + && !logFile.getParentFile().mkdirs()) { + throw new IOException("Couldn't create all ancestor dirs for " + + logFile); + } + + File attemptDir = TaskLog.getBaseDir(attemptID.toString()); + if (!attemptDir.exists() && !attemptDir.mkdirs()) { + throw new IOException("Couldn't create all ancestor dirs for " + + logFile); + } + + // Need to call up front to set currenttaskid. + TaskLog.syncLogs(firstAttemptID, attemptID); + + FileWriter writer = new FileWriter(logFile, true); + for (long i = 0; i < numBytes; i++) { + writer.write(data); + } + writer.close(); + TaskLog.syncLogs(firstAttemptID, attemptID); + LOG.info("Written " + numBytes + " real bytes to the log file " + + logFile); + } + + private static Map<LogName, Long> getAllLogsFileLengths( + TaskAttemptID tid, boolean isCleanup) throws IOException { + Map<LogName, Long> allLogsFileLengths = new HashMap<LogName, Long>(); + + // If the index file doesn't exist, we cannot get log-file lengths. So set + // them to zero. 
+ if (!TaskLog.getIndexFile(tid.toString(), isCleanup).exists()) { + for (LogName log : LogName.values()) { + allLogsFileLengths.put(log, Long.valueOf(0)); + } + return allLogsFileLengths; + } + + Map<LogName, LogFileDetail> logFilesDetails = + TaskLog.getAllLogsFileDetails(tid, isCleanup); + for (LogName log : logFilesDetails.keySet()) { + allLogsFileLengths.put(log, + Long.valueOf(logFilesDetails.get(log).length)); + } + return allLogsFileLengths; + } + + /** + * Test cases which don't need any truncation of log-files. Without JVM-reuse. + * + * @throws IOException + */ + @Test + public void testNoTruncationNeeded() throws IOException { + TaskTracker taskTracker = new TaskTracker(); + TaskLogsMonitor logsMonitor = new TaskLogsMonitor(1000L, 1000L); + taskTracker.setTaskLogsMonitor(logsMonitor); + + TaskID baseId = new TaskID(); + int taskcount = 0; + + TaskAttemptID attemptID = new TaskAttemptID(baseId, taskcount++); + Task task = new MapTask(null, attemptID, 0, null, null, 0, null); + + // Let the tasks write logs within retain-size + writeRealBytes(attemptID, attemptID, LogName.SYSLOG, 500, 'H'); + + logsMonitor.monitorTaskLogs(); + File attemptDir = TaskLog.getBaseDir(attemptID.toString()); + assertTrue(attemptDir + " doesn't exist!", attemptDir.exists()); + + // Finish the task and the JVM too. + logsMonitor.addProcessForLogTruncation(attemptID, Arrays.asList(task)); + + // There should be no truncation of the log-file. + logsMonitor.monitorTaskLogs(); + assertTrue(attemptDir.exists()); + File logFile = TaskLog.getTaskLogFile(attemptID, LogName.SYSLOG); + assertEquals(500, logFile.length()); + // The index file should also be proper. + assertEquals(500, getAllLogsFileLengths(attemptID, false).get( + LogName.SYSLOG).longValue()); + + logsMonitor.monitorTaskLogs(); + assertEquals(500, logFile.length()); + } + + /** + * Test the disabling of truncation of log-file. 
+ * + * @throws IOException + */ + @Test + public void testDisabledLogTruncation() throws IOException { + TaskTracker taskTracker = new TaskTracker(); + // Anything less than 0 disables the truncation. + TaskLogsMonitor logsMonitor = new TaskLogsMonitor(-1L, -1L); + taskTracker.setTaskLogsMonitor(logsMonitor); + + TaskID baseId = new TaskID(); + int taskcount = 0; + + TaskAttemptID attemptID = new TaskAttemptID(baseId, taskcount++); + Task task = new MapTask(null, attemptID, 0, null, null, 0, null); + + // Let the tasks write some logs + writeRealBytes(attemptID, attemptID, LogName.SYSLOG, 1500, 'H'); + + logsMonitor.monitorTaskLogs(); + File attemptDir = TaskLog.getBaseDir(attemptID.toString()); + assertTrue(attemptDir + " doesn't exist!", attemptDir.exists()); + + // Finish the task and the JVM too. + logsMonitor.addProcessForLogTruncation(attemptID, Arrays.asList(task)); + + // The log-file should not be truncated. + logsMonitor.monitorTaskLogs(); + assertTrue(attemptDir.exists()); + File logFile = TaskLog.getTaskLogFile(attemptID, LogName.SYSLOG); + assertEquals(1500, logFile.length()); + // The index file should also be proper. + assertEquals(1500, getAllLogsFileLengths(attemptID, false).get( + LogName.SYSLOG).longValue()); + } + + /** + * Test the truncation of log-file when JVMs are not reused. 
+ * + * @throws IOException + */ + @Test + public void testLogTruncationOnFinishing() throws IOException { + TaskTracker taskTracker = new TaskTracker(); + TaskLogsMonitor logsMonitor = new TaskLogsMonitor(1000L, 1000L); + taskTracker.setTaskLogsMonitor(logsMonitor); + + TaskID baseId = new TaskID(); + int taskcount = 0; + + TaskAttemptID attemptID = new TaskAttemptID(baseId, taskcount++); + Task task = new MapTask(null, attemptID, 0, null, null, 0, null); + + // Let the tasks write logs more than retain-size + writeRealBytes(attemptID, attemptID, LogName.SYSLOG, 1500, 'H'); + + logsMonitor.monitorTaskLogs(); + File attemptDir = TaskLog.getBaseDir(attemptID.toString()); + assertTrue(attemptDir + " doesn't exist!", attemptDir.exists()); + + // Finish the task and the JVM too. + logsMonitor.addProcessForLogTruncation(attemptID, Arrays.asList(task)); + + // The log-file should now be truncated. + logsMonitor.monitorTaskLogs(); + assertTrue(attemptDir.exists()); + File logFile = TaskLog.getTaskLogFile(attemptID, LogName.SYSLOG); + assertEquals(1000, logFile.length()); + // The index file should also be proper. + assertEquals(1000, getAllLogsFileLengths(attemptID, false).get( + LogName.SYSLOG).longValue()); + + logsMonitor.monitorTaskLogs(); + assertEquals(1000, logFile.length()); + } + + /** + * Test the truncation of log-file when JVM-reuse is enabled. 
+ * + * @throws IOException + */ + @Test + public void testLogTruncationOnFinishingWithJVMReuse() throws IOException { + TaskTracker taskTracker = new TaskTracker(); + TaskLogsMonitor logsMonitor = new TaskLogsMonitor(150L, 150L); + taskTracker.setTaskLogsMonitor(logsMonitor); + + TaskID baseTaskID = new TaskID(); + int attemptsCount = 0; + + // Assuming the job's retain size is 150 + TaskAttemptID attempt1 = new TaskAttemptID(baseTaskID, attemptsCount++); + Task task1 = new MapTask(null, attempt1, 0, null, null, 0, null); + + // Let the tasks write logs more than retain-size + writeRealBytes(attempt1, attempt1, LogName.SYSLOG, 200, 'A'); + + logsMonitor.monitorTaskLogs(); + + File attemptDir = TaskLog.getBaseDir(attempt1.toString()); + assertTrue(attemptDir + " doesn't exist!", attemptDir.exists()); + + // Start another attempt in the same JVM + TaskAttemptID attempt2 = new TaskAttemptID(baseTaskID, attemptsCount++); + Task task2 = new MapTask(null, attempt2, 0, null, null, 0, null); + logsMonitor.monitorTaskLogs(); + + // Let attempt2 also write some logs + writeRealBytes(attempt1, attempt2, LogName.SYSLOG, 100, 'B'); + logsMonitor.monitorTaskLogs(); + + // Start yet another attempt in the same JVM + TaskAttemptID attempt3 = new TaskAttemptID(baseTaskID, attemptsCount++); + Task task3 = new MapTask(null, attempt3, 0, null, null, 0, null); + logsMonitor.monitorTaskLogs(); + + // Let attempt3 also write some logs + writeRealBytes(attempt1, attempt3, LogName.SYSLOG, 225, 'C'); + logsMonitor.monitorTaskLogs(); + + // Finish the JVM. + logsMonitor.addProcessForLogTruncation(attempt1, + Arrays.asList((new Task[] { task1, task2, task3 }))); + + // The log-file should now be truncated. + logsMonitor.monitorTaskLogs(); + assertTrue(attemptDir.exists()); + File logFile = TaskLog.getTaskLogFile(attempt1, LogName.SYSLOG); + assertEquals(400, logFile.length()); + // The index files should also be proper. 
+ assertEquals(150, getAllLogsFileLengths(attempt1, false).get( + LogName.SYSLOG).longValue()); + assertEquals(100, getAllLogsFileLengths(attempt2, false).get( + LogName.SYSLOG).longValue()); + assertEquals(150, getAllLogsFileLengths(attempt3, false).get( + LogName.SYSLOG).longValue()); + + // assert the data. + FileReader reader = + new FileReader(TaskLog.getTaskLogFile(attempt1, LogName.SYSLOG)); + int ch, bytesRead = 0; + boolean dataValid = true; + while ((ch = reader.read()) != -1) { + bytesRead++; + if (bytesRead <= 150) { + if ((char) ch != 'A') { + LOG.warn("Truncation didn't happen properly. At " + + (bytesRead + 1) + "th byte, expected 'A' but found " + + (char) ch); + dataValid = false; + } + } else if (bytesRead <= 250) { + if ((char) ch != 'B') { + LOG.warn("Truncation didn't happen properly. At " + + (bytesRead + 1) + "th byte, expected 'B' but found " + + (char) ch); + dataValid = false; + } + } else if ((char) ch != 'C') { + LOG.warn("Truncation didn't happen properly. At " + (bytesRead + 1) + + "th byte, expected 'C' but found " + (char) ch); + dataValid = false; + } + } + assertTrue("Log-truncation didn't happen properly!", dataValid); + + logsMonitor.monitorTaskLogs(); + assertEquals(400, logFile.length()); + } + + private static String TEST_ROOT_DIR = + new File(System.getProperty("test.build.data", "/tmp")).toURI().toString().replace( + ' ', '+'); + + public static class LoggingMapper<K, V> extends IdentityMapper<K, V> { + + public void map(K key, V val, OutputCollector<K, V> output, + Reporter reporter) throws IOException { + // Write lots of logs + for (int i = 0; i < 1000; i++) { + System.out.println("Lots of logs! Lots of logs! " + + "Waiting to be truncated! 
Lots of logs!"); + } + super.map(key, val, output, reporter); + } + } + + /** + * Test logs monitoring with {@link MiniMRCluster} + * + * @throws IOException + */ + @Test + public void testLogsMonitoringWithMiniMR() throws IOException { + + MiniMRCluster mr = null; + try { + JobConf clusterConf = new JobConf(); + clusterConf.setLong(TaskTracker.MAP_USERLOG_RETAIN_SIZE, 10000L); + clusterConf.setLong(TaskTracker.REDUCE_USERLOG_RETAIN_SIZE, 10000L); + mr = new MiniMRCluster(1, "file:///", 3, null, null, clusterConf); + + JobConf conf = mr.createJobConf(); + + Path inDir = new Path(TEST_ROOT_DIR + "/input"); + Path outDir = new Path(TEST_ROOT_DIR + "/output"); + FileSystem fs = FileSystem.get(conf); + if (fs.exists(outDir)) { + fs.delete(outDir, true); + } + if (!fs.exists(inDir)) { + fs.mkdirs(inDir); + } + String input = "The quick brown fox jumped over the lazy dog"; + DataOutputStream file = fs.create(new Path(inDir, "part-0")); + file.writeBytes(input); + file.close(); + + conf.setInputFormat(TextInputFormat.class); + conf.setOutputKeyClass(LongWritable.class); + conf.setOutputValueClass(Text.class); + + FileInputFormat.setInputPaths(conf, inDir); + FileOutputFormat.setOutputPath(conf, outDir); + conf.setNumMapTasks(1); + conf.setNumReduceTasks(0); + conf.setMapperClass(LoggingMapper.class); + + RunningJob job = JobClient.runJob(conf); + assertTrue(job.getJobState() == JobStatus.SUCCEEDED); + for (TaskCompletionEvent tce : job.getTaskCompletionEvents(0)) { + long length = + TaskLog.getTaskLogFile(tce.getTaskAttemptId(), + TaskLog.LogName.STDOUT).length(); + assertTrue("STDOUT log file length for " + tce.getTaskAttemptId() + + " is " + length + " and not <=10000", length <= 10000); + } + } finally { + if (mr != null) { + mr.shutdown(); + } + } + } + + /** + * Test the truncation of DEBUGOUT file by {@link TaskLogsMonitor} + * @throws IOException + */ + @Test + public void testDebugLogsTruncationWithMiniMR() throws IOException { + + MiniMRCluster mr = null; + try { 
+ JobConf clusterConf = new JobConf(); + clusterConf.setLong(TaskTracker.MAP_USERLOG_RETAIN_SIZE, 10000L); + clusterConf.setLong(TaskTracker.REDUCE_USERLOG_RETAIN_SIZE, 10000L); + mr = new MiniMRCluster(1, "file:///", 3, null, null, clusterConf); + + JobConf conf = mr.createJobConf(); + + Path inDir = new Path(TEST_ROOT_DIR + "/input"); + Path outDir = new Path(TEST_ROOT_DIR + "/output"); + FileSystem fs = FileSystem.get(conf); + if (fs.exists(outDir)) { + fs.delete(outDir, true); + } + if (!fs.exists(inDir)) { + fs.mkdirs(inDir); + } + String input = "The quick brown fox jumped over the lazy dog"; + DataOutputStream file = fs.create(new Path(inDir, "part-0")); + file.writeBytes(input); + file.close(); + + conf.setInputFormat(TextInputFormat.class); + conf.setOutputKeyClass(LongWritable.class); + conf.setOutputValueClass(Text.class); + + FileInputFormat.setInputPaths(conf, inDir); + FileOutputFormat.setOutputPath(conf, outDir); + conf.setNumMapTasks(1); + conf.setMaxMapAttempts(1); + conf.setNumReduceTasks(0); + conf.setMapperClass(TestMiniMRMapRedDebugScript.MapClass.class); + + // copy debug script to cache from local file system. + Path scriptPath = new Path(TEST_ROOT_DIR, "debug-script.txt"); + String debugScriptContent = + "for ((i=0;i<1000;i++)); " + "do " + + "echo \"Lots of logs! Lots of logs! " + + "Waiting to be truncated! 
Lots of logs!\";" + "done"; + DataOutputStream scriptFile = fs.create(scriptPath); + scriptFile.writeBytes(debugScriptContent); + scriptFile.close(); + new File(scriptPath.toUri().getPath()).setExecutable(true); + + URI uri = scriptPath.toUri(); + DistributedCache.createSymlink(conf); + DistributedCache.addCacheFile(uri, conf); + conf.setMapDebugScript(scriptPath.toUri().getPath()); + + RunningJob job = null; + try { + JobClient jc = new JobClient(conf); + job = jc.submitJob(conf); + try { + jc.monitorAndPrintJob(conf, job); + } catch (InterruptedException e) { + // + } + } catch (IOException ioe) { + } finally{ + for (TaskCompletionEvent tce : job.getTaskCompletionEvents(0)) { + File debugOutFile = + TaskLog.getTaskLogFile(tce.getTaskAttemptId(), + TaskLog.LogName.DEBUGOUT); + if (debugOutFile.exists()) { + long length = debugOutFile.length(); + assertTrue("DEBUGOUT log file length for " + + tce.getTaskAttemptId() + " is " + length + + " and not =10000", length == 10000); + } + } + } + } finally { + if (mr != null) { + mr.shutdown(); + } + } + } +}
jaxlaw/hadoop-common
2f8d25d29367bbf27d931102be084a5895178266
HADOOP:6441 from https://issues.apache.org/jira/secure/attachment/12428133/h-6441.20.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 86aba0f..cfd2bf9 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,476 +1,479 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) + HADOOP-6151, 6281, 6285, 6441. Add HTML quoting of the parameters to all + of the servlets to prevent XSS attacks. (omalley) + yahoo-hadoop-0.20.1-3195383002 MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. 
(Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. 
Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. 
Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. 
Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. 
Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. 
http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. 
http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. 
http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. 
Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/core/org/apache/hadoop/http/HtmlQuoting.java b/src/core/org/apache/hadoop/http/HtmlQuoting.java new file mode 100644 index 0000000..99befee --- /dev/null +++ b/src/core/org/apache/hadoop/http/HtmlQuoting.java @@ -0,0 +1,207 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.http; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; + +/** + * This class is responsible for quoting HTML characters. + */ +public class HtmlQuoting { + private static final byte[] ampBytes = "&amp;".getBytes(); + private static final byte[] aposBytes = "&apos;".getBytes(); + private static final byte[] gtBytes = "&gt;".getBytes(); + private static final byte[] ltBytes = "&lt;".getBytes(); + private static final byte[] quotBytes = "&quot;".getBytes(); + + /** + * Does the given string need to be quoted? + * @param data the string to check + * @param off the starting position + * @param len the number of bytes to check + * @return does the string contain any of the active html characters? 
+ */ + public static boolean needsQuoting(byte[] data, int off, int len) { + for(int i=off; i< off+len; ++i) { + switch(data[i]) { + case '&': + case '<': + case '>': + case '\'': + case '"': + return true; + default: + break; + } + } + return false; + } + + /** + * Does the given string need to be quoted? + * @param str the string to check + * @return does the string contain any of the active html characters? + */ + public static boolean needsQuoting(String str) { + if (str == null) { + return false; + } + byte[] bytes = str.getBytes(); + return needsQuoting(bytes, 0 , bytes.length); + } + + /** + * Quote all of the active HTML characters in the given string as they + * are added to the buffer. + * @param output the stream to write the output to + * @param buffer the byte array to take the characters from + * @param off the index of the first byte to quote + * @param len the number of bytes to quote + */ + public static void quoteHtmlChars(OutputStream output, byte[] buffer, + int off, int len) throws IOException { + for(int i=off; i < off+len; i++) { + switch (buffer[i]) { + case '&': output.write(ampBytes); break; + case '<': output.write(ltBytes); break; + case '>': output.write(gtBytes); break; + case '\'': output.write(aposBytes); break; + case '"': output.write(quotBytes); break; + default: output.write(buffer, i, 1); + } + } + } + + /** + * Quote the given item to make it html-safe. + * @param item the string to quote + * @return the quoted string + */ + public static String quoteHtmlChars(String item) { + if (item == null) { + return null; + } + byte[] bytes = item.getBytes(); + if (needsQuoting(bytes, 0, bytes.length)) { + ByteArrayOutputStream buffer = new ByteArrayOutputStream(); + try { + quoteHtmlChars(buffer, bytes, 0, bytes.length); + } catch (IOException ioe) { + // Won't happen, since it is a bytearrayoutputstream + } + return buffer.toString(); + } else { + return item; + } + } + + /** + * Return an output stream that quotes all of the output. 
+ * @param out the stream to write the quoted output to + * @return a new stream that the application show write to + * @throws IOException if the underlying output fails + */ + public static OutputStream quoteOutputStream(final OutputStream out + ) throws IOException { + return new OutputStream() { + private byte[] data = new byte[1]; + @Override + public void write(byte[] data, int off, int len) throws IOException { + quoteHtmlChars(out, data, off, len); + } + + @Override + public void write(int b) throws IOException { + data[0] = (byte) b; + quoteHtmlChars(out, data, 0, 1); + } + + @Override + public void flush() throws IOException { + out.flush(); + } + + @Override + public void close() throws IOException { + out.close(); + } + }; + } + + /** + * Remove HTML quoting from a string. + * @param item the string to unquote + * @return the unquoted string + */ + public static String unquoteHtmlChars(String item) { + if (item == null) { + return null; + } + int next = item.indexOf('&'); + // nothing was quoted + if (next == -1) { + return item; + } + int len = item.length(); + int posn = 0; + StringBuilder buffer = new StringBuilder(); + while (next != -1) { + buffer.append(item.substring(posn, next)); + if (item.startsWith("&amp;", next)) { + buffer.append('&'); + next += 5; + } else if (item.startsWith("&apos;", next)) { + buffer.append('\''); + next += 6; + } else if (item.startsWith("&gt;", next)) { + buffer.append('>'); + next += 4; + } else if (item.startsWith("&lt;", next)) { + buffer.append('<'); + next += 4; + } else if (item.startsWith("&quot;", next)) { + buffer.append('"'); + next += 6; + } else { + int end = item.indexOf(';', next)+1; + if (end == 0) { + end = len; + } + throw new IllegalArgumentException("Bad HTML quoting for " + + item.substring(next,end)); + } + posn = next; + next = item.indexOf('&', posn); + } + buffer.append(item.substring(posn, len)); + return buffer.toString(); + } + + public static void main(String[] args) throws Exception { + 
for(String arg:args) { + System.out.println("Original: " + arg); + String quoted = quoteHtmlChars(arg); + System.out.println("Quoted: "+ quoted); + String unquoted = unquoteHtmlChars(quoted); + System.out.println("Unquoted: " + unquoted); + System.out.println(); + } + } +} diff --git a/src/core/org/apache/hadoop/http/HttpServer.java b/src/core/org/apache/hadoop/http/HttpServer.java index 4fc44c1..ac52ce4 100644 --- a/src/core/org/apache/hadoop/http/HttpServer.java +++ b/src/core/org/apache/hadoop/http/HttpServer.java @@ -1,544 +1,667 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.http; import java.io.IOException; import java.io.PrintWriter; import java.net.BindException; import java.net.InetSocketAddress; import java.net.URL; import java.util.ArrayList; +import java.util.Enumeration; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; -import java.nio.channels.ServerSocketChannel; +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletRequestWrapper; import javax.servlet.http.HttpServletResponse; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.log.LogLevel; import org.apache.hadoop.util.ReflectionUtils; import org.mortbay.jetty.Connector; import org.mortbay.jetty.Handler; import org.mortbay.jetty.Server; import org.mortbay.jetty.handler.ContextHandlerCollection; import org.mortbay.jetty.nio.SelectChannelConnector; import org.mortbay.jetty.security.SslSocketConnector; import org.mortbay.jetty.servlet.Context; import org.mortbay.jetty.servlet.DefaultServlet; import org.mortbay.jetty.servlet.FilterHolder; import org.mortbay.jetty.servlet.FilterMapping; import org.mortbay.jetty.servlet.ServletHandler; import org.mortbay.jetty.servlet.ServletHolder; import org.mortbay.jetty.webapp.WebAppContext; import org.mortbay.thread.QueuedThreadPool; import org.mortbay.util.MultiException; /** * Create a Jetty embedded server to answer http requests. The primary goal * is to serve up status information for the server. 
* There are three contexts: * "/logs/" -> points to the log directory * "/static/" -> points to common static files (src/webapps/static) * "/" -> the jsp server code from (src/webapps/<name>) */ public class HttpServer implements FilterContainer { public static final Log LOG = LogFactory.getLog(HttpServer.class); static final String FILTER_INITIALIZER_PROPERTY = "hadoop.http.filter.initializers"; protected final Server webServer; protected final Connector listener; protected final WebAppContext webAppContext; protected final boolean findPort; protected final Map<Context, Boolean> defaultContexts = new HashMap<Context, Boolean>(); protected final List<String> filterNames = new ArrayList<String>(); private static final int MAX_RETRIES = 10; /** Same as this(name, bindAddress, port, findPort, null); */ public HttpServer(String name, String bindAddress, int port, boolean findPort ) throws IOException { this(name, bindAddress, port, findPort, new Configuration()); } /** * Create a status server on the given port. * The jsp scripts are taken from src/webapps/<name>. * @param name The name of the server * @param port The port to use on the server * @param findPort whether the server should start at the given port and * increment by 1 until it finds a free port. 
* @param conf Configuration */ public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf) throws IOException { webServer = new Server(); this.findPort = findPort; listener = createBaseListener(conf); listener.setHost(bindAddress); listener.setPort(port); webServer.addConnector(listener); webServer.setThreadPool(new QueuedThreadPool()); final String appDir = getWebAppsPath(); ContextHandlerCollection contexts = new ContextHandlerCollection(); webServer.setHandler(contexts); webAppContext = new WebAppContext(); webAppContext.setContextPath("/"); webAppContext.setWar(appDir + "/" + name); webServer.addHandler(webAppContext); addDefaultApps(contexts, appDir); + addGlobalFilter("safety", QuotingInputFilter.class.getName(), null); final FilterInitializer[] initializers = getFilterInitializers(conf); if (initializers != null) { for(FilterInitializer c : initializers) { c.initFilter(this); } } addDefaultServlets(); } /** * Create a required listener for the Jetty instance listening on the port * provided. This wrapper and all subclasses must create at least one * listener. */ protected Connector createBaseListener(Configuration conf) throws IOException { SelectChannelConnector ret = new SelectChannelConnector(); ret.setLowResourceMaxIdleTime(10000); ret.setAcceptQueueSize(128); ret.setResolveNames(false); ret.setUseDirectBuffers(false); return ret; } /** Get an array of FilterConfiguration specified in the conf */ private static FilterInitializer[] getFilterInitializers(Configuration conf) { if (conf == null) { return null; } Class<?>[] classes = conf.getClasses(FILTER_INITIALIZER_PROPERTY); if (classes == null) { return null; } FilterInitializer[] initializers = new FilterInitializer[classes.length]; for(int i = 0; i < classes.length; i++) { initializers[i] = (FilterInitializer)ReflectionUtils.newInstance( classes[i], conf); } return initializers; } /** * Add default apps. 
* @param appDir The application directory * @throws IOException */ protected void addDefaultApps(ContextHandlerCollection parent, final String appDir) throws IOException { // set up the context for "/logs/" if "hadoop.log.dir" property is defined. String logDir = System.getProperty("hadoop.log.dir"); if (logDir != null) { Context logContext = new Context(parent, "/logs"); logContext.setResourceBase(logDir); logContext.addServlet(DefaultServlet.class, "/"); defaultContexts.put(logContext, true); } // set up the context for "/static/*" Context staticContext = new Context(parent, "/static"); staticContext.setResourceBase(appDir + "/static"); staticContext.addServlet(DefaultServlet.class, "/*"); defaultContexts.put(staticContext, true); } /** * Add default servlets. */ protected void addDefaultServlets() { // set up default servlets addServlet("stacks", "/stacks", StackServlet.class); addServlet("logLevel", "/logLevel", LogLevel.Servlet.class); } public void addContext(Context ctxt, boolean isFiltered) throws IOException { webServer.addHandler(ctxt); defaultContexts.put(ctxt, isFiltered); } /** * Add a context * @param pathSpec The path spec for the context * @param dir The directory containing the context * @param isFiltered if true, the servlet is added to the filter path mapping * @throws IOException */ protected void addContext(String pathSpec, String dir, boolean isFiltered) throws IOException { if (0 == webServer.getHandlers().length) { throw new RuntimeException("Couldn't find handler"); } WebAppContext webAppCtx = new WebAppContext(); webAppCtx.setContextPath(pathSpec); webAppCtx.setWar(dir); addContext(webAppCtx, true); } /** * Set a value in the webapp context. These values are available to the jsp * pages as "application.getAttribute(name)". * @param name The name of the attribute * @param value The value of the attribute */ public void setAttribute(String name, Object value) { webAppContext.setAttribute(name, value); } /** * Add a servlet in the server. 
* @param name The name of the servlet (can be passed as null) * @param pathSpec The path spec for the servlet * @param clazz The servlet class */ public void addServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz) { addInternalServlet(name, pathSpec, clazz); addFilterPathMapping(pathSpec, webAppContext); } /** * Add an internal servlet in the server. * @param name The name of the servlet (can be passed as null) * @param pathSpec The path spec for the servlet * @param clazz The servlet class * @deprecated this is a temporary method */ @Deprecated public void addInternalServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz) { ServletHolder holder = new ServletHolder(clazz); if (name != null) { holder.setName(name); } webAppContext.addServlet(holder, pathSpec); } /** {@inheritDoc} */ public void addFilter(String name, String classname, Map<String, String> parameters) { final String[] USER_FACING_URLS = { "*.html", "*.jsp" }; defineFilter(webAppContext, name, classname, parameters, USER_FACING_URLS); final String[] ALL_URLS = { "/*" }; for (Map.Entry<Context, Boolean> e : defaultContexts.entrySet()) { if (e.getValue()) { Context ctx = e.getKey(); defineFilter(ctx, name, classname, parameters, ALL_URLS); LOG.info("Added filter " + name + " (class=" + classname + ") to context " + ctx.getDisplayName()); } } filterNames.add(name); } /** {@inheritDoc} */ public void addGlobalFilter(String name, String classname, Map<String, String> parameters) { final String[] ALL_URLS = { "/*" }; defineFilter(webAppContext, name, classname, parameters, ALL_URLS); for (Context ctx : defaultContexts.keySet()) { defineFilter(ctx, name, classname, parameters, ALL_URLS); } LOG.info("Added global filter" + name + " (class=" + classname + ")"); } /** * Define a filter for a context and set up default url mappings. 
*/ protected void defineFilter(Context ctx, String name, String classname, Map<String,String> parameters, String[] urls) { FilterHolder holder = new FilterHolder(); holder.setName(name); holder.setClassName(classname); holder.setInitParameters(parameters); FilterMapping fmap = new FilterMapping(); fmap.setPathSpecs(urls); fmap.setDispatches(Handler.ALL); fmap.setFilterName(name); ServletHandler handler = ctx.getServletHandler(); handler.addFilter(holder, fmap); } /** * Add the path spec to the filter path mapping. * @param pathSpec The path spec * @param webAppCtx The WebApplicationContext to add to */ protected void addFilterPathMapping(String pathSpec, Context webAppCtx) { ServletHandler handler = webAppCtx.getServletHandler(); for(String name : filterNames) { FilterMapping fmap = new FilterMapping(); fmap.setPathSpec(pathSpec); fmap.setFilterName(name); fmap.setDispatches(Handler.ALL); handler.addFilterMapping(fmap); } } /** * Get the value in the webapp context. * @param name The name of the attribute * @return The value of the attribute */ public Object getAttribute(String name) { return webAppContext.getAttribute(name); } /** * Get the pathname to the webapps files. * @return the pathname as a URL * @throws IOException if 'webapps' directory cannot be found on CLASSPATH. */ protected String getWebAppsPath() throws IOException { URL url = getClass().getClassLoader().getResource("webapps"); if (url == null) throw new IOException("webapps not found in CLASSPATH"); return url.toString(); } /** * Get the port that the server is on * @return the port */ public int getPort() { return webServer.getConnectors()[0].getLocalPort(); } /** * Set the min, max number of worker threads (simultaneous connections). */ public void setThreads(int min, int max) { QueuedThreadPool pool = (QueuedThreadPool) webServer.getThreadPool() ; pool.setMinThreads(min); pool.setMaxThreads(max); } /** * Configure an ssl listener on the server. 
* @param addr address to listen on * @param keystore location of the keystore * @param storPass password for the keystore * @param keyPass password for the key * @deprecated Use {@link #addSslListener(InetSocketAddress, Configuration, boolean)} */ @Deprecated public void addSslListener(InetSocketAddress addr, String keystore, String storPass, String keyPass) throws IOException { if (webServer.isStarted()) { throw new IOException("Failed to add ssl listener"); } SslSocketConnector sslListener = new SslSocketConnector(); sslListener.setHost(addr.getHostName()); sslListener.setPort(addr.getPort()); sslListener.setKeystore(keystore); sslListener.setPassword(storPass); sslListener.setKeyPassword(keyPass); webServer.addConnector(sslListener); } /** * Configure an ssl listener on the server. * @param addr address to listen on * @param sslConf conf to retrieve ssl options * @param needClientAuth whether client authentication is required */ public void addSslListener(InetSocketAddress addr, Configuration sslConf, boolean needClientAuth) throws IOException { if (webServer.isStarted()) { throw new IOException("Failed to add ssl listener"); } if (needClientAuth) { // setting up SSL truststore for authenticating clients System.setProperty("javax.net.ssl.trustStore", sslConf.get( "ssl.server.truststore.location", "")); System.setProperty("javax.net.ssl.trustStorePassword", sslConf.get( "ssl.server.truststore.password", "")); System.setProperty("javax.net.ssl.trustStoreType", sslConf.get( "ssl.server.truststore.type", "jks")); } SslSocketConnector sslListener = new SslSocketConnector(); sslListener.setHost(addr.getHostName()); sslListener.setPort(addr.getPort()); sslListener.setKeystore(sslConf.get("ssl.server.keystore.location")); sslListener.setPassword(sslConf.get("ssl.server.keystore.password", "")); sslListener.setKeyPassword(sslConf.get("ssl.server.keystore.keypassword", "")); sslListener.setKeystoreType(sslConf.get("ssl.server.keystore.type", "jks")); 
sslListener.setNeedClientAuth(needClientAuth); webServer.addConnector(sslListener); } /** * Start the server. Does not wait for the server to start. */ public void start() throws IOException { try { int port = 0; int oriPort = listener.getPort(); // The original requested port while (true) { try { port = webServer.getConnectors()[0].getLocalPort(); LOG.info("Port returned by webServer.getConnectors()[0]." + "getLocalPort() before open() is "+ port + ". Opening the listener on " + oriPort); listener.open(); port = listener.getLocalPort(); LOG.info("listener.getLocalPort() returned " + listener.getLocalPort() + " webServer.getConnectors()[0].getLocalPort() returned " + webServer.getConnectors()[0].getLocalPort()); //Workaround to handle the problem reported in HADOOP-4744 if (port < 0) { Thread.sleep(100); int numRetries = 1; while (port < 0) { LOG.warn("listener.getLocalPort returned " + port); if (numRetries++ > MAX_RETRIES) { throw new Exception(" listener.getLocalPort is returning " + "less than 0 even after " +numRetries+" resets"); } for (int i = 0; i < 2; i++) { LOG.info("Retrying listener.getLocalPort()"); port = listener.getLocalPort(); if (port > 0) { break; } Thread.sleep(200); } if (port > 0) { break; } LOG.info("Bouncing the listener"); listener.close(); Thread.sleep(1000); listener.setPort(oriPort == 0 ? 0 : (oriPort += 1)); listener.open(); Thread.sleep(100); port = listener.getLocalPort(); } } //Workaround end LOG.info("Jetty bound to port " + port); webServer.start(); // Workaround for HADOOP-6386 port = listener.getLocalPort(); if (port < 0) { LOG.warn("Bounds port is " + port + " after webserver start"); for (int i = 0; i < MAX_RETRIES/2; i++) { try { webServer.stop(); } catch (Exception e) { LOG.warn("Can't stop web-server", e); } Thread.sleep(1000); listener.setPort(oriPort == 0 ? 
0 : (oriPort += 1)); listener.open(); Thread.sleep(100); webServer.start(); LOG.info(i + "attempts to restart webserver"); port = listener.getLocalPort(); if (port > 0) break; } if (port < 0) throw new Exception("listener.getLocalPort() is returning " + "less than 0 even after " +MAX_RETRIES+" resets"); } // End of HADOOP-6386 workaround break; } catch (IOException ex) { // if this is a bind exception, // then try the next port number. if (ex instanceof BindException) { if (!findPort) { throw (BindException) ex; } } else { LOG.info("HttpServer.start() threw a non Bind IOException"); throw ex; } } catch (MultiException ex) { LOG.info("HttpServer.start() threw a MultiException"); throw ex; } listener.setPort((oriPort += 1)); } } catch (IOException e) { throw e; } catch (Exception e) { throw new IOException("Problem starting http server", e); } } /** * stop the server */ public void stop() throws Exception { listener.close(); webServer.stop(); } public void join() throws InterruptedException { webServer.join(); } /** * A very simple servlet to serve up a text representation of the current * stack traces. It both returns the stacks to the caller and logs them. * Currently the stack traces are done sequentially rather than exactly the * same data. */ public static class StackServlet extends HttpServlet { private static final long serialVersionUID = -6284183679759467039L; @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { - PrintWriter out = new PrintWriter(response.getOutputStream()); + PrintWriter out = new PrintWriter + (HtmlQuoting.quoteOutputStream(response.getOutputStream())); ReflectionUtils.printThreadInfo(out, ""); out.close(); ReflectionUtils.logThreadInfo(LOG, "jsp requested", 1); } } + + /** + * A Servlet input filter that quotes all HTML active characters in the + * parameter names and values. 
The goal is to quote the characters to make + * all of the servlets resistant to cross-site scripting attacks. + */ + public static class QuotingInputFilter implements Filter { + + public static class RequestQuoter extends HttpServletRequestWrapper { + private final HttpServletRequest rawRequest; + public RequestQuoter(HttpServletRequest rawRequest) { + super(rawRequest); + this.rawRequest = rawRequest; + } + + /** + * Return the set of parameter names, quoting each name. + */ + @SuppressWarnings("unchecked") + @Override + public Enumeration<String> getParameterNames() { + return new Enumeration<String>() { + private Enumeration<String> rawIterator = + rawRequest.getParameterNames(); + @Override + public boolean hasMoreElements() { + return rawIterator.hasMoreElements(); + } + + @Override + public String nextElement() { + return HtmlQuoting.quoteHtmlChars(rawIterator.nextElement()); + } + }; + } + + /** + * Unquote the name and quote the value. + */ + @Override + public String getParameter(String name) { + return HtmlQuoting.quoteHtmlChars(rawRequest.getParameter + (HtmlQuoting.unquoteHtmlChars(name))); + } + + @Override + public String[] getParameterValues(String name) { + String unquoteName = HtmlQuoting.unquoteHtmlChars(name); + String[] unquoteValue = rawRequest.getParameterValues(unquoteName); + String[] result = new String[unquoteValue.length]; + for(int i=0; i < result.length; ++i) { + result[i] = HtmlQuoting.quoteHtmlChars(unquoteValue[i]); + } + return result; + } + + @SuppressWarnings("unchecked") + @Override + public Map<String, String[]> getParameterMap() { + Map<String, String[]> result = new HashMap<String,String[]>(); + Map<String, String[]> raw = rawRequest.getParameterMap(); + for (Map.Entry<String,String[]> item: raw.entrySet()) { + String[] rawValue = item.getValue(); + String[] cookedValue = new String[rawValue.length]; + for(int i=0; i< rawValue.length; ++i) { + cookedValue[i] = HtmlQuoting.quoteHtmlChars(rawValue[i]); + } + 
result.put(HtmlQuoting.quoteHtmlChars(item.getKey()), cookedValue); + } + return result; + } + + /** + * Quote the url so that users specifying the HOST HTTP header + * can't inject attacks. + */ + @Override + public StringBuffer getRequestURL(){ + String url = rawRequest.getRequestURL().toString(); + return new StringBuffer(HtmlQuoting.quoteHtmlChars(url)); + } + + /** + * Quote the server name so that users specifying the HOST HTTP header + * can't inject attacks. + */ + @Override + public String getServerName() { + return HtmlQuoting.quoteHtmlChars(rawRequest.getServerName()); + } + } + + @Override + public void init(FilterConfig config) throws ServletException { + } + + @Override + public void destroy() { + } + + @Override + public void doFilter(ServletRequest request, + ServletResponse response, + FilterChain chain + ) throws IOException, ServletException { + HttpServletRequestWrapper quoted = + new RequestQuoter((HttpServletRequest) request); + final HttpServletResponse httpResponse = (HttpServletResponse) response; + // set the default to UTF-8 so that we don't need to worry about IE7 + // choosing to interpret the special characters as UTF-7 + httpResponse.setContentType("text/html;charset=utf-8"); + chain.doFilter(quoted, response); + } + + } } diff --git a/src/test/org/apache/hadoop/http/TestHtmlQuoting.java b/src/test/org/apache/hadoop/http/TestHtmlQuoting.java new file mode 100644 index 0000000..1c3663a --- /dev/null +++ b/src/test/org/apache/hadoop/http/TestHtmlQuoting.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.http; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; + +public class TestHtmlQuoting { + + @Test public void testNeedsQuoting() throws Exception { + assertTrue(HtmlQuoting.needsQuoting("abcde>")); + assertTrue(HtmlQuoting.needsQuoting("<abcde")); + assertTrue(HtmlQuoting.needsQuoting("abc'de")); + assertTrue(HtmlQuoting.needsQuoting("abcde\"")); + assertTrue(HtmlQuoting.needsQuoting("&")); + assertFalse(HtmlQuoting.needsQuoting("")); + assertFalse(HtmlQuoting.needsQuoting("ab\ncdef")); + assertFalse(HtmlQuoting.needsQuoting(null)); + } + + @Test public void testQuoting() throws Exception { + assertEquals("ab&lt;cd", HtmlQuoting.quoteHtmlChars("ab<cd")); + assertEquals("ab&gt;", HtmlQuoting.quoteHtmlChars("ab>")); + assertEquals("&amp;&amp;&amp;", HtmlQuoting.quoteHtmlChars("&&&")); + assertEquals(" &apos;\n", HtmlQuoting.quoteHtmlChars(" '\n")); + assertEquals("&quot;", HtmlQuoting.quoteHtmlChars("\"")); + assertEquals(null, HtmlQuoting.quoteHtmlChars(null)); + } + + private void runRoundTrip(String str) throws Exception { + assertEquals(str, + HtmlQuoting.unquoteHtmlChars(HtmlQuoting.quoteHtmlChars(str))); + } + + @Test public void testRoundtrip() throws Exception { + runRoundTrip(""); + runRoundTrip("<>&'\""); + runRoundTrip("ab>cd<ef&ghi'\""); + runRoundTrip("A string\n with no quotable chars in it!"); + runRoundTrip(null); + StringBuilder buffer = new StringBuilder(); + for(char ch=0; ch < 127; ++ch) { 
+ buffer.append(ch); + } + runRoundTrip(buffer.toString()); + } +} diff --git a/src/test/org/apache/hadoop/http/TestHttpServer.java b/src/test/org/apache/hadoop/http/TestHttpServer.java new file mode 100644 index 0000000..55cc92d --- /dev/null +++ b/src/test/org/apache/hadoop/http/TestHttpServer.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.http; + +import static org.junit.Assert.assertEquals; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.PrintStream; +import java.net.URL; +import java.util.Enumeration; +import java.util.Map; +import java.util.SortedSet; +import java.util.TreeSet; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class TestHttpServer { + private HttpServer server; + private URL baseUrl; + + @SuppressWarnings("serial") + public static class EchoMapServlet extends HttpServlet { + @SuppressWarnings("unchecked") + @Override + public void doGet(HttpServletRequest request, + HttpServletResponse response + ) throws ServletException, IOException { + PrintStream out = new PrintStream(response.getOutputStream()); + Map<String, String[]> params = request.getParameterMap(); + SortedSet<String> keys = new TreeSet(params.keySet()); + for(String key: keys) { + out.print(key); + out.print(':'); + String[] values = params.get(key); + if (values.length > 0) { + out.print(values[0]); + for(int i=1; i < values.length; ++i) { + out.print(','); + out.print(values[i]); + } + } + out.print('\n'); + } + out.close(); + } + } + + @SuppressWarnings("serial") + public static class EchoServlet extends HttpServlet { + @SuppressWarnings("unchecked") + @Override + public void doGet(HttpServletRequest request, + HttpServletResponse response + ) throws ServletException, IOException { + PrintStream out = new PrintStream(response.getOutputStream()); + SortedSet<String> sortedKeys = new TreeSet(); + Enumeration<String> keys = request.getParameterNames(); + while(keys.hasMoreElements()) { + sortedKeys.add(keys.nextElement()); + } + for(String key: sortedKeys) { + out.print(key); + out.print(':'); + 
out.print(request.getParameter(key)); + out.print('\n'); + } + out.close(); + } + } + + private String readOutput(URL url) throws IOException { + StringBuilder out = new StringBuilder(); + InputStream in = url.openConnection().getInputStream(); + byte[] buffer = new byte[64 * 1024]; + int len = in.read(buffer); + while (len > 0) { + out.append(new String(buffer, 0, len)); + len = in.read(buffer); + } + return out.toString(); + } + + @Before public void setup() throws Exception { + new File(System.getProperty("build.webapps", "build/webapps") + "/test" + ).mkdirs(); + server = new HttpServer("test", "0.0.0.0", 0, true); + server.addServlet("echo", "/echo", EchoServlet.class); + server.addServlet("echomap", "/echomap", EchoMapServlet.class); + server.start(); + int port = server.getPort(); + baseUrl = new URL("http://localhost:" + port + "/"); + } + + @After public void cleanup() throws Exception { + server.stop(); + } + + @Test public void testEcho() throws Exception { + assertEquals("a:b\nc:d\n", + readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); + assertEquals("a:b\nc&lt;:d\ne:&gt;\n", + readOutput(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); + } + + /** Test the echo map servlet that uses getParameterMap. */ + @Test public void testEchoMap() throws Exception { + assertEquals("a:b\nc:d\n", + readOutput(new URL(baseUrl, "/echomap?a=b&c=d"))); + assertEquals("a:b,&gt;\nc&lt;:d\n", + readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>"))); + } + +}
jaxlaw/hadoop-common
9c196ff55368fd232dbd8cd5976bfa900503a851
MAPREDUCE:1063 from https://issues.apache.org/jira/secure/attachment/12427976/M1063-y20-0.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 86aba0f..9da3034 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,476 +1,478 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. MAPREDUCE-1143. Fix running task counters to be updated correctly when speculative attempts are running for a TIP. (Rahul Kumar Singh via yhemanth) yahoo-hadoop-0.20.1-3195383002 MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. 
(Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-1124. Import Gridmix3 and Rumen. (cdouglas) + MAPREDUCE-1063. Document gridmix benchmark. (cdouglas) + yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. 
Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. 
Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. 
Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. 
Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. 
http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. 
http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. 
http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. 
Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/docs/src/documentation/content/xdocs/gridmix.xml b/src/docs/src/documentation/content/xdocs/gridmix.xml new file mode 100644 index 0000000..8b2edfb --- /dev/null +++ b/src/docs/src/documentation/content/xdocs/gridmix.xml @@ -0,0 +1,164 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> + +<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN" "http://forrest.apache.org/dtd/document-v20.dtd"> + +<document> + +<header> + <title>Gridmix</title> +</header> + +<body> + + <section> + <title>Overview</title> + + <p>Gridmix is a benchmark for live clusters. It submits a mix of synthetic + jobs, modeling a profile mined from production loads.</p> + + <p>There exist three versions of the Gridmix tool. This document discusses + the third (checked into contrib), distinct from the two checked into the + benchmarks subdirectory. While the first two versions of the tool included + stripped-down versions of common jobs, both were principally saturation + tools for stressing the framework at scale. 
In support of a broader range of + deployments and finer-tuned job mixes, this version of the tool will attempt + to model the resource profiles of production jobs to identify bottlenecks, + guide development, and serve as a replacement for the existing gridmix + benchmarks.</p> + + </section> + + <section id="usage"> + + <title>Usage</title> + + <p>To run Gridmix, one requires a job trace describing the job mix for a + given cluster. Such traces are typically genenerated by Rumen (see related + documentation). Gridmix also requires input data from which the synthetic + jobs will draw bytes. The input data need not be in any particular format, + as the synthetic jobs are currently binary readers. If one is running on a + new cluster, an optional step generating input data may precede the run.</p> + + <p>Basic command line usage:</p> +<source> + +bin/mapred org.apache.hadoop.mapred.gridmix.Gridmix [-generate &lt;MiB&gt;] &lt;iopath&gt; &lt;trace&gt; +</source> + + <p>The <code>-generate</code> parameter accepts standard units, e.g. + <code>100g</code> will generate 100 * 2<sup>30</sup> bytes. The + &lt;iopath&gt; parameter is the destination directory for generated and/or + the directory from which input data will be read. The &lt;trace&gt; + parameter is a path to a job trace. The following configuration parameters + are also accepted in the standard idiom, before other Gridmix + parameters.</p> + + <section> + <title>Configuration parameters</title> + <p></p> + <table> + <tr><th> Parameter </th><th> Description </th><th> Notes </th></tr> + <tr><td><code>gridmix.output.directory</code></td> + <td>The directory into which output will be written. If specified, the + <code>iopath</code> will be relative to this parameter.</td> + <td>The submitting user must have read/write access to this + directory. 
The user should also be mindful of any quota issues that + may arise during a run.</td></tr> + <tr><td><code>gridmix.client.submit.threads</code></td> + <td>The number of threads submitting jobs to the cluster. This also + controls how many splits will be loaded into memory at a given time, + pending the submit time in the trace.</td> + <td>Splits are pregenerated to hit submission deadlines, so + particularly dense traces may want more submitting threads. However, + storing splits in memory is reasonably expensive, so one should raise + this cautiously.</td></tr> + <tr><td><code>gridmix.client.pending.queue.depth</code></td> + <td>The depth of the queue of job descriptions awaiting split + generation.</td> + <td>The jobs read from the trace occupy a queue of this depth before + being processed by the submission threads. It is unusual to configure + this.</td></tr> + <tr><td><code>gridmix.min.key.length</code></td> + <td>The key size for jobs submitted to the cluster.</td> + <td>While this is clearly a job-specific, even task-specific property, + no data on key length is currently available. Since the intermediate + data are random, memcomparable data, not even the sort is likely + affected. It exists as a tunable as no default value is appropriate, + but future versions will likely replace it with trace data.</td></tr> + </table> + + </section> +</section> + +<section id="assumptions"> + + <title>Simplifying Assumptions</title> + + <p>Gridmix will be developed in stages, incorporating feedback and patches + from the community. Currently, its intent is to evaluate Map/Reduce and HDFS + performance and not the layers on top of them (i.e. the extensive lib and + subproject space). 
Given these two limitations, the following + characteristics of job load are not currently captured in job traces and + cannot be accurately reproduced in Gridmix.</p> + + <table> + <tr><th>Property</th><th>Notes</th></tr> + <tr><td>CPU usage</td><td>We have no data for per-task CPU usage, so we + cannot attempt even an approximation. Gridmix tasks are never CPU bound + independent of I/O, though this surely happens in practice.</td></tr> + <tr><td>Filesystem properties</td><td>No attempt is made to match block + sizes, namespace hierarchies, or any property of input, intermediate, or + output data other than the bytes/records consumed and emitted from a given + task. This implies that some of the most heavily used parts of the system- + the compression libraries, text processing, streaming, etc.- cannot be + meaningfully tested with the current implementation.</td></tr> + <tr><td>I/O rates</td><td>The rate at which records are consumed/emitted is + assumed to be limited only by the speed of the reader/writer and constant + throughout the task.</td></tr> + <tr><td>Memory profile</td><td>No data on tasks' memory usage over time is + available, though the max heap size is retained.</td></tr> + <tr><td>Skew</td><td>The records consumed and emitted to/from a given task + are assumed to follow observed averages, i.e. records will be more regular + than may be seen in the wild. 
Each map also generates a proportional + percentage of data for each reduce, so a job with unbalanced input will be + flattened.</td></tr> + <tr><td>Job failure</td><td>User code is assumed to be correct.</td></tr> + <tr><td>Job independence</td><td>The output or outcome of one job does not + affect when or whether a subsequent job will run.</td></tr> + </table> + +</section> + +<section> + + <title>Appendix</title> + + <p>Issues tracking the implementations of <a + href="https://issues.apache.org/jira/browse/HADOOP-2369">gridmix1</a>, <a + href="https://issues.apache.org/jira/browse/HADOOP-3770">gridmix2</a>, and + <a href="https://issues.apache.org/jira/browse/MAPREDUCE-776">gridmix3</a>. + Other issues tracking the development of Gridmix can be found by searching + the Map/Reduce <a + href="https://issues.apache.org/jira/browse/MAPREDUCE">JIRA</a></p> + +</section> + +</body> + +</document> diff --git a/src/docs/src/documentation/content/xdocs/site.xml b/src/docs/src/documentation/content/xdocs/site.xml index a5d49e0..53eb99e 100644 --- a/src/docs/src/documentation/content/xdocs/site.xml +++ b/src/docs/src/documentation/content/xdocs/site.xml @@ -1,280 +1,281 @@ <?xml version="1.0"?> <!-- Copyright 2002-2004 The Apache Software Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <!-- Forrest site.xml This file contains an outline of the site's information content. 
It is used to: - Generate the website menus (though these can be overridden - see docs) - Provide semantic, location-independent aliases for internal 'site:' URIs, eg <link href="site:changes"> links to changes.html (or ../changes.html if in subdir). - Provide aliases for external URLs in the external-refs section. Eg, <link href="ext:cocoon"> links to http://xml.apache.org/cocoon/ See http://forrest.apache.org/docs/linking.html for more info. --> <site label="Hadoop" href="" xmlns="http://apache.org/forrest/linkmap/1.0"> <docs label="Getting Started"> <overview label="Overview" href="index.html" /> <quickstart label="Quick Start" href="quickstart.html" /> <setup label="Cluster Setup" href="cluster_setup.html" /> <mapred label="Map/Reduce Tutorial" href="mapred_tutorial.html" /> </docs> <docs label="Programming Guides"> <commands label="Commands" href="commands_manual.html" /> <distcp label="DistCp" href="distcp.html" /> <native_lib label="Native Libraries" href="native_libraries.html" /> <streaming label="Streaming" href="streaming.html" /> <fair_scheduler label="Fair Scheduler" href="fair_scheduler.html"/> <cap_scheduler label="Capacity Scheduler" href="capacity_scheduler.html"/> <SLA label="Service Level Authorization" href="service_level_auth.html"/> <vaidya label="Vaidya" href="vaidya.html"/> <archives label="Archives" href="hadoop_archives.html"/> + <gridmix label="Gridmix" href="gridmix.html"/> </docs> <docs label="HDFS"> <hdfs_user label="User Guide" href="hdfs_user_guide.html" /> <hdfs_arch label="Architecture" href="hdfs_design.html" /> <hdfs_fs label="File System Shell Guide" href="hdfs_shell.html" /> <hdfs_perm label="Permissions Guide" href="hdfs_permissions_guide.html" /> <hdfs_quotas label="Quotas Guide" href="hdfs_quota_admin_guide.html" /> <hdfs_SLG label="Synthetic Load Generator Guide" href="SLG_user_guide.html" /> <hdfs_libhdfs label="C API libhdfs" href="libhdfs.html" /> </docs> <docs label="HOD"> <hod_user label="User Guide" 
href="hod_user_guide.html"/> <hod_admin label="Admin Guide" href="hod_admin_guide.html"/> <hod_config label="Config Guide" href="hod_config_guide.html"/> </docs> <docs label="Miscellaneous"> <api label="API Docs" href="ext:api/index" /> <jdiff label="API Changes" href="ext:jdiff/changes" /> <wiki label="Wiki" href="ext:wiki" /> <faq label="FAQ" href="ext:faq" /> <relnotes label="Release Notes" href="ext:relnotes" /> <changes label="Change Log" href="ext:changes" /> </docs> <external-refs> <site href="http://hadoop.apache.org/core/"/> <lists href="http://hadoop.apache.org/core/mailing_lists.html"/> <archive href="http://mail-archives.apache.org/mod_mbox/hadoop-core-commits/"/> <releases href="http://hadoop.apache.org/core/releases.html"> <download href="#Download" /> </releases> <jira href="http://hadoop.apache.org/core/issue_tracking.html"/> <wiki href="http://wiki.apache.org/hadoop/" /> <faq href="http://wiki.apache.org/hadoop/FAQ" /> <hadoop-default href="http://hadoop.apache.org/core/docs/current/hadoop-default.html" /> <core-default href="http://hadoop.apache.org/core/docs/current/core-default.html" /> <hdfs-default href="http://hadoop.apache.org/core/docs/current/hdfs-default.html" /> <mapred-default href="http://hadoop.apache.org/core/docs/current/mapred-default.html" /> <zlib href="http://www.zlib.net/" /> <gzip href="http://www.gzip.org/" /> <bzip href="http://www.bzip.org/" /> <cygwin href="http://www.cygwin.com/" /> <osx href="http://www.apple.com/macosx" /> <hod href=""> <cluster-resources href="http://www.clusterresources.com" /> <torque href="http://www.clusterresources.com/pages/products/torque-resource-manager.php" /> <torque-download href="http://www.clusterresources.com/downloads/torque/" /> <torque-docs href="http://www.clusterresources.com/pages/resources/documentation.php" /> <torque-wiki href="http://www.clusterresources.com/wiki/doku.php?id=torque:torque_wiki" /> <torque-mailing-list 
href="http://www.clusterresources.com/pages/resources/mailing-lists.php" /> <torque-basic-config href="http://www.clusterresources.com/wiki/doku.php?id=torque:1.2_basic_configuration" /> <torque-advanced-config href="http://www.clusterresources.com/wiki/doku.php?id=torque:1.3_advanced_configuration" /> <maui href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php"/> <python href="http://www.python.org" /> <twisted-python href="http://twistedmatrix.com/trac/" /> </hod> <relnotes href="releasenotes.html" /> <changes href="changes.html" /> <jdiff href="jdiff/"> <changes href="changes.html" /> </jdiff> <api href="api/"> <index href="index.html" /> <org href="org/"> <apache href="apache/"> <hadoop href="hadoop/"> <conf href="conf/"> <configuration href="Configuration.html"> <final_parameters href="#FinalParams" /> <get href="#get(java.lang.String, java.lang.String)" /> <set href="#set(java.lang.String, java.lang.String)" /> </configuration> </conf> <filecache href="filecache/"> <distributedcache href="DistributedCache.html"> <addarchivetoclasspath href="#addArchiveToClassPath(org.apache.hadoop.fs.Path,%20org.apache.hadoop.conf.Configuration)" /> <addfiletoclasspath href="#addFileToClassPath(org.apache.hadoop.fs.Path,%20org.apache.hadoop.conf.Configuration)" /> <addcachefile href="#addCacheFile(java.net.URI,%20org.apache.hadoop.conf.Configuration)" /> <addcachearchive href="#addCacheArchive(java.net.URI,%20org.apache.hadoop.conf.Configuration)" /> <setcachefiles href="#setCacheFiles(java.net.URI[],%20org.apache.hadoop.conf.Configuration)" /> <setcachearchives href="#setCacheArchives(java.net.URI[],%20org.apache.hadoop.conf.Configuration)" /> <createsymlink href="#createSymlink(org.apache.hadoop.conf.Configuration)" /> </distributedcache> </filecache> <fs href="fs/"> <filesystem href="FileSystem.html" /> </fs> <io href="io/"> <closeable href="Closeable.html"> <close href="#close()" /> </closeable> <sequencefile href="SequenceFile.html" /> 
<sequencefilecompressiontype href="SequenceFile.CompressionType.html"> <none href="#NONE" /> <record href="#RECORD" /> <block href="#BLOCK" /> </sequencefilecompressiontype> <writable href="Writable.html" /> <writablecomparable href="WritableComparable.html" /> <compress href="compress/"> <compressioncodec href="CompressionCodec.html" /> </compress> </io> <mapred href="mapred/"> <clusterstatus href="ClusterStatus.html" /> <counters href="Counters.html" /> <fileinputformat href="FileInputFormat.html"> <setinputpaths href="#setInputPaths(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path[])" /> <addinputpath href="#addInputPath(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path)" /> <setinputpathstring href="#setInputPaths(org.apache.hadoop.mapred.JobConf,%20java.lang.String)" /> <addinputpathstring href="#addInputPath(org.apache.hadoop.mapred.JobConf,%20java.lang.String)" /> </fileinputformat> <fileoutputformat href="FileOutputFormat.html"> <getoutputpath href="#getOutputPath(org.apache.hadoop.mapred.JobConf)" /> <getworkoutputpath href="#getWorkOutputPath(org.apache.hadoop.mapred.JobConf)" /> <setoutputpath href="#setOutputPath(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path)" /> <setcompressoutput href="#setCompressOutput(org.apache.hadoop.mapred.JobConf,%20boolean)" /> <setoutputcompressorclass href="#setOutputCompressorClass(org.apache.hadoop.mapred.JobConf,%20java.lang.Class)" /> </fileoutputformat> <filesplit href="FileSplit.html" /> <inputformat href="InputFormat.html" /> <inputsplit href="InputSplit.html" /> <isolationrunner href="IsolationRunner.html" /> <jobclient href="JobClient.html"> <runjob href="#runJob(org.apache.hadoop.mapred.JobConf)" /> <submitjob href="#submitJob(org.apache.hadoop.mapred.JobConf)" /> </jobclient> <jobconf href="JobConf.html"> <setnummaptasks href="#setNumMapTasks(int)" /> <setnumreducetasks href="#setNumReduceTasks(int)" /> <setoutputkeycomparatorclass 
href="#setOutputKeyComparatorClass(java.lang.Class)" /> <setoutputvaluegroupingcomparator href="#setOutputValueGroupingComparator(java.lang.Class)" /> <setcombinerclass href="#setCombinerClass(java.lang.Class)" /> <setmapdebugscript href="#setMapDebugScript(java.lang.String)" /> <setreducedebugscript href="#setReduceDebugScript(java.lang.String)" /> <setmapspeculativeexecution href="#setMapSpeculativeExecution(boolean)" /> <setreducespeculativeexecution href="#setReduceSpeculativeExecution(boolean)" /> <setmaxmapattempts href="#setMaxMapAttempts(int)" /> <setmaxreduceattempts href="#setMaxReduceAttempts(int)" /> <setmaxmaptaskfailurespercent href="#setMaxMapTaskFailuresPercent(int)" /> <setmaxreducetaskfailurespercent href="#setMaxReduceTaskFailuresPercent(int)" /> <setjobendnotificationuri href="#setJobEndNotificationURI(java.lang.String)" /> <setcompressmapoutput href="#setCompressMapOutput(boolean)" /> <setmapoutputcompressorclass href="#setMapOutputCompressorClass(java.lang.Class)" /> <setprofileenabled href="#setProfileEnabled(boolean)" /> <setprofiletaskrange href="#setProfileTaskRange(boolean,%20java.lang.String)" /> <setprofileparams href="#setProfileParams(java.lang.String)" /> <setnumtaskstoexecuteperjvm href="#setNumTasksToExecutePerJvm(int)" /> <setqueuename href="#setQueueName(java.lang.String)" /> <getjoblocaldir href="#getJobLocalDir()" /> <getjar href="#getJar()" /> </jobconf> <jobconfigurable href="JobConfigurable.html"> <configure href="#configure(org.apache.hadoop.mapred.JobConf)" /> </jobconfigurable> <jobcontrol href="jobcontrol/"> <package-summary href="package-summary.html" /> </jobcontrol> <mapper href="Mapper.html"> <map href="#map(K1, V1, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)" /> </mapper> <outputcollector href="OutputCollector.html"> <collect href="#collect(K, V)" /> </outputcollector> <outputcommitter href="OutputCommitter.html" /> <outputformat href="OutputFormat.html" /> <outputlogfilter 
href="OutputLogFilter.html" /> <sequencefileoutputformat href="SequenceFileOutputFormat.html"> <setoutputcompressiontype href="#setOutputCompressionType(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.io.SequenceFile.CompressionType)" /> </sequencefileoutputformat> <partitioner href="Partitioner.html" /> <recordreader href="RecordReader.html" /> <recordwriter href="RecordWriter.html" /> <reducer href="Reducer.html"> <reduce href="#reduce(K2, java.util.Iterator, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)" /> </reducer> <reporter href="Reporter.html"> <incrcounterEnum href="#incrCounter(java.lang.Enum, long)" /> <incrcounterString href="#incrCounter(java.lang.String, java.lang.String, long amount)" /> </reporter> <runningjob href="RunningJob.html" /> <skipbadrecords href="SkipBadRecords.html"> <setmappermaxskiprecords href="#setMapperMaxSkipRecords(org.apache.hadoop.conf.Configuration, long)"/> <setreducermaxskipgroups href="#setReducerMaxSkipGroups(org.apache.hadoop.conf.Configuration, long)"/> <setattemptsTostartskipping href="#setAttemptsToStartSkipping(org.apache.hadoop.conf.Configuration, int)"/> <setskipoutputpath href="#setSkipOutputPath(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path)"/> <counter_map_processed_records href="#COUNTER_MAP_PROCESSED_RECORDS"/> <counter_reduce_processed_groups href="#COUNTER_REDUCE_PROCESSED_GROUPS"/> </skipbadrecords> <textinputformat href="TextInputFormat.html" /> <textoutputformat href="TextOutputFormat.html" /> <lib href="lib/"> <package-summary href="package-summary.html" /> <hashpartitioner href="HashPartitioner.html" /> <keyfieldbasedpartitioner href="KeyFieldBasedPartitioner.html" /> <keyfieldbasedcomparator href="KeyFieldBasedComparator.html" /> <aggregate href="aggregate/"> <package-summary href="package-summary.html" /> </aggregate> </lib> <pipes href="pipes/"> <package-summary href="package-summary.html" /> </pipes> </mapred> <net href="net/"> <dnstoswitchmapping 
href="DNSToSwitchMapping.html"> <resolve href="#resolve(java.util.List)" /> </dnstoswitchmapping> </net> <streaming href="streaming/"> <package-summary href="package-summary.html" /> </streaming> <util href="util/"> <genericoptionsparser href="GenericOptionsParser.html" /> <progress href="Progress.html" /> <tool href="Tool.html" /> <toolrunner href="ToolRunner.html"> <run href="#run(org.apache.hadoop.util.Tool, java.lang.String[])" /> </toolrunner> </util> </hadoop> </apache> </org> </api> </external-refs> </site>
jaxlaw/hadoop-common
4da256a8b5101c72dc41811251486f657d732210
MAPREDUCE:1143 from https://issues.apache.org/jira/secure/attachment/12427898/MAPRED-1143-ydist-9.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 08f3cf4..b091ebc 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,470 +1,474 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. + MAPREDUCE-1143. Fix running task counters to be updated correctly + when speculative attempts are running for a TIP. + (Rahul Kumar Singh via yhemanth) + yahoo-hadoop-0.20.1-3195383002 MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. (Suhas Gogate via acmurthy) HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. 
(Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. 
Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. 
(mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. 
Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. 
yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. 
http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. 
http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. 
http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. 
Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/mapred/org/apache/hadoop/mapred/JobInProgress.java b/src/mapred/org/apache/hadoop/mapred/JobInProgress.java index 5568df2..71e153c 100644 --- a/src/mapred/org/apache/hadoop/mapred/JobInProgress.java +++ b/src/mapred/org/apache/hadoop/mapred/JobInProgress.java @@ -327,1121 +327,1122 @@ public class JobInProgress { this.reduceFailuresPercent = conf.getMaxReduceTaskFailuresPercent(); MetricsContext metricsContext = MetricsUtil.getContext("mapred"); this.jobMetrics = MetricsUtil.createRecord(metricsContext, "job"); this.jobMetrics.setTag("user", conf.getUser()); this.jobMetrics.setTag("sessionId", conf.getSessionId()); this.jobMetrics.setTag("jobName", conf.getJobName()); this.jobMetrics.setTag("jobId", jobid.toString()); hasSpeculativeMaps = conf.getMapSpeculativeExecution(); hasSpeculativeReduces = conf.getReduceSpeculativeExecution(); this.maxLevel = jobtracker.getNumTaskCacheLevels(); this.anyCacheLevel = this.maxLevel+1; this.nonLocalMaps = new LinkedList<TaskInProgress>(); this.nonLocalRunningMaps = new LinkedHashSet<TaskInProgress>(); this.runningMapCache = new IdentityHashMap<Node, Set<TaskInProgress>>(); this.nonRunningReduces = new LinkedList<TaskInProgress>(); this.runningReduces = new LinkedHashSet<TaskInProgress>(); this.resourceEstimator = new ResourceEstimator(this); } /** * Called periodically by JobTrackerMetrics to update the metrics for * this job. 
*/ public void updateMetrics() { Counters counters = getCounters(); for (Counters.Group group : counters) { jobMetrics.setTag("group", group.getDisplayName()); for (Counters.Counter counter : group) { jobMetrics.setTag("counter", counter.getDisplayName()); jobMetrics.setMetric("value", (float) counter.getCounter()); jobMetrics.update(); } } } /** * Called when the job is complete */ public void cleanUpMetrics() { // Deletes all metric data for this job (in internal table in metrics package). // This frees up RAM and possibly saves network bandwidth, since otherwise // the metrics package implementation might continue to send these job metrics // after the job has finished. jobMetrics.removeTag("group"); jobMetrics.removeTag("counter"); jobMetrics.remove(); } private void printCache (Map<Node, List<TaskInProgress>> cache) { LOG.info("The taskcache info:"); for (Map.Entry<Node, List<TaskInProgress>> n : cache.entrySet()) { List <TaskInProgress> tips = n.getValue(); LOG.info("Cached TIPs on node: " + n.getKey()); for (TaskInProgress tip : tips) { LOG.info("tip : " + tip.getTIPId()); } } } private Map<Node, List<TaskInProgress>> createCache( JobClient.RawSplit[] splits, int maxLevel) { Map<Node, List<TaskInProgress>> cache = new IdentityHashMap<Node, List<TaskInProgress>>(maxLevel); for (int i = 0; i < splits.length; i++) { String[] splitLocations = splits[i].getLocations(); if (splitLocations.length == 0) { nonLocalMaps.add(maps[i]); continue; } for(String host: splitLocations) { Node node = jobtracker.resolveAndAddToTopology(host); LOG.info("tip:" + maps[i].getTIPId() + " has split on node:" + node); for (int j = 0; j < maxLevel; j++) { List<TaskInProgress> hostMaps = cache.get(node); if (hostMaps == null) { hostMaps = new ArrayList<TaskInProgress>(); cache.put(node, hostMaps); hostMaps.add(maps[i]); } //check whether the hostMaps already contains an entry for a TIP //This will be true for nodes that are racks and multiple nodes in //the rack contain the input for a 
tip. Note that if it already //exists in the hostMaps, it must be the last element there since //we process one TIP at a time sequentially in the split-size order if (hostMaps.get(hostMaps.size() - 1) != maps[i]) { hostMaps.add(maps[i]); } node = node.getParent(); } } } return cache; } /** * Check if the job has been initialized. * @return <code>true</code> if the job has been initialized, * <code>false</code> otherwise */ public boolean inited() { return tasksInited.get(); } boolean hasRestarted() { return restartCount > 0; } /** * Get the number of slots required to run a single map task-attempt. * @return the number of slots required to run a single map task-attempt */ synchronized int getNumSlotsPerMap() { return numSlotsPerMap; } /** * Set the number of slots required to run a single map task-attempt. * This is typically set by schedulers which support high-ram jobs. * @param slots the number of slots required to run a single map task-attempt */ synchronized void setNumSlotsPerMap(int numSlotsPerMap) { this.numSlotsPerMap = numSlotsPerMap; } /** * Get the number of slots required to run a single reduce task-attempt. * @return the number of slots required to run a single reduce task-attempt */ synchronized int getNumSlotsPerReduce() { return numSlotsPerReduce; } /** * Set the number of slots required to run a single reduce task-attempt. * This is typically set by schedulers which support high-ram jobs. * @param slots the number of slots required to run a single reduce * task-attempt */ synchronized void setNumSlotsPerReduce(int numSlotsPerReduce) { this.numSlotsPerReduce = numSlotsPerReduce; } /** * Construct the splits, etc. This is invoked from an async * thread so that split-computation doesn't block anyone. 
*/ public synchronized void initTasks() throws IOException, KillInterruptedException { if (tasksInited.get() || isComplete()) { return; } synchronized(jobInitKillStatus){ if(jobInitKillStatus.killed || jobInitKillStatus.initStarted) { return; } jobInitKillStatus.initStarted = true; } LOG.info("Initializing " + jobId); // log job info JobHistory.JobInfo.logSubmitted(getJobID(), conf, jobFile.toString(), this.startTime, hasRestarted()); // log the job priority setPriority(this.priority); // // read input splits and create a map per a split // String jobFile = profile.getJobFile(); Path sysDir = new Path(this.jobtracker.getSystemDir()); FileSystem fs = sysDir.getFileSystem(conf); DataInputStream splitFile = fs.open(new Path(conf.get("mapred.job.split.file"))); JobClient.RawSplit[] splits; try { splits = JobClient.readSplitFile(splitFile); } finally { splitFile.close(); } numMapTasks = splits.length; // if the number of splits is larger than a configured value // then fail the job. int maxTasks = jobtracker.getMaxTasksPerJob(); if (maxTasks > 0 && numMapTasks + numReduceTasks > maxTasks) { throw new IOException( "The number of tasks for this job " + (numMapTasks + numReduceTasks) + " exceeds the configured limit " + maxTasks); } jobtracker.getInstrumentation().addWaitingMaps(getJobID(), numMapTasks); jobtracker.getInstrumentation().addWaitingReduces(getJobID(), numReduceTasks); maps = new TaskInProgress[numMapTasks]; for(int i=0; i < numMapTasks; ++i) { inputLength += splits[i].getDataLength(); maps[i] = new TaskInProgress(jobId, jobFile, splits[i], jobtracker, conf, this, i, numSlotsPerMap); } LOG.info("Input size for job " + jobId + " = " + inputLength + ". 
Number of splits = " + splits.length); if (numMapTasks > 0) { nonRunningMapCache = createCache(splits, maxLevel); } // set the launch time this.launchTime = System.currentTimeMillis(); // // Create reduce tasks // this.reduces = new TaskInProgress[numReduceTasks]; for (int i = 0; i < numReduceTasks; i++) { reduces[i] = new TaskInProgress(jobId, jobFile, numMapTasks, i, jobtracker, conf, this, numSlotsPerReduce); nonRunningReduces.add(reduces[i]); } // Calculate the minimum number of maps to be complete before // we should start scheduling reduces completedMapsForReduceSlowstart = (int)Math.ceil( (conf.getFloat("mapred.reduce.slowstart.completed.maps", DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART) * numMapTasks)); // create cleanup two cleanup tips, one map and one reduce. cleanup = new TaskInProgress[2]; // cleanup map tip. This map doesn't use any splits. Just assign an empty // split. JobClient.RawSplit emptySplit = new JobClient.RawSplit(); cleanup[0] = new TaskInProgress(jobId, jobFile, emptySplit, jobtracker, conf, this, numMapTasks, 1); cleanup[0].setJobCleanupTask(); // cleanup reduce tip. cleanup[1] = new TaskInProgress(jobId, jobFile, numMapTasks, numReduceTasks, jobtracker, conf, this, 1); cleanup[1].setJobCleanupTask(); // create two setup tips, one map and one reduce. setup = new TaskInProgress[2]; // setup map tip. This map doesn't use any split. Just assign an empty // split. setup[0] = new TaskInProgress(jobId, jobFile, emptySplit, jobtracker, conf, this, numMapTasks + 1, 1); setup[0].setJobSetupTask(); // setup reduce tip. 
setup[1] = new TaskInProgress(jobId, jobFile, numMapTasks, numReduceTasks + 1, jobtracker, conf, this, 1); setup[1].setJobSetupTask(); synchronized(jobInitKillStatus){ jobInitKillStatus.initDone = true; if(jobInitKillStatus.killed) { throw new KillInterruptedException("Job " + jobId + " killed in init"); } } tasksInited.set(true); JobHistory.JobInfo.logInited(profile.getJobID(), this.launchTime, numMapTasks, numReduceTasks); } ///////////////////////////////////////////////////// // Accessors for the JobInProgress ///////////////////////////////////////////////////// public JobProfile getProfile() { return profile; } public JobStatus getStatus() { return status; } public synchronized long getLaunchTime() { return launchTime; } public long getStartTime() { return startTime; } public long getFinishTime() { return finishTime; } public int desiredMaps() { return numMapTasks; } public synchronized int finishedMaps() { return finishedMapTasks; } public int desiredReduces() { return numReduceTasks; } public synchronized int runningMaps() { return runningMapTasks; } public synchronized int runningReduces() { return runningReduceTasks; } public synchronized int finishedReduces() { return finishedReduceTasks; } public synchronized int pendingMaps() { return numMapTasks - runningMapTasks - failedMapTIPs - finishedMapTasks + speculativeMapTasks; } public synchronized int pendingReduces() { return numReduceTasks - runningReduceTasks - failedReduceTIPs - finishedReduceTasks + speculativeReduceTasks; } public synchronized int getNumSlotsPerTask(TaskType taskType) { if (taskType == TaskType.MAP) { return numSlotsPerMap; } else if (taskType == TaskType.REDUCE) { return numSlotsPerReduce; } else { return 1; } } public JobPriority getPriority() { return this.priority; } public void setPriority(JobPriority priority) { if(priority == null) { this.priority = JobPriority.NORMAL; } else { this.priority = priority; } synchronized (this) { status.setJobPriority(priority); } // log and 
change to the job's priority JobHistory.JobInfo.logJobPriority(jobId, priority); } // Update the job start/launch time (upon restart) and log to history synchronized void updateJobInfo(long startTime, long launchTime) { // log and change to the job's start/launch time this.startTime = startTime; this.launchTime = launchTime; JobHistory.JobInfo.logJobInfo(jobId, startTime, launchTime); } /** * Get the number of times the job has restarted */ int getNumRestarts() { return restartCount; } long getInputLength() { return inputLength; } boolean isCleanupLaunched() { return launchedCleanup; } boolean isSetupLaunched() { return launchedSetup; } /** * Get the list of map tasks * @return the raw array of maps for this job */ TaskInProgress[] getMapTasks() { return maps; } /** * Get the list of cleanup tasks * @return the array of cleanup tasks for the job */ TaskInProgress[] getCleanupTasks() { return cleanup; } /** * Get the list of setup tasks * @return the array of setup tasks for the job */ TaskInProgress[] getSetupTasks() { return setup; } /** * Get the list of reduce tasks * @return the raw array of reduce tasks for this job */ TaskInProgress[] getReduceTasks() { return reduces; } /** * Return the nonLocalRunningMaps * @return */ Set<TaskInProgress> getNonLocalRunningMaps() { return nonLocalRunningMaps; } /** * Return the runningMapCache * @return */ Map<Node, Set<TaskInProgress>> getRunningMapCache() { return runningMapCache; } /** * Return runningReduces * @return */ Set<TaskInProgress> getRunningReduces() { return runningReduces; } /** * Get the job configuration * @return the job's configuration */ JobConf getJobConf() { return conf; } /** * Get the job user/owner * @return the job's user/owner */ String getUser() { return user; } /** * Return a vector of completed TaskInProgress objects */ public synchronized Vector<TaskInProgress> reportTasksInProgress(boolean shouldBeMap, boolean shouldBeComplete) { Vector<TaskInProgress> results = new Vector<TaskInProgress>(); 
TaskInProgress tips[] = null; if (shouldBeMap) { tips = maps; } else { tips = reduces; } for (int i = 0; i < tips.length; i++) { if (tips[i].isComplete() == shouldBeComplete) { results.add(tips[i]); } } return results; } /** * Return a vector of cleanup TaskInProgress objects */ public synchronized Vector<TaskInProgress> reportCleanupTIPs( boolean shouldBeComplete) { Vector<TaskInProgress> results = new Vector<TaskInProgress>(); for (int i = 0; i < cleanup.length; i++) { if (cleanup[i].isComplete() == shouldBeComplete) { results.add(cleanup[i]); } } return results; } /** * Return a vector of setup TaskInProgress objects */ public synchronized Vector<TaskInProgress> reportSetupTIPs( boolean shouldBeComplete) { Vector<TaskInProgress> results = new Vector<TaskInProgress>(); for (int i = 0; i < setup.length; i++) { if (setup[i].isComplete() == shouldBeComplete) { results.add(setup[i]); } } return results; } //////////////////////////////////////////////////// // Status update methods //////////////////////////////////////////////////// /** * Assuming {@link JobTracker} is locked on entry. */ public synchronized void updateTaskStatus(TaskInProgress tip, TaskStatus status) { double oldProgress = tip.getProgress(); // save old progress boolean wasRunning = tip.isRunning(); boolean wasComplete = tip.isComplete(); boolean wasPending = tip.isOnlyCommitPending(); TaskAttemptID taskid = status.getTaskID(); - + boolean wasAttemptRunning = tip.isAttemptRunning(taskid); + // If the TIP is already completed and the task reports as SUCCEEDED then // mark the task as KILLED. // In case of task with no promotion the task tracker will mark the task // as SUCCEEDED. // User has requested to kill the task, but TT reported SUCCEEDED, // mark the task KILLED. 
if ((wasComplete || tip.wasKilled(taskid)) && (status.getRunState() == TaskStatus.State.SUCCEEDED)) { status.setRunState(TaskStatus.State.KILLED); } // If the job is complete and a task has just reported its // state as FAILED_UNCLEAN/KILLED_UNCLEAN, // make the task's state FAILED/KILLED without launching cleanup attempt. // Note that if task is already a cleanup attempt, // we don't change the state to make sure the task gets a killTaskAction if ((this.isComplete() || jobFailed || jobKilled) && !tip.isCleanupAttempt(taskid)) { if (status.getRunState() == TaskStatus.State.FAILED_UNCLEAN) { status.setRunState(TaskStatus.State.FAILED); } else if (status.getRunState() == TaskStatus.State.KILLED_UNCLEAN) { status.setRunState(TaskStatus.State.KILLED); } } boolean change = tip.updateStatus(status); if (change) { TaskStatus.State state = status.getRunState(); // get the TaskTrackerStatus where the task ran TaskTracker taskTracker = this.jobtracker.getTaskTracker(tip.machineWhereTaskRan(taskid)); TaskTrackerStatus ttStatus = (taskTracker == null) ? 
null : taskTracker.getStatus(); String httpTaskLogLocation = null; if (null != ttStatus){ String host; if (NetUtils.getStaticResolution(ttStatus.getHost()) != null) { host = NetUtils.getStaticResolution(ttStatus.getHost()); } else { host = ttStatus.getHost(); } httpTaskLogLocation = "http://" + host + ":" + ttStatus.getHttpPort(); //+ "/tasklog?plaintext=true&taskid=" + status.getTaskID(); } TaskCompletionEvent taskEvent = null; if (state == TaskStatus.State.SUCCEEDED) { taskEvent = new TaskCompletionEvent( taskCompletionEventTracker, taskid, tip.idWithinJob(), status.getIsMap() && !tip.isJobCleanupTask() && !tip.isJobSetupTask(), TaskCompletionEvent.Status.SUCCEEDED, httpTaskLogLocation ); taskEvent.setTaskRunTime((int)(status.getFinishTime() - status.getStartTime())); tip.setSuccessEventNumber(taskCompletionEventTracker); } else if (state == TaskStatus.State.COMMIT_PENDING) { // If it is the first attempt reporting COMMIT_PENDING // ask the task to commit. if (!wasComplete && !wasPending) { tip.doCommit(taskid); } return; } else if (state == TaskStatus.State.FAILED_UNCLEAN || state == TaskStatus.State.KILLED_UNCLEAN) { tip.incompleteSubTask(taskid, this.status); // add this task, to be rescheduled as cleanup attempt if (tip.isMapTask()) { mapCleanupTasks.add(taskid); } else { reduceCleanupTasks.add(taskid); } // Remove the task entry from jobtracker jobtracker.removeTaskEntry(taskid); } //For a failed task update the JT datastructures. else if (state == TaskStatus.State.FAILED || state == TaskStatus.State.KILLED) { // Get the event number for the (possibly) previously successful // task. 
If there exists one, then set that status to OBSOLETE int eventNumber; if ((eventNumber = tip.getSuccessEventNumber()) != -1) { TaskCompletionEvent t = this.taskCompletionEvents.get(eventNumber); if (t.getTaskAttemptId().equals(taskid)) t.setTaskStatus(TaskCompletionEvent.Status.OBSOLETE); } // Tell the job to fail the relevant task failedTask(tip, taskid, status, taskTracker, - wasRunning, wasComplete); + wasRunning, wasComplete, wasAttemptRunning); // Did the task failure lead to tip failure? TaskCompletionEvent.Status taskCompletionStatus = (state == TaskStatus.State.FAILED ) ? TaskCompletionEvent.Status.FAILED : TaskCompletionEvent.Status.KILLED; if (tip.isFailed()) { taskCompletionStatus = TaskCompletionEvent.Status.TIPFAILED; } taskEvent = new TaskCompletionEvent(taskCompletionEventTracker, taskid, tip.idWithinJob(), status.getIsMap() && !tip.isJobCleanupTask() && !tip.isJobSetupTask(), taskCompletionStatus, httpTaskLogLocation ); } // Add the 'complete' task i.e. successful/failed // It _is_ safe to add the TaskCompletionEvent.Status.SUCCEEDED // *before* calling TIP.completedTask since: // a. One and only one task of a TIP is declared as a SUCCESS, the // other (speculative tasks) are marked KILLED by the TaskCommitThread // b. TIP.completedTask *does not* throw _any_ exception at all. if (taskEvent != null) { this.taskCompletionEvents.add(taskEvent); taskCompletionEventTracker++; JobTrackerStatistics.TaskTrackerStat ttStat = jobtracker. 
getStatistics().getTaskTrackerStat(tip.machineWhereTaskRan(taskid)); if(ttStat != null) { // ttStat can be null in case of lost tracker ttStat.incrTotalTasks(); } if (state == TaskStatus.State.SUCCEEDED) { completedTask(tip, status); if(ttStat != null) { ttStat.incrSucceededTasks(); } } } } // // Update JobInProgress status // if(LOG.isDebugEnabled()) { LOG.debug("Taking progress for " + tip.getTIPId() + " from " + oldProgress + " to " + tip.getProgress()); } if (!tip.isJobCleanupTask() && !tip.isJobSetupTask()) { double progressDelta = tip.getProgress() - oldProgress; if (tip.isMapTask()) { this.status.setMapProgress((float) (this.status.mapProgress() + progressDelta / maps.length)); } else { this.status.setReduceProgress((float) (this.status.reduceProgress() + (progressDelta / reduces.length))); } } } String getHistoryFile() { return historyFile; } synchronized void setHistoryFile(String file) { this.historyFile = file; } boolean isHistoryFileCopied() { return historyFileCopied; } synchronized void setHistoryFileCopied() { this.historyFileCopied = true; } /** * Returns the job-level counters. * * @return the job-level counters. */ public synchronized Counters getJobCounters() { return jobCounters; } /** * Returns map phase counters by summing over all map tasks in progress. */ public synchronized Counters getMapCounters() { return incrementTaskCounters(new Counters(), maps); } /** * Returns map phase counters by summing over all map tasks in progress. */ public synchronized Counters getReduceCounters() { return incrementTaskCounters(new Counters(), reduces); } /** * Returns the total job counters, by adding together the job, * the map and the reduce counters. */ public synchronized Counters getCounters() { Counters result = new Counters(); result.incrAllCounters(getJobCounters()); incrementTaskCounters(result, maps); return incrementTaskCounters(result, reduces); } /** * Increments the counters with the counters from each task. 
* @param counters the counters to increment * @param tips the tasks to add in to counters * @return counters the same object passed in as counters */ private Counters incrementTaskCounters(Counters counters, TaskInProgress[] tips) { for (TaskInProgress tip : tips) { counters.incrAllCounters(tip.getCounters()); } return counters; } ///////////////////////////////////////////////////// // Create/manage tasks ///////////////////////////////////////////////////// /** * Return a MapTask, if appropriate, to run on the given tasktracker */ public synchronized Task obtainNewMapTask(TaskTrackerStatus tts, int clusterSize, int numUniqueHosts ) throws IOException { if (status.getRunState() != JobStatus.RUNNING) { LOG.info("Cannot create task split for " + profile.getJobID()); return null; } int target = findNewMapTask(tts, clusterSize, numUniqueHosts, anyCacheLevel, status.mapProgress()); if (target == -1) { return null; } Task result = maps[target].getTaskToRun(tts.getTrackerName()); if (result != null) { addRunningTaskToTIP(maps[target], result.getTaskID(), tts, true); } return result; } /* * Return task cleanup attempt if any, to run on a given tracker */ public Task obtainTaskCleanupTask(TaskTrackerStatus tts, boolean isMapSlot) throws IOException { if (!tasksInited.get()) { return null; } synchronized (this) { if (this.status.getRunState() != JobStatus.RUNNING || jobFailed || jobKilled) { return null; } String taskTracker = tts.getTrackerName(); if (!shouldRunOnTaskTracker(taskTracker)) { return null; } TaskAttemptID taskid = null; TaskInProgress tip = null; if (isMapSlot) { if (!mapCleanupTasks.isEmpty()) { taskid = mapCleanupTasks.remove(0); tip = maps[taskid.getTaskID().getId()]; } } else { if (!reduceCleanupTasks.isEmpty()) { taskid = reduceCleanupTasks.remove(0); tip = reduces[taskid.getTaskID().getId()]; } } if (tip != null) { return tip.addRunningTask(taskid, taskTracker, true); } return null; } } public synchronized Task obtainNewLocalMapTask(TaskTrackerStatus 
tts, int clusterSize, int numUniqueHosts) throws IOException { if (!tasksInited.get()) { LOG.info("Cannot create task split for " + profile.getJobID()); return null; } int target = findNewMapTask(tts, clusterSize, numUniqueHosts, maxLevel, status.mapProgress()); if (target == -1) { return null; } Task result = maps[target].getTaskToRun(tts.getTrackerName()); if (result != null) { addRunningTaskToTIP(maps[target], result.getTaskID(), tts, true); } return result; } public synchronized Task obtainNewNonLocalMapTask(TaskTrackerStatus tts, int clusterSize, int numUniqueHosts) throws IOException { if (!tasksInited.get()) { LOG.info("Cannot create task split for " + profile.getJobID()); return null; } int target = findNewMapTask(tts, clusterSize, numUniqueHosts, NON_LOCAL_CACHE_LEVEL, status.mapProgress()); if (target == -1) { return null; } Task result = maps[target].getTaskToRun(tts.getTrackerName()); if (result != null) { addRunningTaskToTIP(maps[target], result.getTaskID(), tts, true); } return result; } /** * Return a CleanupTask, if appropriate, to run on the given tasktracker * */ public Task obtainJobCleanupTask(TaskTrackerStatus tts, int clusterSize, int numUniqueHosts, boolean isMapSlot ) throws IOException { if(!tasksInited.get()) { return null; } synchronized(this) { if (!canLaunchJobCleanupTask()) { return null; } String taskTracker = tts.getTrackerName(); // Update the last-known clusterSize this.clusterSize = clusterSize; if (!shouldRunOnTaskTracker(taskTracker)) { return null; } List<TaskInProgress> cleanupTaskList = new ArrayList<TaskInProgress>(); if (isMapSlot) { cleanupTaskList.add(cleanup[0]); } else { cleanupTaskList.add(cleanup[1]); } TaskInProgress tip = findTaskFromList(cleanupTaskList, tts, numUniqueHosts, false); if (tip == null) { return null; } // Now launch the cleanupTask Task result = tip.getTaskToRun(tts.getTrackerName()); if (result != null) { addRunningTaskToTIP(tip, result.getTaskID(), tts, true); if (jobFailed) { 
result.setJobCleanupTaskState (org.apache.hadoop.mapreduce.JobStatus.State.FAILED); } else if (jobKilled) { result.setJobCleanupTaskState (org.apache.hadoop.mapreduce.JobStatus.State.KILLED); } else { result.setJobCleanupTaskState (org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED); } } return result; } } /** * Check whether cleanup task can be launched for the job. * * Cleanup task can be launched if it is not already launched * or job is Killed * or all maps and reduces are complete * @return true/false */ private synchronized boolean canLaunchJobCleanupTask() { // check if the job is running if (status.getRunState() != JobStatus.RUNNING && status.getRunState() != JobStatus.PREP) { return false; } // check if cleanup task has been launched already or if setup isn't // launched already. The later check is useful when number of maps is // zero. if (launchedCleanup || !isSetupFinished()) { return false; } // check if job has failed or killed if (jobKilled || jobFailed) { return true; } // Check if all maps and reducers have finished. 
boolean launchCleanupTask = ((finishedMapTasks + failedMapTIPs) == (numMapTasks)); if (launchCleanupTask) { launchCleanupTask = ((finishedReduceTasks + failedReduceTIPs) == numReduceTasks); } return launchCleanupTask; } /** * Return a SetupTask, if appropriate, to run on the given tasktracker * */ public Task obtainJobSetupTask(TaskTrackerStatus tts, int clusterSize, int numUniqueHosts, boolean isMapSlot ) throws IOException { if(!tasksInited.get()) { return null; } synchronized(this) { if (!canLaunchSetupTask()) { return null; } String taskTracker = tts.getTrackerName(); // Update the last-known clusterSize this.clusterSize = clusterSize; if (!shouldRunOnTaskTracker(taskTracker)) { return null; } List<TaskInProgress> setupTaskList = new ArrayList<TaskInProgress>(); if (isMapSlot) { setupTaskList.add(setup[0]); } else { setupTaskList.add(setup[1]); } TaskInProgress tip = findTaskFromList(setupTaskList, tts, numUniqueHosts, false); if (tip == null) { return null; } // Now launch the setupTask Task result = tip.getTaskToRun(tts.getTrackerName()); if (result != null) { addRunningTaskToTIP(tip, result.getTaskID(), tts, true); } return result; } } public synchronized boolean scheduleReduces() { return finishedMapTasks >= completedMapsForReduceSlowstart; } /** * Check whether setup task can be launched for the job. * * Setup task can be launched after the tasks are inited * and Job is in PREP state * and if it is not already launched * or job is not Killed/Failed * @return true/false */ private synchronized boolean canLaunchSetupTask() { return (tasksInited.get() && status.getRunState() == JobStatus.PREP && !launchedSetup && !jobKilled && !jobFailed); } /** * Return a ReduceTask, if appropriate, to run on the given tasktracker. * We don't have cache-sensitivity for reduce tasks, as they * work on temporary MapRed files. 
*/ public synchronized Task obtainNewReduceTask(TaskTrackerStatus tts, int clusterSize, int numUniqueHosts ) throws IOException { if (status.getRunState() != JobStatus.RUNNING) { LOG.info("Cannot create task split for " + profile.getJobID()); return null; } // Ensure we have sufficient map outputs ready to shuffle before // scheduling reduces if (!scheduleReduces()) { return null; } int target = findNewReduceTask(tts, clusterSize, numUniqueHosts, status.reduceProgress()); if (target == -1) { return null; } Task result = reduces[target].getTaskToRun(tts.getTrackerName()); if (result != null) { addRunningTaskToTIP(reduces[target], result.getTaskID(), tts, true); } return result; } // returns the (cache)level at which the nodes matches private int getMatchingLevelForNodes(Node n1, Node n2) { int count = 0; do { if (n1.equals(n2)) { return count; } ++count; n1 = n1.getParent(); n2 = n2.getParent(); } while (n1 != null); return this.maxLevel; } /** * Populate the data structures as a task is scheduled. * * Assuming {@link JobTracker} is locked on entry. 
* * @param tip The tip for which the task is added * @param id The attempt-id for the task * @param tts task-tracker status * @param isScheduled Whether this task is scheduled from the JT or has * joined back upon restart */ synchronized void addRunningTaskToTIP(TaskInProgress tip, TaskAttemptID id, TaskTrackerStatus tts, boolean isScheduled) { // Make an entry in the tip if the attempt is not scheduled i.e externally // added if (!isScheduled) { tip.addRunningTask(id, tts.getTrackerName()); } final JobTrackerInstrumentation metrics = jobtracker.getInstrumentation(); // keeping the earlier ordering intact String name; String splits = ""; Enum counter = null; if (tip.isJobSetupTask()) { launchedSetup = true; name = Values.SETUP.name(); } else if (tip.isJobCleanupTask()) { launchedCleanup = true; name = Values.CLEANUP.name(); } else if (tip.isMapTask()) { ++runningMapTasks; name = Values.MAP.name(); counter = Counter.TOTAL_LAUNCHED_MAPS; splits = tip.getSplitNodes(); if (tip.getActiveTasks().size() > 1) speculativeMapTasks++; metrics.launchMap(id); } else { ++runningReduceTasks; name = Values.REDUCE.name(); counter = Counter.TOTAL_LAUNCHED_REDUCES; if (tip.getActiveTasks().size() > 1) speculativeReduceTasks++; metrics.launchReduce(id); } // Note that the logs are for the scheduled tasks only. Tasks that join on // restart has already their logs in place. if (tip.isFirstAttempt(id)) { JobHistory.Task.logStarted(tip.getTIPId(), name, tip.getExecStartTime(), splits); } if (!tip.isJobSetupTask() && !tip.isJobCleanupTask()) { jobCounters.incrCounter(counter, 1); } //TODO The only problem with these counters would be on restart. // The jobtracker updates the counter only when the task that is scheduled // if from a non-running tip and is local (data, rack ...). But upon restart // as the reports come from the task tracker, there is no good way to infer // when exactly to increment the locality counters. 
The only solution is to // increment the counters for all the tasks irrespective of // - whether the tip is running or not // - whether its a speculative task or not // // So to simplify, increment the data locality counter whenever there is // data locality. @@ -2058,995 +2059,1012 @@ public class JobInProgress { long currentTime = System.currentTimeMillis(); // 1. Check bottom up for speculative tasks from the running cache if (node != null) { Node key = node; for (int level = 0; level < maxLevel; ++level) { Set<TaskInProgress> cacheForLevel = runningMapCache.get(key); if (cacheForLevel != null) { tip = findSpeculativeTask(cacheForLevel, tts, avgProgress, currentTime, level == 0); if (tip != null) { if (cacheForLevel.size() == 0) { runningMapCache.remove(key); } return tip.getIdWithinJob(); } } key = key.getParent(); } } // 2. Check breadth-wise for speculative tasks for (Node parent : nodesAtMaxLevel) { // ignore the parent which is already scanned if (parent == nodeParentAtMaxLevel) { continue; } Set<TaskInProgress> cache = runningMapCache.get(parent); if (cache != null) { tip = findSpeculativeTask(cache, tts, avgProgress, currentTime, false); if (tip != null) { // remove empty cache entries if (cache.size() == 0) { runningMapCache.remove(parent); } LOG.info("Choosing a non-local task " + tip.getTIPId() + " for speculation"); return tip.getIdWithinJob(); } } } // 3. 
Check non-local tips for speculation tip = findSpeculativeTask(nonLocalRunningMaps, tts, avgProgress, currentTime, false); if (tip != null) { LOG.info("Choosing a non-local task " + tip.getTIPId() + " for speculation"); return tip.getIdWithinJob(); } } return -1; } /** * Find new reduce task * @param tts The task tracker that is asking for a task * @param clusterSize The number of task trackers in the cluster * @param numUniqueHosts The number of hosts that run task trackers * @param avgProgress The average progress of this kind of task in this job * @return the index in tasks of the selected task (or -1 for no task) */ private synchronized int findNewReduceTask(TaskTrackerStatus tts, int clusterSize, int numUniqueHosts, double avgProgress) { if (numReduceTasks == 0) { if(LOG.isDebugEnabled()) { LOG.debug("No reduces to schedule for " + profile.getJobID()); } return -1; } String taskTracker = tts.getTrackerName(); TaskInProgress tip = null; // Update the last-known clusterSize this.clusterSize = clusterSize; if (!shouldRunOnTaskTracker(taskTracker)) { return -1; } long outSize = resourceEstimator.getEstimatedReduceInputSize(); long availSpace = tts.getResourceStatus().getAvailableSpace(); if(availSpace < outSize) { LOG.warn("No room for reduce task. Node " + taskTracker + " has " + availSpace + " bytes free; but we expect reduce input to take " + outSize); return -1; //see if a different TIP might work better. } // 1. check for a never-executed reduce tip // reducers don't have a cache and so pass -1 to explicitly call that out tip = findTaskFromList(nonRunningReduces, tts, numUniqueHosts, false); if (tip != null) { scheduleReduce(tip); return tip.getIdWithinJob(); } // 2. 
check for a reduce tip to be speculated if (hasSpeculativeReduces) { tip = findSpeculativeTask(runningReduces, tts, avgProgress, System.currentTimeMillis(), false); if (tip != null) { scheduleReduce(tip); return tip.getIdWithinJob(); } } return -1; } private boolean shouldRunOnTaskTracker(String taskTracker) { // // Check if too many tasks of this job have failed on this // tasktracker prior to assigning it a new one. // int taskTrackerFailedTasks = getTrackerTaskFailures(taskTracker); if ((flakyTaskTrackers < (clusterSize * CLUSTER_BLACKLIST_PERCENT)) && taskTrackerFailedTasks >= conf.getMaxTaskFailuresPerTracker()) { if (LOG.isDebugEnabled()) { String flakyTracker = convertTrackerNameToHostName(taskTracker); LOG.debug("Ignoring the black-listed tasktracker: '" + flakyTracker + "' for assigning a new task"); } return false; } return true; } /** * Metering: Occupied Slots * (Finish - Start) * @param tip {@link TaskInProgress} to be metered which just completed, * cannot be <code>null</code> * @param status {@link TaskStatus} of the completed task, cannot be * <code>null</code> */ private void meterTaskAttempt(TaskInProgress tip, TaskStatus status) { Counter slotCounter = (tip.isMapTask()) ? Counter.SLOTS_MILLIS_MAPS : Counter.SLOTS_MILLIS_REDUCES; jobCounters.incrCounter(slotCounter, tip.getNumSlotsRequired() * (status.getFinishTime() - status.getStartTime())); } /** * A taskid assigned to this JobInProgress has reported in successfully. */ public synchronized boolean completedTask(TaskInProgress tip, TaskStatus status) { TaskAttemptID taskid = status.getTaskID(); int oldNumAttempts = tip.getActiveTasks().size(); final JobTrackerInstrumentation metrics = jobtracker.getInstrumentation(); // Metering meterTaskAttempt(tip, status); // Sanity check: is the TIP already complete? // It _is_ safe to not decrement running{Map|Reduce}Tasks and // finished{Map|Reduce}Tasks variables here because one and only // one task-attempt of a TIP gets to completedTask. 
This is because // the TaskCommitThread in the JobTracker marks other, completed, // speculative tasks as _complete_. if (tip.isComplete()) { // Mark this task as KILLED tip.alreadyCompletedTask(taskid); // Let the JobTracker cleanup this taskid if the job isn't running if (this.status.getRunState() != JobStatus.RUNNING) { jobtracker.markCompletedTaskAttempt(status.getTaskTracker(), taskid); } return false; } LOG.info("Task '" + taskid + "' has completed " + tip.getTIPId() + " successfully."); // Mark the TIP as complete tip.completed(taskid); resourceEstimator.updateWithCompletedTask(status, tip); // Update jobhistory TaskTrackerStatus ttStatus = this.jobtracker.getTaskTrackerStatus(status.getTaskTracker()); String trackerHostname = jobtracker.getNode(ttStatus.getHost()).toString(); String taskType = getTaskType(tip); if (status.getIsMap()){ JobHistory.MapAttempt.logStarted(status.getTaskID(), status.getStartTime(), status.getTaskTracker(), ttStatus.getHttpPort(), taskType); JobHistory.MapAttempt.logFinished(status.getTaskID(), status.getFinishTime(), trackerHostname, taskType, status.getStateString(), status.getCounters()); }else{ JobHistory.ReduceAttempt.logStarted( status.getTaskID(), status.getStartTime(), status.getTaskTracker(), ttStatus.getHttpPort(), taskType); JobHistory.ReduceAttempt.logFinished(status.getTaskID(), status.getShuffleFinishTime(), status.getSortFinishTime(), status.getFinishTime(), trackerHostname, taskType, status.getStateString(), status.getCounters()); } JobHistory.Task.logFinished(tip.getTIPId(), taskType, tip.getExecFinishTime(), status.getCounters()); int newNumAttempts = tip.getActiveTasks().size(); if (tip.isJobSetupTask()) { // setup task has finished. kill the extra setup tip killSetupTip(!tip.isMapTask()); // Job can start running now. 
this.status.setSetupProgress(1.0f); // move the job to running state if the job is in prep state if (this.status.getRunState() == JobStatus.PREP) { changeStateTo(JobStatus.RUNNING); JobHistory.JobInfo.logStarted(profile.getJobID()); } } else if (tip.isJobCleanupTask()) { // cleanup task has finished. Kill the extra cleanup tip if (tip.isMapTask()) { // kill the reduce tip cleanup[1].kill(); } else { cleanup[0].kill(); } // // The Job is done // if the job is failed, then mark the job failed. if (jobFailed) { terminateJob(JobStatus.FAILED); } // if the job is killed, then mark the job killed. if (jobKilled) { terminateJob(JobStatus.KILLED); } else { jobComplete(); } // The job has been killed/failed/successful // JobTracker should cleanup this task jobtracker.markCompletedTaskAttempt(status.getTaskTracker(), taskid); } else if (tip.isMapTask()) { runningMapTasks -= 1; // check if this was a sepculative task if (oldNumAttempts > 1) { speculativeMapTasks -= (oldNumAttempts - newNumAttempts); } finishedMapTasks += 1; metrics.completeMap(taskid); // remove the completed map from the resp running caches retireMap(tip); if ((finishedMapTasks + failedMapTIPs) == (numMapTasks)) { this.status.setMapProgress(1.0f); } } else { runningReduceTasks -= 1; if (oldNumAttempts > 1) { speculativeReduceTasks -= (oldNumAttempts - newNumAttempts); } finishedReduceTasks += 1; metrics.completeReduce(taskid); // remove the completed reduces from the running reducers set retireReduce(tip); if ((finishedReduceTasks + failedReduceTIPs) == (numReduceTasks)) { this.status.setReduceProgress(1.0f); } } return true; } /** * Job state change must happen thru this call */ private void changeStateTo(int newState) { int oldState = this.status.getRunState(); if (oldState == newState) { return; //old and new states are same } this.status.setRunState(newState); //update the metrics if (oldState == JobStatus.PREP) { this.jobtracker.getInstrumentation().decPrepJob(conf, jobId); } else if (oldState == 
JobStatus.RUNNING) { this.jobtracker.getInstrumentation().decRunningJob(conf, jobId); } if (newState == JobStatus.PREP) { this.jobtracker.getInstrumentation().addPrepJob(conf, jobId); } else if (newState == JobStatus.RUNNING) { this.jobtracker.getInstrumentation().addRunningJob(conf, jobId); } } /** * The job is done since all it's component tasks are either * successful or have failed. */ private void jobComplete() { final JobTrackerInstrumentation metrics = jobtracker.getInstrumentation(); // // All tasks are complete, then the job is done! // if (this.status.getRunState() == JobStatus.RUNNING ) { changeStateTo(JobStatus.SUCCEEDED); this.status.setCleanupProgress(1.0f); if (maps.length == 0) { this.status.setMapProgress(1.0f); } if (reduces.length == 0) { this.status.setReduceProgress(1.0f); } this.finishTime = System.currentTimeMillis(); LOG.info("Job " + this.status.getJobID() + " has completed successfully."); // Log the job summary (this should be done prior to logging to // job-history to ensure job-counters are in-sync JobSummary.logJobSummary(this, jobtracker.getClusterStatus(false)); // Log job-history JobHistory.JobInfo.logFinished(this.status.getJobID(), finishTime, this.finishedMapTasks, this.finishedReduceTasks, failedMapTasks, failedReduceTasks, getMapCounters(), getReduceCounters(), getCounters()); // Note that finalize will close the job history handles which garbage collect // might try to finalize garbageCollect(); metrics.completeJob(this.conf, this.status.getJobID()); } } private synchronized void terminateJob(int jobTerminationState) { if ((status.getRunState() == JobStatus.RUNNING) || (status.getRunState() == JobStatus.PREP)) { this.finishTime = System.currentTimeMillis(); this.status.setMapProgress(1.0f); this.status.setReduceProgress(1.0f); this.status.setCleanupProgress(1.0f); if (jobTerminationState == JobStatus.FAILED) { changeStateTo(JobStatus.FAILED); // Log the job summary JobSummary.logJobSummary(this, 
jobtracker.getClusterStatus(false)); // Log to job-history JobHistory.JobInfo.logFailed(this.status.getJobID(), finishTime, this.finishedMapTasks, this.finishedReduceTasks); } else { changeStateTo(JobStatus.KILLED); // Log the job summary JobSummary.logJobSummary(this, jobtracker.getClusterStatus(false)); // Log to job-history JobHistory.JobInfo.logKilled(this.status.getJobID(), finishTime, this.finishedMapTasks, this.finishedReduceTasks); } garbageCollect(); jobtracker.getInstrumentation().terminateJob( this.conf, this.status.getJobID()); if (jobTerminationState == JobStatus.FAILED) { jobtracker.getInstrumentation().failedJob( this.conf, this.status.getJobID()); } else { jobtracker.getInstrumentation().killedJob( this.conf, this.status.getJobID()); } } } /** * Terminate the job and all its component tasks. * Calling this will lead to marking the job as failed/killed. Cleanup * tip will be launched. If the job has not inited, it will directly call * terminateJob as there is no need to launch cleanup tip. * This method is reentrant. * @param jobTerminationState job termination state */ private synchronized void terminate(int jobTerminationState) { if(!tasksInited.get()) { //init could not be done, we just terminate directly. terminateJob(jobTerminationState); return; } if ((status.getRunState() == JobStatus.RUNNING) || (status.getRunState() == JobStatus.PREP)) { LOG.info("Killing job '" + this.status.getJobID() + "'"); if (jobTerminationState == JobStatus.FAILED) { if(jobFailed) {//reentrant return; } jobFailed = true; } else if (jobTerminationState == JobStatus.KILLED) { if(jobKilled) {//reentrant return; } jobKilled = true; } // clear all unclean tasks clearUncleanTasks(); // // kill all TIPs. 
// for (int i = 0; i < setup.length; i++) { setup[i].kill(); } for (int i = 0; i < maps.length; i++) { maps[i].kill(); } for (int i = 0; i < reduces.length; i++) { reduces[i].kill(); } } } private void cancelReservedSlots() { // Make a copy of the set of TaskTrackers to prevent a // ConcurrentModificationException ... Set<TaskTracker> tm = new HashSet<TaskTracker>(trackersReservedForMaps.keySet()); for (TaskTracker tt : tm) { tt.unreserveSlots(TaskType.MAP, this); } Set<TaskTracker> tr = new HashSet<TaskTracker>(trackersReservedForReduces.keySet()); for (TaskTracker tt : tr) { tt.unreserveSlots(TaskType.REDUCE, this); } } private void clearUncleanTasks() { TaskAttemptID taskid = null; TaskInProgress tip = null; while (!mapCleanupTasks.isEmpty()) { taskid = mapCleanupTasks.remove(0); tip = maps[taskid.getTaskID().getId()]; updateTaskStatus(tip, tip.getTaskStatus(taskid)); } while (!reduceCleanupTasks.isEmpty()) { taskid = reduceCleanupTasks.remove(0); tip = reduces[taskid.getTaskID().getId()]; updateTaskStatus(tip, tip.getTaskStatus(taskid)); } } /** * Kill the job and all its component tasks. This method should be called from * jobtracker and should return fast as it locks the jobtracker. */ public void kill() { boolean killNow = false; synchronized(jobInitKillStatus) { jobInitKillStatus.killed = true; //if not in middle of init, terminate it now if(!jobInitKillStatus.initStarted || jobInitKillStatus.initDone) { //avoiding nested locking by setting flag killNow = true; } } if(killNow) { terminate(JobStatus.KILLED); } } /** * Fails the job and all its component tasks. This should be called only from * {@link JobInProgress} or {@link JobTracker}. Look at * {@link JobTracker#failJob(JobInProgress)} for more details. */ synchronized void fail() { terminate(JobStatus.FAILED); } /** * A task assigned to this JobInProgress has reported in as failed. * Most of the time, we'll just reschedule execution. 
However, after * many repeated failures we may instead decide to allow the entire * job to fail or succeed if the user doesn't care about a few tasks failing. * * Even if a task has reported as completed in the past, it might later * be reported as failed. That's because the TaskTracker that hosts a map * task might die before the entire job can complete. If that happens, * we need to schedule reexecution so that downstream reduce tasks can * obtain the map task's output. */ private void failedTask(TaskInProgress tip, TaskAttemptID taskid, TaskStatus status, - TaskTracker taskTracker, - boolean wasRunning, boolean wasComplete) { + TaskTracker taskTracker, boolean wasRunning, + boolean wasComplete, boolean wasAttemptRunning) { final JobTrackerInstrumentation metrics = jobtracker.getInstrumentation(); // check if the TIP is already failed boolean wasFailed = tip.isFailed(); // Mark the taskid as FAILED or KILLED tip.incompleteSubTask(taskid, this.status); boolean isRunning = tip.isRunning(); boolean isComplete = tip.isComplete(); + + if (wasAttemptRunning) { + // We are decrementing counters without looking for isRunning , + // because we increment the counters when we obtain + // new map task attempt or reduce task attempt.We do not really check + // for tip being running. + // Whenever we obtain new task attempt following counters are incremented. + // ++runningMapTasks; + //......... + // metrics.launchMap(id); + // hence we are decrementing the same set. + if (!tip.isJobCleanupTask() && !tip.isJobSetupTask()) { + if (tip.isMapTask()) { + runningMapTasks -= 1; + metrics.failedMap(taskid); + } else { + runningReduceTasks -= 1; + metrics.failedReduce(taskid); + } + } + + // Metering + meterTaskAttempt(tip, status); + } //update running count on task failure. 
if (wasRunning && !isRunning) { if (tip.isJobCleanupTask()) { launchedCleanup = false; } else if (tip.isJobSetupTask()) { launchedSetup = false; } else if (tip.isMapTask()) { - runningMapTasks -= 1; - metrics.failedMap(taskid); // remove from the running queue and put it in the non-running cache // if the tip is not complete i.e if the tip still needs to be run if (!isComplete) { retireMap(tip); failMap(tip); } } else { - runningReduceTasks -= 1; - metrics.failedReduce(taskid); // remove from the running queue and put in the failed queue if the tip // is not complete if (!isComplete) { retireReduce(tip); failReduce(tip); } } - - // Metering - meterTaskAttempt(tip, status); } // The case when the map was complete but the task tracker went down. // However, we don't need to do any metering here... if (wasComplete && !isComplete) { if (tip.isMapTask()) { // Put the task back in the cache. This will help locality for cases // where we have a different TaskTracker from the same rack/switch // asking for a task. // We bother about only those TIPs that were successful // earlier (wasComplete and !isComplete) // (since they might have been removed from the cache of other // racks/switches, if the input split blocks were present there too) failMap(tip); finishedMapTasks -= 1; } } // update job history // get taskStatus from tip TaskStatus taskStatus = tip.getTaskStatus(taskid); String taskTrackerName = taskStatus.getTaskTracker(); String taskTrackerHostName = convertTrackerNameToHostName(taskTrackerName); int taskTrackerPort = -1; TaskTrackerStatus taskTrackerStatus = (taskTracker == null) ? null : taskTracker.getStatus(); if (taskTrackerStatus != null) { taskTrackerPort = taskTrackerStatus.getHttpPort(); } long startTime = taskStatus.getStartTime(); long finishTime = taskStatus.getFinishTime(); List<String> taskDiagnosticInfo = tip.getDiagnosticInfo(taskid); String diagInfo = taskDiagnosticInfo == null ? 
"" : StringUtils.arrayToString(taskDiagnosticInfo.toArray(new String[0])); String taskType = getTaskType(tip); if (taskStatus.getIsMap()) { JobHistory.MapAttempt.logStarted(taskid, startTime, taskTrackerName, taskTrackerPort, taskType); if (taskStatus.getRunState() == TaskStatus.State.FAILED) { JobHistory.MapAttempt.logFailed(taskid, finishTime, taskTrackerHostName, diagInfo, taskType); } else { JobHistory.MapAttempt.logKilled(taskid, finishTime, taskTrackerHostName, diagInfo, taskType); } } else { JobHistory.ReduceAttempt.logStarted(taskid, startTime, taskTrackerName, taskTrackerPort, taskType); if (taskStatus.getRunState() == TaskStatus.State.FAILED) { JobHistory.ReduceAttempt.logFailed(taskid, finishTime, taskTrackerHostName, diagInfo, taskType); } else { JobHistory.ReduceAttempt.logKilled(taskid, finishTime, taskTrackerHostName, diagInfo, taskType); } } // After this, try to assign tasks with the one after this, so that // the failed task goes to the end of the list. if (!tip.isJobCleanupTask() && !tip.isJobSetupTask()) { if (tip.isMapTask()) { failedMapTasks++; } else { failedReduceTasks++; } } // // Note down that a task has failed on this tasktracker // if (status.getRunState() == TaskStatus.State.FAILED) { addTrackerTaskFailure(taskTrackerName, taskTracker); } // // Let the JobTracker know that this task has failed // jobtracker.markCompletedTaskAttempt(status.getTaskTracker(), taskid); // // Check if we need to kill the job because of too many failures or // if the job is complete since all component tasks have completed // We do it once per TIP and that too for the task that fails the TIP if (!wasFailed && tip.isFailed()) { // // Allow upto 'mapFailuresPercent' of map tasks to fail or // 'reduceFailuresPercent' of reduce tasks to fail // boolean killJob = tip.isJobCleanupTask() || tip.isJobSetupTask() ? true : tip.isMapTask() ? 
((++failedMapTIPs*100) > (mapFailuresPercent*numMapTasks)) : ((++failedReduceTIPs*100) > (reduceFailuresPercent*numReduceTasks)); if (killJob) { LOG.info("Aborting job " + profile.getJobID()); JobHistory.Task.logFailed(tip.getTIPId(), taskType, finishTime, diagInfo); if (tip.isJobCleanupTask()) { // kill the other tip if (tip.isMapTask()) { cleanup[1].kill(); } else { cleanup[0].kill(); } terminateJob(JobStatus.FAILED); } else { if (tip.isJobSetupTask()) { // kill the other tip killSetupTip(!tip.isMapTask()); } fail(); } } // // Update the counters // if (!tip.isJobCleanupTask() && !tip.isJobSetupTask()) { if (tip.isMapTask()) { jobCounters.incrCounter(Counter.NUM_FAILED_MAPS, 1); } else { jobCounters.incrCounter(Counter.NUM_FAILED_REDUCES, 1); } } } } void killSetupTip(boolean isMap) { if (isMap) { setup[0].kill(); } else { setup[1].kill(); } } boolean isSetupFinished() { if (setup[0].isComplete() || setup[0].isFailed() || setup[1].isComplete() || setup[1].isFailed()) { return true; } return false; } /** * Fail a task with a given reason, but without a status object. * * Assuming {@link JobTracker} is locked on entry. * * @param tip The task's tip * @param taskid The task id * @param reason The reason that the task failed * @param trackerName The task tracker the task failed on */ public void failedTask(TaskInProgress tip, TaskAttemptID taskid, String reason, TaskStatus.Phase phase, TaskStatus.State state, String trackerName) { TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskid, 0.0f, tip.isMapTask() ? numSlotsPerMap : numSlotsPerReduce, state, reason, reason, trackerName, phase, new Counters()); // update the actual start-time of the attempt TaskStatus oldStatus = tip.getTaskStatus(taskid); long startTime = oldStatus == null ? 
System.currentTimeMillis() : oldStatus.getStartTime(); status.setStartTime(startTime); status.setFinishTime(System.currentTimeMillis()); boolean wasComplete = tip.isComplete(); updateTaskStatus(tip, status); boolean isComplete = tip.isComplete(); if (wasComplete && !isComplete) { // mark a successful tip as failed String taskType = getTaskType(tip); JobHistory.Task.logFailed(tip.getTIPId(), taskType, tip.getExecFinishTime(), reason, taskid); } } /** * The job is dead. We're now GC'ing it, getting rid of the job * from all tables. Be sure to remove all of this job's tasks * from the various tables. */ synchronized void garbageCollect() { // Cancel task tracker reservation cancelReservedSlots(); // Let the JobTracker know that a job is complete jobtracker.getInstrumentation().decWaitingMaps(getJobID(), pendingMaps()); jobtracker.getInstrumentation().decWaitingReduces(getJobID(), pendingReduces()); jobtracker.storeCompletedJob(this); jobtracker.finalizeJob(this); try { // Definitely remove the local-disk copy of the job file if (localJobFile != null) { localFs.delete(localJobFile, true); localJobFile = null; } // clean up splits for (int i = 0; i < maps.length; i++) { maps[i].clearSplit(); } // JobClient always creates a new directory with job files // so we remove that directory to cleanup // Delete temp dfs dirs created if any, like in case of // speculative exn of reduces. Path tempDir = jobtracker.getSystemDirectoryForJob(getJobID()); new CleanupQueue().addToQueue(new PathDeletionContext( FileSystem.get(conf), tempDir.toUri().getPath())); } catch (IOException e) { LOG.warn("Error cleaning up "+profile.getJobID()+": "+e); } cleanUpMetrics(); // free up the memory used by the data structures this.nonRunningMapCache = null; this.runningMapCache = null; this.nonRunningReduces = null; this.runningReduces = null; } /** * Return the TaskInProgress that matches the tipid. 
*/ public synchronized TaskInProgress getTaskInProgress(TaskID tipid) { if (tipid.isMap()) { if (tipid.equals(cleanup[0].getTIPId())) { // cleanup map tip return cleanup[0]; } if (tipid.equals(setup[0].getTIPId())) { //setup map tip return setup[0]; } for (int i = 0; i < maps.length; i++) { if (tipid.equals(maps[i].getTIPId())){ return maps[i]; } } } else { if (tipid.equals(cleanup[1].getTIPId())) { // cleanup reduce tip return cleanup[1]; } if (tipid.equals(setup[1].getTIPId())) { //setup reduce tip return setup[1]; } for (int i = 0; i < reduces.length; i++) { if (tipid.equals(reduces[i].getTIPId())){ return reduces[i]; } } } return null; } /** * Find the details of someplace where a map has finished * @param mapId the id of the map * @return the task status of the completed task */ public synchronized TaskStatus findFinishedMap(int mapId) { TaskInProgress tip = maps[mapId]; if (tip.isComplete()) { TaskStatus[] statuses = tip.getTaskStatuses(); for(int i=0; i < statuses.length; i++) { if (statuses[i].getRunState() == TaskStatus.State.SUCCEEDED) { return statuses[i]; } } } return null; } synchronized int getNumTaskCompletionEvents() { return taskCompletionEvents.size(); } synchronized public TaskCompletionEvent[] getTaskCompletionEvents( int fromEventId, int maxEvents) { TaskCompletionEvent[] events = TaskCompletionEvent.EMPTY_ARRAY; if (taskCompletionEvents.size() > fromEventId) { int actualMax = Math.min(maxEvents, (taskCompletionEvents.size() - fromEventId)); events = taskCompletionEvents.subList(fromEventId, actualMax + fromEventId).toArray(events); } return events; } synchronized void fetchFailureNotification(TaskInProgress tip, TaskAttemptID mapTaskId, String trackerName) { Integer fetchFailures = mapTaskIdToFetchFailuresMap.get(mapTaskId); fetchFailures = (fetchFailures == null) ? 
1 : (fetchFailures+1); mapTaskIdToFetchFailuresMap.put(mapTaskId, fetchFailures); LOG.info("Failed fetch notification #" + fetchFailures + " for task " + mapTaskId); float failureRate = (float)fetchFailures / runningReduceTasks; // declare faulty if fetch-failures >= max-allowed-failures boolean isMapFaulty = (failureRate >= MAX_ALLOWED_FETCH_FAILURES_PERCENT) ? true : false; if (fetchFailures >= MAX_FETCH_FAILURES_NOTIFICATIONS && isMapFaulty) { LOG.info("Too many fetch-failures for output of task: " + mapTaskId + " ... killing it"); failedTask(tip, mapTaskId, "Too many fetch-failures", (tip.isMapTask() ? TaskStatus.Phase.MAP : TaskStatus.Phase.REDUCE), TaskStatus.State.FAILED, trackerName); mapTaskIdToFetchFailuresMap.remove(mapTaskId); } } /** * @return The JobID of this JobInProgress. */ public JobID getJobID() { return jobId; } public synchronized Object getSchedulingInfo() { return this.schedulingInfo; } public synchronized void setSchedulingInfo(Object schedulingInfo) { this.schedulingInfo = schedulingInfo; this.status.setSchedulingInfo(schedulingInfo.toString()); } /** * To keep track of kill and initTasks status of this job. initTasks() take * a lock on JobInProgress object. kill should avoid waiting on * JobInProgress lock since it may take a while to do initTasks(). */ private static class JobInitKillStatus { //flag to be set if kill is called boolean killed; boolean initStarted; boolean initDone; } boolean isComplete() { return status.isJobComplete(); } /** * Get the task type for logging it to {@link JobHistory}. 
*/ private String getTaskType(TaskInProgress tip) { if (tip.isJobCleanupTask()) { return Values.CLEANUP.name(); } else if (tip.isJobSetupTask()) { return Values.SETUP.name(); } else if (tip.isMapTask()) { return Values.MAP.name(); } else { return Values.REDUCE.name(); } } /** * Test method to set the cluster sizes */ void setClusterSize(int clusterSize) { this.clusterSize = clusterSize; } static class JobSummary { static final Log LOG = LogFactory.getLog(JobSummary.class); // Escape sequences static final char EQUALS = '='; static final char[] charsToEscape = {StringUtils.COMMA, EQUALS, StringUtils.ESCAPE_CHAR}; /** * Log a summary of the job's runtime. * * @param job {@link JobInProgress} whose summary is to be logged, cannot * be <code>null</code>. * @param cluster {@link ClusterStatus} of the cluster on which the job was * run, cannot be <code>null</code> */ public static void logJobSummary(JobInProgress job, ClusterStatus cluster) { JobStatus status = job.getStatus(); JobProfile profile = job.getProfile(); String user = StringUtils.escapeString(profile.getUser(), StringUtils.ESCAPE_CHAR, charsToEscape); String queue = StringUtils.escapeString(profile.getQueueName(), StringUtils.ESCAPE_CHAR, charsToEscape); Counters jobCounters = job.getJobCounters(); long mapSlotSeconds = (jobCounters.getCounter(Counter.SLOTS_MILLIS_MAPS) + jobCounters.getCounter(Counter.FALLOW_SLOTS_MILLIS_MAPS)) / 1000; long reduceSlotSeconds = (jobCounters.getCounter(Counter.SLOTS_MILLIS_REDUCES) + jobCounters.getCounter(Counter.FALLOW_SLOTS_MILLIS_REDUCES)) / 1000; LOG.info("jobId=" + job.getJobID() + StringUtils.COMMA + "submitTime" + EQUALS + job.getStartTime() + StringUtils.COMMA + "launchTime" + EQUALS + job.getLaunchTime() + StringUtils.COMMA + "finishTime" + EQUALS + job.getFinishTime() + StringUtils.COMMA + "numMaps" + EQUALS + job.getMapTasks().length + StringUtils.COMMA + "numSlotsPerMap" + EQUALS + job.getNumSlotsPerMap() + StringUtils.COMMA + "numReduces" + EQUALS + 
job.getReduceTasks().length + StringUtils.COMMA + "numSlotsPerReduce" + EQUALS + job.getNumSlotsPerReduce() + StringUtils.COMMA + "user" + EQUALS + user + StringUtils.COMMA + "queue" + EQUALS + queue + StringUtils.COMMA + "status" + EQUALS + JobStatus.getJobRunState(status.getRunState()) + StringUtils.COMMA + "mapSlotSeconds" + EQUALS + mapSlotSeconds + StringUtils.COMMA + "reduceSlotsSeconds" + EQUALS + reduceSlotSeconds + StringUtils.COMMA + "clusterMapCapacity" + EQUALS + cluster.getMaxMapTasks() + StringUtils.COMMA + "clusterReduceCapacity" + EQUALS + cluster.getMaxReduceTasks() ); } } } diff --git a/src/mapred/org/apache/hadoop/mapred/TaskInProgress.java b/src/mapred/org/apache/hadoop/mapred/TaskInProgress.java index 583c4a9..8fdf6a2 100644 --- a/src/mapred/org/apache/hadoop/mapred/TaskInProgress.java +++ b/src/mapred/org/apache/hadoop/mapred/TaskInProgress.java @@ -1,817 +1,826 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.TreeMap; import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.mapred.JobClient.RawSplit; import org.apache.hadoop.mapred.SortedRanges.Range; import org.apache.hadoop.net.Node; /************************************************************* * TaskInProgress maintains all the info needed for a * Task in the lifetime of its owning Job. A given Task * might be speculatively executed or reexecuted, so we * need a level of indirection above the running-id itself. * <br> * A given TaskInProgress contains multiple taskids, * 0 or more of which might be executing at any one time. * (That's what allows speculative execution.) A taskid * is now *never* recycled. A TIP allocates enough taskids * to account for all the speculation and failures it will * ever have to handle. Once those are up, the TIP is dead. 
* ************************************************************** */ class TaskInProgress { static final int MAX_TASK_EXECS = 1; int maxTaskAttempts = 4; static final double SPECULATIVE_GAP = 0.2; static final long SPECULATIVE_LAG = 60 * 1000; private static final int NUM_ATTEMPTS_PER_RESTART = 1000; public static final Log LOG = LogFactory.getLog(TaskInProgress.class); // Defines the TIP private String jobFile = null; private RawSplit rawSplit; private int numMaps; private int partition; private JobTracker jobtracker; private TaskID id; private JobInProgress job; private final int numSlotsRequired; // Status of the TIP private int successEventNumber = -1; private int numTaskFailures = 0; private int numKilledTasks = 0; private double progress = 0; private String state = ""; private long startTime = 0; private long execStartTime = 0; private long execFinishTime = 0; private int completes = 0; private boolean failed = false; private boolean killed = false; private long maxSkipRecords = 0; private FailedRanges failedRanges = new FailedRanges(); private volatile boolean skipping = false; private boolean jobCleanup = false; private boolean jobSetup = false; // The 'next' usable taskid of this tip int nextTaskId = 0; // The taskid that took this TIP to SUCCESS private TaskAttemptID successfulTaskId; // The first taskid of this tip private TaskAttemptID firstTaskId; // Map from task Id -> TaskTracker Id, contains tasks that are // currently runnings private TreeMap<TaskAttemptID, String> activeTasks = new TreeMap<TaskAttemptID, String>(); // All attempt Ids of this TIP private TreeSet<TaskAttemptID> tasks = new TreeSet<TaskAttemptID>(); private JobConf conf; private Map<TaskAttemptID,List<String>> taskDiagnosticData = new TreeMap<TaskAttemptID,List<String>>(); /** * Map from taskId -> TaskStatus */ private TreeMap<TaskAttemptID,TaskStatus> taskStatuses = new TreeMap<TaskAttemptID,TaskStatus>(); // Map from taskId -> TaskTracker Id, // contains cleanup attempts and where 
they ran, if any private TreeMap<TaskAttemptID, String> cleanupTasks = new TreeMap<TaskAttemptID, String>(); private TreeSet<String> machinesWhereFailed = new TreeSet<String>(); private TreeSet<TaskAttemptID> tasksReportedClosed = new TreeSet<TaskAttemptID>(); //list of tasks to kill, <taskid> -> <shouldFail> private TreeMap<TaskAttemptID, Boolean> tasksToKill = new TreeMap<TaskAttemptID, Boolean>(); //task to commit, <taskattemptid> private TaskAttemptID taskToCommit; private Counters counters = new Counters(); /** * Constructor for MapTask */ public TaskInProgress(JobID jobid, String jobFile, RawSplit rawSplit, JobTracker jobtracker, JobConf conf, JobInProgress job, int partition, int numSlotsRequired) { this.jobFile = jobFile; this.rawSplit = rawSplit; this.jobtracker = jobtracker; this.job = job; this.conf = conf; this.partition = partition; this.maxSkipRecords = SkipBadRecords.getMapperMaxSkipRecords(conf); this.numSlotsRequired = numSlotsRequired; setMaxTaskAttempts(); init(jobid); } /** * Constructor for ReduceTask */ public TaskInProgress(JobID jobid, String jobFile, int numMaps, int partition, JobTracker jobtracker, JobConf conf, JobInProgress job, int numSlotsRequired) { this.jobFile = jobFile; this.numMaps = numMaps; this.partition = partition; this.jobtracker = jobtracker; this.job = job; this.conf = conf; this.maxSkipRecords = SkipBadRecords.getReducerMaxSkipGroups(conf); this.numSlotsRequired = numSlotsRequired; setMaxTaskAttempts(); init(jobid); } /** * Set the max number of attempts before we declare a TIP as "failed" */ private void setMaxTaskAttempts() { if (isMapTask()) { this.maxTaskAttempts = conf.getMaxMapAttempts(); } else { this.maxTaskAttempts = conf.getMaxReduceAttempts(); } } /** * Return the index of the tip within the job, so * "task_200707121733_1313_0002_m_012345" would return 12345; * @return int the tip index */ public int idWithinJob() { return partition; } public boolean isJobCleanupTask() { return jobCleanup; } public void 
setJobCleanupTask() { jobCleanup = true; } public boolean isJobSetupTask() { return jobSetup; } public void setJobSetupTask() { jobSetup = true; } public boolean isOnlyCommitPending() { for (TaskStatus t : taskStatuses.values()) { if (t.getRunState() == TaskStatus.State.COMMIT_PENDING) { return true; } } return false; } public boolean isCommitPending(TaskAttemptID taskId) { TaskStatus t = taskStatuses.get(taskId); if (t == null) { return false; } return t.getRunState() == TaskStatus.State.COMMIT_PENDING; } /** * Initialization common to Map and Reduce */ void init(JobID jobId) { this.startTime = System.currentTimeMillis(); this.id = new TaskID(jobId, isMapTask(), partition); this.skipping = startSkipping(); } //////////////////////////////////// // Accessors, info, profiles, etc. //////////////////////////////////// /** * Return the start time */ public long getStartTime() { return startTime; } /** * Return the exec start time */ public long getExecStartTime() { return execStartTime; } /** * Set the exec start time */ public void setExecStartTime(long startTime) { execStartTime = startTime; } /** * Return the exec finish time */ public long getExecFinishTime() { return execFinishTime; } /** * Set the exec finish time */ public void setExecFinishTime(long finishTime) { execFinishTime = finishTime; JobHistory.Task.logUpdates(id, execFinishTime); // log the update } /** * Return the parent job */ public JobInProgress getJob() { return job; } /** * Return an ID for this task, not its component taskid-threads */ public TaskID getTIPId() { return this.id; } /** * Whether this is a map task */ public boolean isMapTask() { return rawSplit != null; } /** * Is the Task associated with taskid is the first attempt of the tip? * @param taskId * @return Returns true if the Task is the first attempt of the tip */ public boolean isFirstAttempt(TaskAttemptID taskId) { return firstTaskId == null ? false : firstTaskId.equals(taskId); } /** * Is this tip currently running any tasks? 
* @return true if any tasks are running */ public boolean isRunning() { return !activeTasks.isEmpty(); } + + /** + * Is this attempt currently running ? + * @param taskId task attempt id. + * @return true if attempt taskId is running + */ + boolean isAttemptRunning(TaskAttemptID taskId) { + return activeTasks.containsKey(taskId); + } TaskAttemptID getSuccessfulTaskid() { return successfulTaskId; } private void setSuccessfulTaskid(TaskAttemptID successfulTaskId) { this.successfulTaskId = successfulTaskId; } private void resetSuccessfulTaskid() { this.successfulTaskId = null; } /** * Is this tip complete? * * @return <code>true</code> if the tip is complete, else <code>false</code> */ public synchronized boolean isComplete() { return (completes > 0); } /** * Is the given taskid the one that took this tip to completion? * * @param taskid taskid of attempt to check for completion * @return <code>true</code> if taskid is complete, else <code>false</code> */ public boolean isComplete(TaskAttemptID taskid) { return ((completes > 0) && taskid.equals(getSuccessfulTaskid())); } /** * Is the tip a failure? * * @return <code>true</code> if tip has failed, else <code>false</code> */ public boolean isFailed() { return failed; } /** * Number of times the TaskInProgress has failed. */ public int numTaskFailures() { return numTaskFailures; } /** * Number of times the TaskInProgress has been killed by the framework. */ public int numKilledTasks() { return numKilledTasks; } /** * Get the overall progress (from 0 to 1.0) for this TIP */ public double getProgress() { return progress; } /** * Get the task's counters */ public Counters getCounters() { return counters; } /** * Returns whether a component task-thread should be * closed because the containing JobInProgress has completed * or the task is killed by the user */ public boolean shouldClose(TaskAttemptID taskid) { /** * If the task hasn't been closed yet, and it belongs to a completed * TaskInProgress close it. 
* * However, for completed map tasks we do not close the task which * actually was the one responsible for _completing_ the TaskInProgress. */ boolean close = false; TaskStatus ts = taskStatuses.get(taskid); if ((ts != null) && (!tasksReportedClosed.contains(taskid)) && ((this.failed) || ((job.getStatus().getRunState() != JobStatus.RUNNING && (job.getStatus().getRunState() != JobStatus.PREP))))) { tasksReportedClosed.add(taskid); close = true; } else if (isComplete() && !(isMapTask() && !jobSetup && !jobCleanup && isComplete(taskid)) && !tasksReportedClosed.contains(taskid)) { tasksReportedClosed.add(taskid); close = true; } else if (isCommitPending(taskid) && !shouldCommit(taskid) && !tasksReportedClosed.contains(taskid)) { tasksReportedClosed.add(taskid); close = true; } else { close = tasksToKill.keySet().contains(taskid); } return close; } /** * Commit this task attempt for the tip. * @param taskid */ public void doCommit(TaskAttemptID taskid) { taskToCommit = taskid; } /** * Returns whether the task attempt should be committed or not */ public boolean shouldCommit(TaskAttemptID taskid) { return !isComplete() && isCommitPending(taskid) && taskToCommit.equals(taskid); } /** * Creates a "status report" for this task. Includes the * task ID and overall status, plus reports for all the * component task-threads that have ever been started. 
*/ synchronized TaskReport generateSingleReport() { ArrayList<String> diagnostics = new ArrayList<String>(); for (List<String> l : taskDiagnosticData.values()) { diagnostics.addAll(l); } TIPStatus currentStatus = null; if (isRunning() && !isComplete()) { currentStatus = TIPStatus.RUNNING; } else if (isComplete()) { currentStatus = TIPStatus.COMPLETE; } else if (wasKilled()) { currentStatus = TIPStatus.KILLED; } else if (isFailed()) { currentStatus = TIPStatus.FAILED; } else if (!(isComplete() || isRunning() || wasKilled())) { currentStatus = TIPStatus.PENDING; } TaskReport report = new TaskReport (getTIPId(), (float)progress, state, diagnostics.toArray(new String[diagnostics.size()]), currentStatus, execStartTime, execFinishTime, counters); if (currentStatus == TIPStatus.RUNNING) { report.setRunningTaskAttempts(activeTasks.keySet()); } else if (currentStatus == TIPStatus.COMPLETE) { report.setSuccessfulAttempt(getSuccessfulTaskid()); } return report; } /** * Get the diagnostic messages for a given task within this tip. * * @param taskId the id of the required task * @return the list of diagnostics for that task */ synchronized List<String> getDiagnosticInfo(TaskAttemptID taskId) { return taskDiagnosticData.get(taskId); } //////////////////////////////////////////////// // Update methods, usually invoked by the owning // job. //////////////////////////////////////////////// /** * Save diagnostic information for a given task. * * @param taskId id of the task * @param diagInfo diagnostic information for the task */ public void addDiagnosticInfo(TaskAttemptID taskId, String diagInfo) { List<String> diagHistory = taskDiagnosticData.get(taskId); if (diagHistory == null) { diagHistory = new ArrayList<String>(); taskDiagnosticData.put(taskId, diagHistory); } diagHistory.add(diagInfo); } /** * A status message from a client has arrived. * It updates the status of a single component-thread-task, * which might result in an overall TaskInProgress status update. 
   * @return has the task changed its state noticeably?
   */
  synchronized boolean updateStatus(TaskStatus status) {
    TaskAttemptID taskid = status.getTaskID();
    String diagInfo = status.getDiagnosticInfo();
    TaskStatus oldStatus = taskStatuses.get(taskid);
    boolean changed = true;
    // Record any diagnostic text that arrived with the status update.
    if (diagInfo != null && diagInfo.length() > 0) {
      LOG.info("Error from "+taskid+": "+diagInfo);
      addDiagnosticInfo(taskid, diagInfo);
    }
    // While in skipping mode, feed the update to the failed-record tracker.
    if(skipping) {
      failedRanges.updateState(status);
    }
    if (oldStatus != null) {
      TaskStatus.State oldState = oldStatus.getRunState();
      TaskStatus.State newState = status.getRunState();

      // We should never receive a duplicate success/failure/killed
      // status update for the same taskid! This is a safety check,
      // and is addressed better at the TaskTracker to ensure this.
      // @see {@link TaskTracker.transmitHeartbeat()}
      if ((newState != TaskStatus.State.RUNNING &&
           newState != TaskStatus.State.COMMIT_PENDING &&
           newState != TaskStatus.State.FAILED_UNCLEAN &&
           newState != TaskStatus.State.KILLED_UNCLEAN &&
           newState != TaskStatus.State.UNASSIGNED) &&
          (oldState == newState)) {
        LOG.warn("Recieved duplicate status update of '" + newState +
                 "' for '" + taskid + "' of TIP '" + getTIPId() + "'" +
                 "oldTT=" + oldStatus.getTaskTracker() +
                 " while newTT=" + status.getTaskTracker());
        return false;
      }

      // The task is not allowed to move from completed back to running.
      // We have seen out of order status messages moving tasks from complete
      // to running. This is a spot fix, but it should be addressed more
      // globally.
      if ((newState == TaskStatus.State.RUNNING ||
           newState == TaskStatus.State.UNASSIGNED) &&
          (oldState == TaskStatus.State.FAILED ||
           oldState == TaskStatus.State.KILLED ||
           oldState == TaskStatus.State.FAILED_UNCLEAN ||
           oldState == TaskStatus.State.KILLED_UNCLEAN ||
           oldState == TaskStatus.State.SUCCEEDED ||
           oldState == TaskStatus.State.COMMIT_PENDING)) {
        return false;
      }

      //Do not accept any status once the task is marked FAILED/KILLED
      //This is to handle the case of the JobTracker timing out a task
      //due to launch delay, but the TT comes back with any state or
      //TT got expired
      if (oldState == TaskStatus.State.FAILED ||
          oldState == TaskStatus.State.KILLED) {
        // Ask the tracker to kill the stray attempt on its next heartbeat.
        tasksToKill.put(taskid, true);
        return false;
      }

      changed = oldState != newState;
    }
    // if task is a cleanup attempt, do not replace the complete status,
    // update only specific fields.
    // For example, startTime should not be updated,
    // but finishTime has to be updated.
    if (!isCleanupAttempt(taskid)) {
      taskStatuses.put(taskid, status);
    } else {
      taskStatuses.get(taskid).statusUpdate(status.getRunState(),
        status.getProgress(), status.getStateString(), status.getPhase(),
        status.getFinishTime());
    }

    // Recompute progress
    recomputeProgress();
    return changed;
  }

  /**
   * Indicate that one of the taskids in this TaskInProgress
   * has failed.
   */
  public void incompleteSubTask(TaskAttemptID taskid,
                                JobStatus jobStatus) {
    //
    // Note the failure and its location
    //
    TaskStatus status = taskStatuses.get(taskid);
    String trackerName;
    String trackerHostName = null;
    TaskStatus.State taskState = TaskStatus.State.FAILED;
    if (status != null) {
      trackerName = status.getTaskTracker();
      trackerHostName =
        JobInProgress.convertTrackerNameToHostName(trackerName);
      // Check if the user manually KILLED/FAILED this task-attempt...
      Boolean shouldFail = tasksToKill.remove(taskid);
      if (shouldFail != null) {
        // Already-terminated attempts keep a clean FAILED/KILLED state;
        // live ones are moved to the *_UNCLEAN variant pending cleanup.
        if (status.getRunState() == TaskStatus.State.FAILED ||
            status.getRunState() == TaskStatus.State.KILLED) {
          taskState = (shouldFail) ? TaskStatus.State.FAILED :
                                     TaskStatus.State.KILLED;
        } else {
          taskState = (shouldFail) ? TaskStatus.State.FAILED_UNCLEAN :
                                     TaskStatus.State.KILLED_UNCLEAN;
        }
        status.setRunState(taskState);
        addDiagnosticInfo(taskid, "Task has been " + taskState + " by the user" );
      }

      // Any state other than a terminal failure/kill is force-failed here.
      taskState = status.getRunState();
      if (taskState != TaskStatus.State.FAILED &&
          taskState != TaskStatus.State.KILLED &&
          taskState != TaskStatus.State.FAILED_UNCLEAN &&
          taskState != TaskStatus.State.KILLED_UNCLEAN) {
        LOG.info("Task '" + taskid + "' running on '" + trackerName +
                "' in state: '" + taskState + "' being failed!");
        status.setRunState(TaskStatus.State.FAILED);
        taskState = TaskStatus.State.FAILED;
      }

      // tasktracker went down and failed time was not reported.
      if (0 == status.getFinishTime()){
        status.setFinishTime(System.currentTimeMillis());
      }
    }

    this.activeTasks.remove(taskid);

    // Since we do not fail completed reduces (whose outputs go to hdfs), we
    // should note this failure only for completed maps, only if this taskid;
    // completed this map. however if the job is done, there is no need to
    // manipulate completed maps
    if (this.isMapTask() && !jobSetup && !jobCleanup && isComplete(taskid) &&
        jobStatus.getRunState() != JobStatus.SUCCEEDED) {
      this.completes--;

      // Reset the successfulTaskId since we don't have a SUCCESSFUL task now
      resetSuccessfulTaskid();
    }

    // Note that there can be failures of tasks that are hosted on a machine
    // that has not yet registered with restarted jobtracker
    // recalculate the counts only if its a genuine failure
    if (tasks.contains(taskid)) {
      if (taskState == TaskStatus.State.FAILED) {
        numTaskFailures++;
        machinesWhereFailed.add(trackerHostName);
        if(maxSkipRecords>0) {
          //skipping feature enabled
          LOG.debug("TaskInProgress adding" + status.getNextRecordRange());
          failedRanges.add(status.getNextRecordRange());
          skipping = startSkipping();
        }
      } else if (taskState == TaskStatus.State.KILLED) {
        numKilledTasks++;
      }
    }

    // Too many failed attempts: give up on the whole TIP.
    if (numTaskFailures >= maxTaskAttempts) {
      LOG.info("TaskInProgress " + getTIPId() + " has failed " + numTaskFailures + " times.");
      kill();
    }
  }

  /**
   * Get whether to start skipping mode.
   */
  private boolean startSkipping() {
    if(maxSkipRecords>0 &&
        numTaskFailures>=SkipBadRecords.getAttemptsToStartSkipping(conf)) {
      return true;
    }
    return false;
  }

  /**
   * Finalize the <b>completed</b> task; note that this might not be the first
   * task-attempt of the {@link TaskInProgress} and hence might be declared
   * {@link TaskStatus.State.SUCCEEDED} or {@link TaskStatus.State.KILLED}
   * 
   * @param taskId id of the completed task-attempt
   * @param finalTaskState final {@link TaskStatus.State} of the task-attempt
   */
  private void completedTask(TaskAttemptID taskId, TaskStatus.State finalTaskState) {
    TaskStatus status = taskStatuses.get(taskId);
    status.setRunState(finalTaskState);
    activeTasks.remove(taskId);
  }

  /**
   * Indicate that one of the taskids in this already-completed
   * TaskInProgress has successfully completed; hence we mark this
   * taskid as {@link TaskStatus.State.KILLED}.
*/ void alreadyCompletedTask(TaskAttemptID taskid) { // 'KILL' the task completedTask(taskid, TaskStatus.State.KILLED); // Note the reason for the task being 'KILLED' addDiagnosticInfo(taskid, "Already completed TIP"); LOG.info("Already complete TIP " + getTIPId() + " has completed task " + taskid); } /** * Indicate that one of the taskids in this TaskInProgress * has successfully completed! */ public void completed(TaskAttemptID taskid) { // // Record that this taskid is complete // completedTask(taskid, TaskStatus.State.SUCCEEDED); // Note the successful taskid setSuccessfulTaskid(taskid); // // Now that the TIP is complete, the other speculative // subtasks will be closed when the owning tasktracker // reports in and calls shouldClose() on this object. // this.completes++; this.execFinishTime = System.currentTimeMillis(); recomputeProgress(); } /** * Get the split locations */ public String[] getSplitLocations() { if (isMapTask() && !jobSetup && !jobCleanup) { return rawSplit.getLocations(); } return new String[0]; } /** * Get the Status of the tasks managed by this TIP */ public TaskStatus[] getTaskStatuses() { return taskStatuses.values().toArray(new TaskStatus[taskStatuses.size()]); } /** * Get the status of the specified task * @param taskid * @return */ public TaskStatus getTaskStatus(TaskAttemptID taskid) { return taskStatuses.get(taskid); } /** * The TIP's been ordered kill()ed. */ public void kill() { if (isComplete() || failed) { return; } this.failed = true; killed = true; this.execFinishTime = System.currentTimeMillis(); recomputeProgress(); } /** * Was the task killed? 
   * @return true if the task killed
   */
  public boolean wasKilled() {
    return killed;
  }

  /**
   * Kill the given task
   */
  boolean killTask(TaskAttemptID taskId, boolean shouldFail) {
    TaskStatus st = taskStatuses.get(taskId);
    // Only a live attempt (running, committing, in task-cleanup, or not yet
    // assigned) may be killed, and only when no kill/fail request is already
    // pending for it (tasksToKill.put returns the previous mapping, if any).
    if(st != null && (st.getRunState() == TaskStatus.State.RUNNING
        || st.getRunState() == TaskStatus.State.COMMIT_PENDING ||
        st.inTaskCleanupPhase() ||
        st.getRunState() == TaskStatus.State.UNASSIGNED)
        && tasksToKill.put(taskId, shouldFail) == null ) {
      String logStr = "Request received to " + (shouldFail ? "fail" : "kill")
                      + " task '" + taskId + "' by user";
      addDiagnosticInfo(taskId, logStr);
      LOG.info(logStr);
      return true;
    }
    return false;
  }

  /**
   * This method is called whenever there's a status change
   * for one of the TIP's sub-tasks. It recomputes the overall
   * progress for the TIP. We examine all sub-tasks and find
   * the one that's most advanced (and non-failed).
   */
  void recomputeProgress() {
    if (isComplete()) {
      this.progress = 1;
      // update the counters and the state from the successful attempt
      TaskStatus completedStatus = taskStatuses.get(getSuccessfulTaskid());
      this.counters = completedStatus.getCounters();
      this.state = completedStatus.getStateString();
    } else if (failed) {
      this.progress = 0;
      // reset the counters and the state (there is no successful attempt)
      this.state = "";
      this.counters = new Counters();
jaxlaw/hadoop-common
4ac058b7e8bc43c9ba794fda964895dcc2298fcf
MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. Contributed by Suhas Gogate.
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index db1b73b..08f3cf4 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,467 +1,470 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383002 + MAPREDUCE-676. Fix Hadoop Vaidya to ensure it works for map-only jobs. + (Suhas Gogate via acmurthy) + HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. 
(jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. 
rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. 
Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. 
Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. 
Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. 
http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. 
http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. 
http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. 
Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/contrib/vaidya/build.xml b/src/contrib/vaidya/build.xml index d5ab229..f150e05 100644 --- a/src/contrib/vaidya/build.xml +++ b/src/contrib/vaidya/build.xml @@ -1,68 +1,69 @@ <?xml version="1.0" ?> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. 
See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <project name="vaidya" default="jar"> <import file="../build-contrib.xml" /> + <import file="../../../build.xml" /> <target name="init"> <mkdir dir="${build.dir}" /> <mkdir dir="${build.classes}" /> <mkdir dir="${build.dir}/bin" /> <mkdir dir="${build.dir}/conf" /> <copy todir="${build.dir}/bin"> <!-- copy hadoop vaidya command script file to hadoop-vaidya/bin --> <fileset dir="${basedir}/src/java/org/apache/hadoop/vaidya"> <include name="vaidya.sh" /> </fileset> </copy> <copy todir="${build.dir}/conf"> <!-- copy hadoop vaidya tests config file to chuckwa/conf --> <fileset dir="${basedir}/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests"> <include name="postex_diagnosis_tests.xml" /> </fileset> </copy> </target> <!-- ====================================================== --> <!-- Override jar target to include the tests conf xml file --> <!-- ====================================================== --> <target name="jar" depends="compile" unless="skip.contrib"> <echo message="contrib: ${name}" /> <jar jarfile="${build.dir}/hadoop-${version}-${name}.jar"> <fileset dir="${build.classes}" /> <fileset dir="${basedir}/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests"> <include name="postex_diagnosis_tests.xml" /> </fileset> </jar> </target> <target name="package" depends="jar"> <mkdir dir="${dist.dir}/contrib/${name}" 
/> <copy todir="${dist.dir}/contrib/${name}" includeEmptyDirs="false"> <fileset dir="${build.dir}"> <exclude name="**/classes/" /> </fileset> </copy> <chmod dir="${dist.dir}/contrib/${name}/bin" perm="a+x" includes="*" /> </target> </project> diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/DiagnosticTest.java b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/DiagnosticTest.java index 2a0fcb0..392acc9 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/DiagnosticTest.java +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/DiagnosticTest.java @@ -1,307 +1,370 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.vaidya; import java.lang.Runnable; +import java.sql.Timestamp; import org.apache.hadoop.vaidya.statistics.job.*; import org.apache.hadoop.vaidya.util.*; import org.w3c.dom.Node; import org.w3c.dom.Document; import org.w3c.dom.NodeList; import org.w3c.dom.Element; +import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.JobKeys; /* * This is an abstract base class to be extended by each diagnostic test * class. It implements Runnable interface so that if required multiple tests * can be run in parallel. 
*/ public abstract class DiagnosticTest implements Runnable { private static final double HIGHVAL = 0.99; private static final double MEDIUMVAL = 0.66; private static final double LOWVAL = 0.33; /* * Job statistics are passed to this class against which this diagnostic * test is evaluated. */ private JobStatistics _jobExecutionStats; private Element _testConfigElement; private double _impactLevel; private boolean _evaluated; private boolean _testPassed; /* * Checks if test is already evaluated against job execution statistics * @return - true if test is already evaluated once. */ public boolean isEvaluated() { return _evaluated; } /* * If impact level (returned by evaluate method) is less than success threshold * then test is passed (NEGATIVE) else failed (POSITIVE) which inturn indicates the * problem with job performance */ public boolean istestPassed() { return this._testPassed; } /* * Initialize the globals */ public void initGlobals (JobStatistics jobExecutionStats, Element testConfigElement) { this._jobExecutionStats = jobExecutionStats; this._testConfigElement = testConfigElement; } /* * Returns a prescription/advice (formated text) based on the evaluation of * diagnostic test condition (evaluate method). Individual test should override * and implement it. If the value returned is null then the prescription advice * is printed as provided in the test config file. */ public abstract String getPrescription(); /* * This method prints any reference details to support the test result. Individual * test needs to override and implement it and information printed is specific * to individual test. */ public abstract String getReferenceDetails (); /* * Evaluates diagnostic condition and returns impact level (value [0..1]) * Typically this method calculates the impact of a diagnosed condition on the job performance * (Note: for boolean conditions it is either 0 or 1). 
*/ public abstract double evaluate (JobStatistics jobExecutionStats); /* * Get the Title information for this test as set in the test config file */ public String getTitle() throws Exception { return XMLUtils.getElementValue("Title", this._testConfigElement); } /* * Get the Description information as set in the test config file. */ public String getDescription() throws Exception { return XMLUtils.getElementValue("Description", this._testConfigElement); } /* * Get the Importance value as set in the test config file. */ public double getImportance() throws Exception { if (XMLUtils.getElementValue("Importance", this._testConfigElement).equalsIgnoreCase("high")) { return HIGHVAL; } else if (XMLUtils.getElementValue("Importance", this._testConfigElement).equalsIgnoreCase("medium")) { return MEDIUMVAL; } else { return LOWVAL; } } /* * Returns the impact level of this test condition. This value is calculated and * returned by evaluate method. */ public double getImpactLevel() throws Exception { if (!this.isEvaluated()) { throw new Exception("Test has not been evaluated"); } return truncate(this._impactLevel); } /* * Get the severity level as specified in the test config file. */ public double getSeverityLevel() throws Exception { return truncate ((double)(getImportance()*getImpactLevel())); } /* * Get Success Threshold as specified in the test config file. */ public double getSuccessThreshold() throws Exception { double x = Double.parseDouble(XMLUtils.getElementValue("SuccessThreshold", this._testConfigElement)); return truncate (x); } /* * Creates and returns the report element for this test based on the * test evaluation results. 
*/ - public Element getReportElement(Document doc, Node parent) throws Exception { + public Element getReportElement(Document doc, Node parent, int i) throws Exception { + /* * If test is not evaluated yet then throw exception */ if (!this.isEvaluated()) { throw new Exception("Test has not been evaluated"); } - + + /* + * If i == 0, means first test, then print job information + * before it. + */ + if (i == 0) { + Node reportElementx = doc.createElement("JobInformationElement"); + parent.appendChild(reportElementx); + + // Insert JOBTRACKERID + Node itemx = doc.createElement("JobTrackerID"); + reportElementx.appendChild(itemx); + Node valuex = doc.createTextNode(this._jobExecutionStats.getStringValue(JobKeys.JOBTRACKERID)); + itemx.appendChild(valuex); + + // Insert JOBNAME + itemx = doc.createElement("JobName"); + reportElementx.appendChild(itemx); + valuex = doc.createTextNode(this._jobExecutionStats.getStringValue(JobKeys.JOBNAME)); + itemx.appendChild(valuex); + + // Insert JOBTYPE + itemx = doc.createElement("JobType"); + reportElementx.appendChild(itemx); + valuex = doc.createTextNode(this._jobExecutionStats.getStringValue(JobKeys.JOBTYPE)); + itemx.appendChild(valuex); + + // Insert USER + itemx = doc.createElement("User"); + reportElementx.appendChild(itemx); + valuex = doc.createTextNode(this._jobExecutionStats.getStringValue(JobKeys.USER)); + itemx.appendChild(valuex); + + // Insert SUBMIT_TIME + itemx = doc.createElement("SubmitTime"); + reportElementx.appendChild(itemx); + String st1 = (new Timestamp(Long.parseLong(this._jobExecutionStats.getStringValue(JobKeys.SUBMIT_TIME))).toString()); + valuex = doc.createTextNode(st1); + itemx.appendChild(valuex); + + // Insert LAUNCH_TIME + itemx = doc.createElement("LaunchTime"); + reportElementx.appendChild(itemx); + String st2 = (new Timestamp(Long.parseLong(this._jobExecutionStats.getStringValue(JobKeys.LAUNCH_TIME))).toString()); + valuex = doc.createTextNode(st2); + itemx.appendChild(valuex); + + // Insert 
FINISH_TIME + itemx = doc.createElement("FinishTime"); + reportElementx.appendChild(itemx); + String st3 = (new Timestamp(Long.parseLong(this._jobExecutionStats.getStringValue(JobKeys.FINISH_TIME))).toString()); + valuex = doc.createTextNode(st3); + itemx.appendChild(valuex); + + // Insert STATUS + itemx = doc.createElement("Status"); + reportElementx.appendChild(itemx); + valuex = doc.createTextNode(this._jobExecutionStats.getStringValue(JobKeys.STATUS)); + itemx.appendChild(valuex); + } + /* * Construct and return the report element */ // Insert Child ReportElement Node reportElement = doc.createElement("TestReportElement"); parent.appendChild(reportElement); // Insert title Node item = doc.createElement("TestTitle"); reportElement.appendChild(item); Node value = doc.createTextNode(this.getTitle()); item.appendChild(value); // Insert description item = doc.createElement("TestDescription"); reportElement.appendChild(item); value = doc.createTextNode(this.getDescription()); item.appendChild(value); // Insert Importance item = doc.createElement("TestImportance"); reportElement.appendChild(item); String imp; if (this.getImportance() == HIGHVAL) { imp = "HIGH"; } else if (this.getImportance() == MEDIUMVAL) { imp = "MEDIUM"; } else { imp = "LOW"; } value = doc.createTextNode(imp); item.appendChild(value); // Insert Importance item = doc.createElement("TestResult"); reportElement.appendChild(item); if (this._testPassed) { value = doc.createTextNode("NEGATIVE(PASSED)"); } else { value = doc.createTextNode("POSITIVE(FAILED)"); } item.appendChild(value); // TODO : if (!this._testPassed) { // Insert Severity item = doc.createElement("TestSeverity"); reportElement.appendChild(item); value = doc.createTextNode(""+this.getSeverityLevel()); item.appendChild(value); // Insert Reference Details item = doc.createElement("ReferenceDetails"); reportElement.appendChild(item); value = doc.createTextNode(""+this.getReferenceDetails()); item.appendChild(value); // Insert Prescription 
Advice item = doc.createElement("TestPrescription"); String val = this.getPrescription(); if (val == null) { val = XMLUtils.getElementValue("Prescription", this._testConfigElement); } reportElement.appendChild(item); value = doc.createTextNode(""+val); item.appendChild(value); // } return (Element)reportElement; } /* * (non-Javadoc) * @see java.lang.Runnable#run() */ public void run() { /* * Evaluate the test */ this._impactLevel = this.evaluate(this._jobExecutionStats); this._evaluated = true; try { if (this._impactLevel >= this.getSuccessThreshold()) { this._testPassed = false; } else { this._testPassed = true; } } catch (Exception e) { e.printStackTrace(); } } /* * Returns value of element of type long part of InputElement of diagnostic * rule */ protected long getInputElementLongValue (String elementName, long defaultValue) { Element inputElement = (Element)(this._testConfigElement.getElementsByTagName("InputElement").item(0)); Element prs = null; long value; prs = (Element)inputElement.getElementsByTagName(elementName).item(0); if (prs != null) { value = Long.parseLong(prs.getFirstChild().getNodeValue().trim()); } else { value = defaultValue; } return value; } /* * Returns value of element of type double part of InputElement of diagnostic rule */ protected double getInputElementDoubleValue(String elementName, double defaultValue) { Element inputElement = (Element)(this._testConfigElement.getElementsByTagName("InputElement").item(0)); Element prs = null; double value; prs = (Element)inputElement.getElementsByTagName(elementName).item(0); if (prs != null) { value = Double.parseDouble(prs.getFirstChild().getNodeValue().trim()); } else { value = defaultValue; } return value; } /* * Returns value of element of type String part of InputElement of diagnostic rule */ protected String getInputElementStringValue(String elementName, String defaultValue) { Element inputElement = (Element)(this._testConfigElement.getElementsByTagName("InputElement").item(0)); Element prs = 
null; String value; prs = (Element)inputElement.getElementsByTagName(elementName).item(0); if (prs != null) { value = prs.getFirstChild().getNodeValue().trim(); } else { value = defaultValue; } return value; } /* * truncate doubles to 2 digit. */ public static double truncate(double x) { long y=(long)(x*100); return (double)y/100; } } diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/PostExPerformanceDiagnoser.java b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/PostExPerformanceDiagnoser.java index b2994b8..fb76698 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/PostExPerformanceDiagnoser.java +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/PostExPerformanceDiagnoser.java @@ -1,269 +1,269 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.vaidya.postexdiagnosis; import java.net.URL; import java.io.InputStream; import java.io.FileInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobHistory.JobInfo; import org.apache.hadoop.mapred.DefaultJobHistoryParser; import org.apache.hadoop.vaidya.util.XMLUtils; import org.apache.hadoop.vaidya.DiagnosticTest; import org.apache.hadoop.vaidya.JobDiagnoser; import org.apache.hadoop.vaidya.statistics.job.JobStatistics; import org.w3c.dom.NodeList; import org.w3c.dom.Document; import org.w3c.dom.Element; /** * This class acts as a driver or rule engine for executing the post execution * performance diagnostics tests of a map/reduce job. It prints or saves the * diagnostic report as a xml document. */ public class PostExPerformanceDiagnoser extends JobDiagnoser { private String _jobHistoryFile = null; private InputStream _testsConfFileIs = null; private String _reportFile = null; private String _jobConfFile = null; /* * Data available for analysts to write post execution performance diagnostic rules */ private JobStatistics _jobExecutionStatistics; /* * Get the report file where diagnostic report is to be saved */ public String getReportFile () { return this._reportFile; } /* * Get the job history log file used in collecting the job counters */ public String getJobHistoryFile () { return this._jobHistoryFile; } /* * Get the test configuration file where all the diagnostic tests are registered * with their configuration information. 
*/ public InputStream getTestsConfFileIs () { return this._testsConfFileIs; } /* * Set the test configuration file */ public void setTestsConfFileIs (InputStream testsConfFileIs) { this._testsConfFileIs = testsConfFileIs; } /** * @return JobStatistics - Object storing the job configuration and execution * counters and statistics information */ public JobStatistics getJobExecutionStatistics() { return _jobExecutionStatistics; } /** * @param jobConfFile - URL pointing to job configuration (job_conf.xml) file * @param jobHistoryLogFile - URL pointing to job history log file * @param testsConfFile - file path for test configuration file (optional). * If not specified default path is:$HADOOP_HOME/contrib/vaidya/pxpd_tests_config.xml * @param reportFile - file path for storing report (optional) */ public PostExPerformanceDiagnoser (String jobConfFile, String jobHistoryFile, InputStream testsConfFileIs, String reportFile) throws Exception { this._jobHistoryFile = jobHistoryFile; this._testsConfFileIs = testsConfFileIs; this._reportFile = reportFile; this._jobConfFile = jobConfFile; /* * Read the job information necessary for post performance analysis */ JobConf jobConf = new JobConf(); JobInfo jobInfo = new JobInfo(""); readJobInformation(jobConf, jobInfo); this._jobExecutionStatistics = new JobStatistics(jobConf, jobInfo); } /** * read and populate job statistics information. 
*/ private void readJobInformation(JobConf jobConf, JobInfo jobInfo) throws Exception { /* * Convert the input strings to URL */ URL jobConfFileUrl = new URL(this._jobConfFile); URL jobHistoryFileUrl = new URL (this._jobHistoryFile); /* * Read the Job Configuration from the jobConfFile url */ jobConf.addResource(jobConfFileUrl); /* * Read JobHistoryFile and build job counters to evaluate diagnostic rules */ if (jobHistoryFileUrl.getProtocol().equals("hdfs")) { DefaultJobHistoryParser.parseJobTasks (jobHistoryFileUrl.getPath(), jobInfo, FileSystem.get(jobConf)); } else if (jobHistoryFileUrl.getProtocol().equals("file")) { DefaultJobHistoryParser.parseJobTasks (jobHistoryFileUrl.getPath(), jobInfo, FileSystem.getLocal(jobConf)); } else { throw new Exception("Malformed URL. Protocol: "+jobHistoryFileUrl.getProtocol()); } } /* * print Help */ private static void printHelp() { System.out.println("Usage:"); System.out.println("PostExPerformanceDiagnoser -jobconf <fileurl> -joblog <fileurl> [-testconf <filepath>] [-report <filepath>]"); System.out.println(); System.out.println("-jobconf <fileurl> : File path for job configuration file (e.g. job_xxxx_conf.xml). It can be on HDFS or"); System.out.println(" : local file system. It should be specified in the URL format."); System.out.println(" : e.g. local file => file://localhost/Users/hadoop-user/job_0001_conf.xml"); System.out.println(" : e.g. hdfs file => hdfs://namenode:port/Users/hadoop-user/hodlogs/.../job_0001_conf.xml"); System.out.println(); System.out.println("-joblog <fileurl> : File path for job history log file. It can be on HDFS or local file system."); System.out.println(" : It should be specified in the URL format."); System.out.println(); System.out.println("-testconf <filepath> : Optional file path for performance advisor tests configuration file. It should be available"); System.out.println(" : on local file system and be specified as as an absolute file path."); System.out.println(" : e.g. 
=> /Users/hadoop-user/postex_diagnosis_tests.xml. If not specified default file will be used"); System.out.println(" : from the hadoop-{ver}-vaidya.jar in a classpath."); System.out.println(" : For user to view or make local copy of default tests, file is available at $HADOOP_HOME/contrib/vaidya/conf/postex_diagnosis_tests.xml"); System.out.println(); System.out.println("-report <filepath> : Optional file path for for storing diagnostic report in a XML format. Path should be available"); System.out.println(" : on local file system and be specified as as an absolute file path."); System.out.println(" : e.g. => /Users/hadoop-user/postex_diagnosis_report.xml. If not specified report will be printed on console"); System.out.println(); System.out.println("-help : prints this usage"); System.out.println(); } /** * @param args */ public static void main(String[] args) { String jobconffile = null; String joblogfile = null; InputStream testsconffileis = null; String reportfile = null; /* * Parse the command line arguments */ try { for (int i=0; i<args.length-1; i=i+2) { if (args[i].equalsIgnoreCase("-jobconf")) { jobconffile = args[i+1]; } else if (args[i].equalsIgnoreCase("-joblog")) { joblogfile = args[i+1]; } else if (args[i].equalsIgnoreCase("-testconf")) { testsconffileis = new FileInputStream(new java.io.File(args[i+1])); } else if (args[i].equalsIgnoreCase("-report")) { reportfile = args[i+1]; } else if (args[i].equalsIgnoreCase("-help")) { printHelp(); return; } else { printHelp(); return; } } } catch (Exception e) { - System.out.println ("Invalid arguments."); + System.err.println ("Invalid arguments."); e.printStackTrace(); - System.out.println(); + System.err.println(); printHelp(); } // Check if required arguments are specified if (jobconffile == null || joblogfile == null) { - System.out.println ("Invalid arguments: -jobconf or -joblog arguments are missing"); + System.err.println ("Invalid arguments: -jobconf or -joblog arguments are missing"); printHelp(); 
return; } try { /* * Create performance advisor and read job execution statistics */ PostExPerformanceDiagnoser pa = new PostExPerformanceDiagnoser(jobconffile,joblogfile,testsconffileis,reportfile); /* * Read the diagnostic tests configuration file (xml) */ if (pa.getTestsConfFileIs() == null) { java.io.InputStream testsconfis = Thread.currentThread().getContextClassLoader().getResourceAsStream("postex_diagnosis_tests.xml"); pa.setTestsConfFileIs(testsconfis); } /* * Parse the tests configuration file */ Document rulesDoc = XMLUtils.parse(pa.getTestsConfFileIs()); /* * Read the diagnostic rule entries from the config file. * For every rule read and load the rule class name * Execute the Run() method of the class and get the report element */ NodeList list = rulesDoc.getElementsByTagName("DiagnosticTest"); int list_size = list.getLength(); for (int i=0;i<list_size; i++) { Element dRule = (Element)list.item(i); NodeList cNodeList = dRule.getElementsByTagName("ClassName"); Element cn = (Element)cNodeList.item(0); String className = cn.getFirstChild().getNodeValue().trim(); Class rc = Class.forName(className); DiagnosticTest test = (DiagnosticTest)rc.newInstance(); test.initGlobals(pa.getJobExecutionStatistics(), (Element)list.item(i)); test.run(); NodeList nodelist = pa.getReport().getElementsByTagName("PostExPerformanceDiagnosticReport"); Element root = (Element)nodelist.item(0); //root.appendChild(rule.getReportElement(pa.getReport(), root)); - Element re = test.getReportElement(pa.getReport(), root); + Element re = test.getReportElement(pa.getReport(), root, i); //XMLUtils.printDOM(re); } //Optionally print or save the report if (pa.getReportFile() == null) { pa.printReport(); } else { pa.saveReport(pa.getReportFile()); } }catch (Exception e) { - System.out.print("Exception:"+e); + System.err.print("Exception:"+e); e.printStackTrace(); } } } diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/BalancedReducePartitioning.java 
b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/BalancedReducePartitioning.java index 11523fc..afd0eff 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/BalancedReducePartitioning.java +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/BalancedReducePartitioning.java @@ -1,111 +1,119 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.vaidya.postexdiagnosis.tests; import org.apache.hadoop.vaidya.statistics.job.JobStatistics; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.JobKeys; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.KeyDataType; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.ReduceTaskKeys; import org.apache.hadoop.vaidya.statistics.job.ReduceTaskStatistics; import org.apache.hadoop.vaidya.DiagnosticTest; import org.w3c.dom.Element; import java.util.Hashtable; import java.util.List; /** * */ public class BalancedReducePartitioning extends DiagnosticTest { private long totalReduces; private long busyReducers; private long percentReduceRecordsSize; private double percent; private double impact; + private JobStatistics _job; /** * */ public BalancedReducePartitioning() { } /* */ @Override public double evaluate(JobStatistics jobExecutionStats) { + /* Set the global job variable */ + this._job = jobExecutionStats; + + /* If Map only job then impact is zero */ + if (jobExecutionStats.getStringValue(JobKeys.JOBTYPE).equals("MAP_ONLY")) { + this.impact = 0; + return this.impact; + } + /* * Read this rule specific input PercentReduceRecords */ this.percent = getInputElementDoubleValue("PercentReduceRecords", 0.90); - /* * Get the sorted reduce task list by number of INPUT_RECORDS (ascending) */ List<ReduceTaskStatistics> srTaskList = jobExecutionStats.getReduceTaskList(ReduceTaskKeys.INPUT_RECORDS, KeyDataType.LONG); this.percentReduceRecordsSize = (long) (this.percent * jobExecutionStats.getLongValue(JobKeys.REDUCE_INPUT_RECORDS)); this.totalReduces = jobExecutionStats.getLongValue(JobKeys.TOTAL_REDUCES); long tempReduceRecordsCount = 0; this.busyReducers = 0; for (int i=srTaskList.size()-1; i>-1; i--) { tempReduceRecordsCount += srTaskList.get(i).getLongValue(ReduceTaskKeys.INPUT_RECORDS); this.busyReducers++; if (tempReduceRecordsCount >= this.percentReduceRecordsSize) { break; } } // Calculate 
Impact return this.impact = (1 - (double)this.busyReducers/(double)this.totalReduces); - } /* * helper function to print specific reduce counter for all reduce tasks */ public void printReduceCounters (List<Hashtable<ReduceTaskKeys, String>> x, ReduceTaskKeys key) { for (int i=0; i<x.size(); i++) { - System.out.println("ind:"+i+", Value:<"+x.get(i).get(key)+">"); + System.out.println("ind:"+i+", Value:"+x.get(i).get(key)+":"); } } /* * */ @Override public String getPrescription() { return "* Use the appropriate partitioning function"+ "\n" + "* For streaming job consider following partitioner and hadoop config parameters\n"+ " * org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner\n" + " * -jobconf stream.map.output.field.separator, -jobconf stream.num.map.output.key.fields"; } /* */ @Override public String getReferenceDetails() { String ref = "* TotalReduceTasks: "+this.totalReduces+"\n"+ "* BusyReduceTasks processing "+this.percent+ "% of total records: " +this.busyReducers+"\n"+ "* Impact: "+truncate(this.impact); return ref; } } diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapSideDiskSpill.java b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapSideDiskSpill.java index fc2682f..87a5506 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapSideDiskSpill.java +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapSideDiskSpill.java @@ -1,115 +1,117 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.vaidya.postexdiagnosis.tests; import org.apache.hadoop.vaidya.statistics.job.JobStatistics; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.JobKeys; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.KeyDataType; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.MapTaskKeys; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.ReduceTaskKeys; import org.apache.hadoop.vaidya.statistics.job.MapTaskStatistics; import org.apache.hadoop.vaidya.DiagnosticTest; import org.w3c.dom.Element; import java.util.Hashtable; import java.util.List; /** * */ public class MapSideDiskSpill extends DiagnosticTest { private double _impact; private JobStatistics _job; private long _numLocalBytesWrittenByMaps; /** * */ public MapSideDiskSpill() { } /* * */ @Override public double evaluate(JobStatistics job) { /* * Set the this._job */ this._job = job; /* * Read the Normalization Factor */ double normF = getInputElementDoubleValue("NormalizationFactor", 3.0); /* - * Get the sorted reduce task list by number MapTaskKeys.OUTPUT_BYTES + * Get the sorted map task list by number MapTaskKeys.OUTPUT_BYTES */ - List<MapTaskStatistics> srTaskList = job.getMapTaskList(MapTaskKeys.LOCAL_BYTES_WRITTEN, KeyDataType.LONG); - int size = srTaskList.size(); + List<MapTaskStatistics> smTaskList = job.getMapTaskList(MapTaskKeys.FILE_BYTES_WRITTEN, KeyDataType.LONG); + int size = smTaskList.size(); long numLocalBytesWrittenByMaps = 0; for (int i=0; i<size; i++) { - numLocalBytesWrittenByMaps += 
srTaskList.get(i).getLongValue(MapTaskKeys.LOCAL_BYTES_WRITTEN); + numLocalBytesWrittenByMaps += smTaskList.get(i).getLongValue(MapTaskKeys.FILE_BYTES_WRITTEN); } this._numLocalBytesWrittenByMaps = numLocalBytesWrittenByMaps; /* * Map only job vs. map reduce job + * For MapReduce job MAP_OUTPUT_BYTES are normally written by maps on local disk, so they are subtracted + * from the localBytesWrittenByMaps. */ if (job.getLongValue(JobKeys.TOTAL_REDUCES) > 0) { this._impact = (this._numLocalBytesWrittenByMaps - job.getLongValue(JobKeys.MAP_OUTPUT_BYTES))/job.getLongValue(JobKeys.MAP_OUTPUT_BYTES); } else { this._impact = this._numLocalBytesWrittenByMaps/job.getLongValue(JobKeys.MAP_OUTPUT_BYTES); } if (this._impact > normF) { this._impact = 1.0; } else { this._impact = this._impact/normF; } return this._impact; } /* (non-Javadoc) * @see org.apache.hadoop.contrib.utils.perfadvisor.diagnostic_rules.DiagnosticRule#getAdvice() */ @Override public String getPrescription() { return "* Use combiner to lower the map output size.\n" + "* Increase map side sort buffer size (io.sort.mb:"+this._job.getJobConf().getInt("io.sort.mb", 0) + ").\n" + "* Increase index buffer size (io.sort.record.percent:"+ this._job.getJobConf().getInt("io.sort.record.percent", 0) + ") if number of Map Output Records are large. \n" + - "* Increase (io.sort.spill.percent:"+ this._job.getJobConf().getInt("io.sort.spill.percent", 0) + "), default 0.80 i.e. 80% of sort buffer size & index buffer size. \n"; + "* Increase (io.sort.spill.percent:"+ this._job.getJobConf().getInt("io.sort.spill.percent", 0) + "), default 0.80 i.e. 80% of sort buffer size and index buffer size. 
\n"; } /* (non-Javadoc) * @see org.apache.hadoop.contrib.utils.perfadvisor.diagnostic_rules.DiagnosticRule#getReferenceDetails() */ @Override public String getReferenceDetails() { String ref = "* TotalMapOutputBytes: "+this._job.getLongValue(JobKeys.MAP_OUTPUT_BYTES)+"\n"+ "* Total Local Bytes Written by Maps: "+this._numLocalBytesWrittenByMaps+"\n"+ "* Impact: "+ truncate(this._impact); return ref; } } diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapsReExecutionImpact.java b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapsReExecutionImpact.java index f8168a5..27e716b 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapsReExecutionImpact.java +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/MapsReExecutionImpact.java @@ -1,88 +1,87 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.vaidya.postexdiagnosis.tests; import org.apache.hadoop.vaidya.statistics.job.JobStatistics; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.JobKeys; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.KeyDataType; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.ReduceTaskKeys; import org.apache.hadoop.vaidya.statistics.job.ReduceTaskStatistics; import org.apache.hadoop.vaidya.DiagnosticTest; import org.w3c.dom.Element; import java.util.Hashtable; import java.util.List; /** * */ public class MapsReExecutionImpact extends DiagnosticTest { private double _impact; private JobStatistics _job; private long _percentMapsReExecuted; - /** * */ public MapsReExecutionImpact() { } /* * Evaluate the test */ @Override public double evaluate(JobStatistics job) { /* * Set the this._job */ this._job = job; - + /* * Calculate and return the impact */ this._impact = ((job.getLongValue(JobKeys.LAUNCHED_MAPS) - job.getLongValue(JobKeys.TOTAL_MAPS))/job.getLongValue(JobKeys.TOTAL_MAPS)); this._percentMapsReExecuted = Math.round(this._impact * 100); return this._impact; } /* (non-Javadoc) * @see org.apache.hadoop.contrib.utils.perfadvisor.diagnostic_rules.DiagnosticRule#getAdvice() */ @Override public String getPrescription() { return "* Need careful evaluation of why maps are re-executed. 
\n" + " * It could be due to some set of unstable cluster nodes.\n" + " * It could be due application specific failures."; } /* (non-Javadoc) * @see org.apache.hadoop.contrib.utils.perfadvisor.diagnostic_rules.DiagnosticRule#getReferenceDetails() */ @Override public String getReferenceDetails() { String ref = "* Total Map Tasks: "+this._job.getLongValue(JobKeys.TOTAL_MAPS)+"\n"+ "* Launched Map Tasks: "+this._job.getLongValue(JobKeys.LAUNCHED_MAPS)+"\n"+ "* Percent Maps ReExecuted: "+this._percentMapsReExecuted+"\n"+ "* Impact: "+truncate(this._impact); return ref; } } diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReadingHDFSFilesAsSideEffect.java b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReadingHDFSFilesAsSideEffect.java index 8892f37..8417c46 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReadingHDFSFilesAsSideEffect.java +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReadingHDFSFilesAsSideEffect.java @@ -1,114 +1,112 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.vaidya.postexdiagnosis.tests; import org.apache.hadoop.vaidya.statistics.job.JobStatistics; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.JobKeys; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.KeyDataType; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.ReduceTaskKeys; import org.apache.hadoop.vaidya.statistics.job.ReduceTaskStatistics; import org.apache.hadoop.vaidya.DiagnosticTest; import org.w3c.dom.Element; import java.util.Hashtable; import java.util.List; /** * */ public class ReadingHDFSFilesAsSideEffect extends DiagnosticTest { private double _impact; private JobStatistics _job; - - /** * */ public ReadingHDFSFilesAsSideEffect() { } /* * Evaluate the test */ @Override public double evaluate(JobStatistics job) { /* * Set the this._job */ this._job = job; - + /* * Read the Normalization Factor */ double normF = getInputElementDoubleValue("NormalizationFactor", 2.0); /* * Calculate and return the impact * * Check if job level aggregate bytes read from HDFS are more than map input bytes * Typically they should be same unless maps and/or reducers are reading some data * from HDFS as a side effect * * If side effect HDFS bytes read are >= twice map input bytes impact is treated as * maximum. */ if(job.getLongValue(JobKeys.MAP_INPUT_BYTES) == 0 && job.getLongValue(JobKeys.HDFS_BYTES_READ) != 0) { return (double)1; } if (job.getLongValue(JobKeys.HDFS_BYTES_READ) == 0) { return (double)0; } this._impact = (job.getLongValue(JobKeys.HDFS_BYTES_READ) / job.getLongValue(JobKeys.MAP_INPUT_BYTES)); if (this._impact >= normF) { this._impact = 1; } else { this._impact = this._impact/normF; } return this._impact; } /* (non-Javadoc) * @see org.apache.hadoop.contrib.utils.perfadvisor.diagnostic_rules.DiagnosticRule#getAdvice() */ @Override public String getPrescription() { return "Map and/or Reduce tasks are reading application specific files from HDFS. 
Make sure the replication factor\n" + "of these HDFS files is high enough to avoid the data reading bottleneck. Typically replication factor\n" + "can be square root of map/reduce tasks capacity of the allocated cluster."; } /* (non-Javadoc) * @see org.apache.hadoop.contrib.utils.perfadvisor.diagnostic_rules.DiagnosticRule#getReferenceDetails() */ @Override public String getReferenceDetails() { String ref = "* Total HDFS Bytes read: "+this._job.getLongValue(JobKeys.HDFS_BYTES_READ)+"\n"+ "* Total Map Input Bytes read: "+this._job.getLongValue(JobKeys.MAP_INPUT_BYTES)+"\n"+ "* Impact: "+truncate(this._impact); return ref; } } diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReducesReExecutionImpact.java b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReducesReExecutionImpact.java index f770ccf..5f6446b 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReducesReExecutionImpact.java +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReducesReExecutionImpact.java @@ -1,89 +1,94 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.vaidya.postexdiagnosis.tests; import org.apache.hadoop.vaidya.statistics.job.JobStatistics; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.JobKeys; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.KeyDataType; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.ReduceTaskKeys; import org.apache.hadoop.vaidya.statistics.job.ReduceTaskStatistics; import org.apache.hadoop.vaidya.DiagnosticTest; import org.w3c.dom.Element; import java.util.Hashtable; import java.util.List; /** * */ public class ReducesReExecutionImpact extends DiagnosticTest { private double _impact; private JobStatistics _job; private long _percentReducesReExecuted; - /** * */ public ReducesReExecutionImpact() { } /* * Evaluate the test */ @Override public double evaluate(JobStatistics job) { /* * Set the this._job */ this._job = job; + + /* find job type */ + if (job.getStringValue(JobKeys.JOBTYPE).equals("MAP_ONLY")) { + this._impact = 0; + return this._impact; + } /* * Calculate and return the impact */ this._impact = ((job.getLongValue(JobKeys.LAUNCHED_REDUCES) - job.getLongValue(JobKeys.TOTAL_REDUCES))/job.getLongValue(JobKeys.TOTAL_REDUCES)); this._percentReducesReExecuted = Math.round(this._impact * 100); return this._impact; } /* (non-Javadoc) * @see org.apache.hadoop.contrib.utils.perfadvisor.diagnostic_rules.DiagnosticRule#getAdvice() */ @Override public String getPrescription() { return "* Need careful evaluation of why reduce tasks are re-executed. 
\n" + " * It could be due to some set of unstable cluster nodes.\n" + " * It could be due application specific failures."; } /* (non-Javadoc) * @see org.apache.hadoop.contrib.utils.perfadvisor.diagnostic_rules.DiagnosticRule#getReferenceDetails() */ @Override public String getReferenceDetails() { String ref = - "* Total Reduce Tasks: "+this._job.getLongValue(JobKeys.TOTAL_REDUCES)+"\n"+ + "* Total Reduce Tasks: "+this._job.getLongValue(JobKeys.TOTAL_REDUCES)+"\n"+ "* Launched Reduce Tasks: "+this._job.getLongValue(JobKeys.LAUNCHED_REDUCES)+"\n"+ "* Percent Reduce Tasks ReExecuted: "+this._percentReducesReExecuted + "\n" + "* Impact: "+truncate(this._impact); return ref; } } diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java index 3da8ae5..1cfebd8 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java @@ -1,640 +1,656 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.vaidya.statistics.job; import java.util.ArrayList; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobHistory; import org.apache.hadoop.mapred.JobHistory.JobInfo; import org.apache.hadoop.mapred.JobHistory.Keys; import org.apache.hadoop.mapred.Counters; import org.apache.hadoop.mapred.Counters.Counter; import java.text.ParseException; //import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.JobKeys; import java.util.Hashtable; import java.util.Map; import java.util.regex.Pattern; import java.util.regex.Matcher; import java.util.Arrays; import java.util.Comparator; import java.util.List; import java.util.Collections; /** * */ public class JobStatistics implements JobStatisticsInterface { /* * Pattern for parsing the COUNTERS */ private static final Pattern _pattern = Pattern.compile("[[^,]?]+"); //"[[^,]?]+" /* * Job configuration */ private JobConf _jobConf; /** * @param jobConf the jobConf to set */ void setJobConf(JobConf jobConf) { this._jobConf = jobConf; // TODO: Add job conf to _job array } /* * Aggregated Job level counters */ private JobHistory.JobInfo _jobInfo; /* * Job stats */ private java.util.Hashtable<Enum, String> _job; /** * @param jobConf the jobConf to set */ public JobConf getJobConf() { return this._jobConf; } /* * Get Job Counters of type long */ public long getLongValue(Enum key) { if (this._job.get(key) == null) { return (long)0; } else { return Long.parseLong(this._job.get(key)); } } /* * Get job Counters of type Double */ public double getDoubleValue(Enum key) { if (this._job.get(key) == null) { return (double)0; } else { return Double.parseDouble(this._job.get(key)); } } /* * Get Job Counters of type String */ public String getStringValue(Enum key) { - if (this._job.get(key) == null) { - return ""; - } else { + if (this._job.get(key) == null) { + return ""; + } else { return this._job.get(key); - } + } } /* * Set key value of type long */ public void setValue(Enum key, 
long value) { this._job.put(key, Long.toString(value)); } /* * Set key value of type double */ public void setValue(Enum key, double value) { this._job.put(key, Double.toString(value)); } /* * Set key value of type String */ public void setValue(Enum key, String value) { this._job.put(key, value); } /* * Map Task List (Sorted by task id) */ private ArrayList<MapTaskStatistics> _mapTaskList = new ArrayList<MapTaskStatistics>(); /* * Reduce Task List (Sorted by task id) */ private ArrayList<ReduceTaskStatistics> _reduceTaskList = new ArrayList<ReduceTaskStatistics>(); /* * Ctor: */ public JobStatistics (JobConf jobConf, JobInfo jobInfo) throws ParseException { this._jobConf = jobConf; this._jobInfo = jobInfo; this._job = new Hashtable<Enum, String>(); populate_Job(this._job, this._jobInfo.getValues()); populate_MapReduceTaskLists(this._mapTaskList, this._reduceTaskList, this._jobInfo.getAllTasks()); + + // Add the Job Type: MAP_REDUCE, MAP_ONLY + if (getLongValue(JobKeys.TOTAL_REDUCES) == 0) { + this._job.put(JobKeys.JOBTYPE,"MAP_ONLY"); + } else { + this._job.put(JobKeys.JOBTYPE,"MAP_REDUCE"); + } } /* * */ private void populate_MapReduceTaskLists (ArrayList<MapTaskStatistics> mapTaskList, ArrayList<ReduceTaskStatistics> reduceTaskList, java.util.Map<String, JobHistory.Task> taskMap) throws ParseException { /* * */ int num_tasks = taskMap.entrySet().size(); java.util.Iterator<Map.Entry<String, JobHistory.Task>> ti = taskMap.entrySet().iterator(); for (int i = 0; i < num_tasks; i++) { Map.Entry<String, JobHistory.Task> entry = (Map.Entry<String, JobHistory.Task>) ti.next(); JobHistory.Task task = entry.getValue(); if (task.get(Keys.TASK_TYPE).equals("MAP")) { MapTaskStatistics mapT = new MapTaskStatistics(); java.util.Map<JobHistory.Keys, String> mapTask = task.getValues(); java.util.Map<JobHistory.Keys, String> successTaskAttemptMap = getLastSuccessfulTaskAttempt(task); // NOTE: Following would lead to less number of actual tasks collected in the tasklist array if 
(successTaskAttemptMap != null) { mapTask.putAll(successTaskAttemptMap); } else { - System.out.println("Task:<"+task.get(Keys.TASKID)+"> is not successful - SKIPPING"); + System.err.println("Task:<"+task.get(Keys.TASKID)+"> is not successful - SKIPPING"); } int size = mapTask.size(); java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = mapTask.entrySet().iterator(); for (int j = 0; j < size; j++) { Map.Entry<JobHistory.Keys, String> mtc = kv.next(); JobHistory.Keys key = mtc.getKey(); String value = mtc.getValue(); //System.out.println("JobHistory.MapKeys."+key+": "+value); switch (key) { case TASKID: mapT.setValue(MapTaskKeys.TASK_ID, value); break; case TASK_ATTEMPT_ID: mapT.setValue(MapTaskKeys.ATTEMPT_ID, value); break; case HOSTNAME: mapT.setValue(MapTaskKeys.HOSTNAME, value); break; case TASK_TYPE: mapT.setValue(MapTaskKeys.TASK_TYPE, value); break; case TASK_STATUS: mapT.setValue(MapTaskKeys.STATUS, value); break; case START_TIME: mapT.setValue(MapTaskKeys.START_TIME, value); break; case FINISH_TIME: mapT.setValue(MapTaskKeys.FINISH_TIME, value); break; case SPLITS: mapT.setValue(MapTaskKeys.SPLITS, value); break; case TRACKER_NAME: mapT.setValue(MapTaskKeys.TRACKER_NAME, value); break; case STATE_STRING: mapT.setValue(MapTaskKeys.STATE_STRING, value); break; case HTTP_PORT: mapT.setValue(MapTaskKeys.HTTP_PORT, value); break; case ERROR: mapT.setValue(MapTaskKeys.ERROR, value); break; case COUNTERS: value.concat(","); parseAndAddMapTaskCounters(mapT, value); mapTaskList.add(mapT); break; - default: System.out.println("JobHistory.MapKeys."+key+" : NOT INCLUDED IN PERFORMANCE ADVISOR MAP COUNTERS"); + default: System.err.println("JobHistory.MapKeys."+key+" : NOT INCLUDED IN PERFORMANCE ADVISOR MAP COUNTERS"); break; } } // Add number of task attempts mapT.setValue(MapTaskKeys.NUM_ATTEMPTS, (new Integer(task.getTaskAttempts().size())).toString()); + + // Add EXECUTION_TIME = FINISH_TIME - START_TIME + long etime = 
mapT.getLongValue(MapTaskKeys.FINISH_TIME) - mapT.getLongValue(MapTaskKeys.START_TIME); + mapT.setValue(MapTaskKeys.EXECUTION_TIME, (new Long(etime)).toString()); }else if (task.get(Keys.TASK_TYPE).equals("REDUCE")) { - ReduceTaskStatistics reduceT = new ReduceTaskStatistics(); - java.util.Map<JobHistory.Keys, String> reduceTask = task.getValues(); - java.util.Map<JobHistory.Keys, String> successTaskAttemptMap = getLastSuccessfulTaskAttempt(task); - // NOTE: Following would lead to less number of actual tasks collected in the tasklist array - if (successTaskAttemptMap != null) { - reduceTask.putAll(successTaskAttemptMap); - } else { - System.out.println("Task:<"+task.get(Keys.TASKID)+"> is not successful - SKIPPING"); - } - int size = reduceTask.size(); - java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = reduceTask.entrySet().iterator(); - for (int j = 0; j < size; j++) - { - Map.Entry<JobHistory.Keys, String> rtc = kv.next(); - JobHistory.Keys key = rtc.getKey(); - String value = rtc.getValue(); - //System.out.println("JobHistory.ReduceKeys."+key+": "+value); - switch (key) { - case TASKID: reduceT.setValue(ReduceTaskKeys.TASK_ID, value); break; - case TASK_ATTEMPT_ID: reduceT.setValue(ReduceTaskKeys.ATTEMPT_ID, value); break; - case HOSTNAME: reduceT.setValue(ReduceTaskKeys.HOSTNAME, value); break; - case TASK_TYPE: reduceT.setValue(ReduceTaskKeys.TASK_TYPE, value); break; - case TASK_STATUS: reduceT.setValue(ReduceTaskKeys.STATUS, value); break; - case START_TIME: reduceT.setValue(ReduceTaskKeys.START_TIME, value); break; - case FINISH_TIME: reduceT.setValue(ReduceTaskKeys.FINISH_TIME, value); break; - case SHUFFLE_FINISHED: reduceT.setValue(ReduceTaskKeys.SHUFFLE_FINISH_TIME, value); break; - case SORT_FINISHED: reduceT.setValue(ReduceTaskKeys.SORT_FINISH_TIME, value); break; - case SPLITS: reduceT.setValue(ReduceTaskKeys.SPLITS, value); break; - case TRACKER_NAME: reduceT.setValue(ReduceTaskKeys.TRACKER_NAME, value); break; - case STATE_STRING: 
reduceT.setValue(ReduceTaskKeys.STATE_STRING, value); break; - case HTTP_PORT: reduceT.setValue(ReduceTaskKeys.HTTP_PORT, value); break; - case COUNTERS: - value.concat(","); - parseAndAddReduceTaskCounters(reduceT, value); - reduceTaskList.add(reduceT); - break; - default: System.out.println("JobHistory.ReduceKeys."+key+" : NOT INCLUDED IN PERFORMANCE ADVISOR REDUCE COUNTERS"); - break; + + ReduceTaskStatistics reduceT = new ReduceTaskStatistics(); + java.util.Map<JobHistory.Keys, String> reduceTask = task.getValues(); + java.util.Map<JobHistory.Keys, String> successTaskAttemptMap = getLastSuccessfulTaskAttempt(task); + // NOTE: Following would lead to less number of actual tasks collected in the tasklist array + if (successTaskAttemptMap != null) { + reduceTask.putAll(successTaskAttemptMap); + } else { + System.err.println("Task:<"+task.get(Keys.TASKID)+"> is not successful - SKIPPING"); + } + int size = reduceTask.size(); + java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = reduceTask.entrySet().iterator(); + for (int j = 0; j < size; j++) + { + Map.Entry<JobHistory.Keys, String> rtc = kv.next(); + JobHistory.Keys key = rtc.getKey(); + String value = rtc.getValue(); + //System.out.println("JobHistory.ReduceKeys."+key+": "+value); + switch (key) { + case TASKID: reduceT.setValue(ReduceTaskKeys.TASK_ID, value); break; + case TASK_ATTEMPT_ID: reduceT.setValue(ReduceTaskKeys.ATTEMPT_ID, value); break; + case HOSTNAME: reduceT.setValue(ReduceTaskKeys.HOSTNAME, value); break; + case TASK_TYPE: reduceT.setValue(ReduceTaskKeys.TASK_TYPE, value); break; + case TASK_STATUS: reduceT.setValue(ReduceTaskKeys.STATUS, value); break; + case START_TIME: reduceT.setValue(ReduceTaskKeys.START_TIME, value); break; + case FINISH_TIME: reduceT.setValue(ReduceTaskKeys.FINISH_TIME, value); break; + case SHUFFLE_FINISHED: reduceT.setValue(ReduceTaskKeys.SHUFFLE_FINISH_TIME, value); break; + case SORT_FINISHED: reduceT.setValue(ReduceTaskKeys.SORT_FINISH_TIME, value); break; + 
case SPLITS: reduceT.setValue(ReduceTaskKeys.SPLITS, value); break; + case TRACKER_NAME: reduceT.setValue(ReduceTaskKeys.TRACKER_NAME, value); break; + case STATE_STRING: reduceT.setValue(ReduceTaskKeys.STATE_STRING, value); break; + case HTTP_PORT: reduceT.setValue(ReduceTaskKeys.HTTP_PORT, value); break; + case COUNTERS: + value.concat(","); + parseAndAddReduceTaskCounters(reduceT, value); + reduceTaskList.add(reduceT); + break; + default: System.err.println("JobHistory.ReduceKeys."+key+" : NOT INCLUDED IN PERFORMANCE ADVISOR REDUCE COUNTERS"); + break; + } } - + // Add number of task attempts reduceT.setValue(ReduceTaskKeys.NUM_ATTEMPTS, (new Integer(task.getTaskAttempts().size())).toString()); - } + + // Add EXECUTION_TIME = FINISH_TIME - START_TIME + long etime1 = reduceT.getLongValue(ReduceTaskKeys.FINISH_TIME) - reduceT.getLongValue(ReduceTaskKeys.START_TIME); + reduceT.setValue(ReduceTaskKeys.EXECUTION_TIME, (new Long(etime1)).toString()); + } else if (task.get(Keys.TASK_TYPE).equals("CLEANUP") || task.get(Keys.TASK_TYPE).equals("SETUP")) { //System.out.println("INFO: IGNORING TASK TYPE : "+task.get(Keys.TASK_TYPE)); } else { - System.out.println("UNKNOWN TASK TYPE : "+task.get(Keys.TASK_TYPE)); + System.err.println("UNKNOWN TASK TYPE : "+task.get(Keys.TASK_TYPE)); } } } /* * Get last successful task attempt to be added in the stats */ private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) { Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts(); int size = taskAttempts.size(); java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator(); for (int i=0; i<size; i++) { // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next(); JobHistory.TaskAttempt attempt = tae.getValue(); if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) { return attempt.getValues(); } } return null; } /* * Popuate 
the job stats */ private void populate_Job (Hashtable<Enum, String> job, java.util.Map<JobHistory.Keys, String> jobC) throws ParseException { int size = jobC.size(); java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator(); for (int i = 0; i < size; i++) { Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next(); JobHistory.Keys key = entry.getKey(); String value = entry.getValue(); //System.out.println("JobHistory.JobKeys."+key+": "+value); switch (key) { case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break; - //case START_TIME: job.put(JobKeys., value); break; case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break; case JOBID: job.put(JobKeys.JOBID, value); break; case JOBNAME: job.put(JobKeys.JOBNAME, value); break; case USER: job.put(JobKeys.USER, value); break; case JOBCONF: job.put(JobKeys.JOBCONF, value); break; case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break; case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break; case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break; case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break; case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break; case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break; case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break; case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break; case JOB_STATUS: job.put(JobKeys.STATUS, value); break; case JOB_PRIORITY: job.put(JobKeys.JOB_PRIORITY, value); break; case COUNTERS: value.concat(","); parseAndAddJobCounters(job, value); break; - default: System.out.println("JobHistory.Keys."+key+" : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS"); + default: System.err.println("JobHistory.Keys."+key+" : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS"); break; } } } /* * Parse and add the job counters */ private void parseAndAddJobCounters(Hashtable<Enum, String> job, String counters) throws ParseException { 
Counters cnt = Counters.fromEscapedCompactString(counters); for (java.util.Iterator<Counters.Group> grps = cnt.iterator(); grps.hasNext(); ) { Counters.Group grp = grps.next(); //String groupname = "<" + grp.getName() + ">::<" + grp.getDisplayName() + ">"; for (java.util.Iterator<Counters.Counter> mycounters = grp.iterator(); mycounters.hasNext(); ) { Counters.Counter counter = mycounters.next(); //String countername = "<"+counter.getName()+">::<"+counter.getDisplayName()+">::<"+counter.getValue()+">"; - //System.out.println("groupName:"+groupname+",countername: "+countername); + //System.err.println("groupName:"+groupname+",countername: "+countername); String countername = grp.getDisplayName()+"."+counter.getDisplayName(); String value = (new Long(counter.getValue())).toString(); String[] parts = {countername,value}; - //System.out.println("part0:"+parts[0]+",:part1 "+parts[1]); + //System.err.println("part0:<"+parts[0]+">,:part1 <"+parts[1]+">"); if (parts[0].equals("FileSystemCounters.FILE_BYTES_READ")) { - job.put(JobKeys.LOCAL_BYTES_READ, parts[1]); + job.put(JobKeys.FILE_BYTES_READ, parts[1]); } else if (parts[0].equals("FileSystemCounters.FILE_BYTES_WRITTEN")) { - job.put(JobKeys.LOCAL_BYTES_WRITTEN, parts[1]); + job.put(JobKeys.FILE_BYTES_WRITTEN, parts[1]); } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_READ")) { job.put(JobKeys.HDFS_BYTES_READ, parts[1]); } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_WRITTEN")) { job.put(JobKeys.HDFS_BYTES_WRITTEN, parts[1]); } else if (parts[0].equals("Job Counters .Launched map tasks")) { job.put(JobKeys.LAUNCHED_MAPS, parts[1]); } else if (parts[0].equals("Job Counters .Launched reduce tasks")) { job.put(JobKeys.LAUNCHED_REDUCES, parts[1]); } else if (parts[0].equals("Job Counters .Data-local map tasks")) { job.put(JobKeys.DATALOCAL_MAPS, parts[1]); } else if (parts[0].equals("Job Counters .Rack-local map tasks")) { job.put(JobKeys.RACKLOCAL_MAPS, parts[1]); } else if (parts[0].equals("Map-Reduce 
Framework.Map input records")) { job.put(JobKeys.MAP_INPUT_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Map output records")) { job.put(JobKeys.MAP_OUTPUT_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Map input bytes")) { job.put(JobKeys.MAP_INPUT_BYTES, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Map output bytes")) { job.put(JobKeys.MAP_OUTPUT_BYTES, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Combine input records")) { job.put(JobKeys.COMBINE_INPUT_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Combine output records")) { job.put(JobKeys.COMBINE_OUTPUT_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Reduce input groups")) { job.put(JobKeys.REDUCE_INPUT_GROUPS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Reduce input records")) { job.put(JobKeys.REDUCE_INPUT_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Reduce output records")) { job.put(JobKeys.REDUCE_OUTPUT_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Spilled Records")) { job.put(JobKeys.SPILLED_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Reduce shuffle bytes")) { job.put(JobKeys.SHUFFLE_BYTES, parts[1]); } else { - System.out.println("JobCounterKey:<"+parts[0]+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR"); + System.err.println("JobCounterKey:<"+parts[0]+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR"); } } } } /* * Parse and add the Map task counters */ private void parseAndAddMapTaskCounters(MapTaskStatistics mapTask, String counters) throws ParseException { Counters cnt = Counters.fromEscapedCompactString(counters); for (java.util.Iterator<Counters.Group> grps = cnt.iterator(); grps.hasNext(); ) { Counters.Group grp = grps.next(); //String groupname = "<" + grp.getName() + ">::<" + grp.getDisplayName() + ">"; for (java.util.Iterator<Counters.Counter> mycounters = grp.iterator(); mycounters.hasNext(); ) { 
Counters.Counter counter = mycounters.next(); //String countername = "<"+counter.getName()+">::<"+counter.getDisplayName()+">::<"+counter.getValue()+">"; //System.out.println("groupName:"+groupname+",countername: "+countername); String countername = grp.getDisplayName()+"."+counter.getDisplayName(); String value = (new Long(counter.getValue())).toString(); String[] parts = {countername,value}; //System.out.println("part0:"+parts[0]+",:part1 "+parts[1]); if (parts[0].equals("FileSystemCounters.FILE_BYTES_READ")) { - mapTask.setValue(MapTaskKeys.LOCAL_BYTES_READ, parts[1]); + mapTask.setValue(MapTaskKeys.FILE_BYTES_READ, parts[1]); } else if (parts[0].equals("FileSystemCounters.FILE_BYTES_WRITTEN")) { - mapTask.setValue(MapTaskKeys.LOCAL_BYTES_WRITTEN, parts[1]); + mapTask.setValue(MapTaskKeys.FILE_BYTES_WRITTEN, parts[1]); } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_READ")) { mapTask.setValue(MapTaskKeys.HDFS_BYTES_READ, parts[1]); } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_WRITTEN")) { mapTask.setValue(MapTaskKeys.HDFS_BYTES_WRITTEN, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Map input records")) { mapTask.setValue(MapTaskKeys.INPUT_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Map output records")) { mapTask.setValue(MapTaskKeys.OUTPUT_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Map input bytes")) { mapTask.setValue(MapTaskKeys.INPUT_BYTES, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Map output bytes")) { mapTask.setValue(MapTaskKeys.OUTPUT_BYTES, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Combine input records")) { mapTask.setValue(MapTaskKeys.COMBINE_INPUT_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Combine output records")) { mapTask.setValue(MapTaskKeys.COMBINE_OUTPUT_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Spilled Records")) { mapTask.setValue(MapTaskKeys.SPILLED_RECORDS, 
parts[1]); } else { - System.out.println("MapCounterKey:<"+parts[0]+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR MAP TASK"); + System.err.println("MapCounterKey:<"+parts[0]+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR MAP TASK"); } } } } /* * Parse and add the reduce task counters */ private void parseAndAddReduceTaskCounters(ReduceTaskStatistics reduceTask, String counters) throws ParseException { Counters cnt = Counters.fromEscapedCompactString(counters); for (java.util.Iterator<Counters.Group> grps = cnt.iterator(); grps.hasNext(); ) { Counters.Group grp = grps.next(); //String groupname = "<" + grp.getName() + ">::<" + grp.getDisplayName() + ">"; for (java.util.Iterator<Counters.Counter> mycounters = grp.iterator(); mycounters.hasNext(); ) { Counters.Counter counter = mycounters.next(); //String countername = "<"+counter.getName()+">::<"+counter.getDisplayName()+">::<"+counter.getValue()+">"; //System.out.println("groupName:"+groupname+",countername: "+countername); String countername = grp.getDisplayName()+"."+counter.getDisplayName(); String value = (new Long(counter.getValue())).toString(); String[] parts = {countername,value}; //System.out.println("part0:"+parts[0]+",:part1 "+parts[1]); if (parts[0].equals("FileSystemCounters.FILE_BYTES_READ")) { - reduceTask.setValue(ReduceTaskKeys.LOCAL_BYTES_READ, parts[1]); + reduceTask.setValue(ReduceTaskKeys.FILE_BYTES_READ, parts[1]); } else if (parts[0].equals("FileSystemCounters.FILE_BYTES_WRITTEN")) { - reduceTask.setValue(ReduceTaskKeys.LOCAL_BYTES_WRITTEN, parts[1]); + reduceTask.setValue(ReduceTaskKeys.FILE_BYTES_WRITTEN, parts[1]); } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_READ")) { reduceTask.setValue(ReduceTaskKeys.HDFS_BYTES_READ, parts[1]); } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_WRITTEN")) { reduceTask.setValue(ReduceTaskKeys.HDFS_BYTES_WRITTEN, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Reduce input records")) { 
reduceTask.setValue(ReduceTaskKeys.INPUT_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Reduce output records")) { reduceTask.setValue(ReduceTaskKeys.OUTPUT_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Combine input records")) { reduceTask.setValue(ReduceTaskKeys.COMBINE_INPUT_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Combine output records")) { reduceTask.setValue(ReduceTaskKeys.COMBINE_OUTPUT_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Reduce input groups")) { reduceTask.setValue(ReduceTaskKeys.INPUT_GROUPS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Spilled Records")) { reduceTask.setValue(ReduceTaskKeys.SPILLED_RECORDS, parts[1]); } else if (parts[0].equals("Map-Reduce Framework.Reduce shuffle bytes")) { reduceTask.setValue(ReduceTaskKeys.SHUFFLE_BYTES, parts[1]); } else { - System.out.println("ReduceCounterKey:<"+parts[0]+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR REDUCE TASK"); + System.err.println("ReduceCounterKey:<"+parts[0]+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR REDUCE TASK"); } } } } /* * Print the Job Execution Statistics * TODO: split to pring job, map/reduce task list and individual map/reduce task stats */ public void printJobExecutionStatistics() { /* * Print Job Counters */ System.out.println("JOB COUNTERS *********************************************"); int size = this._job.size(); java.util.Iterator<Map.Entry<Enum, String>> kv = this._job.entrySet().iterator(); for (int i = 0; i < size; i++) { Map.Entry<Enum, String> entry = (Map.Entry<Enum, String>) kv.next(); Enum key = entry.getKey(); String value = entry.getValue(); System.out.println("Key:<" + key.name() + ">, value:<"+ value +">"); } /* * */ System.out.println("MAP COUNTERS *********************************************"); int size1 = this._mapTaskList.size(); for (int i = 0; i < size1; i++) { System.out.println("MAP TASK *********************************************"); 
this._mapTaskList.get(i).printKeys(); } /* * */ System.out.println("REDUCE COUNTERS *********************************************"); int size2 = this._mapTaskList.size(); for (int i = 0; i < size2; i++) { System.out.println("REDUCE TASK *********************************************"); this._reduceTaskList.get(i).printKeys(); } } /* * Hash table keeping sorted lists of map tasks based on the specific map task key */ private Hashtable <Enum, ArrayList<MapTaskStatistics>> _sortedMapTaskListsByKey = new Hashtable<Enum, ArrayList<MapTaskStatistics>>(); /* * @return mapTaskList : ArrayList of MapTaskStatistics * @param mapTaskSortKey : Specific counter key used for sorting the task list * @param datatype : indicates the data type of the counter key used for sorting * If sort key is null then by default map tasks are sorted using map task ids. */ public synchronized ArrayList<MapTaskStatistics> getMapTaskList(Enum mapTaskSortKey, KeyDataType dataType) { /* * If mapTaskSortKey is null then use the task id as a key. */ if (mapTaskSortKey == null) { mapTaskSortKey = MapTaskKeys.TASK_ID; } if (this._sortedMapTaskListsByKey.get(mapTaskSortKey) == null) { ArrayList<MapTaskStatistics> newList = (ArrayList<MapTaskStatistics>)this._mapTaskList.clone(); this._sortedMapTaskListsByKey.put(mapTaskSortKey, this.sortMapTasksByKey(newList, mapTaskSortKey, dataType)); } return this._sortedMapTaskListsByKey.get(mapTaskSortKey); } private ArrayList<MapTaskStatistics> sortMapTasksByKey (ArrayList<MapTaskStatistics> mapTasks, Enum key, Enum dataType) { MapCounterComparator mcc = new MapCounterComparator(key, dataType); Collections.sort (mapTasks, mcc); return mapTasks; } private class MapCounterComparator implements Comparator<MapTaskStatistics> { public Enum _sortKey; public Enum _dataType; public MapCounterComparator(Enum key, Enum dataType) { this._sortKey = key; this._dataType = dataType; } // Comparator interface requires defining compare method. 
public int compare(MapTaskStatistics a, MapTaskStatistics b) { if (this._dataType == KeyDataType.LONG) { long aa = a.getLongValue(this._sortKey); long bb = b.getLongValue(this._sortKey); if (aa<bb) return -1; if (aa==bb) return 0; if (aa>bb) return 1; } else { return a.getStringValue(this._sortKey).compareToIgnoreCase(b.getStringValue(this._sortKey)); } return 0; } } /* * Reduce Array List sorting */ private Hashtable <Enum, ArrayList<ReduceTaskStatistics>> _sortedReduceTaskListsByKey = new Hashtable<Enum,ArrayList<ReduceTaskStatistics>>(); /* * @return reduceTaskList : ArrayList of ReduceTaskStatistics * @param reduceTaskSortKey : Specific counter key used for sorting the task list * @param dataType : indicates the data type of the counter key used for sorting * If sort key is null then, by default reduce tasks are sorted using task ids. */ public synchronized ArrayList<ReduceTaskStatistics> getReduceTaskList (Enum reduceTaskSortKey, KeyDataType dataType) { /* * If reduceTaskSortKey is null then use the task id as a key. 
*/ if (reduceTaskSortKey == null) { reduceTaskSortKey = ReduceTaskKeys.TASK_ID; } if (this._sortedReduceTaskListsByKey.get(reduceTaskSortKey) == null) { ArrayList<ReduceTaskStatistics> newList = (ArrayList<ReduceTaskStatistics>)this._reduceTaskList.clone(); this._sortedReduceTaskListsByKey.put(reduceTaskSortKey, this.sortReduceTasksByKey(newList, reduceTaskSortKey, dataType)); } return this._sortedReduceTaskListsByKey.get(reduceTaskSortKey); } private ArrayList<ReduceTaskStatistics> sortReduceTasksByKey (ArrayList<ReduceTaskStatistics> reduceTasks, Enum key, Enum dataType) { ReduceCounterComparator rcc = new ReduceCounterComparator(key, dataType); Collections.sort (reduceTasks, rcc); return reduceTasks; } private class ReduceCounterComparator implements Comparator<ReduceTaskStatistics> { public Enum _sortKey; public Enum _dataType; //either long or string public ReduceCounterComparator(Enum key, Enum dataType) { this._sortKey = key; this._dataType = dataType; } // Comparator interface requires defining compare method. public int compare(ReduceTaskStatistics a, ReduceTaskStatistics b) { if (this._dataType == KeyDataType.LONG) { long aa = a.getLongValue(this._sortKey); long bb = b.getLongValue(this._sortKey); if (aa<bb) return -1; if (aa==bb) return 0; if (aa>bb) return 1; } else { return a.getStringValue(this._sortKey).compareToIgnoreCase(b.getStringValue(this._sortKey)); } return 0; } } } diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatisticsInterface.java b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatisticsInterface.java index ca71c1d..39ca94e 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatisticsInterface.java +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatisticsInterface.java @@ -1,125 +1,125 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.vaidya.statistics.job; import java.util.ArrayList; import org.apache.hadoop.mapred.JobConf; public interface JobStatisticsInterface { /** * Get job configuration (job.xml) values */ public JobConf getJobConf(); /* * Get Job Counters of type long */ public long getLongValue(Enum key); /* * Get job Counters of type Double */ public double getDoubleValue(Enum key); /* * Get Job Counters of type String */ public String getStringValue(Enum key); /* * Set key value of type long */ public void setValue(Enum key, long value); /* * Set key value of type double */ public void setValue(Enum key, double valye); /* * Set key value of type String */ public void setValue(Enum key, String value); /** * @return mapTaskList : ArrayList of MapTaskStatistics * @param mapTaskSortKey : Specific counter key used for sorting the task list * @param datatype : indicates the data type of the counter key used for sorting * If sort key is null then by default map tasks are sorted using map task ids. 
*/ public ArrayList<MapTaskStatistics> getMapTaskList(Enum mapTaskSortKey, KeyDataType dataType); /** * @return reduceTaskList : ArrayList of ReduceTaskStatistics * @param reduceTaskSortKey : Specific counter key used for sorting the task list * @param dataType : indicates the data type of the counter key used for sorting * If sort key is null then, by default reduce tasks are sorted using task ids. */ public ArrayList<ReduceTaskStatistics> getReduceTaskList(Enum reduceTaskSortKey, KeyDataType dataType); /* * Print the Job Execution Statistics */ public void printJobExecutionStatistics(); /* * Job and Task statistics Key data types */ public static enum KeyDataType { STRING, LONG, DOUBLE } /** * Job Keys */ public static enum JobKeys { - JOBTRACKERID, JOBID, JOBNAME, USER, SUBMIT_TIME, CONF_PATH, LAUNCH_TIME, TOTAL_MAPS, TOTAL_REDUCES, + JOBTRACKERID, JOBID, JOBNAME, JOBTYPE, USER, SUBMIT_TIME, CONF_PATH, LAUNCH_TIME, TOTAL_MAPS, TOTAL_REDUCES, STATUS, FINISH_TIME, FINISHED_MAPS, FINISHED_REDUCES, FAILED_MAPS, FAILED_REDUCES, LAUNCHED_MAPS, LAUNCHED_REDUCES, RACKLOCAL_MAPS, DATALOCAL_MAPS, HDFS_BYTES_READ, - HDFS_BYTES_WRITTEN, LOCAL_BYTES_READ, LOCAL_BYTES_WRITTEN, COMBINE_OUTPUT_RECORDS, + HDFS_BYTES_WRITTEN, FILE_BYTES_READ, FILE_BYTES_WRITTEN, COMBINE_OUTPUT_RECORDS, COMBINE_INPUT_RECORDS, REDUCE_INPUT_GROUPS, REDUCE_INPUT_RECORDS, REDUCE_OUTPUT_RECORDS, MAP_INPUT_RECORDS, MAP_OUTPUT_RECORDS, MAP_INPUT_BYTES, MAP_OUTPUT_BYTES, MAP_HDFS_BYTES_WRITTEN, JOBCONF, JOB_PRIORITY, SHUFFLE_BYTES, SPILLED_RECORDS - } + } /** * Map Task Keys */ public static enum MapTaskKeys { TASK_ID, TASK_TYPE, START_TIME, STATUS, FINISH_TIME, HDFS_BYTES_READ, HDFS_BYTES_WRITTEN, - LOCAL_BYTES_READ, LOCAL_BYTES_WRITTEN, COMBINE_OUTPUT_RECORDS, COMBINE_INPUT_RECORDS, + FILE_BYTES_READ, FILE_BYTES_WRITTEN, COMBINE_OUTPUT_RECORDS, COMBINE_INPUT_RECORDS, OUTPUT_RECORDS, INPUT_RECORDS, INPUT_BYTES, OUTPUT_BYTES, NUM_ATTEMPTS, ATTEMPT_ID, - HOSTNAME, SPLITS, SPILLED_RECORDS, TRACKER_NAME, 
STATE_STRING, HTTP_PORT, ERROR + HOSTNAME, SPLITS, SPILLED_RECORDS, TRACKER_NAME, STATE_STRING, HTTP_PORT, ERROR, EXECUTION_TIME } /** * Reduce Task Keys */ public static enum ReduceTaskKeys { TASK_ID, TASK_TYPE, START_TIME, STATUS, FINISH_TIME, HDFS_BYTES_READ, HDFS_BYTES_WRITTEN, - LOCAL_BYTES_READ, LOCAL_BYTES_WRITTEN, COMBINE_OUTPUT_RECORDS, COMBINE_INPUT_RECORDS, + FILE_BYTES_READ, FILE_BYTES_WRITTEN, COMBINE_OUTPUT_RECORDS, COMBINE_INPUT_RECORDS, OUTPUT_RECORDS, INPUT_RECORDS, NUM_ATTEMPTS, ATTEMPT_ID, HOSTNAME, SHUFFLE_FINISH_TIME, SORT_FINISH_TIME, INPUT_GROUPS, TRACKER_NAME, STATE_STRING, HTTP_PORT, SPLITS, SHUFFLE_BYTES, - SPILLED_RECORDS + SPILLED_RECORDS, EXECUTION_TIME } } diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/TaskStatistics.java b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/TaskStatistics.java index 1a0d55c..ee7ebbe 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/TaskStatistics.java +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/TaskStatistics.java @@ -1,90 +1,103 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.vaidya.statistics.job; import java.util.Hashtable; import java.util.Map; /** * */ public class TaskStatistics { /* * Stores task statistics as Enum/String key,value pairs. */ private Hashtable<Enum, String> _task = new Hashtable<Enum, String>(); - /* + /* * Get Long key value */ public long getLongValue(Enum key) { - return Long.parseLong(this._task.get(key)); - } - + if (this._task.get(key) == null) { + return (long)0; + } + else { + return Long.parseLong(this._task.get(key)); + } + } + /* - * Get double key value + * Get key type Double */ public double getDoubleValue(Enum key) { - return Double.parseDouble(this._task.get(key)); + if (this._task.get(key) == null) { + return (double)0; + } else { + return Double.parseDouble(this._task.get(key)); + } } - + /* - * Get String key value + * Get key of type String */ public String getStringValue(Enum key) { - return this._task.get(key); + if (this._task.get(key) == null) { + return ""; + } else { + return this._task.get(key); + } } - + /* * Set long key value */ public void setValue(Enum key, long value) { this._task.put(key, Long.toString(value)); } /* * Set double key value */ public void setValue(Enum key, double value) { this._task.put(key, Double.toString(value)); } /* * Set String key value */ public void setValue(Enum key, String value) { this._task.put(key, value); } /* * Print the key/values pairs for a task */ public void printKeys () { java.util.Set<Map.Entry<Enum, String>> task = this._task.entrySet(); int size = task.size(); java.util.Iterator<Map.Entry<Enum, String>> kv = task.iterator(); for (int i = 0; i < size; i++) { Map.Entry<Enum, String> entry = (Map.Entry<Enum, String>) kv.next(); Enum key = entry.getKey(); String value = entry.getValue(); System.out.println("Key:<" + key.name() + ">, value:<"+ value +">"); } } } diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/util/XMLUtils.java b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/util/XMLUtils.java 
index e66fe57..22071c1 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/util/XMLUtils.java +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/util/XMLUtils.java @@ -1,237 +1,237 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.vaidya.util; import java.io.IOException; import java.io.File; import java.io.InputStream; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; import javax.xml.parsers.DocumentBuilder; import javax.xml.transform.TransformerConfigurationException; import javax.xml.transform.TransformerException; import javax.xml.transform.Source; import javax.xml.transform.dom.DOMSource; import javax.xml.transform.Result; import javax.xml.transform.stream.StreamResult; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerFactory; import org.xml.sax.SAXParseException; import org.xml.sax.SAXException; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.NamedNodeMap; import org.w3c.dom.Node; import org.w3c.dom.NodeList; /** * Sample Utility class to work with DOM document */ public class XMLUtils { /** Prints the specified node, then prints all of its children. 
*/ public static void printDOM(Node node) { int type = node.getNodeType(); switch (type) { // print the document element case Node.DOCUMENT_NODE: { System.out.print("<?xml version=\"1.0\" ?>"); printDOM(((Document)node).getDocumentElement()); break; } // print element with attributes case Node.ELEMENT_NODE: { System.out.println(); System.out.print("<"); System.out.print(node.getNodeName()); NamedNodeMap attrs = node.getAttributes(); for (int i = 0; i < attrs.getLength(); i++) { Node attr = attrs.item(i); System.out.print(" " + attr.getNodeName().trim() + "=\"" + attr.getNodeValue().trim() + "\""); } System.out.print(">"); NodeList children = node.getChildNodes(); if (children != null) { int len = children.getLength(); for (int i = 0; i < len; i++) printDOM(children.item(i)); } break; } // handle entity reference nodes case Node.ENTITY_REFERENCE_NODE: { System.out.print("&"); System.out.print(node.getNodeName().trim()); System.out.print(";"); break; } // print cdata sections case Node.CDATA_SECTION_NODE: { System.out.print("<![CDATA["); System.out.print(node.getNodeValue().trim()); System.out.print("]]>"); break; } // print text case Node.TEXT_NODE: { System.out.println(); System.out.print(node.getNodeValue().trim()); break; } // print processing instruction case Node.PROCESSING_INSTRUCTION_NODE: { System.out.print("<?"); System.out.print(node.getNodeName().trim()); String data = node.getNodeValue().trim(); { System.out.print(" "); System.out.print(data); } System.out.print("?>"); break; } } if (type == Node.ELEMENT_NODE) { System.out.println(); System.out.print("</"); System.out.print(node.getNodeName().trim()); System.out.print('>'); } } /* * Get the value of the first (or only) element given its node name */ public static String getElementValue(String elementName, Element element) throws Exception { String value = null; NodeList childNodes = element.getElementsByTagName(elementName); Element cn = (Element)childNodes.item(0); value = 
cn.getFirstChild().getNodeValue().trim(); //value = childNodes.item(0).getNodeValue().trim(); if (value == null) { throw new Exception ("No element found with given name:"+elementName); } return value; } /** * Parse the XML file and create Document * @param fileName * @return Document */ public static Document parse(InputStream fs) { Document document = null; // Initiate DocumentBuilderFactory DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); // To get a validating parser factory.setValidating(false); // To get one that understands namespaces factory.setNamespaceAware(true); try { // Get DocumentBuilder DocumentBuilder builder = factory.newDocumentBuilder(); // Parse and load into memory the Document //document = builder.parse( new File(fileName)); document = builder.parse(fs); return document; } catch (SAXParseException spe) { // Error generated by the parser - System.out.println("\n** Parsing error , line " + spe.getLineNumber() + System.err.println("\n** Parsing error , line " + spe.getLineNumber() + ", uri " + spe.getSystemId()); - System.out.println(" " + spe.getMessage() ); + System.err.println(" " + spe.getMessage() ); // Use the contained exception, if any Exception x = spe; if (spe.getException() != null) x = spe.getException(); x.printStackTrace(); } catch (SAXException sxe) { // Error generated during parsing Exception x = sxe; if (sxe.getException() != null) x = sxe.getException(); x.printStackTrace(); } catch (ParserConfigurationException pce) { // Parser with specified options can't be built pce.printStackTrace(); } catch (IOException ioe) { // I/O error ioe.printStackTrace(); } return null; } /** * This method writes a DOM document to a file * @param filename * @param document */ public static void writeXmlToFile(String filename, Document document) { try { // Prepare the DOM document for writing Source source = new DOMSource(document); // Prepare the output file File file = new File(filename); Result result = new 
StreamResult(file); // Write the DOM document to the file // Get Transformer Transformer xformer = TransformerFactory.newInstance().newTransformer(); // Write to a file xformer.transform(source, result); } catch (TransformerConfigurationException e) { - System.out.println("TransformerConfigurationException: " + e); + System.err.println("TransformerConfigurationException: " + e); } catch (TransformerException e) { - System.out.println("TransformerException: " + e); + System.err.println("TransformerException: " + e); } } /** * Count Elements in Document by Tag Name * @param tag * @param document * @return number elements by Tag Name */ public static int countByTagName(String tag, Document document){ NodeList list = document.getElementsByTagName(tag); return list.getLength(); } }
jaxlaw/hadoop-common
3753dbffeca6e32833c64a87e97aae614db7c126
HADOOP-5582. Fix Hadoop Vaidya to use new Counters in org.apache.hadoop.mapreduce package. Contributed by Suhas Gogate.
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index c046567..db1b73b 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,463 +1,467 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383002 + + HADOOP-5582. Fix Hadoop Vaidya to use new Counters in + org.apache.hadoop.mapreduce package. (Suhas Gogate via acmurthy) + HDFS-595. umask settings in configuration may now use octal or symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. 
Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. 
rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. 
Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. 
Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. 
Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. 
http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. 
http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. 
http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. 
Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReadingHDFSFilesAsSideEffect.java b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReadingHDFSFilesAsSideEffect.java index 38e60e3..8892f37 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReadingHDFSFilesAsSideEffect.java +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/ReadingHDFSFilesAsSideEffect.java @@ -1,101 +1,114 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.vaidya.postexdiagnosis.tests; import org.apache.hadoop.vaidya.statistics.job.JobStatistics; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.JobKeys; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.KeyDataType; import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.ReduceTaskKeys; import org.apache.hadoop.vaidya.statistics.job.ReduceTaskStatistics; import org.apache.hadoop.vaidya.DiagnosticTest; import org.w3c.dom.Element; import java.util.Hashtable; import java.util.List; /** * */ public class ReadingHDFSFilesAsSideEffect extends DiagnosticTest { private double _impact; private JobStatistics _job; /** * */ public ReadingHDFSFilesAsSideEffect() { } /* * Evaluate the test */ @Override public double evaluate(JobStatistics job) { /* * Set the this._job */ this._job = job; + + /* + * Read the Normalization Factor + */ + double normF = getInputElementDoubleValue("NormalizationFactor", 2.0); + /* * Calculate and return the impact * * Check if job level aggregate bytes read from HDFS are more than map input bytes * Typically they should be same unless maps and/or reducers are reading some data * from HDFS as a side effect * * If side effect HDFS bytes read are >= twice map input bytes impact is treated as * maximum. 
*/ + if(job.getLongValue(JobKeys.MAP_INPUT_BYTES) == 0 && job.getLongValue(JobKeys.HDFS_BYTES_READ) != 0) { + return (double)1; + } + + if (job.getLongValue(JobKeys.HDFS_BYTES_READ) == 0) { + return (double)0; + } this._impact = (job.getLongValue(JobKeys.HDFS_BYTES_READ) / job.getLongValue(JobKeys.MAP_INPUT_BYTES)); - if (this._impact >= 2.0) { + if (this._impact >= normF) { this._impact = 1; } else { - this._impact -= 1; + this._impact = this._impact/normF; } return this._impact; } /* (non-Javadoc) * @see org.apache.hadoop.contrib.utils.perfadvisor.diagnostic_rules.DiagnosticRule#getAdvice() */ @Override public String getPrescription() { return "Map and/or Reduce tasks are reading application specific files from HDFS. Make sure the replication factor\n" + "of these HDFS files is high enough to avoid the data reading bottleneck. Typically replication factor\n" + "can be square root of map/reduce tasks capacity of the allocated cluster."; } /* (non-Javadoc) * @see org.apache.hadoop.contrib.utils.perfadvisor.diagnostic_rules.DiagnosticRule#getReferenceDetails() */ @Override public String getReferenceDetails() { String ref = "* Total HDFS Bytes read: "+this._job.getLongValue(JobKeys.HDFS_BYTES_READ)+"\n"+ "* Total Map Input Bytes read: "+this._job.getLongValue(JobKeys.MAP_INPUT_BYTES)+"\n"+ "* Impact: "+truncate(this._impact); return ref; } } diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/postex_diagnosis_tests.xml b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/postex_diagnosis_tests.xml index f30d5d9..5bd22c4 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/postex_diagnosis_tests.xml +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/postexdiagnosis/tests/postex_diagnosis_tests.xml @@ -1,104 +1,105 @@ <?xml version="1.0" encoding="ISO-8859-1"?> <!-- ** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license 
agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ** --> <!-- This is a diagnostic test configuration file. Diagnostic test driver reads this file to get the list of tests and their configuration information Title : Provides brief description of the test ClassName : Provides the fully qualified java class name that implements the test condition Description : Provides detailed information about the test describing how it checks for a specific performance problem. SuccessThreshold : (value between [0..1]) : Evaluation of a diagnostic test returns its level of impact on the job performance. If impact value [between 0..1] is equal or greater than the success threshold, means rule has detected the problem (TEST POSITIVE) else rule has passed the test (TEST NEGATIVE). The impact level is calculated and returned by each test's evaluate method. For tests that are boolean in nature the impact level is either 0 or 1 and success threshold should be 1. Importance : Indicates relative importance of this diagnostic test among the set of diagnostic rules defined in this file. 
Three declarative values that can be assigned are High, Medium or Low Prescription : This is an optional element to store the advice to be included in the report upon test failure This is overwritten in the report by any advice/prescription text returned by getPrescription method of DiagnosticTest. InputElement : Input element is made available to the diagnostic test for it to interpret and accept any parameters specific to the test. These test specific parameters are used to configure the tests without changing the java code. --> <PostExPerformanceDiagnosisTests> <DiagnosticTest> <Title><![CDATA[Balanaced Reduce Partitioning]]></Title> <ClassName><![CDATA[org.apache.hadoop.vaidya.postexdiagnosis.tests.BalancedReducePartitioning]]></ClassName> <Description><![CDATA[This rule tests as to how well the input to reduce tasks is balanced]]></Description> <Importance><![CDATA[High]]></Importance> <SuccessThreshold><![CDATA[0.20]]></SuccessThreshold> <Prescription><![CDATA[advice]]></Prescription> <InputElement> <PercentReduceRecords><![CDATA[0.85]]></PercentReduceRecords> </InputElement> </DiagnosticTest> <DiagnosticTest> <Title><![CDATA[Impact of Map tasks Re-Execution]]></Title> <ClassName><![CDATA[org.apache.hadoop.vaidya.postexdiagnosis.tests.MapsReExecutionImpact]]></ClassName> <Description><![CDATA[This test rule checks percentage of map task re-execution impacting the job performance]]></Description> <Importance><![CDATA[High]]></Importance> <SuccessThreshold><![CDATA[0.40]]></SuccessThreshold> <Prescription><![CDATA[default advice]]></Prescription> <InputElement> </InputElement> </DiagnosticTest> <DiagnosticTest> <Title><![CDATA[Impact of Reduce tasks Re-Execution]]></Title> <ClassName><![CDATA[org.apache.hadoop.vaidya.postexdiagnosis.tests.ReducesReExecutionImpact]]></ClassName> <Description><![CDATA[This test rule checks percentage of reduce task re-execution impacting the job performance]]></Description> <Importance><![CDATA[High]]></Importance> 
<SuccessThreshold><![CDATA[0.40]]></SuccessThreshold> <Prescription><![CDATA[default advice]]></Prescription> <InputElement> </InputElement> </DiagnosticTest> <DiagnosticTest> <Title><![CDATA[Map and/or Reduce tasks reading HDFS data as a side effect]]></Title> <ClassName><![CDATA[org.apache.hadoop.vaidya.postexdiagnosis.tests.ReadingHDFSFilesAsSideEffect]]></ClassName> <Description><![CDATA[This test rule checks if map/reduce tasks are reading data from HDFS as a side effect. More the data read as a side effect can potentially be a bottleneck across parallel execution of map/reduce tasks.]]></Description> <Importance><![CDATA[High]]></Importance> <SuccessThreshold><![CDATA[0.05]]></SuccessThreshold> <Prescription><![CDATA[default advice]]></Prescription> <InputElement> + <NormalizationFactor>2.0</NormalizationFactor> </InputElement> </DiagnosticTest> <DiagnosticTest> <Title><![CDATA[Map side disk spill]]></Title> <ClassName><![CDATA[org.apache.hadoop.vaidya.postexdiagnosis.tests.MapSideDiskSpill]]></ClassName> <Description><![CDATA[This test rule checks if Map tasks are spilling the data on to the local disk during the map side sorting due to insufficient sort buffer size. The impact is calculated as ratio between local bytes written to map output bytes. Impact is normalized using NormalizationFactor given below and any value greater than or equal to normalization factor is treated as maximum (i.e. 1). 
]]></Description> <Importance><![CDATA[Low]]></Importance> <SuccessThreshold><![CDATA[0.3]]></SuccessThreshold> <Prescription><![CDATA[default advice]]></Prescription> <InputElement> <NormalizationFactor>3.0</NormalizationFactor> </InputElement> </DiagnosticTest> </PostExPerformanceDiagnosisTests> diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java index adc484e..3da8ae5 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java @@ -1,580 +1,640 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.vaidya.statistics.job; import java.util.ArrayList; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobHistory; import org.apache.hadoop.mapred.JobHistory.JobInfo; import org.apache.hadoop.mapred.JobHistory.Keys; import org.apache.hadoop.mapred.Counters; import org.apache.hadoop.mapred.Counters.Counter; import java.text.ParseException; //import org.apache.hadoop.vaidya.statistics.job.JobStatisticsInterface.JobKeys; import java.util.Hashtable; import java.util.Map; import java.util.regex.Pattern; import java.util.regex.Matcher; import java.util.Arrays; import java.util.Comparator; import java.util.List; import java.util.Collections; /** * */ public class JobStatistics implements JobStatisticsInterface { /* * Pattern for parsing the COUNTERS */ private static final Pattern _pattern = Pattern.compile("[[^,]?]+"); //"[[^,]?]+" /* * Job configuration */ private JobConf _jobConf; /** * @param jobConf the jobConf to set */ void setJobConf(JobConf jobConf) { this._jobConf = jobConf; // TODO: Add job conf to _job array } /* * Aggregated Job level counters */ private JobHistory.JobInfo _jobInfo; /* * Job stats */ private java.util.Hashtable<Enum, String> _job; /** * @param jobConf the jobConf to set */ public JobConf getJobConf() { return this._jobConf; } /* * Get Job Counters of type long */ public long getLongValue(Enum key) { - return Long.parseLong(this._job.get(key)); + if (this._job.get(key) == null) { + return (long)0; + } + else { + return Long.parseLong(this._job.get(key)); + } } /* * Get job Counters of type Double */ public double getDoubleValue(Enum key) { - return Double.parseDouble(this._job.get(key)); + if (this._job.get(key) == null) { + return (double)0; + } else { + return Double.parseDouble(this._job.get(key)); + } } /* * Get Job Counters of type String */ public String getStringValue(Enum key) { - return this._job.get(key); + if (this._job.get(key) == null) { + return ""; + } else { + return 
this._job.get(key); + } } /* * Set key value of type long */ public void setValue(Enum key, long value) { this._job.put(key, Long.toString(value)); } /* * Set key value of type double */ public void setValue(Enum key, double value) { this._job.put(key, Double.toString(value)); } /* * Set key value of type String */ public void setValue(Enum key, String value) { this._job.put(key, value); } /* * Map Task List (Sorted by task id) */ private ArrayList<MapTaskStatistics> _mapTaskList = new ArrayList<MapTaskStatistics>(); /* * Reduce Task List (Sorted by task id) */ private ArrayList<ReduceTaskStatistics> _reduceTaskList = new ArrayList<ReduceTaskStatistics>(); /* * Ctor: */ public JobStatistics (JobConf jobConf, JobInfo jobInfo) throws ParseException { this._jobConf = jobConf; this._jobInfo = jobInfo; this._job = new Hashtable<Enum, String>(); - populate_Job(this._job, this._jobInfo.getValues()); + populate_Job(this._job, this._jobInfo.getValues()); populate_MapReduceTaskLists(this._mapTaskList, this._reduceTaskList, this._jobInfo.getAllTasks()); } /* * */ private void populate_MapReduceTaskLists (ArrayList<MapTaskStatistics> mapTaskList, ArrayList<ReduceTaskStatistics> reduceTaskList, java.util.Map<String, JobHistory.Task> taskMap) throws ParseException { /* * */ int num_tasks = taskMap.entrySet().size(); java.util.Iterator<Map.Entry<String, JobHistory.Task>> ti = taskMap.entrySet().iterator(); for (int i = 0; i < num_tasks; i++) { Map.Entry<String, JobHistory.Task> entry = (Map.Entry<String, JobHistory.Task>) ti.next(); JobHistory.Task task = entry.getValue(); if (task.get(Keys.TASK_TYPE).equals("MAP")) { MapTaskStatistics mapT = new MapTaskStatistics(); java.util.Map<JobHistory.Keys, String> mapTask = task.getValues(); java.util.Map<JobHistory.Keys, String> successTaskAttemptMap = getLastSuccessfulTaskAttempt(task); // NOTE: Following would lead to less number of actual tasks collected in the tasklist array if (successTaskAttemptMap != null) { 
mapTask.putAll(successTaskAttemptMap); } else { System.out.println("Task:<"+task.get(Keys.TASKID)+"> is not successful - SKIPPING"); } int size = mapTask.size(); java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = mapTask.entrySet().iterator(); for (int j = 0; j < size; j++) { Map.Entry<JobHistory.Keys, String> mtc = kv.next(); JobHistory.Keys key = mtc.getKey(); String value = mtc.getValue(); + //System.out.println("JobHistory.MapKeys."+key+": "+value); switch (key) { case TASKID: mapT.setValue(MapTaskKeys.TASK_ID, value); break; case TASK_ATTEMPT_ID: mapT.setValue(MapTaskKeys.ATTEMPT_ID, value); break; case HOSTNAME: mapT.setValue(MapTaskKeys.HOSTNAME, value); break; case TASK_TYPE: mapT.setValue(MapTaskKeys.TASK_TYPE, value); break; case TASK_STATUS: mapT.setValue(MapTaskKeys.STATUS, value); break; case START_TIME: mapT.setValue(MapTaskKeys.START_TIME, value); break; case FINISH_TIME: mapT.setValue(MapTaskKeys.FINISH_TIME, value); break; case SPLITS: mapT.setValue(MapTaskKeys.SPLITS, value); break; + case TRACKER_NAME: mapT.setValue(MapTaskKeys.TRACKER_NAME, value); break; + case STATE_STRING: mapT.setValue(MapTaskKeys.STATE_STRING, value); break; + case HTTP_PORT: mapT.setValue(MapTaskKeys.HTTP_PORT, value); break; + case ERROR: mapT.setValue(MapTaskKeys.ERROR, value); break; case COUNTERS: value.concat(","); parseAndAddMapTaskCounters(mapT, value); mapTaskList.add(mapT); break; - default: System.out.println("JobHistory.Keys."+key+" : NOT INCLUDED IN PERFORMANCE ADVISOR MAP COUNTERS"); + default: System.out.println("JobHistory.MapKeys."+key+" : NOT INCLUDED IN PERFORMANCE ADVISOR MAP COUNTERS"); break; } } // Add number of task attempts mapT.setValue(MapTaskKeys.NUM_ATTEMPTS, (new Integer(task.getTaskAttempts().size())).toString()); }else if (task.get(Keys.TASK_TYPE).equals("REDUCE")) { ReduceTaskStatistics reduceT = new ReduceTaskStatistics(); java.util.Map<JobHistory.Keys, String> reduceTask = task.getValues(); java.util.Map<JobHistory.Keys, String> 
successTaskAttemptMap = getLastSuccessfulTaskAttempt(task); // NOTE: Following would lead to less number of actual tasks collected in the tasklist array if (successTaskAttemptMap != null) { reduceTask.putAll(successTaskAttemptMap); } else { System.out.println("Task:<"+task.get(Keys.TASKID)+"> is not successful - SKIPPING"); } int size = reduceTask.size(); java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = reduceTask.entrySet().iterator(); for (int j = 0; j < size; j++) { Map.Entry<JobHistory.Keys, String> rtc = kv.next(); JobHistory.Keys key = rtc.getKey(); String value = rtc.getValue(); + //System.out.println("JobHistory.ReduceKeys."+key+": "+value); switch (key) { case TASKID: reduceT.setValue(ReduceTaskKeys.TASK_ID, value); break; case TASK_ATTEMPT_ID: reduceT.setValue(ReduceTaskKeys.ATTEMPT_ID, value); break; case HOSTNAME: reduceT.setValue(ReduceTaskKeys.HOSTNAME, value); break; case TASK_TYPE: reduceT.setValue(ReduceTaskKeys.TASK_TYPE, value); break; case TASK_STATUS: reduceT.setValue(ReduceTaskKeys.STATUS, value); break; case START_TIME: reduceT.setValue(ReduceTaskKeys.START_TIME, value); break; case FINISH_TIME: reduceT.setValue(ReduceTaskKeys.FINISH_TIME, value); break; case SHUFFLE_FINISHED: reduceT.setValue(ReduceTaskKeys.SHUFFLE_FINISH_TIME, value); break; case SORT_FINISHED: reduceT.setValue(ReduceTaskKeys.SORT_FINISH_TIME, value); break; + case SPLITS: reduceT.setValue(ReduceTaskKeys.SPLITS, value); break; + case TRACKER_NAME: reduceT.setValue(ReduceTaskKeys.TRACKER_NAME, value); break; + case STATE_STRING: reduceT.setValue(ReduceTaskKeys.STATE_STRING, value); break; + case HTTP_PORT: reduceT.setValue(ReduceTaskKeys.HTTP_PORT, value); break; case COUNTERS: value.concat(","); parseAndAddReduceTaskCounters(reduceT, value); reduceTaskList.add(reduceT); break; - default: System.out.println("JobHistory.Keys."+key+" : NOT INCLUDED IN PERFORMANCE ADVISOR REDUCE COUNTERS"); + default: System.out.println("JobHistory.ReduceKeys."+key+" : NOT INCLUDED 
IN PERFORMANCE ADVISOR REDUCE COUNTERS"); break; } // Add number of task attempts reduceT.setValue(ReduceTaskKeys.NUM_ATTEMPTS, (new Integer(task.getTaskAttempts().size())).toString()); } - } else if (task.get(Keys.TASK_TYPE).equals("CLEANUP")) { + } else if (task.get(Keys.TASK_TYPE).equals("CLEANUP") || + task.get(Keys.TASK_TYPE).equals("SETUP")) { //System.out.println("INFO: IGNORING TASK TYPE : "+task.get(Keys.TASK_TYPE)); } else { System.out.println("UNKNOWN TASK TYPE : "+task.get(Keys.TASK_TYPE)); } } } /* * Get last successful task attempt to be added in the stats */ private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) { Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts(); int size = taskAttempts.size(); java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator(); for (int i=0; i<size; i++) { // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next(); JobHistory.TaskAttempt attempt = tae.getValue(); if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) { return attempt.getValues(); } } return null; } /* * Popuate the job stats */ private void populate_Job (Hashtable<Enum, String> job, java.util.Map<JobHistory.Keys, String> jobC) throws ParseException { int size = jobC.size(); java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator(); for (int i = 0; i < size; i++) { Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next(); JobHistory.Keys key = entry.getKey(); String value = entry.getValue(); + //System.out.println("JobHistory.JobKeys."+key+": "+value); switch (key) { case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break; //case START_TIME: job.put(JobKeys., value); break; case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break; case JOBID: job.put(JobKeys.JOBID, value); break; case JOBNAME: 
job.put(JobKeys.JOBNAME, value); break; case USER: job.put(JobKeys.USER, value); break; case JOBCONF: job.put(JobKeys.JOBCONF, value); break; case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break; case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break; case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break; case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break; case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break; case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break; case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break; case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break; case JOB_STATUS: job.put(JobKeys.STATUS, value); break; + case JOB_PRIORITY: job.put(JobKeys.JOB_PRIORITY, value); break; case COUNTERS: value.concat(","); parseAndAddJobCounters(job, value); break; default: System.out.println("JobHistory.Keys."+key+" : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS"); break; } } } /* * Parse and add the job counters */ private void parseAndAddJobCounters(Hashtable<Enum, String> job, String counters) throws ParseException { - Matcher m = _pattern.matcher(counters); - while(m.find()){ - String ctuple = m.group(0); - //String ctuple = c1tuple.substring(0, c1tuple.length()-1); - String []parts = ctuple.split(":"); - if (parts[0].equals("File Systems.Local bytes read")) { - job.put(JobKeys.LOCAL_BYTES_READ, parts[1]); - } else if (parts[0].equals("File Systems.Local bytes written")) { - job.put(JobKeys.LOCAL_BYTES_WRITTEN, parts[1]); - } else if (parts[0].equals("File Systems.HDFS bytes read")) { - job.put(JobKeys.HDFS_BYTES_READ, parts[1]); - } else if (parts[0].equals("File Systems.HDFS bytes written")) { - job.put(JobKeys.HDFS_BYTES_WRITTEN, parts[1]); - } else if (parts[0].equals("Job Counters .Launched map tasks")) { - job.put(JobKeys.LAUNCHED_MAPS, parts[1]); - } else if (parts[0].equals("Job Counters .Launched reduce tasks")) { - job.put(JobKeys.LAUNCHED_REDUCES, parts[1]); - } else 
if (parts[0].equals("Job Counters .Data-local map tasks")) { - job.put(JobKeys.DATALOCAL_MAPS, parts[1]); - } else if (parts[0].equals("Job Counters .Rack-local map tasks")) { - job.put(JobKeys.RACKLOCAL_MAPS, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Map input records")) { - job.put(JobKeys.MAP_INPUT_RECORDS, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Map output records")) { - job.put(JobKeys.MAP_OUTPUT_RECORDS, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Map input bytes")) { - job.put(JobKeys.MAP_INPUT_BYTES, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Map output bytes")) { - job.put(JobKeys.MAP_OUTPUT_BYTES, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Combine input records")) { - job.put(JobKeys.COMBINE_INPUT_RECORDS, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Combine output records")) { - job.put(JobKeys.COMBINE_OUTPUT_RECORDS, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Reduce input groups")) { - job.put(JobKeys.REDUCE_INPUT_GROUPS, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Reduce input records")) { - job.put(JobKeys.REDUCE_INPUT_RECORDS, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Reduce output records")) { - job.put(JobKeys.REDUCE_OUTPUT_RECORDS, parts[1]); - } else { - System.out.println("Pattern:<"+ctuple+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR"); + Counters cnt = Counters.fromEscapedCompactString(counters); + for (java.util.Iterator<Counters.Group> grps = cnt.iterator(); grps.hasNext(); ) { + Counters.Group grp = grps.next(); + //String groupname = "<" + grp.getName() + ">::<" + grp.getDisplayName() + ">"; + for (java.util.Iterator<Counters.Counter> mycounters = grp.iterator(); mycounters.hasNext(); ) { + Counters.Counter counter = mycounters.next(); + //String countername = "<"+counter.getName()+">::<"+counter.getDisplayName()+">::<"+counter.getValue()+">"; + 
//System.out.println("groupName:"+groupname+",countername: "+countername); + String countername = grp.getDisplayName()+"."+counter.getDisplayName(); + String value = (new Long(counter.getValue())).toString(); + String[] parts = {countername,value}; + //System.out.println("part0:"+parts[0]+",:part1 "+parts[1]); + if (parts[0].equals("FileSystemCounters.FILE_BYTES_READ")) { + job.put(JobKeys.LOCAL_BYTES_READ, parts[1]); + } else if (parts[0].equals("FileSystemCounters.FILE_BYTES_WRITTEN")) { + job.put(JobKeys.LOCAL_BYTES_WRITTEN, parts[1]); + } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_READ")) { + job.put(JobKeys.HDFS_BYTES_READ, parts[1]); + } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_WRITTEN")) { + job.put(JobKeys.HDFS_BYTES_WRITTEN, parts[1]); + } else if (parts[0].equals("Job Counters .Launched map tasks")) { + job.put(JobKeys.LAUNCHED_MAPS, parts[1]); + } else if (parts[0].equals("Job Counters .Launched reduce tasks")) { + job.put(JobKeys.LAUNCHED_REDUCES, parts[1]); + } else if (parts[0].equals("Job Counters .Data-local map tasks")) { + job.put(JobKeys.DATALOCAL_MAPS, parts[1]); + } else if (parts[0].equals("Job Counters .Rack-local map tasks")) { + job.put(JobKeys.RACKLOCAL_MAPS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Map input records")) { + job.put(JobKeys.MAP_INPUT_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Map output records")) { + job.put(JobKeys.MAP_OUTPUT_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Map input bytes")) { + job.put(JobKeys.MAP_INPUT_BYTES, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Map output bytes")) { + job.put(JobKeys.MAP_OUTPUT_BYTES, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Combine input records")) { + job.put(JobKeys.COMBINE_INPUT_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Combine output records")) { + job.put(JobKeys.COMBINE_OUTPUT_RECORDS, parts[1]); + } else 
if (parts[0].equals("Map-Reduce Framework.Reduce input groups")) { + job.put(JobKeys.REDUCE_INPUT_GROUPS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Reduce input records")) { + job.put(JobKeys.REDUCE_INPUT_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Reduce output records")) { + job.put(JobKeys.REDUCE_OUTPUT_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Spilled Records")) { + job.put(JobKeys.SPILLED_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Reduce shuffle bytes")) { + job.put(JobKeys.SHUFFLE_BYTES, parts[1]); + } else { + System.out.println("JobCounterKey:<"+parts[0]+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR"); + } } } } /* * Parse and add the Map task counters */ - private void parseAndAddMapTaskCounters(MapTaskStatistics mapTask, String counters) { - Matcher m = _pattern.matcher(counters); - while(m.find()){ - String ctuple = m.group(0); - //String ctuple = c1tuple.substring(0, c1tuple.length()-1); - String []parts = ctuple.split(":"); - if (parts[0].equals("File Systems.Local bytes read")) { - mapTask.setValue(MapTaskKeys.LOCAL_BYTES_READ, parts[1]); - } else if (parts[0].equals("File Systems.Local bytes written")) { - mapTask.setValue(MapTaskKeys.LOCAL_BYTES_WRITTEN, parts[1]); - } else if (parts[0].equals("File Systems.HDFS bytes read")) { - mapTask.setValue(MapTaskKeys.HDFS_BYTES_READ, parts[1]); - } else if (parts[0].equals("File Systems.HDFS bytes written")) { - mapTask.setValue(MapTaskKeys.HDFS_BYTES_WRITTEN, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Map input records")) { - mapTask.setValue(MapTaskKeys.INPUT_RECORDS, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Map output records")) { - mapTask.setValue(MapTaskKeys.OUTPUT_RECORDS, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Map input bytes")) { - mapTask.setValue(MapTaskKeys.INPUT_BYTES, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Map 
output bytes")) { - mapTask.setValue(MapTaskKeys.OUTPUT_BYTES, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Combine input records")) { - mapTask.setValue(MapTaskKeys.COMBINE_INPUT_RECORDS, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Combine output records")) { - mapTask.setValue(MapTaskKeys.COMBINE_OUTPUT_RECORDS, parts[1]); - } else { - System.out.println("Pattern:<"+ctuple+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR MAP TASK"); - } - } + private void parseAndAddMapTaskCounters(MapTaskStatistics mapTask, String counters) throws ParseException { + Counters cnt = Counters.fromEscapedCompactString(counters); + for (java.util.Iterator<Counters.Group> grps = cnt.iterator(); grps.hasNext(); ) { + Counters.Group grp = grps.next(); + //String groupname = "<" + grp.getName() + ">::<" + grp.getDisplayName() + ">"; + for (java.util.Iterator<Counters.Counter> mycounters = grp.iterator(); mycounters.hasNext(); ) { + Counters.Counter counter = mycounters.next(); + //String countername = "<"+counter.getName()+">::<"+counter.getDisplayName()+">::<"+counter.getValue()+">"; + //System.out.println("groupName:"+groupname+",countername: "+countername); + String countername = grp.getDisplayName()+"."+counter.getDisplayName(); + String value = (new Long(counter.getValue())).toString(); + String[] parts = {countername,value}; + //System.out.println("part0:"+parts[0]+",:part1 "+parts[1]); + if (parts[0].equals("FileSystemCounters.FILE_BYTES_READ")) { + mapTask.setValue(MapTaskKeys.LOCAL_BYTES_READ, parts[1]); + } else if (parts[0].equals("FileSystemCounters.FILE_BYTES_WRITTEN")) { + mapTask.setValue(MapTaskKeys.LOCAL_BYTES_WRITTEN, parts[1]); + } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_READ")) { + mapTask.setValue(MapTaskKeys.HDFS_BYTES_READ, parts[1]); + } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_WRITTEN")) { + mapTask.setValue(MapTaskKeys.HDFS_BYTES_WRITTEN, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Map 
input records")) { + mapTask.setValue(MapTaskKeys.INPUT_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Map output records")) { + mapTask.setValue(MapTaskKeys.OUTPUT_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Map input bytes")) { + mapTask.setValue(MapTaskKeys.INPUT_BYTES, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Map output bytes")) { + mapTask.setValue(MapTaskKeys.OUTPUT_BYTES, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Combine input records")) { + mapTask.setValue(MapTaskKeys.COMBINE_INPUT_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Combine output records")) { + mapTask.setValue(MapTaskKeys.COMBINE_OUTPUT_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Spilled Records")) { + mapTask.setValue(MapTaskKeys.SPILLED_RECORDS, parts[1]); + } else { + System.out.println("MapCounterKey:<"+parts[0]+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR MAP TASK"); + } + } + } } /* * Parse and add the reduce task counters */ - private void parseAndAddReduceTaskCounters(ReduceTaskStatistics reduceTask, String counters) { - Matcher m = _pattern.matcher(counters); - while(m.find()){ - String ctuple = m.group(0); - //String ctuple = c1tuple.substring(0, c1tuple.length()-1); - String []parts = ctuple.split(":"); - if (parts[0].equals("File Systems.Local bytes read")) { - reduceTask.setValue(ReduceTaskKeys.LOCAL_BYTES_READ, parts[1]); - } else if (parts[0].equals("File Systems.Local bytes written")) { - reduceTask.setValue(ReduceTaskKeys.LOCAL_BYTES_WRITTEN, parts[1]); - } else if (parts[0].equals("File Systems.HDFS bytes read")) { - reduceTask.setValue(ReduceTaskKeys.HDFS_BYTES_READ, parts[1]); - } else if (parts[0].equals("File Systems.HDFS bytes written")) { - reduceTask.setValue(ReduceTaskKeys.HDFS_BYTES_WRITTEN, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Reduce input records")) { - 
reduceTask.setValue(ReduceTaskKeys.INPUT_RECORDS, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Reduce output records")) { - reduceTask.setValue(ReduceTaskKeys.OUTPUT_RECORDS, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Combine input records")) { - reduceTask.setValue(ReduceTaskKeys.COMBINE_INPUT_RECORDS, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Combine output records")) { - reduceTask.setValue(ReduceTaskKeys.COMBINE_OUTPUT_RECORDS, parts[1]); - } else if (parts[0].equals("Map-Reduce Framework.Reduce input groups")) { - reduceTask.setValue(ReduceTaskKeys.INPUT_GROUPS, parts[1]); - } else { - System.out.println("Pattern:<"+ctuple+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR MAP TASK"); + private void parseAndAddReduceTaskCounters(ReduceTaskStatistics reduceTask, String counters) throws ParseException { + Counters cnt = Counters.fromEscapedCompactString(counters); + for (java.util.Iterator<Counters.Group> grps = cnt.iterator(); grps.hasNext(); ) { + Counters.Group grp = grps.next(); + //String groupname = "<" + grp.getName() + ">::<" + grp.getDisplayName() + ">"; + for (java.util.Iterator<Counters.Counter> mycounters = grp.iterator(); mycounters.hasNext(); ) { + Counters.Counter counter = mycounters.next(); + //String countername = "<"+counter.getName()+">::<"+counter.getDisplayName()+">::<"+counter.getValue()+">"; + //System.out.println("groupName:"+groupname+",countername: "+countername); + String countername = grp.getDisplayName()+"."+counter.getDisplayName(); + String value = (new Long(counter.getValue())).toString(); + String[] parts = {countername,value}; + //System.out.println("part0:"+parts[0]+",:part1 "+parts[1]); + if (parts[0].equals("FileSystemCounters.FILE_BYTES_READ")) { + reduceTask.setValue(ReduceTaskKeys.LOCAL_BYTES_READ, parts[1]); + } else if (parts[0].equals("FileSystemCounters.FILE_BYTES_WRITTEN")) { + reduceTask.setValue(ReduceTaskKeys.LOCAL_BYTES_WRITTEN, parts[1]); + } else if 
(parts[0].equals("FileSystemCounters.HDFS_BYTES_READ")) { + reduceTask.setValue(ReduceTaskKeys.HDFS_BYTES_READ, parts[1]); + } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_WRITTEN")) { + reduceTask.setValue(ReduceTaskKeys.HDFS_BYTES_WRITTEN, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Reduce input records")) { + reduceTask.setValue(ReduceTaskKeys.INPUT_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Reduce output records")) { + reduceTask.setValue(ReduceTaskKeys.OUTPUT_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Combine input records")) { + reduceTask.setValue(ReduceTaskKeys.COMBINE_INPUT_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Combine output records")) { + reduceTask.setValue(ReduceTaskKeys.COMBINE_OUTPUT_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Reduce input groups")) { + reduceTask.setValue(ReduceTaskKeys.INPUT_GROUPS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Spilled Records")) { + reduceTask.setValue(ReduceTaskKeys.SPILLED_RECORDS, parts[1]); + } else if (parts[0].equals("Map-Reduce Framework.Reduce shuffle bytes")) { + reduceTask.setValue(ReduceTaskKeys.SHUFFLE_BYTES, parts[1]); + } else { + System.out.println("ReduceCounterKey:<"+parts[0]+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR REDUCE TASK"); + } } } } /* * Print the Job Execution Statistics * TODO: split to pring job, map/reduce task list and individual map/reduce task stats */ public void printJobExecutionStatistics() { /* * Print Job Counters */ System.out.println("JOB COUNTERS *********************************************"); int size = this._job.size(); java.util.Iterator<Map.Entry<Enum, String>> kv = this._job.entrySet().iterator(); for (int i = 0; i < size; i++) { Map.Entry<Enum, String> entry = (Map.Entry<Enum, String>) kv.next(); Enum key = entry.getKey(); String value = entry.getValue(); System.out.println("Key:<" + key.name() + ">, 
value:<"+ value +">"); } /* * */ System.out.println("MAP COUNTERS *********************************************"); int size1 = this._mapTaskList.size(); for (int i = 0; i < size1; i++) { System.out.println("MAP TASK *********************************************"); this._mapTaskList.get(i).printKeys(); } /* * */ System.out.println("REDUCE COUNTERS *********************************************"); int size2 = this._mapTaskList.size(); for (int i = 0; i < size2; i++) { System.out.println("REDUCE TASK *********************************************"); this._reduceTaskList.get(i).printKeys(); } } /* * Hash table keeping sorted lists of map tasks based on the specific map task key */ private Hashtable <Enum, ArrayList<MapTaskStatistics>> _sortedMapTaskListsByKey = new Hashtable<Enum, ArrayList<MapTaskStatistics>>(); /* * @return mapTaskList : ArrayList of MapTaskStatistics * @param mapTaskSortKey : Specific counter key used for sorting the task list * @param datatype : indicates the data type of the counter key used for sorting * If sort key is null then by default map tasks are sorted using map task ids. */ public synchronized ArrayList<MapTaskStatistics> getMapTaskList(Enum mapTaskSortKey, KeyDataType dataType) { /* * If mapTaskSortKey is null then use the task id as a key. 
*/ if (mapTaskSortKey == null) { mapTaskSortKey = MapTaskKeys.TASK_ID; } if (this._sortedMapTaskListsByKey.get(mapTaskSortKey) == null) { ArrayList<MapTaskStatistics> newList = (ArrayList<MapTaskStatistics>)this._mapTaskList.clone(); this._sortedMapTaskListsByKey.put(mapTaskSortKey, this.sortMapTasksByKey(newList, mapTaskSortKey, dataType)); } return this._sortedMapTaskListsByKey.get(mapTaskSortKey); } private ArrayList<MapTaskStatistics> sortMapTasksByKey (ArrayList<MapTaskStatistics> mapTasks, Enum key, Enum dataType) { MapCounterComparator mcc = new MapCounterComparator(key, dataType); Collections.sort (mapTasks, mcc); return mapTasks; } private class MapCounterComparator implements Comparator<MapTaskStatistics> { public Enum _sortKey; public Enum _dataType; public MapCounterComparator(Enum key, Enum dataType) { this._sortKey = key; this._dataType = dataType; } // Comparator interface requires defining compare method. public int compare(MapTaskStatistics a, MapTaskStatistics b) { if (this._dataType == KeyDataType.LONG) { long aa = a.getLongValue(this._sortKey); long bb = b.getLongValue(this._sortKey); if (aa<bb) return -1; if (aa==bb) return 0; if (aa>bb) return 1; } else { return a.getStringValue(this._sortKey).compareToIgnoreCase(b.getStringValue(this._sortKey)); } return 0; } } /* * Reduce Array List sorting */ private Hashtable <Enum, ArrayList<ReduceTaskStatistics>> _sortedReduceTaskListsByKey = new Hashtable<Enum,ArrayList<ReduceTaskStatistics>>(); /* * @return reduceTaskList : ArrayList of ReduceTaskStatistics * @param reduceTaskSortKey : Specific counter key used for sorting the task list * @param dataType : indicates the data type of the counter key used for sorting * If sort key is null then, by default reduce tasks are sorted using task ids. */ public synchronized ArrayList<ReduceTaskStatistics> getReduceTaskList (Enum reduceTaskSortKey, KeyDataType dataType) { /* * If reduceTaskSortKey is null then use the task id as a key. 
*/ if (reduceTaskSortKey == null) { reduceTaskSortKey = ReduceTaskKeys.TASK_ID; } if (this._sortedReduceTaskListsByKey.get(reduceTaskSortKey) == null) { ArrayList<ReduceTaskStatistics> newList = (ArrayList<ReduceTaskStatistics>)this._reduceTaskList.clone(); this._sortedReduceTaskListsByKey.put(reduceTaskSortKey, this.sortReduceTasksByKey(newList, reduceTaskSortKey, dataType)); } return this._sortedReduceTaskListsByKey.get(reduceTaskSortKey); } private ArrayList<ReduceTaskStatistics> sortReduceTasksByKey (ArrayList<ReduceTaskStatistics> reduceTasks, Enum key, Enum dataType) { ReduceCounterComparator rcc = new ReduceCounterComparator(key, dataType); Collections.sort (reduceTasks, rcc); return reduceTasks; } private class ReduceCounterComparator implements Comparator<ReduceTaskStatistics> { public Enum _sortKey; public Enum _dataType; //either long or string public ReduceCounterComparator(Enum key, Enum dataType) { this._sortKey = key; this._dataType = dataType; } // Comparator interface requires defining compare method. public int compare(ReduceTaskStatistics a, ReduceTaskStatistics b) { if (this._dataType == KeyDataType.LONG) { long aa = a.getLongValue(this._sortKey); long bb = b.getLongValue(this._sortKey); if (aa<bb) return -1; if (aa==bb) return 0; if (aa>bb) return 1; } else { return a.getStringValue(this._sortKey).compareToIgnoreCase(b.getStringValue(this._sortKey)); } return 0; } } } diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatisticsInterface.java b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatisticsInterface.java index b287ca9..ca71c1d 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatisticsInterface.java +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatisticsInterface.java @@ -1,124 +1,125 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.vaidya.statistics.job; import java.util.ArrayList; import org.apache.hadoop.mapred.JobConf; public interface JobStatisticsInterface { /** * Get job configuration (job.xml) values */ public JobConf getJobConf(); /* * Get Job Counters of type long */ public long getLongValue(Enum key); /* * Get job Counters of type Double */ public double getDoubleValue(Enum key); /* * Get Job Counters of type String */ public String getStringValue(Enum key); /* * Set key value of type long */ public void setValue(Enum key, long value); /* * Set key value of type double */ public void setValue(Enum key, double valye); /* * Set key value of type String */ public void setValue(Enum key, String value); /** * @return mapTaskList : ArrayList of MapTaskStatistics * @param mapTaskSortKey : Specific counter key used for sorting the task list * @param datatype : indicates the data type of the counter key used for sorting * If sort key is null then by default map tasks are sorted using map task ids. 
*/ public ArrayList<MapTaskStatistics> getMapTaskList(Enum mapTaskSortKey, KeyDataType dataType); /** * @return reduceTaskList : ArrayList of ReduceTaskStatistics * @param reduceTaskSortKey : Specific counter key used for sorting the task list * @param dataType : indicates the data type of the counter key used for sorting * If sort key is null then, by default reduce tasks are sorted using task ids. */ public ArrayList<ReduceTaskStatistics> getReduceTaskList(Enum reduceTaskSortKey, KeyDataType dataType); /* * Print the Job Execution Statistics */ public void printJobExecutionStatistics(); /* * Job and Task statistics Key data types */ public static enum KeyDataType { STRING, LONG, DOUBLE } /** * Job Keys */ public static enum JobKeys { JOBTRACKERID, JOBID, JOBNAME, USER, SUBMIT_TIME, CONF_PATH, LAUNCH_TIME, TOTAL_MAPS, TOTAL_REDUCES, STATUS, FINISH_TIME, FINISHED_MAPS, FINISHED_REDUCES, FAILED_MAPS, FAILED_REDUCES, LAUNCHED_MAPS, LAUNCHED_REDUCES, RACKLOCAL_MAPS, DATALOCAL_MAPS, HDFS_BYTES_READ, HDFS_BYTES_WRITTEN, LOCAL_BYTES_READ, LOCAL_BYTES_WRITTEN, COMBINE_OUTPUT_RECORDS, COMBINE_INPUT_RECORDS, REDUCE_INPUT_GROUPS, REDUCE_INPUT_RECORDS, REDUCE_OUTPUT_RECORDS, MAP_INPUT_RECORDS, MAP_OUTPUT_RECORDS, MAP_INPUT_BYTES, MAP_OUTPUT_BYTES, MAP_HDFS_BYTES_WRITTEN, - JOBCONF + JOBCONF, JOB_PRIORITY, SHUFFLE_BYTES, SPILLED_RECORDS } /** * Map Task Keys */ public static enum MapTaskKeys { TASK_ID, TASK_TYPE, START_TIME, STATUS, FINISH_TIME, HDFS_BYTES_READ, HDFS_BYTES_WRITTEN, LOCAL_BYTES_READ, LOCAL_BYTES_WRITTEN, COMBINE_OUTPUT_RECORDS, COMBINE_INPUT_RECORDS, OUTPUT_RECORDS, INPUT_RECORDS, INPUT_BYTES, OUTPUT_BYTES, NUM_ATTEMPTS, ATTEMPT_ID, - HOSTNAME, SPLITS + HOSTNAME, SPLITS, SPILLED_RECORDS, TRACKER_NAME, STATE_STRING, HTTP_PORT, ERROR } /** * Reduce Task Keys */ public static enum ReduceTaskKeys { TASK_ID, TASK_TYPE, START_TIME, STATUS, FINISH_TIME, HDFS_BYTES_READ, HDFS_BYTES_WRITTEN, LOCAL_BYTES_READ, LOCAL_BYTES_WRITTEN, COMBINE_OUTPUT_RECORDS, 
COMBINE_INPUT_RECORDS, OUTPUT_RECORDS, INPUT_RECORDS, NUM_ATTEMPTS, ATTEMPT_ID, HOSTNAME, SHUFFLE_FINISH_TIME, - SORT_FINISH_TIME, INPUT_GROUPS + SORT_FINISH_TIME, INPUT_GROUPS, TRACKER_NAME, STATE_STRING, HTTP_PORT, SPLITS, SHUFFLE_BYTES, + SPILLED_RECORDS } } diff --git a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/vaidya.sh b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/vaidya.sh index ada6715..a11b286 100644 --- a/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/vaidya.sh +++ b/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/vaidya.sh @@ -1,47 +1,47 @@ #!/bin/sh # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. this="$0" while [ -h "$this" ]; do ls=`ls -ld "$this"` link=`expr "$ls" : '.*-> \(.*\)$'` if expr "$link" : '.*/.*' > /dev/null; then this="$link" else this=`dirname "$this"`/"$link" fi done # convert relative path to absolute path bin=`dirname "$this"` script=`basename "$this"` bin=`cd "$bin"; pwd` this="$bin/$script" # Check if HADOOP_HOME AND JAVA_HOME is set. 
if [ -z $HADOOP_HOME ] ; then echo "HADOOP_HOME environment variable not defined" exit -1; fi if [ -z $JAVA_HOME ] ; then echo "JAVA_HOME environment variable not defined" exit -1; fi hadoopVersion=`$HADOOP_HOME/bin/hadoop version | awk 'BEGIN { RS = "" ; FS = "\n" } ; { print $1 }' | awk '{print $2}'` -$JAVA_HOME/bin/java -classpath $HADOOP_HOME/hadoop-${hadoopVersion}-core.jar:$HADOOP_HOME/contrib/vaidya/hadoop-${hadoopVersion}-vaidya.jar:$HADOOP_HOME/lib/commons-logging-1.0.4.jar:${CLASSPATH} org.apache.hadoop.vaidya.postexdiagnosis.PostExPerformanceDiagnoser $@ +$JAVA_HOME/bin/java -Xmx1024m -classpath $HADOOP_HOME/hadoop-${hadoopVersion}-core.jar:$HADOOP_HOME/contrib/vaidya/hadoop-${hadoopVersion}-vaidya.jar:$HADOOP_HOME/lib/commons-logging-1.0.4.jar:${CLASSPATH} org.apache.hadoop.vaidya.postexdiagnosis.PostExPerformanceDiagnoser $@
jaxlaw/hadoop-common
818321ea0fb763502742107cc6976b5479bc7983
HDFS:595 from https://issues.apache.org/jira/secure/attachment/12427977/HDFS-595-Y20.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index af1f818..c046567 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,461 +1,463 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383002 + HDFS-595. umask settings in configuration may now use octal or + symbolic instead of decimal. Update HDFS tests as such. (jghoman) MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. 
(Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. 
(Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. 
Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. 
MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. 
Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. 
http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. 
http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). 
http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/test/org/apache/hadoop/hdfs/TestDFSPermission.java b/src/test/org/apache/hadoop/hdfs/TestDFSPermission.java index c456b90..fd3a1ce 100644 --- a/src/test/org/apache/hadoop/hdfs/TestDFSPermission.java +++ b/src/test/org/apache/hadoop/hdfs/TestDFSPermission.java @@ -1,669 +1,669 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs; import java.io.IOException; import java.util.Random; import javax.security.auth.login.LoginException; import org.apache.commons.logging.*; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.permission.*; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UnixUserGroupInformation; import junit.framework.AssertionFailedError; import junit.framework.TestCase; /** Unit tests for permission */ public class TestDFSPermission extends TestCase { public static final Log LOG = LogFactory.getLog(TestDFSPermission.class); final private static Configuration conf = new Configuration(); final private static String GROUP1_NAME = "group1"; final private static String GROUP2_NAME = "group2"; final private static String GROUP3_NAME = "group3"; final private static String GROUP4_NAME = "group4"; final private static String USER1_NAME = "user1"; final private static String USER2_NAME = "user2"; final private static String USER3_NAME = "user3"; private static UnixUserGroupInformation SUPERUSER; private static UnixUserGroupInformation USER1; private static UnixUserGroupInformation USER2; private static UnixUserGroupInformation USER3; final private static short MAX_PERMISSION = 511; final private static short DEFAULT_UMASK = 022; final private static short FILE_MASK = 0666; final private static FsPermission DEFAULT_PERMISSION = FsPermission.createImmutable((short) 0777); 
final static private int NUM_TEST_PERMISSIONS = conf.getInt("test.dfs.permission.num", 10) * (MAX_PERMISSION + 1) / 100; final private static String PATH_NAME = "xx"; final private static Path FILE_DIR_PATH = new Path("/", PATH_NAME); final private static Path NON_EXISTENT_PATH = new Path("/parent", PATH_NAME); final private static Path NON_EXISTENT_FILE = new Path("/NonExistentFile"); private FileSystem fs; private static Random r; static { try { // Initiate the random number generator and logging the seed long seed = Util.now(); r = new Random(seed); LOG.info("Random number generator uses seed " + seed); LOG.info("NUM_TEST_PERMISSIONS=" + NUM_TEST_PERMISSIONS); // explicitly turn on permission checking conf.setBoolean("dfs.permissions", true); // Initiate all four users SUPERUSER = UnixUserGroupInformation.login(conf); USER1 = new UnixUserGroupInformation(USER1_NAME, new String[] { GROUP1_NAME, GROUP2_NAME }); USER2 = new UnixUserGroupInformation(USER2_NAME, new String[] { GROUP2_NAME, GROUP3_NAME }); USER3 = new UnixUserGroupInformation(USER3_NAME, new String[] { GROUP3_NAME, GROUP4_NAME }); } catch (LoginException e) { throw new RuntimeException(e); } } /** This tests if permission setting in create, mkdir, and * setPermission works correctly */ public void testPermissionSetting() throws Exception { MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null); try { cluster.waitActive(); fs = FileSystem.get(conf); LOG.info("ROOT=" + fs.getFileStatus(new Path("/"))); testPermissionSetting(OpType.CREATE); // test file creation testPermissionSetting(OpType.MKDIRS); // test directory creation } finally { fs.close(); cluster.shutdown(); } } /* check permission setting works correctly for file or directory */ private void testPermissionSetting(OpType op) throws Exception { // case 1: use default permission but all possible umasks PermissionGenerator generator = new PermissionGenerator(r); for (short i = 0; i < NUM_TEST_PERMISSIONS; i++) { 
createAndCheckPermission(op, FILE_DIR_PATH, generator.next(), new FsPermission(DEFAULT_PERMISSION), true); } // case 2: use permission 0643 and the default umask createAndCheckPermission(op, FILE_DIR_PATH, DEFAULT_UMASK, new FsPermission((short) 0643), true); // case 3: use permission 0643 and umask 0222 createAndCheckPermission(op, FILE_DIR_PATH, (short) 0222, new FsPermission((short) 0643), false); // case 4: set permission fs.setPermission(FILE_DIR_PATH, new FsPermission((short) 0111)); short expectedPermission = (short) ((op == OpType.CREATE) ? 0 : 0111); checkPermission(FILE_DIR_PATH, expectedPermission, true); // case 5: test non-existent parent directory assertFalse(fs.exists(NON_EXISTENT_PATH)); createAndCheckPermission(op, NON_EXISTENT_PATH, DEFAULT_UMASK, new FsPermission(DEFAULT_PERMISSION), false); Path parent = NON_EXISTENT_PATH.getParent(); checkPermission(parent, getPermission(parent.getParent()), true); } /* get the permission of a file/directory */ private short getPermission(Path path) throws IOException { return fs.getFileStatus(path).getPermission().toShort(); } /* create a file/directory with the default umask and permission */ private void create(OpType op, Path name) throws IOException { create(op, name, DEFAULT_UMASK, new FsPermission(DEFAULT_PERMISSION)); } /* create a file/directory with the given umask and permission */ private void create(OpType op, Path name, short umask, FsPermission permission) throws IOException { - // set umask in configuration - conf.setInt(FsPermission.UMASK_LABEL, umask); + // set umask in configuration, converting to padded octal + conf.set(FsPermission.UMASK_LABEL, String.format("%1$03o", umask)); // create the file/directory switch (op) { case CREATE: FSDataOutputStream out = fs.create(name, permission, true, conf.getInt( "io.file.buffer.size", 4096), fs.getDefaultReplication(), fs .getDefaultBlockSize(), null); out.close(); break; case MKDIRS: fs.mkdirs(name, permission); break; default: throw new 
IOException("Unsupported operation: " + op); } } /* create file/directory with the provided umask and permission; then it * checks if the permission is set correctly; * If the delete flag is true, delete the file afterwards; otherwise leave * it in the file system. */ private void createAndCheckPermission(OpType op, Path name, short umask, FsPermission permission, boolean delete) throws Exception { // create the file/directory create(op, name, umask, permission); // get the short form of the permission short permissionNum = (DEFAULT_PERMISSION.equals(permission)) ? MAX_PERMISSION : permission.toShort(); // get the expected permission short expectedPermission = (op == OpType.CREATE) ? (short) (~umask & permissionNum & FILE_MASK) : (short) (~umask & permissionNum); // check if permission is correctly set checkPermission(name, expectedPermission, delete); } /* Check if the permission of a file/directory is the same as the * expected permission; If the delete flag is true, delete the * file/directory afterwards. */ private void checkPermission(Path name, short expectedPermission, boolean delete) throws IOException { try { // check its permission assertEquals(getPermission(name), expectedPermission); } finally { // delete the file if (delete) { fs.delete(name, true); } } } /* check if the ownership of a file/directory is set correctly */ public void testOwnership() throws Exception { MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null); try { cluster.waitActive(); testOwnership(OpType.CREATE); // test file creation testOwnership(OpType.MKDIRS); // test directory creation } finally { fs.close(); cluster.shutdown(); } } /* change a file/directory's owner and group. * if expectDeny is set, expect an AccessControlException. */ private void setOwner(Path path, String owner, String group, boolean expectDeny) throws IOException { try { String expectedOwner = (owner == null) ? getOwner(path) : owner; String expectedGroup = (group == null) ? 
getGroup(path) : group; fs.setOwner(path, owner, group); checkOwnership(path, expectedOwner, expectedGroup); assertFalse(expectDeny); } catch(AccessControlException e) { assertTrue(expectDeny); } } /* check ownership is set correctly for a file or directory */ private void testOwnership(OpType op) throws Exception { // case 1: superuser create a file/directory fs = FileSystem.get(conf); create(op, FILE_DIR_PATH, DEFAULT_UMASK, new FsPermission(DEFAULT_PERMISSION)); checkOwnership(FILE_DIR_PATH, SUPERUSER.getUserName(), getGroup(FILE_DIR_PATH.getParent())); // case 2: superuser changes FILE_DIR_PATH's owner to be <user1, group3> setOwner(FILE_DIR_PATH, USER1.getUserName(), GROUP3_NAME, false); // case 3: user1 changes FILE_DIR_PATH's owner to be user2 login(USER1); setOwner(FILE_DIR_PATH, USER2.getUserName(), null, true); // case 4: user1 changes FILE_DIR_PATH's group to be group1 which it belongs // to setOwner(FILE_DIR_PATH, null, GROUP1_NAME, false); // case 5: user1 changes FILE_DIR_PATH's group to be group3 // which it does not belong to setOwner(FILE_DIR_PATH, null, GROUP3_NAME, true); // case 6: user2 (non-owner) changes FILE_DIR_PATH's group to be group3 login(USER2); setOwner(FILE_DIR_PATH, null, GROUP3_NAME, true); // case 7: user2 (non-owner) changes FILE_DIR_PATH's user to be user2 setOwner(FILE_DIR_PATH, USER2.getUserName(), null, true); // delete the file/directory login(SUPERUSER); fs.delete(FILE_DIR_PATH, true); } /* Return the group owner of the file/directory */ private String getGroup(Path path) throws IOException { return fs.getFileStatus(path).getGroup(); } /* Return the file owner of the file/directory */ private String getOwner(Path path) throws IOException { return fs.getFileStatus(path).getOwner(); } /* check if ownership is set correctly */ private void checkOwnership(Path name, String expectedOwner, String expectedGroup) throws IOException { // check its owner and group FileStatus status = fs.getFileStatus(name); 
assertEquals(status.getOwner(), expectedOwner); assertEquals(status.getGroup(), expectedGroup); } final static private String ANCESTOR_NAME = "/ancestor"; final static private String PARENT_NAME = "parent"; final static private String FILE_NAME = "file"; final static private String DIR_NAME = "dir"; final static private String FILE_DIR_NAME = "filedir"; private enum OpType {CREATE, MKDIRS, OPEN, SET_REPLICATION, GET_FILEINFO, IS_DIR, EXISTS, GET_CONTENT_LENGTH, LIST, RENAME, DELETE }; /* Check if namenode performs permission checking correctly for * superuser, file owner, group owner, and other users */ public void testPermissionChecking() throws Exception { MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null); try { cluster.waitActive(); fs = FileSystem.get(conf); // set the permission of the root to be world-wide rwx fs.setPermission(new Path("/"), new FsPermission((short)0777)); // create a directory hierarchy and sets random permission for each inode PermissionGenerator ancestorPermissionGenerator = new PermissionGenerator(r); PermissionGenerator dirPermissionGenerator = new PermissionGenerator(r); PermissionGenerator filePermissionGenerator = new PermissionGenerator(r); short[] ancestorPermissions = new short[NUM_TEST_PERMISSIONS]; short[] parentPermissions = new short[NUM_TEST_PERMISSIONS]; short[] permissions = new short[NUM_TEST_PERMISSIONS]; Path[] ancestorPaths = new Path[NUM_TEST_PERMISSIONS]; Path[] parentPaths = new Path[NUM_TEST_PERMISSIONS]; Path[] filePaths = new Path[NUM_TEST_PERMISSIONS]; Path[] dirPaths = new Path[NUM_TEST_PERMISSIONS]; for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) { // create ancestor directory ancestorPaths[i] = new Path(ANCESTOR_NAME + i); create(OpType.MKDIRS, ancestorPaths[i]); fs.setOwner(ancestorPaths[i], USER1_NAME, GROUP2_NAME); // create parent directory parentPaths[i] = new Path(ancestorPaths[i], PARENT_NAME + i); create(OpType.MKDIRS, parentPaths[i]); // change parent directory's ownership to be user1 
fs.setOwner(parentPaths[i], USER1_NAME, GROUP2_NAME); filePaths[i] = new Path(parentPaths[i], FILE_NAME + i); dirPaths[i] = new Path(parentPaths[i], DIR_NAME + i); // makes sure that each inode at the same level // has a different permission ancestorPermissions[i] = ancestorPermissionGenerator.next(); parentPermissions[i] = dirPermissionGenerator.next(); permissions[i] = filePermissionGenerator.next(); fs.setPermission(ancestorPaths[i], new FsPermission( ancestorPermissions[i])); fs.setPermission(parentPaths[i], new FsPermission( parentPermissions[i])); } /* file owner */ testPermissionCheckingPerUser(USER1, ancestorPermissions, parentPermissions, permissions, parentPaths, filePaths, dirPaths); /* group owner */ testPermissionCheckingPerUser(USER2, ancestorPermissions, parentPermissions, permissions, parentPaths, filePaths, dirPaths); /* other owner */ testPermissionCheckingPerUser(USER3, ancestorPermissions, parentPermissions, permissions, parentPaths, filePaths, dirPaths); /* super owner */ testPermissionCheckingPerUser(SUPERUSER, ancestorPermissions, parentPermissions, permissions, parentPaths, filePaths, dirPaths); } finally { fs.close(); cluster.shutdown(); } } /* Check if namenode performs permission checking correctly * for the given user for operations mkdir, open, setReplication, * getFileInfo, isDirectory, exists, getContentLength, list, rename, * and delete */ private void testPermissionCheckingPerUser(UnixUserGroupInformation ugi, short[] ancestorPermission, short[] parentPermission, short[] filePermission, Path[] parentDirs, Path[] files, Path[] dirs) throws Exception { login(SUPERUSER); for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) { create(OpType.CREATE, files[i]); create(OpType.MKDIRS, dirs[i]); fs.setOwner(files[i], USER1_NAME, GROUP2_NAME); fs.setOwner(dirs[i], USER1_NAME, GROUP2_NAME); checkOwnership(dirs[i], USER1_NAME, GROUP2_NAME); checkOwnership(files[i], USER1_NAME, GROUP2_NAME); FsPermission fsPermission = new 
FsPermission(filePermission[i]); fs.setPermission(files[i], fsPermission); fs.setPermission(dirs[i], fsPermission); } login(ugi); for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) { testCreateMkdirs(ugi, new Path(parentDirs[i], FILE_DIR_NAME), ancestorPermission[i], parentPermission[i]); testOpen(ugi, files[i], ancestorPermission[i], parentPermission[i], filePermission[i]); testSetReplication(ugi, files[i], ancestorPermission[i], parentPermission[i], filePermission[i]); testSetTimes(ugi, files[i], ancestorPermission[i], parentPermission[i], filePermission[i]); testStats(ugi, files[i], ancestorPermission[i], parentPermission[i]); testList(ugi, files[i], dirs[i], ancestorPermission[i], parentPermission[i], filePermission[i]); int next = i == NUM_TEST_PERMISSIONS - 1 ? 0 : i + 1; testRename(ugi, files[i], files[next], ancestorPermission[i], parentPermission[i], ancestorPermission[next], parentPermission[next]); testDeleteFile(ugi, files[i], ancestorPermission[i], parentPermission[i]); testDeleteDir(ugi, dirs[i], ancestorPermission[i], parentPermission[i], filePermission[i], null); } // test non existent file checkNonExistentFile(); } /* A random permission generator that guarantees that each permission * value is generated only once. 
*/ static private class PermissionGenerator { private Random r; private short permissions[] = new short[MAX_PERMISSION + 1]; private int numLeft = MAX_PERMISSION + 1; PermissionGenerator(Random r) { this.r = r; for (int i = 0; i <= MAX_PERMISSION; i++) { permissions[i] = (short) i; } } short next() throws IOException { if (numLeft == 0) { throw new IOException("No more permission is avaialbe"); } int index = r.nextInt(numLeft); // choose which permission to return numLeft--; // decrement the counter // swap the chosen permission with last available permission in the array short temp = permissions[numLeft]; permissions[numLeft] = permissions[index]; permissions[index] = temp; return permissions[numLeft]; } } /* A base class that verifies the permission checking is correct * for an operation */ abstract class PermissionVerifier { protected Path path; protected short ancestorPermission; protected short parentPermission; private short permission; protected short requiredAncestorPermission; protected short requiredParentPermission; protected short requiredPermission; final static protected short opAncestorPermission = SEARCH_MASK; protected short opParentPermission; protected short opPermission; protected UnixUserGroupInformation ugi; /* initialize */ protected void set(Path path, short ancestorPermission, short parentPermission, short permission) { this.path = path; this.ancestorPermission = ancestorPermission; this.parentPermission = parentPermission; this.permission = permission; setOpPermission(); this.ugi = null; } /* Perform an operation and verify if the permission checking is correct */ void verifyPermission(UnixUserGroupInformation ugi) throws LoginException, IOException { if (this.ugi != ugi) { setRequiredPermissions(ugi); this.ugi = ugi; } try { try { call(); assertFalse(expectPermissionDeny()); } catch(AccessControlException e) { assertTrue(expectPermissionDeny()); } } catch (AssertionFailedError ae) { logPermissions(); throw ae; } } /** Log the permissions 
and required permissions */ protected void logPermissions() { LOG.info("required ancestor permission:" + Integer.toOctalString(requiredAncestorPermission)); LOG.info("ancestor permission: " + Integer.toOctalString(ancestorPermission)); LOG.info("required parent permission:" + Integer.toOctalString(requiredParentPermission)); LOG.info("parent permission: " + Integer.toOctalString(parentPermission)); LOG.info("required permission:" + Integer.toOctalString(requiredPermission)); LOG.info("permission: " + Integer.toOctalString(permission)); } /* Return true if an AccessControlException is expected */ protected boolean expectPermissionDeny() { return (requiredPermission & permission) != requiredPermission || (requiredParentPermission & parentPermission) != requiredParentPermission || (requiredAncestorPermission & ancestorPermission) != requiredAncestorPermission; } /* Set the permissions required to pass the permission checking */ protected void setRequiredPermissions(UnixUserGroupInformation ugi) throws IOException { if (SUPERUSER.equals(ugi)) { requiredAncestorPermission = SUPER_MASK; requiredParentPermission = SUPER_MASK; requiredPermission = SUPER_MASK; } else if (USER1.equals(ugi)) { requiredAncestorPermission = (short)(opAncestorPermission & OWNER_MASK); requiredParentPermission = (short)(opParentPermission & OWNER_MASK); requiredPermission = (short)(opPermission & OWNER_MASK); } else if (USER2.equals(ugi)) { requiredAncestorPermission = (short)(opAncestorPermission & GROUP_MASK); requiredParentPermission = (short)(opParentPermission & GROUP_MASK); requiredPermission = (short)(opPermission & GROUP_MASK); } else if (USER3.equals(ugi)) { requiredAncestorPermission = (short)(opAncestorPermission & OTHER_MASK); requiredParentPermission = (short)(opParentPermission & OTHER_MASK); requiredPermission = (short)(opPermission & OTHER_MASK); } else { throw new IllegalArgumentException("Non-supported user: " + ugi); } } /* Set the rwx permissions required for the operation */ 
abstract void setOpPermission(); /* Perform the operation */ abstract void call() throws IOException; } final static private short SUPER_MASK = 0; final static private short READ_MASK = 0444; final static private short WRITE_MASK = 0222; final static private short SEARCH_MASK = 0111; final static private short NULL_MASK = 0; final static private short OWNER_MASK = 0700; final static private short GROUP_MASK = 0070; final static private short OTHER_MASK = 0007; /* A class that verifies the permission checking is correct for create/mkdir*/ private class CreatePermissionVerifier extends PermissionVerifier { private OpType opType; private boolean cleanup = true; /* initialize */ protected void set(Path path, OpType opType, short ancestorPermission, short parentPermission) { super.set(path, ancestorPermission, parentPermission, NULL_MASK); setOpType(opType); } void setCleanup(boolean cleanup) { this.cleanup = cleanup; } /* set if the operation mkdir/create */ void setOpType(OpType opType) { this.opType = opType; } @Override void setOpPermission() { this.opParentPermission = SEARCH_MASK | WRITE_MASK; } @Override void call() throws IOException { create(opType, path); if (cleanup) { fs.delete(path, true); } } } private CreatePermissionVerifier createVerifier = new CreatePermissionVerifier(); /* test if the permission checking of create/mkdir is correct */ private void testCreateMkdirs(UnixUserGroupInformation ugi, Path path, short ancestorPermission, short parentPermission) throws Exception { createVerifier.set(path, OpType.MKDIRS, ancestorPermission, parentPermission); createVerifier.verifyPermission(ugi); createVerifier.setOpType(OpType.CREATE); createVerifier.setCleanup(false); createVerifier.verifyPermission(ugi); createVerifier.setCleanup(true); createVerifier.verifyPermission(ugi); // test overWritten } /* A class that verifies the permission checking is correct for open */ private class OpenPermissionVerifier extends PermissionVerifier { @Override void 
setOpPermission() { this.opParentPermission = SEARCH_MASK; this.opPermission = READ_MASK; } @Override void call() throws IOException { FSDataInputStream in = fs.open(path); in.close(); } } private OpenPermissionVerifier openVerifier = new OpenPermissionVerifier(); /* test if the permission checking of open is correct */ private void testOpen(UnixUserGroupInformation ugi, Path path, short ancestorPermission, short parentPermission, short filePermission) throws Exception { openVerifier .set(path, ancestorPermission, parentPermission, filePermission); openVerifier.verifyPermission(ugi); } /* A class that verifies the permission checking is correct for * setReplication */ private class SetReplicationPermissionVerifier extends PermissionVerifier { @Override void setOpPermission() { this.opParentPermission = SEARCH_MASK; this.opPermission = WRITE_MASK; } @Override void call() throws IOException { fs.setReplication(path, (short) 1); } } private SetReplicationPermissionVerifier replicatorVerifier = new SetReplicationPermissionVerifier(); /* test if the permission checking of setReplication is correct */ private void testSetReplication(UnixUserGroupInformation ugi, Path path, short ancestorPermission, short parentPermission, short filePermission) throws Exception { replicatorVerifier.set(path, ancestorPermission, parentPermission, filePermission); replicatorVerifier.verifyPermission(ugi); } /* A class that verifies the permission checking is correct for * setTimes */ private class SetTimesPermissionVerifier extends PermissionVerifier { @Override void setOpPermission() { this.opParentPermission = SEARCH_MASK; this.opPermission = WRITE_MASK; diff --git a/src/test/org/apache/hadoop/security/TestPermission.java b/src/test/org/apache/hadoop/security/TestPermission.java index c77b777..b6138a0 100644 --- a/src/test/org/apache/hadoop/security/TestPermission.java +++ b/src/test/org/apache/hadoop/security/TestPermission.java @@ -1,229 +1,229 @@ /** * Licensed to the Apache Software 
Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.security; import java.io.IOException; import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.permission.*; import org.apache.hadoop.util.StringUtils; import org.apache.log4j.Level; import junit.framework.TestCase; /** Unit tests for permission */ public class TestPermission extends TestCase { public static final Log LOG = LogFactory.getLog(TestPermission.class); { ((Log4JLogger)UserGroupInformation.LOG).getLogger().setLevel(Level.ALL); } final private static Path ROOT_PATH = new Path("/data"); final private static Path CHILD_DIR1 = new Path(ROOT_PATH, "child1"); final private static Path CHILD_DIR2 = new Path(ROOT_PATH, "child2"); final private static Path CHILD_FILE1 = new Path(ROOT_PATH, "file1"); final private static Path CHILD_FILE2 = new Path(ROOT_PATH, "file2"); final private static int FILE_LEN = 100; final private static Random RAN = new Random(); final private static String USER_NAME = "user" + RAN.nextInt(); final private static String[] 
GROUP_NAMES = {"group1", "group2"}; static FsPermission checkPermission(FileSystem fs, String path, FsPermission expected) throws IOException { FileStatus s = fs.getFileStatus(new Path(path)); LOG.info(s.getPath() + ": " + s.isDir() + " " + s.getPermission() + ":" + s.getOwner() + ":" + s.getGroup()); if (expected != null) { assertEquals(expected, s.getPermission()); assertEquals(expected.toShort(), s.getPermission().toShort()); } return s.getPermission(); } public void testCreate() throws Exception { Configuration conf = new Configuration(); conf.setBoolean("dfs.permissions", true); - conf.setInt(FsPermission.UMASK_LABEL, 0); + conf.set(FsPermission.UMASK_LABEL, "000"); MiniDFSCluster cluster = null; FileSystem fs = null; try { cluster = new MiniDFSCluster(conf, 3, true, null); cluster.waitActive(); fs = FileSystem.get(conf); FsPermission rootPerm = checkPermission(fs, "/", null); FsPermission inheritPerm = FsPermission.createImmutable( (short)(rootPerm.toShort() | 0300)); FsPermission dirPerm = new FsPermission((short)0777); fs.mkdirs(new Path("/a1/a2/a3"), dirPerm); checkPermission(fs, "/a1", inheritPerm); checkPermission(fs, "/a1/a2", inheritPerm); checkPermission(fs, "/a1/a2/a3", dirPerm); FsPermission filePerm = new FsPermission((short)0444); FSDataOutputStream out = fs.create(new Path("/b1/b2/b3.txt"), filePerm, true, conf.getInt("io.file.buffer.size", 4096), fs.getDefaultReplication(), fs.getDefaultBlockSize(), null); out.write(123); out.close(); checkPermission(fs, "/b1", inheritPerm); checkPermission(fs, "/b1/b2", inheritPerm); checkPermission(fs, "/b1/b2/b3.txt", filePerm); - conf.setInt(FsPermission.UMASK_LABEL, 0022); + conf.set(FsPermission.UMASK_LABEL, "022"); FsPermission permission = FsPermission.createImmutable((short)0666); FileSystem.mkdirs(fs, new Path("/c1"), new FsPermission(permission)); FileSystem.create(fs, new Path("/c1/c2.txt"), new FsPermission(permission)); checkPermission(fs, "/c1", permission); checkPermission(fs, "/c1/c2.txt", 
permission); } finally { try { if(fs != null) fs.close(); } catch(Exception e) { LOG.error(StringUtils.stringifyException(e)); } try { if(cluster != null) cluster.shutdown(); } catch(Exception e) { LOG.error(StringUtils.stringifyException(e)); } } } public void testFilePermision() throws Exception { Configuration conf = new Configuration(); conf.setBoolean("dfs.permissions", true); MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null); cluster.waitActive(); try { FileSystem nnfs = FileSystem.get(conf); // test permissions on files that do not exist assertFalse(nnfs.exists(CHILD_FILE1)); try { nnfs.setOwner(CHILD_FILE1, "foo", "bar"); assertTrue(false); } catch(java.io.FileNotFoundException e) { LOG.info("GOOD: got " + e); } try { nnfs.setPermission(CHILD_FILE1, new FsPermission((short)0777)); assertTrue(false); } catch(java.io.FileNotFoundException e) { LOG.info("GOOD: got " + e); } // following dir/file creations are legal nnfs.mkdirs(CHILD_DIR1); FSDataOutputStream out = nnfs.create(CHILD_FILE1); byte data[] = new byte[FILE_LEN]; RAN.nextBytes(data); out.write(data); out.close(); nnfs.setPermission(CHILD_FILE1, new FsPermission((short)0700)); // following read is legal byte dataIn[] = new byte[FILE_LEN]; FSDataInputStream fin = nnfs.open(CHILD_FILE1); int bytesRead = fin.read(dataIn); assertTrue(bytesRead == FILE_LEN); for(int i=0; i<FILE_LEN; i++) { assertEquals(data[i], dataIn[i]); } //////////////////////////////////////////////////////////////// // test illegal file/dir creation UnixUserGroupInformation userGroupInfo = new UnixUserGroupInformation( USER_NAME, GROUP_NAMES ); UnixUserGroupInformation.saveToConf(conf, UnixUserGroupInformation.UGI_PROPERTY_NAME, userGroupInfo); FileSystem userfs = FileSystem.get(conf); // make sure mkdir of a existing directory that is not owned by // this user does not throw an exception. 
userfs.mkdirs(CHILD_DIR1); // illegal mkdir assertTrue(!canMkdirs(userfs, CHILD_DIR2)); // illegal file creation assertTrue(!canCreate(userfs, CHILD_FILE2)); // illegal file open assertTrue(!canOpen(userfs, CHILD_FILE1)); nnfs.setPermission(ROOT_PATH, new FsPermission((short)0755)); nnfs.setPermission(CHILD_DIR1, new FsPermission((short)0777)); nnfs.setPermission(new Path("/"), new FsPermission((short)0777)); final Path RENAME_PATH = new Path("/foo/bar"); userfs.mkdirs(RENAME_PATH); assertTrue(canRename(userfs, RENAME_PATH, CHILD_DIR1)); } finally { if(cluster != null) cluster.shutdown(); } } static boolean canMkdirs(FileSystem fs, Path p) throws IOException { try { fs.mkdirs(p); return true; } catch(AccessControlException e) { return false; } } static boolean canCreate(FileSystem fs, Path p) throws IOException { try { fs.create(p); return true; } catch(AccessControlException e) { return false; } } static boolean canOpen(FileSystem fs, Path p) throws IOException { try { fs.open(p); return true; } catch(AccessControlException e) { return false; } } static boolean canRename(FileSystem fs, Path src, Path dst ) throws IOException { try { fs.rename(src, dst); return true; } catch(AccessControlException e) { return false; } } }
jaxlaw/hadoop-common
13497f6cdd407d98970247fb9141b81c893fe24d
MAPREDUCE-1068. Added a verbose error message when user specifies an incorrect -file parameter. Contributed by Amareshwari Sriramadasu.
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 48f83f4..af1f818 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,458 +1,461 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383002 + MAPREDUCE-1068. Added a verbose error message when user specifies an + incorrect -file parameter. (Amareshwari Sriramadasu via acmurthy) + MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. 
Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. 
Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. 
(suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. 
Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. 
HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. 
http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. 
http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). 
http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java b/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java index a65d440..d29142f 100644 --- a/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java +++ b/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java @@ -1,863 +1,865 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.streaming; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.net.URLEncoder; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.ListIterator; import java.util.Map; import java.util.TreeMap; import java.util.TreeSet; import org.apache.commons.cli.BasicParser; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.OptionBuilder; import org.apache.commons.cli.OptionGroup; import org.apache.commons.cli.Options; import org.apache.commons.cli.Parser; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.filecache.DistributedCache; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.FileAlreadyExistsException; import org.apache.hadoop.mapred.FileInputFormat; import org.apache.hadoop.mapred.FileOutputFormat; import org.apache.hadoop.mapred.InvalidJobConfException; import org.apache.hadoop.mapred.JobClient; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobID; import org.apache.hadoop.mapred.KeyValueTextInputFormat; import org.apache.hadoop.mapred.RunningJob; import org.apache.hadoop.mapred.SequenceFileAsTextInputFormat; import 
org.apache.hadoop.mapred.SequenceFileInputFormat; import org.apache.hadoop.mapred.TextInputFormat; import org.apache.hadoop.mapred.TextOutputFormat; import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner; import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; /** All the client-side work happens here. * (Jar packaging, MapRed job submission and monitoring) */ public class StreamJob implements Tool { protected static final Log LOG = LogFactory.getLog(StreamJob.class.getName()); final static String REDUCE_NONE = "NONE"; /** -----------Streaming CLI Implementation **/ private CommandLineParser parser = new BasicParser(); private Options allOptions; /**@deprecated use StreamJob() with ToolRunner or set the * Configuration using {@link #setConf(Configuration)} and * run with {@link #run(String[])}. */ @Deprecated public StreamJob(String[] argv, boolean mayExit) { this(); argv_ = argv; this.config_ = new Configuration(); } public StreamJob() { setupOptions(); this.config_ = new Configuration(); } @Override public Configuration getConf() { return config_; } @Override public void setConf(Configuration conf) { this.config_ = conf; } @Override public int run(String[] args) throws Exception { try { this.argv_ = args; init(); preProcessArgs(); parseArgv(); postProcessArgs(); setJobConf(); return submitAndMonitorJob(); }catch (IllegalArgumentException ex) { //ignore, since log will already be printed + // print the log in debug mode. + LOG.debug("Error in streaming job", ex); return 1; } } /** * This method creates a streaming job from the given argument list. 
* The created object can be used and/or submitted to a jobtracker for * execution by a job agent such as JobControl * @param argv the list args for creating a streaming job * @return the created JobConf object * @throws IOException */ static public JobConf createJob(String[] argv) throws IOException { StreamJob job = new StreamJob(); job.argv_ = argv; job.init(); job.preProcessArgs(); job.parseArgv(); job.postProcessArgs(); job.setJobConf(); return job.jobConf_; } /** * This is the method that actually * intializes the job conf and submits the job * to the jobtracker * @throws IOException * @deprecated use {@link #run(String[])} instead. */ @Deprecated public int go() throws IOException { try { return run(argv_); } catch (Exception ex) { throw new IOException(ex.getMessage()); } } protected void init() { try { env_ = new Environment(); } catch (IOException io) { throw new RuntimeException(io); } } void preProcessArgs() { verbose_ = false; addTaskEnvironment_ = ""; } void postProcessArgs() throws IOException { if (inputSpecs_.size() == 0) { fail("Required argument: -input <name>"); } if (output_ == null) { fail("Required argument: -output "); } msg("addTaskEnvironment=" + addTaskEnvironment_); Iterator it = packageFiles_.iterator(); while (it.hasNext()) { File f = new File((String) it.next()); if (f.isFile()) { shippedCanonFiles_.add(f.getCanonicalPath()); } } msg("shippedCanonFiles_=" + shippedCanonFiles_); // careful with class names.. 
mapCmd_ = unqualifyIfLocalPath(mapCmd_); comCmd_ = unqualifyIfLocalPath(comCmd_); redCmd_ = unqualifyIfLocalPath(redCmd_); } String unqualifyIfLocalPath(String cmd) throws IOException { if (cmd == null) { // } else { String prog = cmd; String args = ""; int s = cmd.indexOf(" "); if (s != -1) { prog = cmd.substring(0, s); args = cmd.substring(s + 1); } String progCanon; try { progCanon = new File(prog).getCanonicalPath(); } catch (IOException io) { progCanon = prog; } boolean shipped = shippedCanonFiles_.contains(progCanon); msg("shipped: " + shipped + " " + progCanon); if (shipped) { // Change path to simple filename. // That way when PipeMapRed calls Runtime.exec(), // it will look for the excutable in Task's working dir. // And this is where TaskRunner unjars our job jar. prog = new File(prog).getName(); if (args.length() > 0) { cmd = prog + " " + args; } else { cmd = prog; } } } msg("cmd=" + cmd); return cmd; } void parseArgv(){ CommandLine cmdLine = null; try{ cmdLine = parser.parse(allOptions, argv_); }catch(Exception oe){ LOG.error(oe.getMessage()); exitUsage(argv_.length > 0 && "-info".equals(argv_[0])); } if (cmdLine != null){ verbose_ = cmdLine.hasOption("verbose"); detailedUsage_ = cmdLine.hasOption("info"); debug_ = cmdLine.hasOption("debug")? 
debug_ + 1 : debug_; String[] values = cmdLine.getOptionValues("input"); if (values != null && values.length > 0) { for (String input : values) { inputSpecs_.add(input); } } output_ = (String) cmdLine.getOptionValue("output"); mapCmd_ = (String)cmdLine.getOptionValue("mapper"); comCmd_ = (String)cmdLine.getOptionValue("combiner"); redCmd_ = (String)cmdLine.getOptionValue("reducer"); values = cmdLine.getOptionValues("file"); if (values != null && values.length > 0) { for (String file : values) { packageFiles_.add(file); } validate(packageFiles_); } String fsName = (String)cmdLine.getOptionValue("dfs"); if (null != fsName){ LOG.warn("-dfs option is deprecated, please use -fs instead."); config_.set("fs.default.name", fsName); } additionalConfSpec_ = (String)cmdLine.getOptionValue("additionalconfspec"); inputFormatSpec_ = (String)cmdLine.getOptionValue("inputformat"); outputFormatSpec_ = (String)cmdLine.getOptionValue("outputformat"); numReduceTasksSpec_ = (String)cmdLine.getOptionValue("numReduceTasks"); partitionerSpec_ = (String)cmdLine.getOptionValue("partitioner"); inReaderSpec_ = (String)cmdLine.getOptionValue("inputreader"); mapDebugSpec_ = (String)cmdLine.getOptionValue("mapdebug"); reduceDebugSpec_ = (String)cmdLine.getOptionValue("reducedebug"); String[] car = cmdLine.getOptionValues("cacheArchive"); if (null != car && car.length > 0){ LOG.warn("-cacheArchive option is deprecated, please use -archives instead."); for(String s : car){ cacheArchives = (cacheArchives == null)?s :cacheArchives + "," + s; } } String[] caf = cmdLine.getOptionValues("cacheFile"); if (null != caf && caf.length > 0){ LOG.warn("-cacheFile option is deprecated, please use -files instead."); for(String s : caf){ cacheFiles = (cacheFiles == null)?s :cacheFiles + "," + s; } } String[] jobconf = cmdLine.getOptionValues("jobconf"); if (null != jobconf && jobconf.length > 0){ LOG.warn("-jobconf option is deprecated, please use -D instead."); for(String s : jobconf){ String []parts = 
s.split("=", 2); config_.set(parts[0], parts[1]); } } String[] cmd = cmdLine.getOptionValues("cmdenv"); if (null != cmd && cmd.length > 0){ for(String s : cmd) { if (addTaskEnvironment_.length() > 0) { addTaskEnvironment_ += " "; } addTaskEnvironment_ += s; } } }else { exitUsage(argv_.length > 0 && "-info".equals(argv_[0])); } } protected void msg(String msg) { if (verbose_) { System.out.println("STREAM: " + msg); } } private Option createOption(String name, String desc, String argName, int max, boolean required){ return OptionBuilder .withArgName(argName) .hasArgs(max) .withDescription(desc) .isRequired(required) .create(name); } private Option createBoolOption(String name, String desc){ return OptionBuilder.withDescription(desc).create(name); } - private static void validate(final List<String> values) + private void validate(final List<String> values) throws IllegalArgumentException { for (String file : values) { File f = new File(file); if (!f.canRead()) { - throw new IllegalArgumentException("File : " + f.getAbsolutePath() - + " is not readable."); + fail("File: " + f.getAbsolutePath() + + " does not exist, or is not readable."); } } } private void setupOptions(){ Option input = createOption("input", "DFS input file(s) for the Map step", "path", Integer.MAX_VALUE, true); Option output = createOption("output", "DFS output directory for the Reduce step", "path", 1, true); Option mapper = createOption("mapper", "The streaming command to run", "cmd", 1, false); Option combiner = createOption("combiner", "The streaming command to run", "cmd", 1, false); // reducer could be NONE Option reducer = createOption("reducer", "The streaming command to run", "cmd", 1, false); Option file = createOption("file", "File to be shipped in the Job jar file", "file", Integer.MAX_VALUE, false); Option dfs = createOption("dfs", "Optional. Override DFS configuration", "<h:p>|local", 1, false); Option jt = createOption("jt", "Optional. 
Override JobTracker configuration", "<h:p>|local", 1, false); Option additionalconfspec = createOption("additionalconfspec", "Optional.", "spec", 1, false); Option inputformat = createOption("inputformat", "Optional.", "spec", 1, false); Option outputformat = createOption("outputformat", "Optional.", "spec", 1, false); Option partitioner = createOption("partitioner", "Optional.", "spec", 1, false); Option numReduceTasks = createOption("numReduceTasks", "Optional.", "spec",1, false ); Option inputreader = createOption("inputreader", "Optional.", "spec", 1, false); Option mapDebug = createOption("mapdebug", "Optional.", "spec", 1, false); Option reduceDebug = createOption("reducedebug", "Optional", "spec",1, false); Option jobconf = createOption("jobconf", "(n=v) Optional. Add or override a JobConf property.", "spec", 1, false); Option cmdenv = createOption("cmdenv", "(n=v) Pass env.var to streaming commands.", "spec", 1, false); Option cacheFile = createOption("cacheFile", "File name URI", "fileNameURI", Integer.MAX_VALUE, false); Option cacheArchive = createOption("cacheArchive", "File name URI", "fileNameURI", Integer.MAX_VALUE, false); // boolean properties Option verbose = createBoolOption("verbose", "print verbose output"); Option info = createBoolOption("info", "print verbose output"); Option help = createBoolOption("help", "print this help message"); Option debug = createBoolOption("debug", "print debug output"); Option inputtagged = createBoolOption("inputtagged", "inputtagged"); allOptions = new Options(). addOption(input). addOption(output). addOption(mapper). addOption(combiner). addOption(reducer). addOption(file). addOption(dfs). addOption(jt). addOption(additionalconfspec). addOption(inputformat). addOption(outputformat). addOption(partitioner). addOption(numReduceTasks). addOption(inputreader). addOption(mapDebug). addOption(reduceDebug). addOption(jobconf). addOption(cmdenv). addOption(cacheFile). addOption(cacheArchive). addOption(verbose). 
addOption(info). addOption(debug). addOption(inputtagged). addOption(help); } public void exitUsage(boolean detailed) { // 1 2 3 4 5 6 7 //1234567890123456789012345678901234567890123456789012345678901234567890123456789 System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar \\"); System.out.println(" $HADOOP_HOME/hadoop-streaming.jar [options]"); System.out.println("Options:"); System.out.println(" -input <path> DFS input file(s) for the Map step"); System.out.println(" -output <path> DFS output directory for the Reduce step"); System.out.println(" -mapper <cmd|JavaClassName> The streaming command to run"); System.out.println(" -combiner <cmd|JavaClassName>" + " The streaming command to run"); System.out.println(" -reducer <cmd|JavaClassName> The streaming command to run"); System.out.println(" -file <file> File/dir to be shipped in the Job jar file"); System.out.println(" -inputformat TextInputFormat(default)|SequenceFileAsTextInputFormat|JavaClassName Optional."); System.out.println(" -outputformat TextOutputFormat(default)|JavaClassName Optional."); System.out.println(" -partitioner JavaClassName Optional."); System.out.println(" -numReduceTasks <num> Optional."); System.out.println(" -inputreader <spec> Optional."); System.out.println(" -cmdenv <n>=<v> Optional. Pass env.var to streaming commands"); System.out.println(" -mapdebug <path> Optional. " + "To run this script when a map task fails "); System.out.println(" -reducedebug <path> Optional." 
+ " To run this script when a reduce task fails "); System.out.println(" -verbose"); System.out.println(); GenericOptionsParser.printGenericCommandUsage(System.out); if (!detailed) { System.out.println(); System.out.println("For more details about these options:"); System.out.println("Use $HADOOP_HOME/bin/hadoop jar build/hadoop-streaming.jar -info"); fail(""); } System.out.println(); System.out.println("In -input: globbing on <path> is supported and can have multiple -input"); System.out.println("Default Map input format: a line is a record in UTF-8"); System.out.println(" the key part ends at first TAB, the rest of the line is the value"); System.out.println("Custom input format: -inputformat package.MyInputFormat "); System.out.println("Map output format, reduce input/output format:"); System.out.println(" Format defined by what the mapper command outputs. Line-oriented"); System.out.println(); System.out.println("The files named in the -file argument[s] end up in the"); System.out.println(" working directory when the mapper and reducer are run."); System.out.println(" The location of this working directory is unspecified."); System.out.println(); System.out.println("To set the number of reduce tasks (num. 
of output files):"); System.out.println(" -D mapred.reduce.tasks=10"); System.out.println("To skip the sort/combine/shuffle/sort/reduce step:"); System.out.println(" Use -numReduceTasks 0"); System.out .println(" A Task's Map output then becomes a 'side-effect output' rather than a reduce input"); System.out .println(" This speeds up processing, This also feels more like \"in-place\" processing"); System.out.println(" because the input filename and the map input order are preserved"); System.out.println(" This equivalent -reducer NONE"); System.out.println(); System.out.println("To speed up the last maps:"); System.out.println(" -D mapred.map.tasks.speculative.execution=true"); System.out.println("To speed up the last reduces:"); System.out.println(" -D mapred.reduce.tasks.speculative.execution=true"); System.out.println("To name the job (appears in the JobTracker Web UI):"); System.out.println(" -D mapred.job.name='My Job' "); System.out.println("To change the local temp directory:"); System.out.println(" -D dfs.data.dir=/tmp/dfs"); System.out.println(" -D stream.tmpdir=/tmp/streaming"); System.out.println("Additional local temp directories with -cluster local:"); System.out.println(" -D mapred.local.dir=/tmp/local"); System.out.println(" -D mapred.system.dir=/tmp/system"); System.out.println(" -D mapred.temp.dir=/tmp/temp"); System.out.println("To treat tasks with non-zero exit status as SUCCEDED:"); System.out.println(" -D stream.non.zero.exit.is.failure=false"); System.out.println("Use a custom hadoopStreaming build along a standard hadoop install:"); System.out.println(" $HADOOP_HOME/bin/hadoop jar /path/my-hadoop-streaming.jar [...]\\"); System.out .println(" [...] 
-D stream.shipped.hadoopstreaming=/path/my-hadoop-streaming.jar"); System.out.println("For more details about jobconf parameters see:"); System.out.println(" http://wiki.apache.org/hadoop/JobConfFile"); System.out.println("To set an environement variable in a streaming command:"); System.out.println(" -cmdenv EXAMPLE_DIR=/home/example/dictionaries/"); System.out.println(); System.out.println("Shortcut:"); System.out .println(" setenv HSTREAMING \"$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar\""); System.out.println(); System.out.println("Example: $HSTREAMING -mapper \"/usr/local/bin/perl5 filter.pl\""); System.out.println(" -file /local/filter.pl -input \"/logs/0604*/*\" [...]"); System.out.println(" Ships a script, invokes the non-shipped perl interpreter"); System.out.println(" Shipped files go to the working directory so filter.pl is found by perl"); System.out.println(" Input files are all the daily logs for days in month 2006-04"); fail(""); } public void fail(String message) { System.err.println(message); throw new IllegalArgumentException(message); } // -------------------------------------------- protected String getHadoopClientHome() { String h = env_.getProperty("HADOOP_HOME"); // standard Hadoop if (h == null) { //fail("Missing required environment variable: HADOOP_HOME"); h = "UNDEF"; } return h; } protected boolean isLocalHadoop() { return StreamUtil.isLocalJobTracker(jobConf_); } @Deprecated protected String getClusterNick() { return "default"; } /** @return path to the created Jar file or null if no files are necessary. 
*/ protected String packageJobJar() throws IOException { ArrayList unjarFiles = new ArrayList(); // Runtime code: ship same version of code as self (job submitter code) // usually found in: build/contrib or build/hadoop-<version>-dev-streaming.jar // First try an explicit spec: it's too hard to find our own location in this case: // $HADOOP_HOME/bin/hadoop jar /not/first/on/classpath/custom-hadoop-streaming.jar // where findInClasspath() would find the version of hadoop-streaming.jar in $HADOOP_HOME String runtimeClasses = config_.get("stream.shipped.hadoopstreaming"); // jar or class dir if (runtimeClasses == null) { runtimeClasses = StreamUtil.findInClasspath(StreamJob.class.getName()); } if (runtimeClasses == null) { throw new IOException("runtime classes not found: " + getClass().getPackage()); } else { msg("Found runtime classes in: " + runtimeClasses); } if (isLocalHadoop()) { // don't package class files (they might get unpackaged in "." and then // hide the intended CLASSPATH entry) // we still package everything else (so that scripts and executable are found in // Task workdir like distributed Hadoop) } else { if (new File(runtimeClasses).isDirectory()) { packageFiles_.add(runtimeClasses); } else { unjarFiles.add(runtimeClasses); } } if (packageFiles_.size() + unjarFiles.size() == 0) { return null; } String tmp = jobConf_.get("stream.tmpdir"); //, "/tmp/${user.name}/" File tmpDir = (tmp == null) ? 
null : new File(tmp); // tmpDir=null means OS default tmp dir File jobJar = File.createTempFile("streamjob", ".jar", tmpDir); System.out.println("packageJobJar: " + packageFiles_ + " " + unjarFiles + " " + jobJar + " tmpDir=" + tmpDir); if (debug_ == 0) { jobJar.deleteOnExit(); } JarBuilder builder = new JarBuilder(); if (verbose_) { builder.setVerbose(true); } String jobJarName = jobJar.getAbsolutePath(); builder.merge(packageFiles_, unjarFiles, jobJarName); return jobJarName; } /** * get the uris of all the files/caches */ protected void getURIs(String lcacheArchives, String lcacheFiles) { String archives[] = StringUtils.getStrings(lcacheArchives); String files[] = StringUtils.getStrings(lcacheFiles); fileURIs = StringUtils.stringToURI(files); archiveURIs = StringUtils.stringToURI(archives); } protected void setJobConf() throws IOException { if (additionalConfSpec_ != null) { LOG.warn("-additionalconfspec option is deprecated, please use -conf instead."); config_.addResource(new Path(additionalConfSpec_)); } // general MapRed job properties jobConf_ = new JobConf(config_); // All streaming jobs get the task timeout value // from the configuration settings. // The correct FS must be set before this is called! // (to resolve local vs. 
dfs drive letter differences) // (mapred.working.dir will be lazily initialized ONCE and depends on FS) for (int i = 0; i < inputSpecs_.size(); i++) { FileInputFormat.addInputPaths(jobConf_, (String) inputSpecs_.get(i)); } jobConf_.set("stream.numinputspecs", "" + inputSpecs_.size()); String defaultPackage = this.getClass().getPackage().getName(); Class c; Class fmt = null; if (inReaderSpec_ == null && inputFormatSpec_ == null) { fmt = TextInputFormat.class; } else if (inputFormatSpec_ != null) { if (inputFormatSpec_.equals(TextInputFormat.class.getName()) || inputFormatSpec_.equals(TextInputFormat.class.getCanonicalName()) || inputFormatSpec_.equals(TextInputFormat.class.getSimpleName())) { fmt = TextInputFormat.class; } else if (inputFormatSpec_.equals(KeyValueTextInputFormat.class .getName()) || inputFormatSpec_.equals(KeyValueTextInputFormat.class .getCanonicalName()) || inputFormatSpec_.equals(KeyValueTextInputFormat.class.getSimpleName())) { } else if (inputFormatSpec_.equals(SequenceFileInputFormat.class .getName()) || inputFormatSpec_ .equals(org.apache.hadoop.mapred.SequenceFileInputFormat.class .getCanonicalName()) || inputFormatSpec_ .equals(org.apache.hadoop.mapred.SequenceFileInputFormat.class.getSimpleName())) { } else if (inputFormatSpec_.equals(SequenceFileAsTextInputFormat.class .getName()) || inputFormatSpec_.equals(SequenceFileAsTextInputFormat.class .getCanonicalName()) || inputFormatSpec_.equals(SequenceFileAsTextInputFormat.class.getSimpleName())) { fmt = SequenceFileAsTextInputFormat.class; } else { c = StreamUtil.goodClassOrNull(jobConf_, inputFormatSpec_, defaultPackage); if (c != null) { fmt = c; } else { fail("-inputformat : class not found : " + inputFormatSpec_); } } } if (fmt == null) { fmt = StreamInputFormat.class; } jobConf_.setInputFormat(fmt); jobConf_.setOutputKeyClass(Text.class); jobConf_.setOutputValueClass(Text.class); jobConf_.set("stream.addenvironment", addTaskEnvironment_); if (mapCmd_ != null) { c = 
StreamUtil.goodClassOrNull(jobConf_, mapCmd_, defaultPackage); if (c != null) { jobConf_.setMapperClass(c); } else { jobConf_.setMapperClass(PipeMapper.class); jobConf_.setMapRunnerClass(PipeMapRunner.class); jobConf_.set("stream.map.streamprocessor", URLEncoder.encode(mapCmd_, "UTF-8")); } } if (comCmd_ != null) { c = StreamUtil.goodClassOrNull(jobConf_, comCmd_, defaultPackage); if (c != null) { jobConf_.setCombinerClass(c); } else { jobConf_.setCombinerClass(PipeCombiner.class); jobConf_.set("stream.combine.streamprocessor", URLEncoder.encode( comCmd_, "UTF-8")); } } boolean reducerNone_ = false; if (redCmd_ != null) { reducerNone_ = redCmd_.equals(REDUCE_NONE); if (redCmd_.compareToIgnoreCase("aggregate") == 0) { jobConf_.setReducerClass(ValueAggregatorReducer.class); jobConf_.setCombinerClass(ValueAggregatorCombiner.class); } else { c = StreamUtil.goodClassOrNull(jobConf_, redCmd_, defaultPackage); if (c != null) { jobConf_.setReducerClass(c); } else { jobConf_.setReducerClass(PipeReducer.class); jobConf_.set("stream.reduce.streamprocessor", URLEncoder.encode( redCmd_, "UTF-8")); } } } if (inReaderSpec_ != null) { String[] args = inReaderSpec_.split(","); String readerClass = args[0]; // this argument can only be a Java class c = StreamUtil.goodClassOrNull(jobConf_, readerClass, defaultPackage); if (c != null) { jobConf_.set("stream.recordreader.class", c.getName()); } else { fail("-inputreader: class not found: " + readerClass); } for (int i = 1; i < args.length; i++) { String[] nv = args[i].split("=", 2); String k = "stream.recordreader." + nv[0]; String v = (nv.length > 1) ? 
nv[1] : ""; jobConf_.set(k, v); } } FileOutputFormat.setOutputPath(jobConf_, new Path(output_)); fmt = null; if (outputFormatSpec_!= null) { c = StreamUtil.goodClassOrNull(jobConf_, outputFormatSpec_, defaultPackage); if (c != null) { fmt = c; } else { fail("-outputformat : class not found : " + outputFormatSpec_); } } if (fmt == null) { fmt = TextOutputFormat.class; } jobConf_.setOutputFormat(fmt); if (partitionerSpec_!= null) { c = StreamUtil.goodClassOrNull(jobConf_, partitionerSpec_, defaultPackage); if (c != null) { jobConf_.setPartitionerClass(c); } else { fail("-partitioner : class not found : " + partitionerSpec_); } } if (numReduceTasksSpec_!= null) { int numReduceTasks = Integer.parseInt(numReduceTasksSpec_); jobConf_.setNumReduceTasks(numReduceTasks); } if (reducerNone_) { jobConf_.setNumReduceTasks(0); } if(mapDebugSpec_ != null){ jobConf_.setMapDebugScript(mapDebugSpec_); } if(reduceDebugSpec_ != null){ jobConf_.setReduceDebugScript(reduceDebugSpec_); } // last, allow user to override anything // (although typically used with properties we didn't touch) jar_ = packageJobJar(); if (jar_ != null) { jobConf_.setJar(jar_); } if ((cacheArchives != null) || (cacheFiles != null)){ getURIs(cacheArchives, cacheFiles); boolean b = DistributedCache.checkURIs(fileURIs, archiveURIs); if (!b) fail(LINK_URI); } DistributedCache.createSymlink(jobConf_); // set the jobconf for the caching parameters if (cacheArchives != null) DistributedCache.setCacheArchives(archiveURIs, jobConf_); if (cacheFiles != null) DistributedCache.setCacheFiles(fileURIs, jobConf_); if (verbose_) { listJobConfProperties(); } msg("submitting to jobconf: " + getJobTrackerHostPort()); } /** * Prints out the jobconf properties on stdout * when verbose is specified. 
*/ protected void listJobConfProperties() { msg("==== JobConf properties:"); Iterator it = jobConf_.iterator(); TreeMap sorted = new TreeMap(); while(it.hasNext()) { Map.Entry en = (Map.Entry)it.next(); sorted.put(en.getKey(), en.getValue()); } it = sorted.entrySet().iterator(); while(it.hasNext()) { Map.Entry en = (Map.Entry)it.next(); msg(en.getKey() + "=" + en.getValue()); } msg("===="); } protected String getJobTrackerHostPort() { return jobConf_.get("mapred.job.tracker"); } protected void jobInfo() { if (isLocalHadoop()) { LOG.info("Job running in-process (local Hadoop)"); } else { String hp = getJobTrackerHostPort(); LOG.info("To kill this job, run:"); LOG.info(getHadoopClientHome() + "/bin/hadoop job -Dmapred.job.tracker=" + hp + " -kill " + jobId_); //LOG.info("Job file: " + running_.getJobFile()); LOG.info("Tracking URL: " + StreamUtil.qualifyHost(running_.getTrackingURL())); } } // Based on JobClient public int submitAndMonitorJob() throws IOException {
jaxlaw/hadoop-common
d3562258db492426d4d5a61139d4cfe49654671d
MAPREDUCE-1171. Allow the read-error notification in shuffle to be configurable. Contributed by Amareshwari Sriramadasu.
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index a454ec8..48f83f4 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,455 +1,458 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383002 + MAPREDUCE-1171. Allow the read-error notification in shuffle to be + configurable. (Amareshwari Sriramadasu via acmurthy) + MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. (Amareshwari Sriramadasu via acmurthy) HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. 
Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). 
Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. 
Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. 
Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. 
HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. 
http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. 
http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). 
http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/mapred/org/apache/hadoop/mapred/ReduceTask.java b/src/mapred/org/apache/hadoop/mapred/ReduceTask.java index 36c1dfa..c59420c 100644 --- a/src/mapred/org/apache/hadoop/mapred/ReduceTask.java +++ b/src/mapred/org/apache/hadoop/mapred/ReduceTask.java @@ -386,2240 +386,2245 @@ class ReduceTask extends Task { setPhase(TaskStatus.Phase.SORT); statusUpdate(umbilical); final FileSystem rfs = FileSystem.getLocal(job).getRaw(); RawKeyValueIterator rIter = isLocal ? 
Merger.merge(job, rfs, job.getMapOutputKeyClass(), job.getMapOutputValueClass(), codec, getMapFiles(rfs, true), !conf.getKeepFailedTaskFiles(), job.getInt("io.sort.factor", 100), new Path(getTaskID().toString()), job.getOutputKeyComparator(), reporter, spilledRecordsCounter, null) : reduceCopier.createKVIterator(job, rfs, reporter); // free up the data structures mapOutputFilesOnDisk.clear(); sortPhase.complete(); // sort is complete setPhase(TaskStatus.Phase.REDUCE); statusUpdate(umbilical); Class keyClass = job.getMapOutputKeyClass(); Class valueClass = job.getMapOutputValueClass(); RawComparator comparator = job.getOutputValueGroupingComparator(); if (useNewApi) { runNewReducer(job, umbilical, reporter, rIter, comparator, keyClass, valueClass); } else { runOldReducer(job, umbilical, reporter, rIter, comparator, keyClass, valueClass); } done(umbilical, reporter); } @SuppressWarnings("unchecked") private <INKEY,INVALUE,OUTKEY,OUTVALUE> void runOldReducer(JobConf job, TaskUmbilicalProtocol umbilical, final TaskReporter reporter, RawKeyValueIterator rIter, RawComparator<INKEY> comparator, Class<INKEY> keyClass, Class<INVALUE> valueClass) throws IOException { Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE> reducer = ReflectionUtils.newInstance(job.getReducerClass(), job); // make output collector String finalName = getOutputName(getPartition()); FileSystem fs = FileSystem.get(job); final RecordWriter<OUTKEY,OUTVALUE> out = job.getOutputFormat().getRecordWriter(fs, job, finalName, reporter); OutputCollector<OUTKEY,OUTVALUE> collector = new OutputCollector<OUTKEY,OUTVALUE>() { public void collect(OUTKEY key, OUTVALUE value) throws IOException { out.write(key, value); reduceOutputCounter.increment(1); // indicate that progress update needs to be sent reporter.progress(); } }; // apply reduce function try { //increment processed counter only if skipping feature is enabled boolean incrProcCount = SkipBadRecords.getReducerMaxSkipGroups(job)>0 && 
SkipBadRecords.getAutoIncrReducerProcCount(job); ReduceValuesIterator<INKEY,INVALUE> values = isSkipping() ? new SkippingReduceValuesIterator<INKEY,INVALUE>(rIter, comparator, keyClass, valueClass, job, reporter, umbilical) : new ReduceValuesIterator<INKEY,INVALUE>(rIter, job.getOutputValueGroupingComparator(), keyClass, valueClass, job, reporter); values.informReduceProgress(); while (values.more()) { reduceInputKeyCounter.increment(1); reducer.reduce(values.getKey(), values, collector, reporter); if(incrProcCount) { reporter.incrCounter(SkipBadRecords.COUNTER_GROUP, SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS, 1); } values.nextKey(); values.informReduceProgress(); } //Clean up: repeated in catch block below reducer.close(); out.close(reporter); //End of clean up. } catch (IOException ioe) { try { reducer.close(); } catch (IOException ignored) {} try { out.close(reporter); } catch (IOException ignored) {} throw ioe; } } static class NewTrackingRecordWriter<K,V> extends org.apache.hadoop.mapreduce.RecordWriter<K,V> { private final org.apache.hadoop.mapreduce.RecordWriter<K,V> real; private final org.apache.hadoop.mapreduce.Counter outputRecordCounter; NewTrackingRecordWriter(org.apache.hadoop.mapreduce.RecordWriter<K,V> real, org.apache.hadoop.mapreduce.Counter recordCounter) { this.real = real; this.outputRecordCounter = recordCounter; } @Override public void close(TaskAttemptContext context) throws IOException, InterruptedException { real.close(context); } @Override public void write(K key, V value) throws IOException, InterruptedException { real.write(key,value); outputRecordCounter.increment(1); } } @SuppressWarnings("unchecked") private <INKEY,INVALUE,OUTKEY,OUTVALUE> void runNewReducer(JobConf job, final TaskUmbilicalProtocol umbilical, final TaskReporter reporter, RawKeyValueIterator rIter, RawComparator<INKEY> comparator, Class<INKEY> keyClass, Class<INVALUE> valueClass ) throws IOException,InterruptedException, ClassNotFoundException { // wrap value 
iterator to report progress. final RawKeyValueIterator rawIter = rIter; rIter = new RawKeyValueIterator() { public void close() throws IOException { rawIter.close(); } public DataInputBuffer getKey() throws IOException { return rawIter.getKey(); } public Progress getProgress() { return rawIter.getProgress(); } public DataInputBuffer getValue() throws IOException { return rawIter.getValue(); } public boolean next() throws IOException { boolean ret = rawIter.next(); reducePhase.set(rawIter.getProgress().get()); reporter.progress(); return ret; } }; // make a task context so we can get the classes org.apache.hadoop.mapreduce.TaskAttemptContext taskContext = new org.apache.hadoop.mapreduce.TaskAttemptContext(job, getTaskID()); // make a reducer org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE> reducer = (org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>) ReflectionUtils.newInstance(taskContext.getReducerClass(), job); org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> output = (org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE>) outputFormat.getRecordWriter(taskContext); org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> trackedRW = new NewTrackingRecordWriter<OUTKEY, OUTVALUE>(output, reduceOutputCounter); job.setBoolean("mapred.skip.on", isSkipping()); org.apache.hadoop.mapreduce.Reducer.Context reducerContext = createReduceContext(reducer, job, getTaskID(), rIter, reduceInputKeyCounter, reduceInputValueCounter, trackedRW, committer, reporter, comparator, keyClass, valueClass); reducer.run(reducerContext); output.close(reducerContext); } private static enum CopyOutputErrorType { NO_ERROR, READ_ERROR, OTHER_ERROR }; class ReduceCopier<K, V> implements MRConstants { /** Reference to the umbilical object */ private TaskUmbilicalProtocol umbilical; private final TaskReporter reporter; /** Reference to the task object */ /** Number of ms before timing out a copy */ private static final int STALLED_COPY_TIMEOUT = 3 * 
60 * 1000; /** Max events to fetch in one go from the tasktracker */ private static final int MAX_EVENTS_TO_FETCH = 10000; /** * our reduce task instance */ private ReduceTask reduceTask; /** * the list of map outputs currently being copied */ private List<MapOutputLocation> scheduledCopies; /** * the results of dispatched copy attempts */ private List<CopyResult> copyResults; /** * the number of outputs to copy in parallel */ private int numCopiers; /** * a number that is set to the max #fetches we'd schedule and then * pause the schduling */ private int maxInFlight; /** * the amount of time spent on fetching one map output before considering * it as failed and notifying the jobtracker about it. */ private int maxBackoff; /** * busy hosts from which copies are being backed off * Map of host -> next contact time */ private Map<String, Long> penaltyBox; /** * the set of unique hosts from which we are copying */ private Set<String> uniqueHosts; /** * A reference to the RamManager for writing the map outputs to. */ private ShuffleRamManager ramManager; /** * A reference to the local file system for writing the map outputs to. */ private FileSystem localFileSys; private FileSystem rfs; /** * Number of files to merge at a time */ private int ioSortFactor; /** * A reference to the throwable object (if merge throws an exception) */ private volatile Throwable mergeThrowable; /** * A flag to indicate when to exit localFS merge */ private volatile boolean exitLocalFSMerge = false; /** * A flag to indicate when to exit getMapEvents thread */ private volatile boolean exitGetMapEvents = false; /** * When we accumulate maxInMemOutputs number of files in ram, we merge/spill */ private final int maxInMemOutputs; /** * Usage threshold for in-memory output accumulation. */ private final float maxInMemCopyPer; /** * Maximum memory usage of map outputs to merge from memory into * the reduce, in bytes. */ private final long maxInMemReduce; /** * The threads for fetching the files. 
*/ private List<MapOutputCopier> copiers = null; /** * The object for metrics reporting. */ private ShuffleClientMetrics shuffleClientMetrics = null; /** * the minimum interval between tasktracker polls */ private static final long MIN_POLL_INTERVAL = 1000; /** * a list of map output locations for fetch retrials */ private List<MapOutputLocation> retryFetches = new ArrayList<MapOutputLocation>(); /** * The set of required map outputs */ private Set <TaskID> copiedMapOutputs = Collections.synchronizedSet(new TreeSet<TaskID>()); /** * The set of obsolete map taskids. */ private Set <TaskAttemptID> obsoleteMapIds = Collections.synchronizedSet(new TreeSet<TaskAttemptID>()); private Random random = null; /** * the max of all the map completion times */ private int maxMapRuntime; /** * Maximum number of fetch-retries per-map. */ private volatile int maxFetchRetriesPerMap; /** * Combiner runner, if a combiner is needed */ private CombinerRunner combinerRunner; /** * Resettable collector used for combine. */ private CombineOutputCollector combineCollector = null; /** * Maximum percent of failed fetch attempt before killing the reduce task. */ private static final float MAX_ALLOWED_FAILED_FETCH_ATTEMPT_PERCENT = 0.5f; /** * Minimum percent of progress required to keep the reduce alive. */ private static final float MIN_REQUIRED_PROGRESS_PERCENT = 0.5f; /** * Maximum percent of shuffle execution time required to keep the reducer alive. */ private static final float MAX_ALLOWED_STALL_TIME_PERCENT = 0.5f; /** * Minimum number of map fetch retries. */ private static final int MIN_FETCH_RETRIES_PER_MAP = 2; /** * The minimum percentage of maps yet to be copied, * which indicates end of shuffle */ private static final float MIN_PENDING_MAPS_PERCENT = 0.25f; /** * Maximum no. of unique maps from which we failed to fetch map-outputs * even after {@link #maxFetchRetriesPerMap} retries; after this the * reduce task is failed. 
*/ private int maxFailedUniqueFetches = 5; /** * The maps from which we fail to fetch map-outputs * even after {@link #maxFetchRetriesPerMap} retries. */ Set<TaskID> fetchFailedMaps = new TreeSet<TaskID>(); /** * A map of taskId -> no. of failed fetches */ Map<TaskAttemptID, Integer> mapTaskToFailedFetchesMap = new HashMap<TaskAttemptID, Integer>(); /** * Initial backoff interval (milliseconds) */ private static final int BACKOFF_INIT = 4000; /** * The interval for logging in the shuffle */ private static final int MIN_LOG_TIME = 60000; /** * List of in-memory map-outputs. */ private final List<MapOutput> mapOutputsFilesInMemory = Collections.synchronizedList(new LinkedList<MapOutput>()); /** * The map for (Hosts, List of MapIds from this Host) maintaining * map output locations */ private final Map<String, List<MapOutputLocation>> mapLocations = new ConcurrentHashMap<String, List<MapOutputLocation>>(); /** * This class contains the methods that should be used for metrics-reporting * the specific metrics for shuffle. This class actually reports the * metrics for the shuffle client (the ReduceTask), and hence the name * ShuffleClientMetrics. 
*/ class ShuffleClientMetrics implements Updater { private MetricsRecord shuffleMetrics = null; private int numFailedFetches = 0; private int numSuccessFetches = 0; private long numBytes = 0; private int numThreadsBusy = 0; ShuffleClientMetrics(JobConf conf) { MetricsContext metricsContext = MetricsUtil.getContext("mapred"); this.shuffleMetrics = MetricsUtil.createRecord(metricsContext, "shuffleInput"); this.shuffleMetrics.setTag("user", conf.getUser()); this.shuffleMetrics.setTag("jobName", conf.getJobName()); this.shuffleMetrics.setTag("jobId", ReduceTask.this.getJobID().toString()); this.shuffleMetrics.setTag("taskId", getTaskID().toString()); this.shuffleMetrics.setTag("sessionId", conf.getSessionId()); metricsContext.registerUpdater(this); } public synchronized void inputBytes(long numBytes) { this.numBytes += numBytes; } public synchronized void failedFetch() { ++numFailedFetches; } public synchronized void successFetch() { ++numSuccessFetches; } public synchronized void threadBusy() { ++numThreadsBusy; } public synchronized void threadFree() { --numThreadsBusy; } public void doUpdates(MetricsContext unused) { synchronized (this) { shuffleMetrics.incrMetric("shuffle_input_bytes", numBytes); shuffleMetrics.incrMetric("shuffle_failed_fetches", numFailedFetches); shuffleMetrics.incrMetric("shuffle_success_fetches", numSuccessFetches); if (numCopiers != 0) { shuffleMetrics.setMetric("shuffle_fetchers_busy_percent", 100*((float)numThreadsBusy/numCopiers)); } else { shuffleMetrics.setMetric("shuffle_fetchers_busy_percent", 0); } numBytes = 0; numSuccessFetches = 0; numFailedFetches = 0; } shuffleMetrics.update(); } } /** Represents the result of an attempt to copy a map output */ private class CopyResult { // the map output location against which a copy attempt was made private final MapOutputLocation loc; // the size of the file copied, -1 if the transfer failed private final long size; //a flag signifying whether a copy result is obsolete private static final int 
OBSOLETE = -2; private CopyOutputErrorType error = CopyOutputErrorType.NO_ERROR; CopyResult(MapOutputLocation loc, long size) { this.loc = loc; this.size = size; } CopyResult(MapOutputLocation loc, long size, CopyOutputErrorType error) { this.loc = loc; this.size = size; this.error = error; } public boolean getSuccess() { return size >= 0; } public boolean isObsolete() { return size == OBSOLETE; } public long getSize() { return size; } public String getHost() { return loc.getHost(); } public MapOutputLocation getLocation() { return loc; } public CopyOutputErrorType getError() { return error; } } private int nextMapOutputCopierId = 0; + private boolean reportReadErrorImmediately; /** * Abstraction to track a map-output. */ private class MapOutputLocation { TaskAttemptID taskAttemptId; TaskID taskId; String ttHost; URL taskOutput; public MapOutputLocation(TaskAttemptID taskAttemptId, String ttHost, URL taskOutput) { this.taskAttemptId = taskAttemptId; this.taskId = this.taskAttemptId.getTaskID(); this.ttHost = ttHost; this.taskOutput = taskOutput; } public TaskAttemptID getTaskAttemptId() { return taskAttemptId; } public TaskID getTaskId() { return taskId; } public String getHost() { return ttHost; } public URL getOutputLocation() { return taskOutput; } } /** Describes the output of a map; could either be on disk or in-memory. 
*/ private class MapOutput { final TaskID mapId; final TaskAttemptID mapAttemptId; final Path file; final Configuration conf; byte[] data; final boolean inMemory; long compressedSize; public MapOutput(TaskID mapId, TaskAttemptID mapAttemptId, Configuration conf, Path file, long size) { this.mapId = mapId; this.mapAttemptId = mapAttemptId; this.conf = conf; this.file = file; this.compressedSize = size; this.data = null; this.inMemory = false; } public MapOutput(TaskID mapId, TaskAttemptID mapAttemptId, byte[] data, int compressedLength) { this.mapId = mapId; this.mapAttemptId = mapAttemptId; this.file = null; this.conf = null; this.data = data; this.compressedSize = compressedLength; this.inMemory = true; } public void discard() throws IOException { if (inMemory) { data = null; } else { FileSystem fs = file.getFileSystem(conf); fs.delete(file, true); } } } class ShuffleRamManager implements RamManager { /* Maximum percentage of the in-memory limit that a single shuffle can * consume*/ private static final float MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION = 0.25f; /* Maximum percentage of shuffle-threads which can be stalled * simultaneously after which a merge is triggered. 
*/ private static final float MAX_STALLED_SHUFFLE_THREADS_FRACTION = 0.75f; private final long maxSize; private final long maxSingleShuffleLimit; private long size = 0; private Object dataAvailable = new Object(); private long fullSize = 0; private int numPendingRequests = 0; private int numRequiredMapOutputs = 0; private int numClosed = 0; private boolean closed = false; public ShuffleRamManager(Configuration conf) throws IOException { final float maxInMemCopyUse = conf.getFloat("mapred.job.shuffle.input.buffer.percent", 0.70f); if (maxInMemCopyUse > 1.0 || maxInMemCopyUse < 0.0) { throw new IOException("mapred.job.shuffle.input.buffer.percent" + maxInMemCopyUse); } maxSize = (long)Math.min( Runtime.getRuntime().maxMemory() * maxInMemCopyUse, Integer.MAX_VALUE); maxSingleShuffleLimit = (long)(maxSize * MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION); LOG.info("ShuffleRamManager: MemoryLimit=" + maxSize + ", MaxSingleShuffleLimit=" + maxSingleShuffleLimit); } public synchronized boolean reserve(int requestedSize, InputStream in) throws InterruptedException { // Wait till the request can be fulfilled... while ((size + requestedSize) > maxSize) { // Close the input... if (in != null) { try { in.close(); } catch (IOException ie) { LOG.info("Failed to close connection with: " + ie); } finally { in = null; } } // Track pending requests synchronized (dataAvailable) { ++numPendingRequests; dataAvailable.notify(); } // Wait for memory to free up wait(); // Track pending requests synchronized (dataAvailable) { --numPendingRequests; } } size += requestedSize; return (in != null); } public synchronized void unreserve(int requestedSize) { size -= requestedSize; synchronized (dataAvailable) { fullSize -= requestedSize; --numClosed; } // Notify the threads blocked on RamManager.reserve notifyAll(); } public boolean waitForDataToMerge() throws InterruptedException { boolean done = false; synchronized (dataAvailable) { // Start in-memory merge if manager has been closed or... 
while (!closed && // In-memory threshold exceeded and at least two segments // have been fetched (getPercentUsed() < maxInMemCopyPer || numClosed < 2) && // More than "mapred.inmem.merge.threshold" map outputs // have been fetched into memory (maxInMemOutputs <= 0 || numClosed < maxInMemOutputs) && // More than MAX... threads are blocked on the RamManager // or the blocked threads are the last map outputs to be // fetched. If numRequiredMapOutputs is zero, either // setNumCopiedMapOutputs has not been called (no map ouputs // have been fetched, so there is nothing to merge) or the // last map outputs being transferred without // contention, so a merge would be premature. (numPendingRequests < numCopiers*MAX_STALLED_SHUFFLE_THREADS_FRACTION && (0 == numRequiredMapOutputs || numPendingRequests < numRequiredMapOutputs))) { dataAvailable.wait(); } done = closed; } return done; } public void closeInMemoryFile(int requestedSize) { synchronized (dataAvailable) { fullSize += requestedSize; ++numClosed; dataAvailable.notify(); } } public void setNumCopiedMapOutputs(int numRequiredMapOutputs) { synchronized (dataAvailable) { this.numRequiredMapOutputs = numRequiredMapOutputs; dataAvailable.notify(); } } public void close() { synchronized (dataAvailable) { closed = true; LOG.info("Closed ram manager"); dataAvailable.notify(); } } private float getPercentUsed() { return (float)fullSize/maxSize; } boolean canFitInMemory(long requestedSize) { return (requestedSize < Integer.MAX_VALUE && requestedSize < maxSingleShuffleLimit); } } /** Copies map outputs as they become available */ private class MapOutputCopier extends Thread { // basic/unit connection timeout (in milliseconds) private final static int UNIT_CONNECT_TIMEOUT = 30 * 1000; // default read timeout (in milliseconds) private final static int DEFAULT_READ_TIMEOUT = 3 * 60 * 1000; private final int shuffleConnectionTimeout; private final int shuffleReadTimeout; private MapOutputLocation currentLocation = null; private int 
id = nextMapOutputCopierId++; private Reporter reporter; private boolean readError = false; // Decompression of map-outputs private CompressionCodec codec = null; private Decompressor decompressor = null; public MapOutputCopier(JobConf job, Reporter reporter) { setName("MapOutputCopier " + reduceTask.getTaskID() + "." + id); LOG.debug(getName() + " created"); this.reporter = reporter; shuffleConnectionTimeout = job.getInt("mapreduce.reduce.shuffle.connect.timeout", STALLED_COPY_TIMEOUT); shuffleReadTimeout = job.getInt("mapreduce.reduce.shuffle.read.timeout", DEFAULT_READ_TIMEOUT); if (job.getCompressMapOutput()) { Class<? extends CompressionCodec> codecClass = job.getMapOutputCompressorClass(DefaultCodec.class); codec = ReflectionUtils.newInstance(codecClass, job); decompressor = CodecPool.getDecompressor(codec); } } /** * Fail the current file that we are fetching * @return were we currently fetching? */ public synchronized boolean fail() { if (currentLocation != null) { finish(-1, CopyOutputErrorType.OTHER_ERROR); return true; } else { return false; } } /** * Get the current map output location. */ public synchronized MapOutputLocation getLocation() { return currentLocation; } private synchronized void start(MapOutputLocation loc) { currentLocation = loc; } private synchronized void finish(long size, CopyOutputErrorType error) { if (currentLocation != null) { LOG.debug(getName() + " finishing " + currentLocation + " =" + size); synchronized (copyResults) { copyResults.add(new CopyResult(currentLocation, size, error)); copyResults.notify(); } currentLocation = null; } } /** Loop forever and fetch map outputs as they become available. 
* The thread exits when it is interrupted by {@link ReduceTaskRunner} */ @Override public void run() { while (true) { try { MapOutputLocation loc = null; long size = -1; synchronized (scheduledCopies) { while (scheduledCopies.isEmpty()) { scheduledCopies.wait(); } loc = scheduledCopies.remove(0); } CopyOutputErrorType error = CopyOutputErrorType.OTHER_ERROR; readError = false; try { shuffleClientMetrics.threadBusy(); start(loc); size = copyOutput(loc); shuffleClientMetrics.successFetch(); error = CopyOutputErrorType.NO_ERROR; } catch (IOException e) { LOG.warn(reduceTask.getTaskID() + " copy failed: " + loc.getTaskAttemptId() + " from " + loc.getHost()); LOG.warn(StringUtils.stringifyException(e)); shuffleClientMetrics.failedFetch(); if (readError) { error = CopyOutputErrorType.READ_ERROR; } // Reset size = -1; } finally { shuffleClientMetrics.threadFree(); finish(size, error); } } catch (InterruptedException e) { break; // ALL DONE } catch (FSError e) { LOG.error("Task: " + reduceTask.getTaskID() + " - FSError: " + StringUtils.stringifyException(e)); try { umbilical.fsError(reduceTask.getTaskID(), e.getMessage()); } catch (IOException io) { LOG.error("Could not notify TT of FSError: " + StringUtils.stringifyException(io)); } } catch (Throwable th) { String msg = getTaskID() + " : Map output copy failure : " + StringUtils.stringifyException(th); reportFatalError(getTaskID(), th, msg); } } if (decompressor != null) { CodecPool.returnDecompressor(decompressor); } } /** Copies a a map output from a remote host, via HTTP. 
* @param loc the map output location to be copied
 * @return the number of bytes copied, or {@code CopyResult.OBSOLETE} if the
 *         output is no longer needed
 * @throws IOException if there is an error copying the file
 * @throws InterruptedException if the copier should give up
 */
private long copyOutput(MapOutputLocation loc
                        ) throws IOException, InterruptedException {
  // check if we still need to copy the output from this location
  if (copiedMapOutputs.contains(loc.getTaskId()) ||
      obsoleteMapIds.contains(loc.getTaskAttemptId())) {
    return CopyResult.OBSOLETE;
  }

  // a temp filename. If this file gets created in ramfs, we're fine,
  // else, we will check the localFS to find a suitable final location
  // for this path
  TaskAttemptID reduceId = reduceTask.getTaskID();
  Path filename =
      new Path("/" + TaskTracker.getIntermediateOutputDir(
          reduceId.getJobID().toString(), reduceId.toString())
          + "/map_" + loc.getTaskId().getId() + ".out");

  // Copy the map output to a temp file whose name is unique to this attempt
  Path tmpMapOutput = new Path(filename+"-"+id);

  // Copy the map output (either into RAM or onto local disk; a null
  // return means the fetch was rejected/failed validation upstream)
  MapOutput mapOutput = getMapOutput(loc, tmpMapOutput,
                                     reduceId.getTaskID().getId());
  if (mapOutput == null) {
    throw new IOException("Failed to fetch map-output for " +
                          loc.getTaskAttemptId() + " from " +
                          loc.getHost());
  }

  // The size of the map-output
  long bytes = mapOutput.compressedSize;

  // lock the ReduceTask while we do the rename
  synchronized (ReduceTask.this) {
    // Re-check under the lock: another copier may have fetched this
    // map's output while we were downloading.
    if (copiedMapOutputs.contains(loc.getTaskId())) {
      mapOutput.discard();
      return CopyResult.OBSOLETE;
    }

    // Special case: discard empty map-outputs
    if (bytes == 0) {
      try {
        mapOutput.discard();
      } catch (IOException ioe) {
        LOG.info("Couldn't discard output of " + loc.getTaskId());
      }

      // Note that we successfully copied the map-output
      noteCopiedMapOutput(loc.getTaskId());

      return bytes;
    }

    // Process map-output
    if (mapOutput.inMemory) {
      // Save it in the synchronized list of map-outputs
      mapOutputsFilesInMemory.add(mapOutput);
    } else {
      // Rename the temporary file to the final file;
      // ensure it is on the same partition
      tmpMapOutput = mapOutput.file;
      filename = new Path(tmpMapOutput.getParent(), filename.getName());
      if (!localFileSys.rename(tmpMapOutput, filename)) {
        localFileSys.delete(tmpMapOutput, true);
        bytes = -1;
        throw new IOException("Failed to rename map output " +
                              tmpMapOutput + " to " + filename);
      }

      synchronized (mapOutputFilesOnDisk) {
        addToMapOutputFilesOnDisk(localFileSys.getFileStatus(filename));
      }
    }

    // Note that we successfully copied the map-output
    noteCopiedMapOutput(loc.getTaskId());
  }

  return bytes;
}

/**
 * Save the map taskid whose output we just copied.
 * This function assumes that it has been synchronized on ReduceTask.this.
 *
 * @param taskId map taskid
 */
private void noteCopiedMapOutput(TaskID taskId) {
  copiedMapOutputs.add(taskId);
  // Tell the RAM manager how many outputs remain to be fetched.
  ramManager.setNumCopiedMapOutputs(numMaps - copiedMapOutputs.size());
}

/**
 * Get the map output into a local file (either in the inmemory fs or on the
 * local fs) from the remote server.
 * We use the file system so that we generate checksum files on the data.
* @param mapOutputLoc map-output to be fetched * @param filename the filename to write the data into * @param connectionTimeout number of milliseconds for connection timeout * @param readTimeout number of milliseconds for read timeout * @return the path of the file that got created * @throws IOException when something goes wrong */ private MapOutput getMapOutput(MapOutputLocation mapOutputLoc, Path filename, int reduce) throws IOException, InterruptedException { // Connect URLConnection connection = mapOutputLoc.getOutputLocation().openConnection(); InputStream input = getInputStream(connection, shuffleConnectionTimeout, shuffleReadTimeout); // Validate header from map output TaskAttemptID mapId = null; try { mapId = TaskAttemptID.forName(connection.getHeaderField(FROM_MAP_TASK)); } catch (IllegalArgumentException ia) { LOG.warn("Invalid map id ", ia); return null; } TaskAttemptID expectedMapId = mapOutputLoc.getTaskAttemptId(); if (!mapId.equals(expectedMapId)) { LOG.warn("data from wrong map:" + mapId + " arrived to reduce task " + reduce + ", where as expected map output should be from " + expectedMapId); return null; } long decompressedLength = Long.parseLong(connection.getHeaderField(RAW_MAP_OUTPUT_LENGTH)); long compressedLength = Long.parseLong(connection.getHeaderField(MAP_OUTPUT_LENGTH)); if (compressedLength < 0 || decompressedLength < 0) { LOG.warn(getName() + " invalid lengths in map output header: id: " + mapId + " compressed len: " + compressedLength + ", decompressed len: " + decompressedLength); return null; } int forReduce = (int)Integer.parseInt(connection.getHeaderField(FOR_REDUCE_TASK)); if (forReduce != reduce) { LOG.warn("data for the wrong reduce: " + forReduce + " with compressed len: " + compressedLength + ", decompressed len: " + decompressedLength + " arrived to reduce task " + reduce); return null; } LOG.info("header: " + mapId + ", compressed len: " + compressedLength + ", decompressed len: " + decompressedLength); //We will put a file 
in memory if it meets certain criteria: //1. The size of the (decompressed) file should be less than 25% of // the total inmem fs //2. There is space available in the inmem fs // Check if this map-output can be saved in-memory boolean shuffleInMemory = ramManager.canFitInMemory(decompressedLength); // Shuffle MapOutput mapOutput = null; if (shuffleInMemory) { LOG.info("Shuffling " + decompressedLength + " bytes (" + compressedLength + " raw bytes) " + "into RAM from " + mapOutputLoc.getTaskAttemptId()); mapOutput = shuffleInMemory(mapOutputLoc, connection, input, (int)decompressedLength, (int)compressedLength); } else { LOG.info("Shuffling " + decompressedLength + " bytes (" + compressedLength + " raw bytes) " + "into Local-FS from " + mapOutputLoc.getTaskAttemptId()); mapOutput = shuffleToDisk(mapOutputLoc, input, filename, compressedLength); } return mapOutput; } /** * The connection establishment is attempted multiple times and is given up * only on the last failure. Instead of connecting with a timeout of * X, we try connecting with a timeout of x < X but multiple times. */ private InputStream getInputStream(URLConnection connection, int connectionTimeout, int readTimeout) throws IOException { int unit = 0; if (connectionTimeout < 0) { throw new IOException("Invalid timeout " + "[timeout = " + connectionTimeout + " ms]"); } else if (connectionTimeout > 0) { unit = (UNIT_CONNECT_TIMEOUT > connectionTimeout) ? 
connectionTimeout : UNIT_CONNECT_TIMEOUT; } // set the read timeout to the total timeout connection.setReadTimeout(readTimeout); // set the connect timeout to the unit-connect-timeout connection.setConnectTimeout(unit); while (true) { try { connection.connect(); break; } catch (IOException ioe) { // update the total remaining connect-timeout connectionTimeout -= unit; // throw an exception if we have waited for timeout amount of time // note that the updated value if timeout is used here if (connectionTimeout == 0) { throw ioe; } // reset the connect timeout for the last try if (connectionTimeout < unit) { unit = connectionTimeout; // reset the connect time out for the final connect connection.setConnectTimeout(unit); } } } try { return connection.getInputStream(); } catch (IOException ioe) { readError = true; throw ioe; } } private MapOutput shuffleInMemory(MapOutputLocation mapOutputLoc, URLConnection connection, InputStream input, int mapOutputLength, int compressedLength) throws IOException, InterruptedException { // Reserve ram for the map-output boolean createdNow = ramManager.reserve(mapOutputLength, input); // Reconnect if we need to if (!createdNow) { // Reconnect try { connection = mapOutputLoc.getOutputLocation().openConnection(); input = getInputStream(connection, shuffleConnectionTimeout, shuffleReadTimeout); } catch (IOException ioe) { LOG.info("Failed reopen connection to fetch map-output from " + mapOutputLoc.getHost()); // Inform the ram-manager ramManager.closeInMemoryFile(mapOutputLength); ramManager.unreserve(mapOutputLength); throw ioe; } } IFileInputStream checksumIn = new IFileInputStream(input,compressedLength); input = checksumIn; // Are map-outputs compressed? 
if (codec != null) { decompressor.reset(); input = codec.createInputStream(input, decompressor); } // Copy map-output into an in-memory buffer byte[] shuffleData = new byte[mapOutputLength]; MapOutput mapOutput = new MapOutput(mapOutputLoc.getTaskId(), mapOutputLoc.getTaskAttemptId(), shuffleData, compressedLength); int bytesRead = 0; try { int n = input.read(shuffleData, 0, shuffleData.length); while (n > 0) { bytesRead += n; shuffleClientMetrics.inputBytes(n); // indicate we're making progress reporter.progress(); n = input.read(shuffleData, bytesRead, (shuffleData.length-bytesRead)); } LOG.info("Read " + bytesRead + " bytes from map-output for " + mapOutputLoc.getTaskAttemptId()); input.close(); } catch (IOException ioe) { LOG.info("Failed to shuffle from " + mapOutputLoc.getTaskAttemptId(), ioe); // Inform the ram-manager ramManager.closeInMemoryFile(mapOutputLength); ramManager.unreserve(mapOutputLength); // Discard the map-output try { mapOutput.discard(); } catch (IOException ignored) { LOG.info("Failed to discard map-output from " + mapOutputLoc.getTaskAttemptId(), ignored); } mapOutput = null; // Close the streams IOUtils.cleanup(LOG, input); // Re-throw readError = true; throw ioe; } // Close the in-memory file ramManager.closeInMemoryFile(mapOutputLength); // Sanity check if (bytesRead != mapOutputLength) { // Inform the ram-manager ramManager.unreserve(mapOutputLength); // Discard the map-output try { mapOutput.discard(); } catch (IOException ignored) { // IGNORED because we are cleaning up LOG.info("Failed to discard map-output from " + mapOutputLoc.getTaskAttemptId(), ignored); } mapOutput = null; throw new IOException("Incomplete map output received for " + mapOutputLoc.getTaskAttemptId() + " from " + mapOutputLoc.getOutputLocation() + " (" + bytesRead + " instead of " + mapOutputLength + ")" ); } // TODO: Remove this after a 'fix' for HADOOP-3647 if (mapOutputLength > 0) { DataInputBuffer dib = new DataInputBuffer(); dib.reset(shuffleData, 0, 
shuffleData.length); LOG.info("Rec #1 from " + mapOutputLoc.getTaskAttemptId() + " -> (" + WritableUtils.readVInt(dib) + ", " + WritableUtils.readVInt(dib) + ") from " + mapOutputLoc.getHost()); } return mapOutput; } private MapOutput shuffleToDisk(MapOutputLocation mapOutputLoc, InputStream input, Path filename, long mapOutputLength) throws IOException { // Find out a suitable location for the output on local-filesystem Path localFilename = lDirAlloc.getLocalPathForWrite(filename.toUri().getPath(), mapOutputLength, conf); MapOutput mapOutput = new MapOutput(mapOutputLoc.getTaskId(), mapOutputLoc.getTaskAttemptId(), conf, localFileSys.makeQualified(localFilename), mapOutputLength); // Copy data to local-disk OutputStream output = null; long bytesRead = 0; try { output = rfs.create(localFilename); byte[] buf = new byte[64 * 1024]; int n = -1; try { n = input.read(buf, 0, buf.length); } catch (IOException ioe) { readError = true; throw ioe; } while (n > 0) { bytesRead += n; shuffleClientMetrics.inputBytes(n); output.write(buf, 0, n); // indicate we're making progress reporter.progress(); try { n = input.read(buf, 0, buf.length); } catch (IOException ioe) { readError = true; throw ioe; } } LOG.info("Read " + bytesRead + " bytes from map-output for " + mapOutputLoc.getTaskAttemptId()); output.close(); input.close(); } catch (IOException ioe) { LOG.info("Failed to shuffle from " + mapOutputLoc.getTaskAttemptId(), ioe); // Discard the map-output try { mapOutput.discard(); } catch (IOException ignored) { LOG.info("Failed to discard map-output from " + mapOutputLoc.getTaskAttemptId(), ignored); } mapOutput = null; // Close the streams IOUtils.cleanup(LOG, input, output); // Re-throw throw ioe; } // Sanity check if (bytesRead != mapOutputLength) { try { mapOutput.discard(); } catch (Exception ioe) { // IGNORED because we are cleaning up LOG.info("Failed to discard map-output from " + mapOutputLoc.getTaskAttemptId(), ioe); } catch (Throwable t) { String msg = getTaskID() + " 
: Failed in shuffle to disk :" + StringUtils.stringifyException(t); reportFatalError(getTaskID(), t, msg); } mapOutput = null; throw new IOException("Incomplete map output received for " + mapOutputLoc.getTaskAttemptId() + " from " + mapOutputLoc.getOutputLocation() + " (" + bytesRead + " instead of " + mapOutputLength + ")" ); } return mapOutput; } } // MapOutputCopier private void configureClasspath(JobConf conf) throws IOException { // get the task and the current classloader which will become the parent Task task = ReduceTask.this; ClassLoader parent = conf.getClassLoader(); // get the work directory which holds the elements we are dynamically // adding to the classpath File workDir = new File(task.getJobFile()).getParentFile(); ArrayList<URL> urllist = new ArrayList<URL>(); // add the jars and directories to the classpath String jar = conf.getJar(); if (jar != null) { File jobCacheDir = new File(new Path(jar).getParent().toString()); File[] libs = new File(jobCacheDir, "lib").listFiles(); if (libs != null) { for (int i = 0; i < libs.length; i++) { urllist.add(libs[i].toURL()); } } urllist.add(new File(jobCacheDir, "classes").toURL()); urllist.add(jobCacheDir.toURL()); } urllist.add(workDir.toURL()); // create a new classloader with the old classloader as its parent // then set that classloader as the one used by the current jobconf URL[] urls = urllist.toArray(new URL[urllist.size()]); URLClassLoader loader = new URLClassLoader(urls, parent); conf.setClassLoader(loader); } public ReduceCopier(TaskUmbilicalProtocol umbilical, JobConf conf, TaskReporter reporter )throws ClassNotFoundException, IOException { configureClasspath(conf); this.reporter = reporter; this.shuffleClientMetrics = new ShuffleClientMetrics(conf); this.umbilical = umbilical; this.reduceTask = ReduceTask.this; this.scheduledCopies = new ArrayList<MapOutputLocation>(100); this.copyResults = new ArrayList<CopyResult>(100); this.numCopiers = conf.getInt("mapred.reduce.parallel.copies", 5); 
this.maxInFlight = 4 * numCopiers; this.maxBackoff = conf.getInt("mapred.reduce.copy.backoff", 300); Counters.Counter combineInputCounter = reporter.getCounter(Task.Counter.COMBINE_INPUT_RECORDS); this.combinerRunner = CombinerRunner.create(conf, getTaskID(), combineInputCounter, reporter, null); if (combinerRunner != null) { combineCollector = new CombineOutputCollector(reduceCombineOutputCounter); } this.ioSortFactor = conf.getInt("io.sort.factor", 10); // the exponential backoff formula // backoff (t) = init * base^(t-1) // so for max retries we get // backoff(1) + .... + backoff(max_fetch_retries) ~ max // solving which we get // max_fetch_retries ~ log((max * (base - 1) / init) + 1) / log(base) // for the default value of max = 300 (5min) we get max_fetch_retries = 6 // the order is 4,8,16,32,64,128. sum of which is 252 sec = 4.2 min // optimizing for the base 2 this.maxFetchRetriesPerMap = Math.max(MIN_FETCH_RETRIES_PER_MAP, getClosestPowerOf2((this.maxBackoff * 1000 / BACKOFF_INIT) + 1)); this.maxFailedUniqueFetches = Math.min(numMaps, this.maxFailedUniqueFetches); this.maxInMemOutputs = conf.getInt("mapred.inmem.merge.threshold", 1000); this.maxInMemCopyPer = conf.getFloat("mapred.job.shuffle.merge.percent", 0.66f); final float maxRedPer = conf.getFloat("mapred.job.reduce.input.buffer.percent", 0f); if (maxRedPer > 1.0 || maxRedPer < 0.0) { throw new IOException("mapred.job.reduce.input.buffer.percent" + maxRedPer); } this.maxInMemReduce = (int)Math.min( Runtime.getRuntime().maxMemory() * maxRedPer, Integer.MAX_VALUE); // Setup the RamManager ramManager = new ShuffleRamManager(conf); localFileSys = FileSystem.getLocal(conf); rfs = ((LocalFileSystem)localFileSys).getRaw(); // hosts -> next contact time this.penaltyBox = new LinkedHashMap<String, Long>(); // hostnames this.uniqueHosts = new HashSet<String>(); // Seed the random number generator with a reasonably globally unique seed long randomSeed = System.nanoTime() + 
(long)Math.pow(this.reduceTask.getPartition(), (this.reduceTask.getPartition()%10) ); this.random = new Random(randomSeed); this.maxMapRuntime = 0; + this.reportReadErrorImmediately = + conf.getBoolean("mapreduce.reduce.shuffle.notify.readerror", true); } private boolean busyEnough(int numInFlight) { return numInFlight > maxInFlight; } public boolean fetchOutputs() throws IOException { int totalFailures = 0; int numInFlight = 0, numCopied = 0; DecimalFormat mbpsFormat = new DecimalFormat("0.00"); final Progress copyPhase = reduceTask.getProgress().phase(); LocalFSMerger localFSMergerThread = null; InMemFSMergeThread inMemFSMergeThread = null; GetMapEventsThread getMapEventsThread = null; for (int i = 0; i < numMaps; i++) { copyPhase.addPhase(); // add sub-phase per file } copiers = new ArrayList<MapOutputCopier>(numCopiers); // start all the copying threads for (int i=0; i < numCopiers; i++) { MapOutputCopier copier = new MapOutputCopier(conf, reporter); copiers.add(copier); copier.start(); } //start the on-disk-merge thread localFSMergerThread = new LocalFSMerger((LocalFileSystem)localFileSys); //start the in memory merger thread inMemFSMergeThread = new InMemFSMergeThread(); localFSMergerThread.start(); inMemFSMergeThread.start(); // start the map events thread getMapEventsThread = new GetMapEventsThread(); getMapEventsThread.start(); // start the clock for bandwidth measurement long startTime = System.currentTimeMillis(); long currentTime = startTime; long lastProgressTime = startTime; long lastOutputTime = 0; // loop until we get all required outputs while (copiedMapOutputs.size() < numMaps && mergeThrowable == null) { currentTime = System.currentTimeMillis(); boolean logNow = false; if (currentTime - lastOutputTime > MIN_LOG_TIME) { lastOutputTime = currentTime; logNow = true; } if (logNow) { LOG.info(reduceTask.getTaskID() + " Need another " + (numMaps - copiedMapOutputs.size()) + " map output(s) " + "where " + numInFlight + " is already in progress"); } // 
Put the hash entries for the failed fetches. Iterator<MapOutputLocation> locItr = retryFetches.iterator(); while (locItr.hasNext()) { MapOutputLocation loc = locItr.next(); List<MapOutputLocation> locList = mapLocations.get(loc.getHost()); // Check if the list exists. Map output location mapping is cleared // once the jobtracker restarts and is rebuilt from scratch. // Note that map-output-location mapping will be recreated and hence // we continue with the hope that we might find some locations // from the rebuild map. if (locList != null) { // Add to the beginning of the list so that this map is //tried again before the others and we can hasten the //re-execution of this map should there be a problem locList.add(0, loc); } } if (retryFetches.size() > 0) { LOG.info(reduceTask.getTaskID() + ": " + "Got " + retryFetches.size() + " map-outputs from previous failures"); } // clear the "failed" fetches hashmap retryFetches.clear(); // now walk through the cache and schedule what we can int numScheduled = 0; int numDups = 0; synchronized (scheduledCopies) { // Randomize the map output locations to prevent // all reduce-tasks swamping the same tasktracker List<String> hostList = new ArrayList<String>(); hostList.addAll(mapLocations.keySet()); Collections.shuffle(hostList, this.random); Iterator<String> hostsItr = hostList.iterator(); while (hostsItr.hasNext()) { String host = hostsItr.next(); List<MapOutputLocation> knownOutputsByLoc = mapLocations.get(host); // Check if the list exists. Map output location mapping is // cleared once the jobtracker restarts and is rebuilt from // scratch. // Note that map-output-location mapping will be recreated and // hence we continue with the hope that we might find some // locations from the rebuild map and add then for fetching. 
if (knownOutputsByLoc == null || knownOutputsByLoc.size() == 0) { continue; } //Identify duplicate hosts here if (uniqueHosts.contains(host)) { numDups += knownOutputsByLoc.size(); continue; } Long penaltyEnd = penaltyBox.get(host); boolean penalized = false; if (penaltyEnd != null) { if (currentTime < penaltyEnd.longValue()) { penalized = true; } else { penaltyBox.remove(host); } } if (penalized) continue; synchronized (knownOutputsByLoc) { locItr = knownOutputsByLoc.iterator(); while (locItr.hasNext()) { MapOutputLocation loc = locItr.next(); // Do not schedule fetches from OBSOLETE maps if (obsoleteMapIds.contains(loc.getTaskAttemptId())) { locItr.remove(); continue; } uniqueHosts.add(host); scheduledCopies.add(loc); locItr.remove(); // remove from knownOutputs numInFlight++; numScheduled++; break; //we have a map from this host } } } scheduledCopies.notifyAll(); } if (numScheduled > 0 || logNow) { LOG.info(reduceTask.getTaskID() + " Scheduled " + numScheduled + " outputs (" + penaltyBox.size() + " slow hosts and" + numDups + " dup hosts)"); } if (penaltyBox.size() > 0 && logNow) { LOG.info("Penalized(slow) Hosts: "); for (String host : penaltyBox.keySet()) { LOG.info(host + " Will be considered after: " + ((penaltyBox.get(host) - currentTime)/1000) + " seconds."); } } // if we have no copies in flight and we can't schedule anything // new, just wait for a bit try { if (numInFlight == 0 && numScheduled == 0) { // we should indicate progress as we don't want TT to think // we're stuck and kill us reporter.progress(); Thread.sleep(5000); } } catch (InterruptedException e) { } // IGNORE while (numInFlight > 0 && mergeThrowable == null) { LOG.debug(reduceTask.getTaskID() + " numInFlight = " + numInFlight); //the call to getCopyResult will either //1) return immediately with a null or a valid CopyResult object, // or //2) if the numInFlight is above maxInFlight, return with a // CopyResult object after getting a notification from a // fetcher thread, //So, when 
getCopyResult returns null, we can be sure that //we aren't busy enough and we should go and get more mapcompletion //events from the tasktracker CopyResult cr = getCopyResult(numInFlight); if (cr == null) { break; } if (cr.getSuccess()) { // a successful copy numCopied++; lastProgressTime = System.currentTimeMillis(); reduceShuffleBytes.increment(cr.getSize()); long secsSinceStart = (System.currentTimeMillis()-startTime)/1000+1; float mbs = ((float)reduceShuffleBytes.getCounter())/(1024*1024); float transferRate = mbs/secsSinceStart; copyPhase.startNextPhase(); copyPhase.setStatus("copy (" + numCopied + " of " + numMaps + " at " + mbpsFormat.format(transferRate) + " MB/s)"); // Note successful fetch for this mapId to invalidate // (possibly) old fetch-failures fetchFailedMaps.remove(cr.getLocation().getTaskId()); } else if (cr.isObsolete()) { //ignore LOG.info(reduceTask.getTaskID() + " Ignoring obsolete copy result for Map Task: " + cr.getLocation().getTaskAttemptId() + " from host: " + cr.getHost()); } else { retryFetches.add(cr.getLocation()); // note the failed-fetch TaskAttemptID mapTaskId = cr.getLocation().getTaskAttemptId(); TaskID mapId = cr.getLocation().getTaskId(); totalFailures++; Integer noFailedFetches = mapTaskToFailedFetchesMap.get(mapTaskId); noFailedFetches = (noFailedFetches == null) ? 
1 : (noFailedFetches + 1); mapTaskToFailedFetchesMap.put(mapTaskId, noFailedFetches); LOG.info("Task " + getTaskID() + ": Failed fetch #" + noFailedFetches + " from " + mapTaskId); // half the number of max fetch retries per map during // the end of shuffle int fetchRetriesPerMap = maxFetchRetriesPerMap; int pendingCopies = numMaps - numCopied; // The check noFailedFetches != maxFetchRetriesPerMap is // required to make sure of the notification in case of a // corner case : // when noFailedFetches reached maxFetchRetriesPerMap and // reducer reached the end of shuffle, then we may miss sending // a notification if the difference between // noFailedFetches and fetchRetriesPerMap is not divisible by 2 if (pendingCopies <= numMaps * MIN_PENDING_MAPS_PERCENT && noFailedFetches != maxFetchRetriesPerMap) { fetchRetriesPerMap = fetchRetriesPerMap >> 1; } // did the fetch fail too many times? // using a hybrid technique for notifying the jobtracker. // a. the first notification is sent after max-retries // b. subsequent notifications are sent after 2 retries. - // c. send notification immediately if it is a read error. - if (cr.getError().equals(CopyOutputErrorType.READ_ERROR) || + // c. send notification immediately if it is a read error and + // "mapreduce.reduce.shuffle.notify.readerror" set true. + if ((reportReadErrorImmediately && cr.getError().equals( + CopyOutputErrorType.READ_ERROR)) || ((noFailedFetches >= fetchRetriesPerMap) && ((noFailedFetches - fetchRetriesPerMap) % 2) == 0)) { synchronized (ReduceTask.this) { taskStatus.addFetchFailedMap(mapTaskId); reporter.progress(); LOG.info("Failed to fetch map-output from " + mapTaskId + " even after MAX_FETCH_RETRIES_PER_MAP retries... " + " or it is a read error, " + " reporting to the JobTracker"); } } // note unique failed-fetch maps if (noFailedFetches == maxFetchRetriesPerMap) { fetchFailedMaps.add(mapId); // did we have too many unique failed-fetch maps? // and did we fail on too many fetch attempts? 
// and did we progress enough // or did we wait for too long without any progress? // check if the reducer is healthy boolean reducerHealthy = (((float)totalFailures / (totalFailures + numCopied)) < MAX_ALLOWED_FAILED_FETCH_ATTEMPT_PERCENT); // check if the reducer has progressed enough boolean reducerProgressedEnough = (((float)numCopied / numMaps) >= MIN_REQUIRED_PROGRESS_PERCENT); // check if the reducer is stalled for a long time // duration for which the reducer is stalled int stallDuration = (int)(System.currentTimeMillis() - lastProgressTime); // duration for which the reducer ran with progress int shuffleProgressDuration = (int)(lastProgressTime - startTime); // min time the reducer should run without getting killed int minShuffleRunDuration = (shuffleProgressDuration > maxMapRuntime) ? shuffleProgressDuration : maxMapRuntime; boolean reducerStalled = (((float)stallDuration / minShuffleRunDuration) >= MAX_ALLOWED_STALL_TIME_PERCENT); // kill if not healthy and has insufficient progress if ((fetchFailedMaps.size() >= maxFailedUniqueFetches || fetchFailedMaps.size() == (numMaps - copiedMapOutputs.size())) && !reducerHealthy && (!reducerProgressedEnough || reducerStalled)) { LOG.fatal("Shuffle failed with too many fetch failures " + "and insufficient progress!" + "Killing task " + getTaskID() + "."); umbilical.shuffleError(getTaskID(), "Exceeded MAX_FAILED_UNIQUE_FETCHES;" + " bailing-out."); } } // back off exponentially until num_retries <= max_retries // back off by max_backoff/2 on subsequent failed attempts currentTime = System.currentTimeMillis(); int currentBackOff = noFailedFetches <= fetchRetriesPerMap ? 
BACKOFF_INIT * (1 << (noFailedFetches - 1)) : (this.maxBackoff * 1000 / 2); // If it is read error, // back off for maxMapRuntime/2 // during end of shuffle, // backoff for min(maxMapRuntime/2, currentBackOff) if (cr.getError().equals(CopyOutputErrorType.READ_ERROR)) { int backOff = maxMapRuntime >> 1; if (pendingCopies <= numMaps * MIN_PENDING_MAPS_PERCENT) { backOff = Math.min(backOff, currentBackOff); } currentBackOff = backOff; } penaltyBox.put(cr.getHost(), currentTime + currentBackOff); LOG.warn(reduceTask.getTaskID() + " adding host " + cr.getHost() + " to penalty box, next contact in " + (currentBackOff/1000) + " seconds"); } uniqueHosts.remove(cr.getHost()); numInFlight--; } } // all done, inform the copiers to exit exitGetMapEvents= true; try { getMapEventsThread.join(); LOG.info("getMapsEventsThread joined."); } catch (InterruptedException ie) { LOG.info("getMapsEventsThread threw an exception: " + StringUtils.stringifyException(ie)); } synchronized (copiers) { synchronized (scheduledCopies) { for (MapOutputCopier copier : copiers) { copier.interrupt(); } copiers.clear(); } } // copiers are done, exit and notify the waiting merge threads synchronized (mapOutputFilesOnDisk) { exitLocalFSMerge = true; mapOutputFilesOnDisk.notify(); } ramManager.close(); //Do a merge of in-memory files (if there are any) if (mergeThrowable == null) { try { // Wait for the on-disk merge to complete localFSMergerThread.join(); LOG.info("Interleaved on-disk merge complete: " + mapOutputFilesOnDisk.size() + " files left."); //wait for an ongoing merge (if it is in flight) to complete inMemFSMergeThread.join(); LOG.info("In-memory merge complete: " + mapOutputsFilesInMemory.size() + " files left."); } catch (InterruptedException ie) { LOG.warn(reduceTask.getTaskID() + " Final merge of the inmemory files threw an exception: " + StringUtils.stringifyException(ie)); // check if the last merge generated an error if (mergeThrowable != null) { mergeThrowable = ie; } return false; } } 
return mergeThrowable == null && copiedMapOutputs.size() == numMaps; } private long createInMemorySegments( List<Segment<K, V>> inMemorySegments, long leaveBytes) throws IOException { long totalSize = 0L; synchronized (mapOutputsFilesInMemory) { // fullSize could come from the RamManager, but files can be // closed but not yet present in mapOutputsFilesInMemory long fullSize = 0L; for (MapOutput mo : mapOutputsFilesInMemory) { fullSize += mo.data.length; } while(fullSize > leaveBytes) { MapOutput mo = mapOutputsFilesInMemory.remove(0); totalSize += mo.data.length; fullSize -= mo.data.length; Reader<K, V> reader = new InMemoryReader<K, V>(ramManager, mo.mapAttemptId, mo.data, 0, mo.data.length); Segment<K, V> segment = new Segment<K, V>(reader, true); inMemorySegments.add(segment); } } return totalSize; } /** * Create a RawKeyValueIterator from copied map outputs. All copying * threads have exited, so all of the map outputs are available either in * memory or on disk. We also know that no merges are in progress, so * synchronization is more lax, here. * * The iterator returned must satisfy the following constraints: * 1. Fewer than io.sort.factor files may be sources * 2. No more than maxInMemReduce bytes of map outputs may be resident * in memory when the reduce begins * * If we must perform an intermediate merge to satisfy (1), then we can * keep the excluded outputs from (2) in memory and include them in the * first merge pass. If not, then said outputs must be written to disk * first. 
*/
@SuppressWarnings("unchecked")
private RawKeyValueIterator createKVIterator(
    JobConf job, FileSystem fs, Reporter reporter) throws IOException {

  // merge config params
  Class<K> keyClass = (Class<K>)job.getMapOutputKeyClass();
  Class<V> valueClass = (Class<V>)job.getMapOutputValueClass();
  boolean keepInputs = job.getKeepFailedTaskFiles();
  final Path tmpDir = new Path(getTaskID().toString());
  final RawComparator<K> comparator =
      (RawComparator<K>)job.getOutputKeyComparator();

  // segments required to vacate memory
  List<Segment<K,V>> memDiskSegments = new ArrayList<Segment<K,V>>();
  long inMemToDiskBytes = 0;
  if (mapOutputsFilesInMemory.size() > 0) {
    TaskID mapId = mapOutputsFilesInMemory.get(0).mapId;
    // Drain in-memory outputs down to the maxInMemReduce budget.
    inMemToDiskBytes = createInMemorySegments(memDiskSegments,
                                              maxInMemReduce);
    final int numMemDiskSegments = memDiskSegments.size();
    if (numMemDiskSegments > 0 &&
        ioSortFactor > mapOutputFilesOnDisk.size()) {
      // must spill to disk, but can't retain in-mem for intermediate merge
      final Path outputPath =
          mapOutputFile.getInputFileForWrite(mapId, reduceTask.getTaskID(),
                                             inMemToDiskBytes);
      final RawKeyValueIterator rIter =
          Merger.merge(job, fs, keyClass, valueClass,
                       memDiskSegments, numMemDiskSegments, tmpDir,
                       comparator, reporter, spilledRecordsCounter, null);
      final Writer writer = new Writer(job, fs, outputPath,
                                       keyClass, valueClass, codec, null);
      try {
        Merger.writeFile(rIter, writer, reporter, job);
        addToMapOutputFilesOnDisk(fs.getFileStatus(outputPath));
      } catch (Exception e) {
        // Clean up the partial spill before propagating.
        if (null != outputPath) {
          fs.delete(outputPath, true);
        }
        throw new IOException("Final merge failed", e);
      } finally {
        if (null != writer) {
          writer.close();
        }
      }
      LOG.info("Merged " + numMemDiskSegments + " segments, " +
               inMemToDiskBytes + " bytes to disk to satisfy " +
               "reduce memory limit");
      inMemToDiskBytes = 0;
      memDiskSegments.clear();
    } else if (inMemToDiskBytes != 0) {
      LOG.info("Keeping " + numMemDiskSegments + " segments, " +
               inMemToDiskBytes + " bytes in memory for " +
               "intermediate, on-disk merge");
    }
  }

  // segments on disk
  List<Segment<K,V>> diskSegments = new ArrayList<Segment<K,V>>();
  long onDiskBytes = inMemToDiskBytes;
  Path[] onDisk = getMapFiles(fs, false);
  for (Path file : onDisk) {
    onDiskBytes += fs.getFileStatus(file).getLen();
    diskSegments.add(new Segment<K, V>(job, fs, file, codec, keepInputs));
  }
  LOG.info("Merging " + onDisk.length + " files, " +
           onDiskBytes + " bytes from disk");
  // Merge smallest segments first.
  Collections.sort(diskSegments, new Comparator<Segment<K,V>>() {
    public int compare(Segment<K, V> o1, Segment<K, V> o2) {
      if (o1.getLength() == o2.getLength()) {
        return 0;
      }
      return o1.getLength() < o2.getLength() ? -1 : 1;
    }
  });

  // build final list of segments from merged backed by disk + in-mem
  List<Segment<K,V>> finalSegments = new ArrayList<Segment<K,V>>();
  long inMemBytes = createInMemorySegments(finalSegments, 0);
  LOG.info("Merging " + finalSegments.size() + " segments, " +
           inMemBytes + " bytes from memory into reduce");
  if (0 != onDiskBytes) {
    final int numInMemSegments = memDiskSegments.size();
    diskSegments.addAll(0, memDiskSegments);
    memDiskSegments.clear();
    RawKeyValueIterator diskMerge = Merger.merge(
        job, fs, keyClass, valueClass, codec, diskSegments,
        ioSortFactor, numInMemSegments, tmpDir, comparator,
        reporter, false, spilledRecordsCounter, null);
    diskSegments.clear();
    if (0 == finalSegments.size()) {
      return diskMerge;
    }
    // Wrap the disk-side merge as one segment of the final merge.
    finalSegments.add(new Segment<K,V>(
        new RawKVIteratorReader(diskMerge, onDiskBytes), true));
  }
  return Merger.merge(job, fs, keyClass, valueClass,
                      finalSegments, finalSegments.size(), tmpDir,
                      comparator, reporter, spilledRecordsCounter, null);
}

/**
 * Adapter exposing a {@code RawKeyValueIterator} through the
 * {@code IFile.Reader} interface so a merged stream can be fed back
 * into another merge as a single segment.
 */
class RawKVIteratorReader extends IFile.Reader<K,V> {

  private final RawKeyValueIterator kvIter;

  public RawKVIteratorReader(RawKeyValueIterator kvIter, long size)
      throws IOException {
    super(null, null, size, null, spilledRecordsCounter);
    this.kvIter = kvIter;
  }

  public boolean next(DataInputBuffer key, DataInputBuffer value)
      throws IOException {
    if (kvIter.next()) {
      final DataInputBuffer kb = kvIter.getKey();
      final DataInputBuffer vb = kvIter.getValue();
      final int kp = kb.getPosition();
      final int klen = kb.getLength() - kp;
      key.reset(kb.getData(), kp, klen);
      final int vp = vb.getPosition();
      final int vlen = vb.getLength() - vp;
      value.reset(vb.getData(), vp, vlen);
      bytesRead += klen + vlen;
      return true;
    }
    return false;
  }

  public long getPosition() throws IOException {
    return bytesRead;
  }

  public void close() throws IOException {
    kvIter.close();
  }
}

/**
 * Take the next finished copy result off the shared queue, or return
 * null when not enough copies are in flight to justify blocking.
 */
private CopyResult getCopyResult(int numInFlight) {
  synchronized (copyResults) {
    while (copyResults.isEmpty()) {
      try {
        // The idea is that if we have scheduled enough, we can wait until
        // we hear from one of the copiers.
        if (busyEnough(numInFlight)) {
          copyResults.wait();
        } else {
          return null;
        }
      } catch (InterruptedException e) { }
    }
    return copyResults.remove(0);
  }
}

// Record a new on-disk map output and wake the on-disk merger, which
// waits on mapOutputFilesOnDisk.
private void addToMapOutputFilesOnDisk(FileStatus status) {
  synchronized (mapOutputFilesOnDisk) {
    mapOutputFilesOnDisk.add(status);
    mapOutputFilesOnDisk.notify();
  }
}

/** Starts merging the local copy (on disk) of the map's output so that
 * most of the reducer's input is sorted i.e overlapping shuffle
 * and merge phases.
*/
private class LocalFSMerger extends Thread {

  private LocalFileSystem localFileSys;

  public LocalFSMerger(LocalFileSystem fs) {
    this.localFileSys = fs;
    setName("Thread for merging on-disk files");
    setDaemon(true);
  }

  @SuppressWarnings("unchecked")
  public void run() {
    try {
      LOG.info(reduceTask.getTaskID() + " Thread started: " + getName());
      while(!exitLocalFSMerge){
        synchronized (mapOutputFilesOnDisk) {
          // sleep until at least (2 * io.sort.factor - 1) files have
          // accumulated on disk, or until we are told to exit
          while (!exitLocalFSMerge &&
              mapOutputFilesOnDisk.size() < (2 * ioSortFactor - 1)) {
            LOG.info(reduceTask.getTaskID() + " Thread waiting: " +
                     getName());
            mapOutputFilesOnDisk.wait();
          }
        }
        if(exitLocalFSMerge) {//to avoid running one extra time in the end
          break;
        }
        List<Path> mapFiles = new ArrayList<Path>();
        long approxOutputSize = 0;
        int bytesPerSum =
          reduceTask.getConf().getInt("io.bytes.per.checksum", 512);
        LOG.info(reduceTask.getTaskID() + "We have  " +
                 mapOutputFilesOnDisk.size() + " map outputs on disk. " +
                 "Triggering merge of " + ioSortFactor + " files");
        // 1. Prepare the list of files to be merged. This list is prepared
        // using a list of map output files on disk. Currently we merge
        // io.sort.factor files into 1.
        synchronized (mapOutputFilesOnDisk) {
          for (int i = 0; i < ioSortFactor; ++i) {
            // mapOutputFilesOnDisk keeps files sorted; merge the smallest
            FileStatus filestatus = mapOutputFilesOnDisk.first();
            mapOutputFilesOnDisk.remove(filestatus);
            mapFiles.add(filestatus.getPath());
            approxOutputSize += filestatus.getLen();
          }
        }

        // sanity check
        if (mapFiles.size() == 0) {
          return;
        }

        // add the checksum length
        approxOutputSize += ChecksumFileSystem
                            .getChecksumLength(approxOutputSize,
                                               bytesPerSum);

        // 2. Start the on-disk merge process
        Path outputPath =
          lDirAlloc.getLocalPathForWrite(mapFiles.get(0).toString(),
                                         approxOutputSize, conf)
          .suffix(".merged");
        Writer writer =
          new Writer(conf,rfs, outputPath,
                     conf.getMapOutputKeyClass(),
                     conf.getMapOutputValueClass(),
                     codec, null);
        RawKeyValueIterator iter = null;
        Path tmpDir = new Path(reduceTask.getTaskID().toString());
        try {
          iter = Merger.merge(conf, rfs,
                              conf.getMapOutputKeyClass(),
                              conf.getMapOutputValueClass(),
                              codec,
                              mapFiles.toArray(new Path[mapFiles.size()]),
                              true, ioSortFactor, tmpDir,
                              conf.getOutputKeyComparator(), reporter,
                              spilledRecordsCounter, null);
          Merger.writeFile(iter, writer, reporter, conf);
          writer.close();
        } catch (Exception e) {
          // discard the partial merge output before propagating the failure
          localFileSys.delete(outputPath, true);
          throw new IOException (StringUtils.stringifyException(e));
        }

        // publish the merged file and wake anyone waiting for disk outputs
        synchronized (mapOutputFilesOnDisk) {
          addToMapOutputFilesOnDisk(localFileSys.getFileStatus(outputPath));
        }

        LOG.info(reduceTask.getTaskID() +
                 " Finished merging " + mapFiles.size() +
                 " map output files on disk of total-size " +
                 approxOutputSize + "." +
                 " Local output file is " + outputPath + " of size " +
                 localFileSys.getFileStatus(outputPath).getLen());
      }
    } catch (Exception e) {
      // remember the first failure; the main thread checks mergeThrowable
      LOG.warn(reduceTask.getTaskID()
               + " Merging of the local FS files threw an exception: "
               + StringUtils.stringifyException(e));
      if (mergeThrowable == null) {
        mergeThrowable = e;
      }
    } catch (Throwable t) {
      // non-Exception Throwables are treated as fatal for the task
      String msg = getTaskID() + " : Failed to merge on the local FS"
                   + StringUtils.stringifyException(t);
      reportFatalError(getTaskID(), t, msg);
    }
  }
}

/**
 * Daemon thread that repeatedly asks ramManager whether in-memory map
 * outputs should be merged, and merges them to a single on-disk spill
 * via doInMemMerge() until told to exit.
 */
private class InMemFSMergeThread extends Thread {

  public InMemFSMergeThread() {
    setName("Thread for merging in memory files");
    setDaemon(true);
  }

  public void run() {
    LOG.info(reduceTask.getTaskID() + " Thread started: " + getName());
    try {
      boolean exit = false;
      do {
        // waitForDataToMerge() returns true when this thread should exit;
        // presumably it blocks until enough in-memory data is available —
        // TODO confirm against the ShuffleRamManager implementation
        exit = ramManager.waitForDataToMerge();
        if (!exit) {
          doInMemMerge();
        }
      } while (!exit);
    } catch (Exception e) {
      // record the failure for the main thread to observe
      LOG.warn(reduceTask.getTaskID() +
               " Merge of the inmemory files threw an exception: "
               + StringUtils.stringifyException(e));
      ReduceCopier.this.mergeThrowable = e;
    } catch (Throwable t) {
      // non-Exception Throwables are treated as fatal for the task
      String msg = getTaskID() + " : Failed to merge in memory"
                   + StringUtils.stringifyException(t);
      reportFatalError(getTaskID(), t, msg);
    }
  }

  // Merges all current in-memory map outputs into one on-disk IFile.
  @SuppressWarnings("unchecked")
  private void doInMemMerge() throws IOException{
    if (mapOutputsFilesInMemory.size() == 0) {
      // nothing buffered in memory; nothing to do
      return;
    }

    //name this output file same as the name of the first file that is
    //there in the current list of inmem files (this is guaranteed to
    //be absent on the disk currently. So we don't overwrite a prev.
    //created spill). Also we need to create the output file now since
    //it is not guaranteed that this file will be present after merge
    //is called (we delete empty files as soon as we see them
    //in the merge method)

    //figure out the mapId
    TaskID mapId = mapOutputsFilesInMemory.get(0).mapId;

    List<Segment<K, V>> inMemorySegments = new ArrayList<Segment<K,V>>();
    long mergeOutputSize = createInMemorySegments(inMemorySegments, 0);
    int noInMemorySegments = inMemorySegments.size();

    Path outputPath = mapOutputFile.getInputFileForWrite(mapId,
                      reduceTask.getTaskID(), mergeOutputSize);

    // NOTE(review): this chunk of the file ends mid-statement below; the
    // Writer construction (and the rest of doInMemMerge) continues past
    // the end of this view.
    Writer writer =
      new Writer(conf, rfs, outputPath,
                 conf.getMapOutputKeyClass(),
jaxlaw/hadoop-common
c22e7b0d9d7f9bef1c337236775588826f8ecf4b
MAPREDUCE-353. Allow shuffle read and connection timeouts to be configurable. Contributed by Amareshwari Sriramadasu.
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 1e3962b..a454ec8 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,451 +1,455 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383002 + + MAPREDUCE-353. Allow shuffle read and connection timeouts to be + configurable. (Amareshwari Sriramadasu via acmurthy) + HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. 
(Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. 
Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. 
Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. 
Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. 
MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. 
http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. 
http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. 
Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. 
(Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/mapred/mapred-default.xml b/src/mapred/mapred-default.xml index bd21f89..f86f5bd 100644 --- a/src/mapred/mapred-default.xml +++ b/src/mapred/mapred-default.xml @@ -1,810 +1,827 @@ <?xml version="1.0"?> <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> <!-- Do not modify this file directly. Instead, copy entries that you --> <!-- wish to modify from this file into mapred-site.xml and change them --> <!-- there. If mapred-site.xml does not already exist, create it. --> <configuration> <property> <name>hadoop.job.history.location</name> <value></value> <description> If job tracker is static the history files are stored in this single well known place. If No value is set here, by default, it is in the local file system at ${hadoop.log.dir}/history. </description> </property> <property> <name>hadoop.job.history.user.location</name> <value></value> <description> User can specify a location to store the history files of a particular job. If nothing is specified, the logs are stored in output directory. The files are stored in "_logs/history/" in the directory. User can stop logging by giving the value "none". 
</description> </property> <property> <name>mapred.job.tracker.history.completed.location</name> <value></value> <description> The completed job history files are stored at this single well known location. If nothing is specified, the files are stored at ${hadoop.job.history.location}/done. </description> </property> <!-- i/o properties --> <property> <name>io.sort.factor</name> <value>10</value> <description>The number of streams to merge at once while sorting files. This determines the number of open file handles.</description> </property> <property> <name>io.sort.mb</name> <value>100</value> <description>The total amount of buffer memory to use while sorting files, in megabytes. By default, gives each merge stream 1MB, which should minimize seeks.</description> </property> <property> <name>io.sort.record.percent</name> <value>0.05</value> <description>The percentage of io.sort.mb dedicated to tracking record boundaries. Let this value be r, io.sort.mb be x. The maximum number of records collected before the collection thread must block is equal to (r * x) / 4</description> </property> <property> <name>io.sort.spill.percent</name> <value>0.80</value> <description>The soft limit in either the buffer or record collection buffers. Once reached, a thread will begin to spill the contents to disk in the background. Note that this does not imply any chunking of data to the spill. A value less than 0.5 is not recommended.</description> </property> <property> <name>io.map.index.skip</name> <value>0</value> <description>Number of index entries to skip between each entry. Zero by default. Setting this to values larger than zero can facilitate opening large map files using less memory.</description> </property> <property> <name>mapred.job.tracker</name> <value>local</value> <description>The host and port that the MapReduce job tracker runs at. If "local", then jobs are run in-process as a single map and reduce task. 
</description> </property> <property> <name>mapred.job.tracker.http.address</name> <value>0.0.0.0:50030</value> <description> The job tracker http server address and port the server will listen on. If the port is 0 then the server will start on a free port. </description> </property> <property> <name>mapred.job.tracker.handler.count</name> <value>10</value> <description> The number of server threads for the JobTracker. This should be roughly 4% of the number of tasktracker nodes. </description> </property> <property> <name>mapred.task.tracker.report.address</name> <value>127.0.0.1:0</value> <description>The interface and port that task tracker server listens on. Since it is only connected to by the tasks, it uses the local interface. EXPERT ONLY. Should only be changed if your host does not have the loopback interface.</description> </property> <property> <name>mapred.local.dir</name> <value>${hadoop.tmp.dir}/mapred/local</value> <description>The local directory where MapReduce stores intermediate data files. May be a comma-separated list of directories on different devices in order to spread disk i/o. Directories that do not exist are ignored. </description> </property> <property> <name>mapred.system.dir</name> <value>${hadoop.tmp.dir}/mapred/system</value> <description>The shared directory where MapReduce stores control files. </description> </property> <property> <name>mapred.temp.dir</name> <value>${hadoop.tmp.dir}/mapred/temp</value> <description>A shared directory for temporary files. </description> </property> <property> <name>mapred.local.dir.minspacestart</name> <value>0</value> <description>If the space in mapred.local.dir drops under this, do not ask for more tasks. Value in bytes. </description> </property> <property> <name>mapred.local.dir.minspacekill</name> <value>0</value> <description>If the space in mapred.local.dir drops under this, do not ask more tasks until all the current ones have finished and cleaned up. 
Also, to save the rest of the tasks we have running, kill one of them, to clean up some space. Start with the reduce tasks, then go with the ones that have finished the least. Value in bytes. </description> </property> <property> <name>mapred.tasktracker.expiry.interval</name> <value>600000</value> <description>Expert: The time-interval, in miliseconds, after which a tasktracker is declared 'lost' if it doesn't send heartbeats. </description> </property> <property> <name>mapred.tasktracker.instrumentation</name> <value>org.apache.hadoop.mapred.TaskTrackerMetricsInst</value> <description>Expert: The instrumentation class to associate with each TaskTracker. </description> </property> <property> <name>mapred.tasktracker.memory_calculator_plugin</name> <value></value> <description> Name of the class whose instance will be used to query memory information on the tasktracker. The class must be an instance of org.apache.hadoop.util.MemoryCalculatorPlugin. If the value is null, the tasktracker attempts to use a class appropriate to the platform. Currently, the only platform supported is Linux. </description> </property> <property> <name>mapred.tasktracker.taskmemorymanager.monitoring-interval</name> <value>5000</value> <description>The interval, in milliseconds, for which the tasktracker waits between two cycles of monitoring its tasks' memory usage. Used only if tasks' memory management is enabled via mapred.tasktracker.tasks.maxmemory. </description> </property> <property> <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name> <value>5000</value> <description>The time, in milliseconds, the tasktracker waits for sending a SIGKILL to a process, after it has been sent a SIGTERM.</description> </property> <property> <name>mapred.map.tasks</name> <value>2</value> <description>The default number of map tasks per job. Ignored when mapred.job.tracker is "local". 
</description> </property> <property> <name>mapred.reduce.tasks</name> <value>1</value> <description>The default number of reduce tasks per job. Typically set to 99% of the cluster's reduce capacity, so that if a node fails the reduces can still be executed in a single wave. Ignored when mapred.job.tracker is "local". </description> </property> <property> <name>mapreduce.tasktracker.outofband.heartbeat</name> <value>false</value> <description>Expert: Set this to true to let the tasktracker send an out-of-band heartbeat on task-completion for better latency. </description> </property> <property> <name>mapred.jobtracker.restart.recover</name> <value>false</value> <description>"true" to enable (job) recovery upon restart, "false" to start afresh </description> </property> <property> <name>mapred.jobtracker.job.history.block.size</name> <value>3145728</value> <description>The block size of the job history file. Since the job recovery uses job history, its important to dump job history to disk as soon as possible. Note that this is an expert level parameter. The default value is set to 3 MB. </description> </property> <property> <name>mapred.jobtracker.taskScheduler</name> <value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value> <description>The class responsible for scheduling the tasks.</description> </property> <property> <name>mapred.jobtracker.taskScheduler.maxRunningTasksPerJob</name> <value></value> <description>The maximum number of running tasks for a job before it gets preempted. No limits if undefined. </description> </property> <property> <name>mapred.map.max.attempts</name> <value>4</value> <description>Expert: The maximum number of attempts per map task. In other words, framework will try to execute a map task these many number of times before giving up on it. </description> </property> <property> <name>mapred.reduce.max.attempts</name> <value>4</value> <description>Expert: The maximum number of attempts per reduce task. 
In other words, framework will try to execute a reduce task these many number of times before giving up on it. </description> </property> <property> <name>mapred.reduce.parallel.copies</name> <value>5</value> <description>The default number of parallel transfers run by reduce during the copy(shuffle) phase. </description> </property> <property> <name>mapred.reduce.copy.backoff</name> <value>300</value> <description>The maximum amount of time (in seconds) a reducer spends on fetching one map output before declaring it as failed. </description> </property> +<property> + <name>mapreduce.reduce.shuffle.connect.timeout</name> + <value>180000</value> + <description>Expert: The maximum amount of time (in milli seconds) a reduce + task spends in trying to connect to a tasktracker for getting map output. + </description> +</property> + +<property> + <name>mapreduce.reduce.shuffle.read.timeout</name> + <value>180000</value> + <description>Expert: The maximum amount of time (in milli seconds) a reduce + task waits for map output data to be available for reading after obtaining + connection. + </description> +</property> + <property> <name>mapred.task.timeout</name> <value>600000</value> <description>The number of milliseconds before a task will be terminated if it neither reads an input, writes an output, nor updates its status string. </description> </property> <property> <name>mapred.tasktracker.map.tasks.maximum</name> <value>2</value> <description>The maximum number of map tasks that will be run simultaneously by a task tracker. </description> </property> <property> <name>mapred.tasktracker.reduce.tasks.maximum</name> <value>2</value> <description>The maximum number of reduce tasks that will be run simultaneously by a task tracker. 
</description> </property> <property> <name>mapred.jobtracker.completeuserjobs.maximum</name> <value>100</value> <description>The maximum number of complete jobs per user to keep around before delegating them to the job history.</description> </property> <property> <name>mapred.job.tracker.retiredjobs.cache.size</name> <value>1000</value> <description>The number of retired job status to keep in the cache. </description> </property> <property> <name>mapred.job.tracker.jobhistory.lru.cache.size</name> <value>5</value> <description>The number of job history files loaded in memory. The jobs are loaded when they are first accessed. The cache is cleared based on LRU. </description> </property> <property> <name>mapred.jobtracker.instrumentation</name> <value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value> <description>Expert: The instrumentation class to associate with each JobTracker. </description> </property> <property> <name>mapred.child.java.opts</name> <value>-Xmx200m</value> <description>Java opts for the task tracker child processes. The following symbol, if present, will be interpolated: @taskid@ is replaced by current TaskID. Any other occurrences of '@' will go unchanged. For example, to enable verbose gc logging to a file named for the taskid in /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: -Xmx1024m -verbose:gc -Xloggc:/tmp/@[email protected] The configuration variable mapred.child.ulimit can be used to control the maximum virtual memory of the child processes. </description> </property> <property> <name>mapred.child.env</name> <value></value> <description>User added environment variables for the task tracker child processes. Example : 1) A=foo This will set the env variable A to foo 2) B=$B:c This is inherit tasktracker's B env variable. </description> </property> <property> <name>mapred.child.ulimit</name> <value></value> <description>The maximum virtual memory, in KB, of a process launched by the Map-Reduce framework. 
This can be used to control both the Mapper/Reducer tasks and applications using Hadoop Pipes, Hadoop Streaming etc. By default it is left unspecified to let cluster admins control it via limits.conf and other such relevant mechanisms. Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to JavaVM, else the VM might not start. </description> </property> <property> <name>mapred.child.tmp</name> <value>./tmp</value> <description> To set the value of tmp directory for map and reduce tasks. If the value is an absolute path, it is directly assigned. Otherwise, it is prepended with task's working directory. The java tasks are executed with option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and streaming are set with environment variable, TMPDIR='the absolute path of the tmp dir' </description> </property> <property> <name>mapred.inmem.merge.threshold</name> <value>1000</value> <description>The threshold, in terms of the number of files for the in-memory merge process. When we accumulate threshold number of files we initiate the in-memory merge and spill to disk. A value of 0 or less than 0 indicates we want to DON'T have any threshold and instead depend only on the ramfs's memory consumption to trigger the merge. </description> </property> <property> <name>mapred.job.shuffle.merge.percent</name> <value>0.66</value> <description>The usage threshold at which an in-memory merge will be initiated, expressed as a percentage of the total memory allocated to storing in-memory map outputs, as defined by mapred.job.shuffle.input.buffer.percent. </description> </property> <property> <name>mapred.job.shuffle.input.buffer.percent</name> <value>0.70</value> <description>The percentage of memory to be allocated from the maximum heap size to storing map outputs during the shuffle. 
</description> </property> <property> <name>mapred.job.reduce.input.buffer.percent</name> <value>0.0</value> <description>The percentage of memory- relative to the maximum heap size- to retain map outputs during the reduce. When the shuffle is concluded, any remaining map outputs in memory must consume less than this threshold before the reduce can begin. </description> </property> <property> <name>mapred.map.tasks.speculative.execution</name> <value>true</value> <description>If true, then multiple instances of some map tasks may be executed in parallel.</description> </property> <property> <name>mapred.reduce.tasks.speculative.execution</name> <value>true</value> <description>If true, then multiple instances of some reduce tasks may be executed in parallel.</description> </property> <property> <name>mapred.job.reuse.jvm.num.tasks</name> <value>1</value> <description>How many tasks to run per jvm. If set to -1, there is no limit. </description> </property> <property> <name>mapred.min.split.size</name> <value>0</value> <description>The minimum size chunk that map input should be split into. Note that some file formats may have minimum split sizes that take priority over this setting.</description> </property> <property> <name>mapred.jobtracker.maxtasks.per.job</name> <value>-1</value> <description>The maximum number of tasks for a single job. A value of -1 indicates that there is no maximum. </description> </property> <property> <name>mapred.submit.replication</name> <value>10</value> <description>The replication level for submitted job files. This should be around the square root of the number of nodes. </description> </property> <property> <name>mapred.tasktracker.dns.interface</name> <value>default</value> <description>The name of the Network Interface from which a task tracker should report its IP address. 
</description> </property> <property> <name>mapred.tasktracker.dns.nameserver</name> <value>default</value> <description>The host name or IP address of the name server (DNS) which a TaskTracker should use to determine the host name used by the JobTracker for communication and display purposes. </description> </property> <property> <name>tasktracker.http.threads</name> <value>40</value> <description>The number of worker threads that for the http server. This is used for map output fetching </description> </property> <property> <name>mapred.task.tracker.http.address</name> <value>0.0.0.0:50060</value> <description> The task tracker http server address and port. If the port is 0 then the server will start on a free port. </description> </property> <property> <name>keep.failed.task.files</name> <value>false</value> <description>Should the files for failed tasks be kept. This should only be used on jobs that are failing, because the storage is never reclaimed. It also prevents the map outputs from being erased from the reduce directory as they are consumed.</description> </property> <!-- <property> <name>keep.task.files.pattern</name> <value>.*_m_123456_0</value> <description>Keep all files from tasks whose task names match the given regular expression. Defaults to none.</description> </property> --> <property> <name>mapred.output.compress</name> <value>false</value> <description>Should the job outputs be compressed? </description> </property> <property> <name>mapred.output.compression.type</name> <value>RECORD</value> <description>If the job outputs are to compressed as SequenceFiles, how should they be compressed? Should be one of NONE, RECORD or BLOCK. </description> </property> <property> <name>mapred.output.compression.codec</name> <value>org.apache.hadoop.io.compress.DefaultCodec</value> <description>If the job outputs are compressed, how should they be compressed? 
</description> </property> <property> <name>mapred.compress.map.output</name> <value>false</value> <description>Should the outputs of the maps be compressed before being sent across the network. Uses SequenceFile compression. </description> </property> <property> <name>mapred.map.output.compression.codec</name> <value>org.apache.hadoop.io.compress.DefaultCodec</value> <description>If the map outputs are compressed, how should they be compressed? </description> </property> <property> <name>map.sort.class</name> <value>org.apache.hadoop.util.QuickSort</value> <description>The default sort class for sorting keys. </description> </property> <property> <name>mapred.userlog.limit.kb</name> <value>0</value> <description>The maximum size of user-logs of each task in KB. 0 disables the cap. </description> </property> <property> <name>mapred.userlog.retain.hours</name> <value>24</value> <description>The maximum time, in hours, for which the user-logs are to be retained. </description> </property> <property> <name>mapred.hosts</name> <value></value> <description>Names a file that contains the list of nodes that may connect to the jobtracker. If the value is empty, all hosts are permitted.</description> </property> <property> <name>mapred.hosts.exclude</name> <value></value> <description>Names a file that contains the list of hosts that should be excluded by the jobtracker. If the value is empty, no hosts are excluded.</description> </property> <property> <name>mapred.heartbeats.in.second</name> <value>100</value> <description>Expert: Approximate number of heart-beats that could arrive at JobTracker in a second. Assuming each RPC can be processed in 10msec, the default value is made 100 RPCs in a second. </description> </property> <property> <name>mapred.max.tracker.blacklists</name> <value>4</value> <description>The number of blacklists for a taskTracker by various jobs after which the task tracker could be blacklisted across all jobs. 
The tracker will be given a tasks later (after a day). The tracker will become a healthy tracker after a restart. </description> </property> <property> <name>mapred.max.tracker.failures</name> <value>4</value> <description>The number of task-failures on a tasktracker of a given job after which new tasks of that job aren't assigned to it. </description> </property> <property> <name>jobclient.output.filter</name> <value>FAILED</value> <description>The filter for controlling the output of the task's userlogs sent to the console of the JobClient. The permissible options are: NONE, KILLED, FAILED, SUCCEEDED and ALL. </description> </property> <property> <name>mapred.job.tracker.persist.jobstatus.active</name> <value>false</value> <description>Indicates if persistency of job status information is active or not. </description> </property> <property> <name>mapred.job.tracker.persist.jobstatus.hours</name> <value>0</value> <description>The number of hours job status information is persisted in DFS. The job status information will be available after it drops of the memory queue and between jobtracker restarts. With a zero value the job status information is not persisted at all in DFS. </description> </property> <property> <name>mapred.job.tracker.persist.jobstatus.dir</name> <value>/jobtracker/jobsInfo</value> <description>The directory where the job status information is persisted in a file system to be available after it drops of the memory queue and between jobtracker restarts. </description> </property> <property> <name>mapred.task.profile</name> <value>false</value> <description>To set whether the system should collect profiler information for some of the tasks in this job? The information is stored in the user log directory. The value is "true" if task profiling is enabled.</description> </property> <property> <name>mapred.task.profile.maps</name> <value>0-2</value> <description> To set the ranges of map tasks to profile. 
mapred.task.profile has to be set to true for the value to be accounted. </description> </property> <property> <name>mapred.task.profile.reduces</name> <value>0-2</value> <description> To set the ranges of reduce tasks to profile. mapred.task.profile has to be set to true for the value to be accounted. </description> </property> <property> <name>mapred.line.input.format.linespermap</name> <value>1</value> <description> Number of lines per split in NLineInputFormat. </description> </property> <property> <name>mapred.skip.attempts.to.start.skipping</name> <value>2</value> <description> The number of Task attempts AFTER which skip mode will be kicked off. When skip mode is kicked off, the tasks reports the range of records which it will process next, to the TaskTracker. So that on failures, TT knows which ones are possibly the bad records. On further executions, those are skipped. </description> </property> <property> <name>mapred.skip.map.auto.incr.proc.count</name> <value>true</value> <description> The flag which if set to true, SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS is incremented by MapRunner after invoking the map function. This value must be set to false for applications which process the records asynchronously or buffer the input records. For example streaming. In such cases applications should increment this counter on their own. </description> </property> <property> <name>mapred.skip.reduce.auto.incr.proc.count</name> <value>true</value> <description> The flag which if set to true, SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS is incremented by framework after invoking the reduce function. This value must be set to false for applications which process the records asynchronously or buffer the input records. For example streaming. In such cases applications should increment this counter on their own. 
</description> </property> <property> <name>mapred.skip.out.dir</name> <value></value> <description> If no value is specified here, the skipped records are written to the output directory at _logs/skip. User can stop writing skipped records by giving the value "none". </description> </property> <property> <name>mapred.skip.map.max.skip.records</name> <value>0</value> <description> The number of acceptable skip records surrounding the bad record PER bad record in mapper. The number includes the bad record as well. To turn the feature of detection/skipping of bad records off, set the value to 0. The framework tries to narrow down the skipped range by retrying until this threshold is met OR all attempts get exhausted for this task. Set the value to Long.MAX_VALUE to indicate that framework need not try to narrow down. Whatever records(depends on application) get skipped are acceptable. </description> </property> <property> <name>mapred.skip.reduce.max.skip.groups</name> <value>0</value> <description> The number of acceptable skip groups surrounding the bad group PER bad group in reducer. The number includes the bad group as well. To turn the feature of detection/skipping of bad groups off, set the value to 0. The framework tries to narrow down the skipped range by retrying until this threshold is met OR all attempts get exhausted for this task. Set the value to Long.MAX_VALUE to indicate that framework need not try to narrow down. Whatever groups(depends on application) get skipped are acceptable. </description> </property> <!-- Job Notification Configuration --> <!-- <property> <name>job.end.notification.url</name> <value>http://localhost:8080/jobstatus.php?jobId=$jobId&amp;jobStatus=$jobStatus</value> <description>Indicates url which will be called on completion of job to inform end status of job. User can give at most 2 variables with URI : $jobId and $jobStatus. If they are present in URI, then they will be replaced by their respective values. 
</description> </property> --> <property> <name>job.end.retry.attempts</name> <value>0</value> <description>Indicates how many times hadoop should attempt to contact the notification URL </description> </property> diff --git a/src/mapred/org/apache/hadoop/mapred/ReduceTask.java b/src/mapred/org/apache/hadoop/mapred/ReduceTask.java index 7d12fe2..36c1dfa 100644 --- a/src/mapred/org/apache/hadoop/mapred/ReduceTask.java +++ b/src/mapred/org/apache/hadoop/mapred/ReduceTask.java @@ -625,1402 +625,1409 @@ class ReduceTask extends Task { * Map of host -> next contact time */ private Map<String, Long> penaltyBox; /** * the set of unique hosts from which we are copying */ private Set<String> uniqueHosts; /** * A reference to the RamManager for writing the map outputs to. */ private ShuffleRamManager ramManager; /** * A reference to the local file system for writing the map outputs to. */ private FileSystem localFileSys; private FileSystem rfs; /** * Number of files to merge at a time */ private int ioSortFactor; /** * A reference to the throwable object (if merge throws an exception) */ private volatile Throwable mergeThrowable; /** * A flag to indicate when to exit localFS merge */ private volatile boolean exitLocalFSMerge = false; /** * A flag to indicate when to exit getMapEvents thread */ private volatile boolean exitGetMapEvents = false; /** * When we accumulate maxInMemOutputs number of files in ram, we merge/spill */ private final int maxInMemOutputs; /** * Usage threshold for in-memory output accumulation. */ private final float maxInMemCopyPer; /** * Maximum memory usage of map outputs to merge from memory into * the reduce, in bytes. */ private final long maxInMemReduce; /** * The threads for fetching the files. */ private List<MapOutputCopier> copiers = null; /** * The object for metrics reporting. 
*/ private ShuffleClientMetrics shuffleClientMetrics = null; /** * the minimum interval between tasktracker polls */ private static final long MIN_POLL_INTERVAL = 1000; /** * a list of map output locations for fetch retrials */ private List<MapOutputLocation> retryFetches = new ArrayList<MapOutputLocation>(); /** * The set of required map outputs */ private Set <TaskID> copiedMapOutputs = Collections.synchronizedSet(new TreeSet<TaskID>()); /** * The set of obsolete map taskids. */ private Set <TaskAttemptID> obsoleteMapIds = Collections.synchronizedSet(new TreeSet<TaskAttemptID>()); private Random random = null; /** * the max of all the map completion times */ private int maxMapRuntime; /** * Maximum number of fetch-retries per-map. */ private volatile int maxFetchRetriesPerMap; /** * Combiner runner, if a combiner is needed */ private CombinerRunner combinerRunner; /** * Resettable collector used for combine. */ private CombineOutputCollector combineCollector = null; /** * Maximum percent of failed fetch attempt before killing the reduce task. */ private static final float MAX_ALLOWED_FAILED_FETCH_ATTEMPT_PERCENT = 0.5f; /** * Minimum percent of progress required to keep the reduce alive. */ private static final float MIN_REQUIRED_PROGRESS_PERCENT = 0.5f; /** * Maximum percent of shuffle execution time required to keep the reducer alive. */ private static final float MAX_ALLOWED_STALL_TIME_PERCENT = 0.5f; /** * Minimum number of map fetch retries. */ private static final int MIN_FETCH_RETRIES_PER_MAP = 2; /** * The minimum percentage of maps yet to be copied, * which indicates end of shuffle */ private static final float MIN_PENDING_MAPS_PERCENT = 0.25f; /** * Maximum no. of unique maps from which we failed to fetch map-outputs * even after {@link #maxFetchRetriesPerMap} retries; after this the * reduce task is failed. 
*/ private int maxFailedUniqueFetches = 5; /** * The maps from which we fail to fetch map-outputs * even after {@link #maxFetchRetriesPerMap} retries. */ Set<TaskID> fetchFailedMaps = new TreeSet<TaskID>(); /** * A map of taskId -> no. of failed fetches */ Map<TaskAttemptID, Integer> mapTaskToFailedFetchesMap = new HashMap<TaskAttemptID, Integer>(); /** * Initial backoff interval (milliseconds) */ private static final int BACKOFF_INIT = 4000; /** * The interval for logging in the shuffle */ private static final int MIN_LOG_TIME = 60000; /** * List of in-memory map-outputs. */ private final List<MapOutput> mapOutputsFilesInMemory = Collections.synchronizedList(new LinkedList<MapOutput>()); /** * The map for (Hosts, List of MapIds from this Host) maintaining * map output locations */ private final Map<String, List<MapOutputLocation>> mapLocations = new ConcurrentHashMap<String, List<MapOutputLocation>>(); /** * This class contains the methods that should be used for metrics-reporting * the specific metrics for shuffle. This class actually reports the * metrics for the shuffle client (the ReduceTask), and hence the name * ShuffleClientMetrics. 
*/ class ShuffleClientMetrics implements Updater { private MetricsRecord shuffleMetrics = null; private int numFailedFetches = 0; private int numSuccessFetches = 0; private long numBytes = 0; private int numThreadsBusy = 0; ShuffleClientMetrics(JobConf conf) { MetricsContext metricsContext = MetricsUtil.getContext("mapred"); this.shuffleMetrics = MetricsUtil.createRecord(metricsContext, "shuffleInput"); this.shuffleMetrics.setTag("user", conf.getUser()); this.shuffleMetrics.setTag("jobName", conf.getJobName()); this.shuffleMetrics.setTag("jobId", ReduceTask.this.getJobID().toString()); this.shuffleMetrics.setTag("taskId", getTaskID().toString()); this.shuffleMetrics.setTag("sessionId", conf.getSessionId()); metricsContext.registerUpdater(this); } public synchronized void inputBytes(long numBytes) { this.numBytes += numBytes; } public synchronized void failedFetch() { ++numFailedFetches; } public synchronized void successFetch() { ++numSuccessFetches; } public synchronized void threadBusy() { ++numThreadsBusy; } public synchronized void threadFree() { --numThreadsBusy; } public void doUpdates(MetricsContext unused) { synchronized (this) { shuffleMetrics.incrMetric("shuffle_input_bytes", numBytes); shuffleMetrics.incrMetric("shuffle_failed_fetches", numFailedFetches); shuffleMetrics.incrMetric("shuffle_success_fetches", numSuccessFetches); if (numCopiers != 0) { shuffleMetrics.setMetric("shuffle_fetchers_busy_percent", 100*((float)numThreadsBusy/numCopiers)); } else { shuffleMetrics.setMetric("shuffle_fetchers_busy_percent", 0); } numBytes = 0; numSuccessFetches = 0; numFailedFetches = 0; } shuffleMetrics.update(); } } /** Represents the result of an attempt to copy a map output */ private class CopyResult { // the map output location against which a copy attempt was made private final MapOutputLocation loc; // the size of the file copied, -1 if the transfer failed private final long size; //a flag signifying whether a copy result is obsolete private static final int 
OBSOLETE = -2; private CopyOutputErrorType error = CopyOutputErrorType.NO_ERROR; CopyResult(MapOutputLocation loc, long size) { this.loc = loc; this.size = size; } CopyResult(MapOutputLocation loc, long size, CopyOutputErrorType error) { this.loc = loc; this.size = size; this.error = error; } public boolean getSuccess() { return size >= 0; } public boolean isObsolete() { return size == OBSOLETE; } public long getSize() { return size; } public String getHost() { return loc.getHost(); } public MapOutputLocation getLocation() { return loc; } public CopyOutputErrorType getError() { return error; } } private int nextMapOutputCopierId = 0; /** * Abstraction to track a map-output. */ private class MapOutputLocation { TaskAttemptID taskAttemptId; TaskID taskId; String ttHost; URL taskOutput; public MapOutputLocation(TaskAttemptID taskAttemptId, String ttHost, URL taskOutput) { this.taskAttemptId = taskAttemptId; this.taskId = this.taskAttemptId.getTaskID(); this.ttHost = ttHost; this.taskOutput = taskOutput; } public TaskAttemptID getTaskAttemptId() { return taskAttemptId; } public TaskID getTaskId() { return taskId; } public String getHost() { return ttHost; } public URL getOutputLocation() { return taskOutput; } } /** Describes the output of a map; could either be on disk or in-memory. 
*/ private class MapOutput { final TaskID mapId; final TaskAttemptID mapAttemptId; final Path file; final Configuration conf; byte[] data; final boolean inMemory; long compressedSize; public MapOutput(TaskID mapId, TaskAttemptID mapAttemptId, Configuration conf, Path file, long size) { this.mapId = mapId; this.mapAttemptId = mapAttemptId; this.conf = conf; this.file = file; this.compressedSize = size; this.data = null; this.inMemory = false; } public MapOutput(TaskID mapId, TaskAttemptID mapAttemptId, byte[] data, int compressedLength) { this.mapId = mapId; this.mapAttemptId = mapAttemptId; this.file = null; this.conf = null; this.data = data; this.compressedSize = compressedLength; this.inMemory = true; } public void discard() throws IOException { if (inMemory) { data = null; } else { FileSystem fs = file.getFileSystem(conf); fs.delete(file, true); } } } class ShuffleRamManager implements RamManager { /* Maximum percentage of the in-memory limit that a single shuffle can * consume*/ private static final float MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION = 0.25f; /* Maximum percentage of shuffle-threads which can be stalled * simultaneously after which a merge is triggered. 
*/ private static final float MAX_STALLED_SHUFFLE_THREADS_FRACTION = 0.75f; private final long maxSize; private final long maxSingleShuffleLimit; private long size = 0; private Object dataAvailable = new Object(); private long fullSize = 0; private int numPendingRequests = 0; private int numRequiredMapOutputs = 0; private int numClosed = 0; private boolean closed = false; public ShuffleRamManager(Configuration conf) throws IOException { final float maxInMemCopyUse = conf.getFloat("mapred.job.shuffle.input.buffer.percent", 0.70f); if (maxInMemCopyUse > 1.0 || maxInMemCopyUse < 0.0) { throw new IOException("mapred.job.shuffle.input.buffer.percent" + maxInMemCopyUse); } maxSize = (long)Math.min( Runtime.getRuntime().maxMemory() * maxInMemCopyUse, Integer.MAX_VALUE); maxSingleShuffleLimit = (long)(maxSize * MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION); LOG.info("ShuffleRamManager: MemoryLimit=" + maxSize + ", MaxSingleShuffleLimit=" + maxSingleShuffleLimit); } public synchronized boolean reserve(int requestedSize, InputStream in) throws InterruptedException { // Wait till the request can be fulfilled... while ((size + requestedSize) > maxSize) { // Close the input... if (in != null) { try { in.close(); } catch (IOException ie) { LOG.info("Failed to close connection with: " + ie); } finally { in = null; } } // Track pending requests synchronized (dataAvailable) { ++numPendingRequests; dataAvailable.notify(); } // Wait for memory to free up wait(); // Track pending requests synchronized (dataAvailable) { --numPendingRequests; } } size += requestedSize; return (in != null); } public synchronized void unreserve(int requestedSize) { size -= requestedSize; synchronized (dataAvailable) { fullSize -= requestedSize; --numClosed; } // Notify the threads blocked on RamManager.reserve notifyAll(); } public boolean waitForDataToMerge() throws InterruptedException { boolean done = false; synchronized (dataAvailable) { // Start in-memory merge if manager has been closed or... 
while (!closed && // In-memory threshold exceeded and at least two segments // have been fetched (getPercentUsed() < maxInMemCopyPer || numClosed < 2) && // More than "mapred.inmem.merge.threshold" map outputs // have been fetched into memory (maxInMemOutputs <= 0 || numClosed < maxInMemOutputs) && // More than MAX... threads are blocked on the RamManager // or the blocked threads are the last map outputs to be // fetched. If numRequiredMapOutputs is zero, either // setNumCopiedMapOutputs has not been called (no map ouputs // have been fetched, so there is nothing to merge) or the // last map outputs being transferred without // contention, so a merge would be premature. (numPendingRequests < numCopiers*MAX_STALLED_SHUFFLE_THREADS_FRACTION && (0 == numRequiredMapOutputs || numPendingRequests < numRequiredMapOutputs))) { dataAvailable.wait(); } done = closed; } return done; } public void closeInMemoryFile(int requestedSize) { synchronized (dataAvailable) { fullSize += requestedSize; ++numClosed; dataAvailable.notify(); } } public void setNumCopiedMapOutputs(int numRequiredMapOutputs) { synchronized (dataAvailable) { this.numRequiredMapOutputs = numRequiredMapOutputs; dataAvailable.notify(); } } public void close() { synchronized (dataAvailable) { closed = true; LOG.info("Closed ram manager"); dataAvailable.notify(); } } private float getPercentUsed() { return (float)fullSize/maxSize; } boolean canFitInMemory(long requestedSize) { return (requestedSize < Integer.MAX_VALUE && requestedSize < maxSingleShuffleLimit); } } /** Copies map outputs as they become available */ private class MapOutputCopier extends Thread { // basic/unit connection timeout (in milliseconds) private final static int UNIT_CONNECT_TIMEOUT = 30 * 1000; // default read timeout (in milliseconds) private final static int DEFAULT_READ_TIMEOUT = 3 * 60 * 1000; + private final int shuffleConnectionTimeout; + private final int shuffleReadTimeout; private MapOutputLocation currentLocation = null; private 
int id = nextMapOutputCopierId++; private Reporter reporter; private boolean readError = false; // Decompression of map-outputs private CompressionCodec codec = null; private Decompressor decompressor = null; public MapOutputCopier(JobConf job, Reporter reporter) { setName("MapOutputCopier " + reduceTask.getTaskID() + "." + id); LOG.debug(getName() + " created"); this.reporter = reporter; + shuffleConnectionTimeout = + job.getInt("mapreduce.reduce.shuffle.connect.timeout", STALLED_COPY_TIMEOUT); + shuffleReadTimeout = + job.getInt("mapreduce.reduce.shuffle.read.timeout", DEFAULT_READ_TIMEOUT); + if (job.getCompressMapOutput()) { Class<? extends CompressionCodec> codecClass = job.getMapOutputCompressorClass(DefaultCodec.class); codec = ReflectionUtils.newInstance(codecClass, job); decompressor = CodecPool.getDecompressor(codec); } } /** * Fail the current file that we are fetching * @return were we currently fetching? */ public synchronized boolean fail() { if (currentLocation != null) { finish(-1, CopyOutputErrorType.OTHER_ERROR); return true; } else { return false; } } /** * Get the current map output location. */ public synchronized MapOutputLocation getLocation() { return currentLocation; } private synchronized void start(MapOutputLocation loc) { currentLocation = loc; } private synchronized void finish(long size, CopyOutputErrorType error) { if (currentLocation != null) { LOG.debug(getName() + " finishing " + currentLocation + " =" + size); synchronized (copyResults) { copyResults.add(new CopyResult(currentLocation, size, error)); copyResults.notify(); } currentLocation = null; } } /** Loop forever and fetch map outputs as they become available. 
     * The thread exits when it is interrupted by {@link ReduceTaskRunner}
     */
    @Override
    public void run() {
      while (true) {
        try {
          MapOutputLocation loc = null;
          long size = -1;

          // Block until the scheduler thread hands us a location to fetch.
          synchronized (scheduledCopies) {
            while (scheduledCopies.isEmpty()) {
              scheduledCopies.wait();
            }
            loc = scheduledCopies.remove(0);
          }

          CopyOutputErrorType error = CopyOutputErrorType.OTHER_ERROR;
          readError = false;
          try {
            shuffleClientMetrics.threadBusy();
            start(loc);
            size = copyOutput(loc);
            shuffleClientMetrics.successFetch();
            error = CopyOutputErrorType.NO_ERROR;
          } catch (IOException e) {
            LOG.warn(reduceTask.getTaskID() + " copy failed: " +
                     loc.getTaskAttemptId() + " from " + loc.getHost());
            LOG.warn(StringUtils.stringifyException(e));
            shuffleClientMetrics.failedFetch();
            // readError is set by the read path; distinguish it so the
            // scheduler can react differently to read vs. connect failures.
            if (readError) {
              error = CopyOutputErrorType.READ_ERROR;
            }
            // Reset
            size = -1;
          } finally {
            // Always publish a result, even on failure, so the scheduler
            // is not left waiting for this copy forever.
            shuffleClientMetrics.threadFree();
            finish(size, error);
          }
        } catch (InterruptedException e) {
          break; // ALL DONE
        } catch (FSError e) {
          LOG.error("Task: " + reduceTask.getTaskID() +
                    " - FSError: " + StringUtils.stringifyException(e));
          try {
            umbilical.fsError(reduceTask.getTaskID(), e.getMessage());
          } catch (IOException io) {
            LOG.error("Could not notify TT of FSError: " +
                      StringUtils.stringifyException(io));
          }
        } catch (Throwable th) {
          String msg = getTaskID() + " : Map output copy failure : "
                       + StringUtils.stringifyException(th);
          reportFatalError(getTaskID(), th, msg);
        }
      }

      // Thread is exiting: return the pooled decompressor, if any.
      if (decompressor != null) {
        CodecPool.returnDecompressor(decompressor);
      }
    }

    /** Copies a map output from a remote host, via HTTP.
     * @param loc the map output location to be copied
     * @return the path (fully qualified) of the copied file
     * @throws IOException if there is an error copying the file
     * @throws InterruptedException if the copier should give up
     */
    private long copyOutput(MapOutputLocation loc
                            ) throws IOException, InterruptedException {
      // check if we still need to copy the output from this location
      if (copiedMapOutputs.contains(loc.getTaskId()) ||
          obsoleteMapIds.contains(loc.getTaskAttemptId())) {
        return CopyResult.OBSOLETE;
      }

      // a temp filename. If this file gets created in ramfs, we're fine,
      // else, we will check the localFS to find a suitable final location
      // for this path
      TaskAttemptID reduceId = reduceTask.getTaskID();
      Path filename = new Path("/" + TaskTracker.getIntermediateOutputDir(
                                       reduceId.getJobID().toString(),
                                       reduceId.toString())
                               + "/map_" +
                               loc.getTaskId().getId() + ".out");

      // Copy the map output to a temp file whose name is unique to this attempt
      Path tmpMapOutput = new Path(filename+"-"+id);

      // Copy the map output
      MapOutput mapOutput = getMapOutput(loc, tmpMapOutput,
                                         reduceId.getTaskID().getId());
      if (mapOutput == null) {
        throw new IOException("Failed to fetch map-output for " +
                              loc.getTaskAttemptId() + " from " +
                              loc.getHost());
      }

      // The size of the map-output
      long bytes = mapOutput.compressedSize;

      // lock the ReduceTask while we do the rename
      synchronized (ReduceTask.this) {
        // Re-check under the lock: another copier may have fetched the same
        // map's output while we were downloading.
        if (copiedMapOutputs.contains(loc.getTaskId())) {
          mapOutput.discard();
          return CopyResult.OBSOLETE;
        }

        // Special case: discard empty map-outputs
        if (bytes == 0) {
          try {
            mapOutput.discard();
          } catch (IOException ioe) {
            LOG.info("Couldn't discard output of " + loc.getTaskId());
          }

          // Note that we successfully copied the map-output
          noteCopiedMapOutput(loc.getTaskId());

          return bytes;
        }

        // Process map-output
        if (mapOutput.inMemory) {
          // Save it in the synchronized list of map-outputs
          mapOutputsFilesInMemory.add(mapOutput);
        } else {
          // Rename the temporary file to the final file;
          // ensure it is on the same partition
          tmpMapOutput = mapOutput.file;
          filename = new Path(tmpMapOutput.getParent(), filename.getName());
          if (!localFileSys.rename(tmpMapOutput, filename)) {
            localFileSys.delete(tmpMapOutput, true);
            bytes = -1;
            throw new IOException("Failed to rename map output " +
                                  tmpMapOutput + " to " + filename);
          }

          synchronized (mapOutputFilesOnDisk) {
            addToMapOutputFilesOnDisk(localFileSys.getFileStatus(filename));
          }
        }

        // Note that we successfully copied the map-output
        noteCopiedMapOutput(loc.getTaskId());
      }

      return bytes;
    }

    /**
     * Save the map taskid whose output we just copied.
     * This function assumes that it has been synchronized on ReduceTask.this.
     *
     * @param taskId map taskid
     */
    private void noteCopiedMapOutput(TaskID taskId) {
      copiedMapOutputs.add(taskId);
      // Tell the ram manager how many outputs are still outstanding so it
      // can decide when an in-memory merge would be premature.
      ramManager.setNumCopiedMapOutputs(numMaps - copiedMapOutputs.size());
    }

    /**
     * Get the map output into a local file (either in the inmemory fs or on the
     * local fs) from the remote server.
     * We use the file system so that we generate checksum files on the data.
* @param mapOutputLoc map-output to be fetched * @param filename the filename to write the data into * @param connectionTimeout number of milliseconds for connection timeout * @param readTimeout number of milliseconds for read timeout * @return the path of the file that got created * @throws IOException when something goes wrong */ private MapOutput getMapOutput(MapOutputLocation mapOutputLoc, Path filename, int reduce) throws IOException, InterruptedException { // Connect URLConnection connection = mapOutputLoc.getOutputLocation().openConnection(); - InputStream input = getInputStream(connection, STALLED_COPY_TIMEOUT, - DEFAULT_READ_TIMEOUT); + InputStream input = getInputStream(connection, shuffleConnectionTimeout, + shuffleReadTimeout); // Validate header from map output TaskAttemptID mapId = null; try { mapId = TaskAttemptID.forName(connection.getHeaderField(FROM_MAP_TASK)); } catch (IllegalArgumentException ia) { LOG.warn("Invalid map id ", ia); return null; } TaskAttemptID expectedMapId = mapOutputLoc.getTaskAttemptId(); if (!mapId.equals(expectedMapId)) { LOG.warn("data from wrong map:" + mapId + " arrived to reduce task " + reduce + ", where as expected map output should be from " + expectedMapId); return null; } long decompressedLength = Long.parseLong(connection.getHeaderField(RAW_MAP_OUTPUT_LENGTH)); long compressedLength = Long.parseLong(connection.getHeaderField(MAP_OUTPUT_LENGTH)); if (compressedLength < 0 || decompressedLength < 0) { LOG.warn(getName() + " invalid lengths in map output header: id: " + mapId + " compressed len: " + compressedLength + ", decompressed len: " + decompressedLength); return null; } int forReduce = (int)Integer.parseInt(connection.getHeaderField(FOR_REDUCE_TASK)); if (forReduce != reduce) { LOG.warn("data for the wrong reduce: " + forReduce + " with compressed len: " + compressedLength + ", decompressed len: " + decompressedLength + " arrived to reduce task " + reduce); return null; } LOG.info("header: " + mapId + ", 
compressed len: " + compressedLength + ", decompressed len: " + decompressedLength); //We will put a file in memory if it meets certain criteria: //1. The size of the (decompressed) file should be less than 25% of // the total inmem fs //2. There is space available in the inmem fs // Check if this map-output can be saved in-memory boolean shuffleInMemory = ramManager.canFitInMemory(decompressedLength); // Shuffle MapOutput mapOutput = null; if (shuffleInMemory) { LOG.info("Shuffling " + decompressedLength + " bytes (" + compressedLength + " raw bytes) " + "into RAM from " + mapOutputLoc.getTaskAttemptId()); mapOutput = shuffleInMemory(mapOutputLoc, connection, input, (int)decompressedLength, (int)compressedLength); } else { LOG.info("Shuffling " + decompressedLength + " bytes (" + compressedLength + " raw bytes) " + "into Local-FS from " + mapOutputLoc.getTaskAttemptId()); mapOutput = shuffleToDisk(mapOutputLoc, input, filename, compressedLength); } return mapOutput; } /** * The connection establishment is attempted multiple times and is given up * only on the last failure. Instead of connecting with a timeout of * X, we try connecting with a timeout of x < X but multiple times. */ private InputStream getInputStream(URLConnection connection, int connectionTimeout, int readTimeout) throws IOException { int unit = 0; if (connectionTimeout < 0) { throw new IOException("Invalid timeout " + "[timeout = " + connectionTimeout + " ms]"); } else if (connectionTimeout > 0) { unit = (UNIT_CONNECT_TIMEOUT > connectionTimeout) ? 
connectionTimeout : UNIT_CONNECT_TIMEOUT; } // set the read timeout to the total timeout connection.setReadTimeout(readTimeout); // set the connect timeout to the unit-connect-timeout connection.setConnectTimeout(unit); while (true) { try { connection.connect(); break; } catch (IOException ioe) { // update the total remaining connect-timeout connectionTimeout -= unit; // throw an exception if we have waited for timeout amount of time // note that the updated value if timeout is used here if (connectionTimeout == 0) { throw ioe; } // reset the connect timeout for the last try if (connectionTimeout < unit) { unit = connectionTimeout; // reset the connect time out for the final connect connection.setConnectTimeout(unit); } } } try { return connection.getInputStream(); } catch (IOException ioe) { readError = true; throw ioe; } } private MapOutput shuffleInMemory(MapOutputLocation mapOutputLoc, URLConnection connection, InputStream input, int mapOutputLength, int compressedLength) throws IOException, InterruptedException { // Reserve ram for the map-output boolean createdNow = ramManager.reserve(mapOutputLength, input); // Reconnect if we need to if (!createdNow) { // Reconnect try { connection = mapOutputLoc.getOutputLocation().openConnection(); - input = getInputStream(connection, STALLED_COPY_TIMEOUT, - DEFAULT_READ_TIMEOUT); + input = getInputStream(connection, shuffleConnectionTimeout, + shuffleReadTimeout); } catch (IOException ioe) { LOG.info("Failed reopen connection to fetch map-output from " + mapOutputLoc.getHost()); // Inform the ram-manager ramManager.closeInMemoryFile(mapOutputLength); ramManager.unreserve(mapOutputLength); throw ioe; } } IFileInputStream checksumIn = new IFileInputStream(input,compressedLength); input = checksumIn; // Are map-outputs compressed? 
      if (codec != null) {
        decompressor.reset();
        input = codec.createInputStream(input, decompressor);
      }

      // Copy map-output into an in-memory buffer
      byte[] shuffleData = new byte[mapOutputLength];
      MapOutput mapOutput =
        new MapOutput(mapOutputLoc.getTaskId(),
                      mapOutputLoc.getTaskAttemptId(), shuffleData,
                      compressedLength);

      int bytesRead = 0;
      try {
        int n = input.read(shuffleData, 0, shuffleData.length);
        while (n > 0) {
          bytesRead += n;
          shuffleClientMetrics.inputBytes(n);

          // indicate we're making progress
          reporter.progress();
          n = input.read(shuffleData, bytesRead,
                         (shuffleData.length-bytesRead));
        }

        LOG.info("Read " + bytesRead + " bytes from map-output for " +
                 mapOutputLoc.getTaskAttemptId());

        input.close();
      } catch (IOException ioe) {
        LOG.info("Failed to shuffle from " +
                 mapOutputLoc.getTaskAttemptId(), ioe);

        // Inform the ram-manager: the reservation must be released even on
        // failure or the manager's accounting leaks.
        ramManager.closeInMemoryFile(mapOutputLength);
        ramManager.unreserve(mapOutputLength);

        // Discard the map-output
        try {
          mapOutput.discard();
        } catch (IOException ignored) {
          LOG.info("Failed to discard map-output from " +
                   mapOutputLoc.getTaskAttemptId(), ignored);
        }
        mapOutput = null;

        // Close the streams
        IOUtils.cleanup(LOG, input);

        // Re-throw
        readError = true;
        throw ioe;
      }

      // Close the in-memory file
      ramManager.closeInMemoryFile(mapOutputLength);

      // Sanity check: a short read means the transfer was truncated.
      if (bytesRead != mapOutputLength) {
        // Inform the ram-manager
        ramManager.unreserve(mapOutputLength);

        // Discard the map-output
        try {
          mapOutput.discard();
        } catch (IOException ignored) {
          // IGNORED because we are cleaning up
          LOG.info("Failed to discard map-output from " +
                   mapOutputLoc.getTaskAttemptId(), ignored);
        }
        mapOutput = null;

        throw new IOException("Incomplete map output received for " +
                              mapOutputLoc.getTaskAttemptId() + " from " +
                              mapOutputLoc.getOutputLocation() + " (" +
                              bytesRead + " instead of " +
                              mapOutputLength + ")"
        );
      }

      // TODO: Remove this after a 'fix' for HADOOP-3647
      if (mapOutputLength > 0) {
        DataInputBuffer dib = new DataInputBuffer();
        dib.reset(shuffleData, 0, shuffleData.length);
        LOG.info("Rec #1 from " + mapOutputLoc.getTaskAttemptId() +
                 " -> (" + WritableUtils.readVInt(dib) + ", " +
                 WritableUtils.readVInt(dib) + ") from " +
                 mapOutputLoc.getHost());
      }

      return mapOutput;
    }

    // Stream a map output that is too large for memory to a local-disk file.
    private MapOutput shuffleToDisk(MapOutputLocation mapOutputLoc,
                                    InputStream input,
                                    Path filename,
                                    long mapOutputLength)
    throws IOException {
      // Find out a suitable location for the output on local-filesystem
      Path localFilename =
        lDirAlloc.getLocalPathForWrite(filename.toUri().getPath(),
                                       mapOutputLength, conf);

      MapOutput mapOutput =
        new MapOutput(mapOutputLoc.getTaskId(), mapOutputLoc.getTaskAttemptId(),
                      conf, localFileSys.makeQualified(localFilename),
                      mapOutputLength);

      // Copy data to local-disk
      OutputStream output = null;
      long bytesRead = 0;
      try {
        output = rfs.create(localFilename);

        byte[] buf = new byte[64 * 1024];
        int n = -1;
        try {
          n = input.read(buf, 0, buf.length);
        } catch (IOException ioe) {
          readError = true;
          throw ioe;
        }
        while (n > 0) {
          bytesRead += n;
          shuffleClientMetrics.inputBytes(n);
          output.write(buf, 0, n);

          // indicate we're making progress
          reporter.progress();
          try {
            n = input.read(buf, 0, buf.length);
          } catch (IOException ioe) {
            readError = true;
            throw ioe;
          }
        }

        LOG.info("Read " + bytesRead + " bytes from map-output for " +
            mapOutputLoc.getTaskAttemptId());

        output.close();
        input.close();
      } catch (IOException ioe) {
        LOG.info("Failed to shuffle from " + mapOutputLoc.getTaskAttemptId(),
                 ioe);

        // Discard the map-output
        try {
          mapOutput.discard();
        } catch (IOException ignored) {
          LOG.info("Failed to discard map-output from " +
                   mapOutputLoc.getTaskAttemptId(), ignored);
        }
        mapOutput = null;

        // Close the streams
        IOUtils.cleanup(LOG, input, output);

        // Re-throw
        throw ioe;
      }

      // Sanity check
      if (bytesRead != mapOutputLength) {
        try {
          mapOutput.discard();
        } catch (Exception ioe) {
          // IGNORED because we are cleaning up
          LOG.info("Failed to discard map-output from " +
              mapOutputLoc.getTaskAttemptId(), ioe);
        } catch (Throwable t) {
          String msg = getTaskID() + " : Failed in shuffle to disk :"
                       + StringUtils.stringifyException(t);
          reportFatalError(getTaskID(), t, msg);
        }
        mapOutput = null;

        throw new IOException("Incomplete map output received for " +
                              mapOutputLoc.getTaskAttemptId() + " from " +
                              mapOutputLoc.getOutputLocation() + " (" +
                              bytesRead + " instead of " +
                              mapOutputLength + ")"
        );
      }

      return mapOutput;
    }
  } // MapOutputCopier

  // Add the job jar, its lib/ and classes/ dirs, and the work directory to
  // the classloader used by this task's configuration.
  private void configureClasspath(JobConf conf)
  throws IOException {

    // get the task and the current classloader which will become the parent
    Task task = ReduceTask.this;
    ClassLoader parent = conf.getClassLoader();

    // get the work directory which holds the elements we are dynamically
    // adding to the classpath
    File workDir = new File(task.getJobFile()).getParentFile();
    ArrayList<URL> urllist = new ArrayList<URL>();

    // add the jars and directories to the classpath
    String jar = conf.getJar();
    if (jar != null) {
      File jobCacheDir = new File(new Path(jar).getParent().toString());

      File[] libs = new File(jobCacheDir, "lib").listFiles();
      if (libs != null) {
        for (int i = 0; i < libs.length; i++) {
          urllist.add(libs[i].toURL());
        }
      }
      urllist.add(new File(jobCacheDir, "classes").toURL());
      urllist.add(jobCacheDir.toURL());
    }
    urllist.add(workDir.toURL());

    // create a new classloader with the old classloader as its parent
    // then set that classloader as the one used by the current jobconf
    URL[] urls = urllist.toArray(new URL[urllist.size()]);
    URLClassLoader loader = new URLClassLoader(urls, parent);
    conf.setClassLoader(loader);
  }

  public ReduceCopier(TaskUmbilicalProtocol umbilical, JobConf conf,
                      TaskReporter reporter
                      )throws ClassNotFoundException, IOException {

    configureClasspath(conf);
    this.reporter = reporter;
    this.shuffleClientMetrics = new ShuffleClientMetrics(conf);
    this.umbilical = umbilical;
    this.reduceTask = ReduceTask.this;

    this.scheduledCopies = new ArrayList<MapOutputLocation>(100);
    this.copyResults = new ArrayList<CopyResult>(100);
    this.numCopiers = conf.getInt("mapred.reduce.parallel.copies", 5);
    this.maxInFlight = 4 * numCopiers;
    this.maxBackoff = conf.getInt("mapred.reduce.copy.backoff", 300);
    Counters.Counter combineInputCounter =
      reporter.getCounter(Task.Counter.COMBINE_INPUT_RECORDS);
    this.combinerRunner = CombinerRunner.create(conf, getTaskID(),
                                                combineInputCounter,
                                                reporter, null);
    if (combinerRunner != null) {
      combineCollector =
        new CombineOutputCollector(reduceCombineOutputCounter);
    }

    this.ioSortFactor = conf.getInt("io.sort.factor", 10);
    // the exponential backoff formula
    //    backoff (t) = init * base^(t-1)
    // so for max retries we get
    //    backoff(1) + .... + backoff(max_fetch_retries) ~ max
    // solving which we get
    //    max_fetch_retries ~ log((max * (base - 1) / init) + 1) / log(base)
    // for the default value of max = 300 (5min) we get max_fetch_retries = 6
    // the order is 4,8,16,32,64,128. sum of which is 252 sec = 4.2 min
    // optimizing for the base 2
    this.maxFetchRetriesPerMap = Math.max(MIN_FETCH_RETRIES_PER_MAP,
           getClosestPowerOf2((this.maxBackoff * 1000 / BACKOFF_INIT) + 1));
    this.maxFailedUniqueFetches = Math.min(numMaps,
                                           this.maxFailedUniqueFetches);
    this.maxInMemOutputs = conf.getInt("mapred.inmem.merge.threshold", 1000);
    this.maxInMemCopyPer =
      conf.getFloat("mapred.job.shuffle.merge.percent", 0.66f);

    final float maxRedPer =
      conf.getFloat("mapred.job.reduce.input.buffer.percent", 0f);
    if (maxRedPer > 1.0 || maxRedPer < 0.0) {
      throw new IOException("mapred.job.reduce.input.buffer.percent" +
                            maxRedPer);
    }
    this.maxInMemReduce = (int)Math.min(
        Runtime.getRuntime().maxMemory() * maxRedPer, Integer.MAX_VALUE);

    // Setup the RamManager
    ramManager = new ShuffleRamManager(conf);

    localFileSys = FileSystem.getLocal(conf);

    rfs = ((LocalFileSystem)localFileSys).getRaw();

    // hosts -> next contact time
    this.penaltyBox = new LinkedHashMap<String, Long>();

    // hostnames
    this.uniqueHosts = new HashSet<String>();

    // Seed the random number generator with a reasonably globally unique seed
    long randomSeed = System.nanoTime() +
                      (long)Math.pow(this.reduceTask.getPartition(),
                                     (this.reduceTask.getPartition()%10)
                                    );
    this.random = new Random(randomSeed);
    this.maxMapRuntime = 0;
  }

  // True when enough copies are already in flight to keep the copiers busy.
  private boolean busyEnough(int numInFlight) {
    return numInFlight > maxInFlight;
  }

  /**
   * Drive the shuffle: start the copier, merger and map-event threads, then
   * loop scheduling fetches until all map outputs have been copied.
   * (NOTE(review): the body continues beyond this excerpt.)
   */
  public boolean fetchOutputs() throws IOException {
    int totalFailures = 0;
    int numInFlight = 0, numCopied = 0;
    DecimalFormat mbpsFormat = new DecimalFormat("0.00");
    final Progress copyPhase =
      reduceTask.getProgress().phase();
    LocalFSMerger localFSMergerThread = null;
    InMemFSMergeThread inMemFSMergeThread = null;
    GetMapEventsThread getMapEventsThread = null;

    for (int i = 0; i < numMaps; i++) {
      copyPhase.addPhase();       // add sub-phase per file
    }

    copiers = new ArrayList<MapOutputCopier>(numCopiers);

    // start all the copying threads
    for (int i=0; i < numCopiers; i++) {
      MapOutputCopier copier = new MapOutputCopier(conf, reporter);
      copiers.add(copier);
      copier.start();
    }

    //start the on-disk-merge thread
    localFSMergerThread = new LocalFSMerger((LocalFileSystem)localFileSys);
    //start the in memory merger thread
    inMemFSMergeThread = new InMemFSMergeThread();
    localFSMergerThread.start();
    inMemFSMergeThread.start();

    // start the map events thread
    getMapEventsThread = new GetMapEventsThread();
    getMapEventsThread.start();

    // start the clock for bandwidth measurement
    long startTime = System.currentTimeMillis();
    long currentTime = startTime;
    long lastProgressTime = startTime;
    long lastOutputTime = 0;

    // loop until we get all required outputs
    while (copiedMapOutputs.size() < numMaps && mergeThrowable == null) {

      currentTime = System.currentTimeMillis();
      boolean logNow = false;
      if (currentTime - lastOutputTime > MIN_LOG_TIME) {
        lastOutputTime = currentTime;
        logNow = true;
      }
      if (logNow) {
        LOG.info(reduceTask.getTaskID() + " Need another "
               + (numMaps - copiedMapOutputs.size()) + " map output(s) "
               + "where " + numInFlight + " is already in progress");
      }

      // Put the hash entries for the failed fetches.
      Iterator<MapOutputLocation> locItr = retryFetches.iterator();

      while (locItr.hasNext()) {
        MapOutputLocation loc = locItr.next();
        List<MapOutputLocation> locList =
          mapLocations.get(loc.getHost());

        // Check if the list exists. Map output location mapping is cleared
        // once the jobtracker restarts and is rebuilt from scratch.
        // Note that map-output-location mapping will be recreated and hence
        // we continue with the hope that we might find some locations
        // from the rebuild map.
        if (locList != null) {
          // Add to the beginning of the list so that this map is
          //tried again before the others and we can hasten the
          //re-execution of this map should there be a problem
          locList.add(0, loc);
        }
      }

      if (retryFetches.size() > 0) {
        LOG.info(reduceTask.getTaskID() + ": " +
              "Got " + retryFetches.size() +
              " map-outputs from previous failures");
      }
      // clear the "failed" fetches hashmap
      retryFetches.clear();

      // now walk through the cache and schedule what we can
      int numScheduled = 0;
      int numDups = 0;

      synchronized (scheduledCopies) {

        // Randomize the map output locations to prevent
        // all reduce-tasks swamping the same tasktracker
        List<String> hostList = new ArrayList<String>();
        hostList.addAll(mapLocations.keySet());

        Collections.shuffle(hostList, this.random);

        Iterator<String> hostsItr = hostList.iterator();

        while (hostsItr.hasNext()) {

          String host = hostsItr.next();

          List<MapOutputLocation> knownOutputsByLoc =
            mapLocations.get(host);

          // Check if the list exists. Map output location mapping is
          // cleared once the jobtracker restarts and is rebuilt from
          // scratch.
          // Note that map-output-location mapping will be recreated and
          // hence we continue with the hope that we might find some
          // locations from the rebuild map and add then for fetching.
          if (knownOutputsByLoc == null || knownOutputsByLoc.size() == 0) {
            continue;
          }

          //Identify duplicate hosts here
          if (uniqueHosts.contains(host)) {
            numDups += knownOutputsByLoc.size();
            continue;
          }

          Long penaltyEnd = penaltyBox.get(host);
          boolean penalized = false;

          if (penaltyEnd != null) {
            if (currentTime < penaltyEnd.longValue()) {
              penalized = true;
            } else {
              penaltyBox.remove(host);
            }
          }

          if (penalized)
            continue;

          synchronized (knownOutputsByLoc) {

            locItr = knownOutputsByLoc.iterator();

            while (locItr.hasNext()) {

              MapOutputLocation loc = locItr.next();

              // Do not schedule fetches from OBSOLETE maps
              if (obsoleteMapIds.contains(loc.getTaskAttemptId())) {
                locItr.remove();
                continue;
              }

              uniqueHosts.add(host);
              scheduledCopies.add(loc);
              locItr.remove();  // remove from knownOutputs
              numInFlight++; numScheduled++;

              break;      //we have a map from this host
            }
          }
        }
        scheduledCopies.notifyAll();
      }

      if (numScheduled > 0 || logNow) {
        LOG.info(reduceTask.getTaskID() + " Scheduled " + numScheduled +
               " outputs (" + penaltyBox.size() +
               " slow hosts and" + numDups + " dup hosts)");
      }

      if (penaltyBox.size() > 0 && logNow) {
        LOG.info("Penalized(slow) Hosts: ");
        for (String host : penaltyBox.keySet()) {
          LOG.info(host + " Will be considered after: " +
              ((penaltyBox.get(host) - currentTime)/1000) + " seconds.");
        }
      }

      // if we have no copies in flight and we can't schedule anything
      // new, just wait for a bit
      try {
        if (numInFlight == 0 && numScheduled == 0) {
          // we should indicate progress as we don't want TT to think
          // we're stuck and kill us
          reporter.progress();
          Thread.sleep(5000);
        }
      } catch (InterruptedException e) { } // IGNORE

      while (numInFlight > 0 && mergeThrowable == null) {
jaxlaw/hadoop-common
9e136abc4eaf13b4dea357b05467b16d11df5729
HADOOP-6428 from https://issues.apache.org/jira/secure/attachment/12427624/HADOOP-6428.0.20.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 24fd6cb..1e3962b 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,450 +1,451 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383002 + HADOOP-6428. HttpServer sleeps with negative values (cos) HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat via sharad) MAPREDUCE-896. Modify permissions for local files on tasktracker before deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) HADOOP-5771. Implements unit tests for LinuxTaskController. (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. 
FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. 
(Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. 
Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. 
Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. 
yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. 
http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. 
http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. 
Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. 
(Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/core/org/apache/hadoop/http/HttpServer.java b/src/core/org/apache/hadoop/http/HttpServer.java index 47d0500..4fc44c1 100644 --- a/src/core/org/apache/hadoop/http/HttpServer.java +++ b/src/core/org/apache/hadoop/http/HttpServer.java @@ -1,545 +1,544 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.http; import java.io.IOException; import java.io.PrintWriter; import java.net.BindException; import java.net.InetSocketAddress; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; import java.nio.channels.ServerSocketChannel; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.log.LogLevel; import org.apache.hadoop.util.ReflectionUtils; import org.mortbay.jetty.Connector; import org.mortbay.jetty.Handler; import org.mortbay.jetty.Server; import org.mortbay.jetty.handler.ContextHandlerCollection; import org.mortbay.jetty.nio.SelectChannelConnector; import org.mortbay.jetty.security.SslSocketConnector; import org.mortbay.jetty.servlet.Context; import org.mortbay.jetty.servlet.DefaultServlet; import org.mortbay.jetty.servlet.FilterHolder; import org.mortbay.jetty.servlet.FilterMapping; import org.mortbay.jetty.servlet.ServletHandler; import org.mortbay.jetty.servlet.ServletHolder; import org.mortbay.jetty.webapp.WebAppContext; import org.mortbay.thread.QueuedThreadPool; import org.mortbay.util.MultiException; /** * Create a Jetty embedded server to answer http requests. The primary goal * is to serve up status information for the server. 
* There are three contexts: * "/logs/" -> points to the log directory * "/static/" -> points to common static files (src/webapps/static) * "/" -> the jsp server code from (src/webapps/<name>) */ public class HttpServer implements FilterContainer { public static final Log LOG = LogFactory.getLog(HttpServer.class); static final String FILTER_INITIALIZER_PROPERTY = "hadoop.http.filter.initializers"; protected final Server webServer; protected final Connector listener; protected final WebAppContext webAppContext; protected final boolean findPort; protected final Map<Context, Boolean> defaultContexts = new HashMap<Context, Boolean>(); protected final List<String> filterNames = new ArrayList<String>(); private static final int MAX_RETRIES = 10; /** Same as this(name, bindAddress, port, findPort, null); */ public HttpServer(String name, String bindAddress, int port, boolean findPort ) throws IOException { this(name, bindAddress, port, findPort, new Configuration()); } /** * Create a status server on the given port. * The jsp scripts are taken from src/webapps/<name>. * @param name The name of the server * @param port The port to use on the server * @param findPort whether the server should start at the given port and * increment by 1 until it finds a free port. 
* @param conf Configuration */ public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf) throws IOException { webServer = new Server(); this.findPort = findPort; listener = createBaseListener(conf); listener.setHost(bindAddress); listener.setPort(port); webServer.addConnector(listener); webServer.setThreadPool(new QueuedThreadPool()); final String appDir = getWebAppsPath(); ContextHandlerCollection contexts = new ContextHandlerCollection(); webServer.setHandler(contexts); webAppContext = new WebAppContext(); webAppContext.setContextPath("/"); webAppContext.setWar(appDir + "/" + name); webServer.addHandler(webAppContext); addDefaultApps(contexts, appDir); final FilterInitializer[] initializers = getFilterInitializers(conf); if (initializers != null) { for(FilterInitializer c : initializers) { c.initFilter(this); } } addDefaultServlets(); } /** * Create a required listener for the Jetty instance listening on the port * provided. This wrapper and all subclasses must create at least one * listener. */ protected Connector createBaseListener(Configuration conf) throws IOException { SelectChannelConnector ret = new SelectChannelConnector(); ret.setLowResourceMaxIdleTime(10000); ret.setAcceptQueueSize(128); ret.setResolveNames(false); ret.setUseDirectBuffers(false); return ret; } /** Get an array of FilterConfiguration specified in the conf */ private static FilterInitializer[] getFilterInitializers(Configuration conf) { if (conf == null) { return null; } Class<?>[] classes = conf.getClasses(FILTER_INITIALIZER_PROPERTY); if (classes == null) { return null; } FilterInitializer[] initializers = new FilterInitializer[classes.length]; for(int i = 0; i < classes.length; i++) { initializers[i] = (FilterInitializer)ReflectionUtils.newInstance( classes[i], conf); } return initializers; } /** * Add default apps. 
* @param appDir The application directory * @throws IOException */ protected void addDefaultApps(ContextHandlerCollection parent, final String appDir) throws IOException { // set up the context for "/logs/" if "hadoop.log.dir" property is defined. String logDir = System.getProperty("hadoop.log.dir"); if (logDir != null) { Context logContext = new Context(parent, "/logs"); logContext.setResourceBase(logDir); logContext.addServlet(DefaultServlet.class, "/"); defaultContexts.put(logContext, true); } // set up the context for "/static/*" Context staticContext = new Context(parent, "/static"); staticContext.setResourceBase(appDir + "/static"); staticContext.addServlet(DefaultServlet.class, "/*"); defaultContexts.put(staticContext, true); } /** * Add default servlets. */ protected void addDefaultServlets() { // set up default servlets addServlet("stacks", "/stacks", StackServlet.class); addServlet("logLevel", "/logLevel", LogLevel.Servlet.class); } public void addContext(Context ctxt, boolean isFiltered) throws IOException { webServer.addHandler(ctxt); defaultContexts.put(ctxt, isFiltered); } /** * Add a context * @param pathSpec The path spec for the context * @param dir The directory containing the context * @param isFiltered if true, the servlet is added to the filter path mapping * @throws IOException */ protected void addContext(String pathSpec, String dir, boolean isFiltered) throws IOException { if (0 == webServer.getHandlers().length) { throw new RuntimeException("Couldn't find handler"); } WebAppContext webAppCtx = new WebAppContext(); webAppCtx.setContextPath(pathSpec); webAppCtx.setWar(dir); addContext(webAppCtx, true); } /** * Set a value in the webapp context. These values are available to the jsp * pages as "application.getAttribute(name)". * @param name The name of the attribute * @param value The value of the attribute */ public void setAttribute(String name, Object value) { webAppContext.setAttribute(name, value); } /** * Add a servlet in the server. 
* @param name The name of the servlet (can be passed as null) * @param pathSpec The path spec for the servlet * @param clazz The servlet class */ public void addServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz) { addInternalServlet(name, pathSpec, clazz); addFilterPathMapping(pathSpec, webAppContext); } /** * Add an internal servlet in the server. * @param name The name of the servlet (can be passed as null) * @param pathSpec The path spec for the servlet * @param clazz The servlet class * @deprecated this is a temporary method */ @Deprecated public void addInternalServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz) { ServletHolder holder = new ServletHolder(clazz); if (name != null) { holder.setName(name); } webAppContext.addServlet(holder, pathSpec); } /** {@inheritDoc} */ public void addFilter(String name, String classname, Map<String, String> parameters) { final String[] USER_FACING_URLS = { "*.html", "*.jsp" }; defineFilter(webAppContext, name, classname, parameters, USER_FACING_URLS); final String[] ALL_URLS = { "/*" }; for (Map.Entry<Context, Boolean> e : defaultContexts.entrySet()) { if (e.getValue()) { Context ctx = e.getKey(); defineFilter(ctx, name, classname, parameters, ALL_URLS); LOG.info("Added filter " + name + " (class=" + classname + ") to context " + ctx.getDisplayName()); } } filterNames.add(name); } /** {@inheritDoc} */ public void addGlobalFilter(String name, String classname, Map<String, String> parameters) { final String[] ALL_URLS = { "/*" }; defineFilter(webAppContext, name, classname, parameters, ALL_URLS); for (Context ctx : defaultContexts.keySet()) { defineFilter(ctx, name, classname, parameters, ALL_URLS); } LOG.info("Added global filter" + name + " (class=" + classname + ")"); } /** * Define a filter for a context and set up default url mappings. 
*/ protected void defineFilter(Context ctx, String name, String classname, Map<String,String> parameters, String[] urls) { FilterHolder holder = new FilterHolder(); holder.setName(name); holder.setClassName(classname); holder.setInitParameters(parameters); FilterMapping fmap = new FilterMapping(); fmap.setPathSpecs(urls); fmap.setDispatches(Handler.ALL); fmap.setFilterName(name); ServletHandler handler = ctx.getServletHandler(); handler.addFilter(holder, fmap); } /** * Add the path spec to the filter path mapping. * @param pathSpec The path spec * @param webAppCtx The WebApplicationContext to add to */ protected void addFilterPathMapping(String pathSpec, Context webAppCtx) { ServletHandler handler = webAppCtx.getServletHandler(); for(String name : filterNames) { FilterMapping fmap = new FilterMapping(); fmap.setPathSpec(pathSpec); fmap.setFilterName(name); fmap.setDispatches(Handler.ALL); handler.addFilterMapping(fmap); } } /** * Get the value in the webapp context. * @param name The name of the attribute * @return The value of the attribute */ public Object getAttribute(String name) { return webAppContext.getAttribute(name); } /** * Get the pathname to the webapps files. * @return the pathname as a URL * @throws IOException if 'webapps' directory cannot be found on CLASSPATH. */ protected String getWebAppsPath() throws IOException { URL url = getClass().getClassLoader().getResource("webapps"); if (url == null) throw new IOException("webapps not found in CLASSPATH"); return url.toString(); } /** * Get the port that the server is on * @return the port */ public int getPort() { return webServer.getConnectors()[0].getLocalPort(); } /** * Set the min, max number of worker threads (simultaneous connections). */ public void setThreads(int min, int max) { QueuedThreadPool pool = (QueuedThreadPool) webServer.getThreadPool() ; pool.setMinThreads(min); pool.setMaxThreads(max); } /** * Configure an ssl listener on the server. 
* @param addr address to listen on * @param keystore location of the keystore * @param storPass password for the keystore * @param keyPass password for the key * @deprecated Use {@link #addSslListener(InetSocketAddress, Configuration, boolean)} */ @Deprecated public void addSslListener(InetSocketAddress addr, String keystore, String storPass, String keyPass) throws IOException { if (webServer.isStarted()) { throw new IOException("Failed to add ssl listener"); } SslSocketConnector sslListener = new SslSocketConnector(); sslListener.setHost(addr.getHostName()); sslListener.setPort(addr.getPort()); sslListener.setKeystore(keystore); sslListener.setPassword(storPass); sslListener.setKeyPassword(keyPass); webServer.addConnector(sslListener); } /** * Configure an ssl listener on the server. * @param addr address to listen on * @param sslConf conf to retrieve ssl options * @param needClientAuth whether client authentication is required */ public void addSslListener(InetSocketAddress addr, Configuration sslConf, boolean needClientAuth) throws IOException { if (webServer.isStarted()) { throw new IOException("Failed to add ssl listener"); } if (needClientAuth) { // setting up SSL truststore for authenticating clients System.setProperty("javax.net.ssl.trustStore", sslConf.get( "ssl.server.truststore.location", "")); System.setProperty("javax.net.ssl.trustStorePassword", sslConf.get( "ssl.server.truststore.password", "")); System.setProperty("javax.net.ssl.trustStoreType", sslConf.get( "ssl.server.truststore.type", "jks")); } SslSocketConnector sslListener = new SslSocketConnector(); sslListener.setHost(addr.getHostName()); sslListener.setPort(addr.getPort()); sslListener.setKeystore(sslConf.get("ssl.server.keystore.location")); sslListener.setPassword(sslConf.get("ssl.server.keystore.password", "")); sslListener.setKeyPassword(sslConf.get("ssl.server.keystore.keypassword", "")); sslListener.setKeystoreType(sslConf.get("ssl.server.keystore.type", "jks")); 
sslListener.setNeedClientAuth(needClientAuth); webServer.addConnector(sslListener); } /** * Start the server. Does not wait for the server to start. */ public void start() throws IOException { try { int port = 0; int oriPort = listener.getPort(); // The original requested port while (true) { try { port = webServer.getConnectors()[0].getLocalPort(); LOG.info("Port returned by webServer.getConnectors()[0]." + "getLocalPort() before open() is "+ port + ". Opening the listener on " + oriPort); listener.open(); port = listener.getLocalPort(); LOG.info("listener.getLocalPort() returned " + listener.getLocalPort() + " webServer.getConnectors()[0].getLocalPort() returned " + webServer.getConnectors()[0].getLocalPort()); //Workaround to handle the problem reported in HADOOP-4744 if (port < 0) { Thread.sleep(100); int numRetries = 1; while (port < 0) { LOG.warn("listener.getLocalPort returned " + port); if (numRetries++ > MAX_RETRIES) { throw new Exception(" listener.getLocalPort is returning " + "less than 0 even after " +numRetries+" resets"); } for (int i = 0; i < 2; i++) { LOG.info("Retrying listener.getLocalPort()"); port = listener.getLocalPort(); if (port > 0) { break; } Thread.sleep(200); } if (port > 0) { break; } LOG.info("Bouncing the listener"); listener.close(); Thread.sleep(1000); listener.setPort(oriPort == 0 ? 0 : (oriPort += 1)); listener.open(); Thread.sleep(100); port = listener.getLocalPort(); } } //Workaround end LOG.info("Jetty bound to port " + port); webServer.start(); // Workaround for HADOOP-6386 port = listener.getLocalPort(); if (port < 0) { LOG.warn("Bounds port is " + port + " after webserver start"); - Random r = new Random(1000); for (int i = 0; i < MAX_RETRIES/2; i++) { try { webServer.stop(); } catch (Exception e) { LOG.warn("Can't stop web-server", e); } - Thread.sleep(r.nextInt()); + Thread.sleep(1000); listener.setPort(oriPort == 0 ? 
0 : (oriPort += 1)); listener.open(); Thread.sleep(100); webServer.start(); LOG.info(i + "attempts to restart webserver"); port = listener.getLocalPort(); if (port > 0) break; } if (port < 0) throw new Exception("listener.getLocalPort() is returning " + "less than 0 even after " +MAX_RETRIES+" resets"); } // End of HADOOP-6386 workaround break; } catch (IOException ex) { // if this is a bind exception, // then try the next port number. if (ex instanceof BindException) { if (!findPort) { throw (BindException) ex; } } else { LOG.info("HttpServer.start() threw a non Bind IOException"); throw ex; } } catch (MultiException ex) { LOG.info("HttpServer.start() threw a MultiException"); throw ex; } listener.setPort((oriPort += 1)); } } catch (IOException e) { throw e; } catch (Exception e) { throw new IOException("Problem starting http server", e); } } /** * stop the server */ public void stop() throws Exception { listener.close(); webServer.stop(); } public void join() throws InterruptedException { webServer.join(); } /** * A very simple servlet to serve up a text representation of the current * stack traces. It both returns the stacks to the caller and logs them. * Currently the stack traces are done sequentially rather than exactly the * same data. */ public static class StackServlet extends HttpServlet { private static final long serialVersionUID = -6284183679759467039L; @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { PrintWriter out = new PrintWriter(response.getOutputStream()); ReflectionUtils.printThreadInfo(out, ""); out.close(); ReflectionUtils.logThreadInfo(LOG, "jsp requested", 1); } } }
jaxlaw/hadoop-common
6641820201ddc36451724e6ffd4a5220d9db5028
HADOOP:5771 from https://issues.apache.org/jira/secure/attachment/12426091/hadoop-5771-ydist.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index bdd86c3..08ef865 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,445 +1,447 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. + MAPREDUCE-1185. Redirect running job url to history url if job is already + retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) + + MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat + via sharad) + + MAPREDUCE-896. Modify permissions for local files on tasktracker before + deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) + + HADOOP-5771. Implements unit tests for LinuxTaskController. + (Sreekanth Ramakrishnan and Vinod Kumar Vavilapalli via yhemanth) yahoo-hadoop-0.20.1-3195383001 HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) - MAPREDUCE-1185. Redirect running job url to history url if job is already - retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) - - MAPREDUCE-754. 
Fix NPE in expiry thread when a TT is lost. (Amar Kamat - via sharad) - - MAPREDUCE-896. Modify permissions for local files on tasktracker before - deletion so they can be deleted cleanly. (Ravi Gummadi via yhemanth) - yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. 
Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. 
(suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. 
Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. 
HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. 
http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. 
http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). 
http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/build.xml b/build.xml index e64ff2d..f81ab87 100644 --- a/build.xml +++ b/build.xml @@ -229,1024 +229,1026 @@ <path refid="classpath"/> </path> <!-- the cluster test classpath: uses conf.dir for configuration --> <path id="test.cluster.classpath"> <path refid="classpath"/> <pathelement location="${test.build.classes}" /> <pathelement location="${test.src.dir}"/> <pathelement location="${build.dir}"/> </path> <!-- properties dependent on the items defined above. 
--> <!--<available classname="${rat.reporting.classname}" classpathref="classpath" property="rat.present" value="true"/> --> <!-- ====================================================== --> <!-- Macro definitions --> <!-- ====================================================== --> <macrodef name="macro_tar" description="Worker Macro for tar"> <attribute name="param.destfile"/> <element name="param.listofitems"/> <sequential> <tar compression="gzip" longfile="gnu" destfile="@{param.destfile}"> <param.listofitems/> </tar> </sequential> </macrodef> <!-- ====================================================== --> <!-- Stuff needed by all targets --> <!-- ====================================================== --> <target name="init" depends="ivy-retrieve-common"> <mkdir dir="${build.dir}"/> <mkdir dir="${build.classes}"/> <mkdir dir="${build.tools}"/> <mkdir dir="${build.src}"/> <mkdir dir="${build.webapps}/task/WEB-INF"/> <mkdir dir="${build.webapps}/job/WEB-INF"/> <mkdir dir="${build.webapps}/hdfs/WEB-INF"/> <mkdir dir="${build.webapps}/datanode/WEB-INF"/> <mkdir dir="${build.webapps}/secondary/WEB-INF"/> <mkdir dir="${build.examples}"/> <mkdir dir="${build.anttasks}"/> <mkdir dir="${build.dir}/c++"/> <mkdir dir="${test.build.dir}"/> <mkdir dir="${test.build.classes}"/> <mkdir dir="${test.build.testjar}"/> <mkdir dir="${test.build.testshell}"/> <mkdir dir="${test.build.extraconf}"/> <tempfile property="touch.temp.file" destDir="${java.io.tmpdir}"/> <touch millis="0" file="${touch.temp.file}"> <fileset dir="${conf.dir}" includes="**/*.template"/> <fileset dir="${contrib.dir}" includes="**/*.template"/> </touch> <delete file="${touch.temp.file}"/> <!-- copy all of the jsp and static files --> <copy todir="${build.webapps}"> <fileset dir="${src.webapps}"> <exclude name="**/*.jsp" /> </fileset> </copy> <copy todir="${conf.dir}" verbose="true"> <fileset dir="${conf.dir}" includes="**/*.template"/> <mapper type="glob" from="*.template" to="*"/> </copy> <copy 
todir="${contrib.dir}" verbose="true"> <fileset dir="${contrib.dir}" includes="**/*.template"/> <mapper type="glob" from="*.template" to="*"/> </copy> <exec executable="sh"> <arg line="src/saveVersion.sh ${version}"/> </exec> <exec executable="sh"> <arg line="src/fixFontsPath.sh ${src.docs.cn}"/> </exec> </target> <!-- ====================================================== --> <!-- Compile the Java files --> <!-- ====================================================== --> <target name="record-parser" depends="init" if="javacc.home"> <javacc target="${core.src.dir}/org/apache/hadoop/record/compiler/generated/rcc.jj" outputdirectory="${core.src.dir}/org/apache/hadoop/record/compiler/generated" javacchome="${javacc.home}" /> </target> <target name="compile-rcc-compiler" depends="init, record-parser"> <javac encoding="${build.encoding}" srcdir="${core.src.dir}" includes="org/apache/hadoop/record/compiler/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args}"/> <classpath refid="classpath"/> </javac> <taskdef name="recordcc" classname="org.apache.hadoop.record.compiler.ant.RccTask"> <classpath refid="classpath" /> </taskdef> </target> <target name="compile-core-classes" depends="init, compile-rcc-compiler"> <taskdef classname="org.apache.jasper.JspC" name="jsp-compile" > <classpath refid="test.classpath"/> </taskdef> <!-- Compile Java files (excluding JSPs) checking warnings --> <javac encoding="${build.encoding}" srcdir="${core.src.dir}" includes="org/apache/hadoop/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> </javac> <copy todir="${build.classes}"> <fileset dir="${core.src.dir}" 
includes="**/*.properties"/> <fileset dir="${core.src.dir}" includes="core-default.xml"/> </copy> </target> <target name="compile-mapred-classes" depends="compile-core-classes"> <jsp-compile uriroot="${src.webapps}/task" outputdir="${build.src}" package="org.apache.hadoop.mapred" webxml="${build.webapps}/task/WEB-INF/web.xml"> </jsp-compile> <jsp-compile uriroot="${src.webapps}/job" outputdir="${build.src}" package="org.apache.hadoop.mapred" webxml="${build.webapps}/job/WEB-INF/web.xml"> </jsp-compile> <!-- Compile Java files (excluding JSPs) checking warnings --> <javac encoding="${build.encoding}" srcdir="${mapred.src.dir};${build.src}" includes="org/apache/hadoop/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> </javac> <copy todir="${build.classes}"> <fileset dir="${mapred.src.dir}" includes="**/*.properties"/> <fileset dir="${mapred.src.dir}" includes="mapred-default.xml"/> </copy> </target> <target name="compile-hdfs-classes" depends="compile-core-classes"> <jsp-compile uriroot="${src.webapps}/hdfs" outputdir="${build.src}" package="org.apache.hadoop.hdfs.server.namenode" webxml="${build.webapps}/hdfs/WEB-INF/web.xml"> </jsp-compile> <jsp-compile uriroot="${src.webapps}/datanode" outputdir="${build.src}" package="org.apache.hadoop.hdfs.server.datanode" webxml="${build.webapps}/datanode/WEB-INF/web.xml"> </jsp-compile> <!-- Compile Java files (excluding JSPs) checking warnings --> <javac encoding="${build.encoding}" srcdir="${hdfs.src.dir};${build.src}" includes="org/apache/hadoop/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> 
</javac> <copy todir="${build.classes}"> <fileset dir="${hdfs.src.dir}" includes="**/*.properties"/> <fileset dir="${hdfs.src.dir}" includes="hdfs-default.xml"/> </copy> </target> <target name="compile-tools" depends="init"> <javac encoding="${build.encoding}" srcdir="${tools.src}" includes="org/apache/hadoop/**/*.java" destdir="${build.tools}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> </javac> <copy todir="${build.tools}"> <fileset dir="${tools.src}" includes="**/*.properties" /> </copy> </target> <target name="compile-native"> <antcall target="compile-core-native"> <param name="compile.native" value="true"/> </antcall> </target> <target name="compile-core-native" depends="compile-core-classes" if="compile.native"> <mkdir dir="${build.native}/lib"/> <mkdir dir="${build.native}/src/org/apache/hadoop/io/compress/zlib"/> <javah classpath="${build.classes}" destdir="${build.native}/src/org/apache/hadoop/io/compress/zlib" force="yes" verbose="yes" > <class name="org.apache.hadoop.io.compress.zlib.ZlibCompressor" /> <class name="org.apache.hadoop.io.compress.zlib.ZlibDecompressor" /> </javah> <exec dir="${build.native}" executable="sh" failonerror="true"> <env key="OS_NAME" value="${os.name}"/> <env key="OS_ARCH" value="${os.arch}"/> <env key="JVM_DATA_MODEL" value="${sun.arch.data.model}"/> <env key="HADOOP_NATIVE_SRCDIR" value="${native.src.dir}"/> <arg line="${native.src.dir}/configure"/> </exec> <exec dir="${build.native}" executable="${make.cmd}" failonerror="true"> <env key="OS_NAME" value="${os.name}"/> <env key="OS_ARCH" value="${os.arch}"/> <env key="JVM_DATA_MODEL" value="${sun.arch.data.model}"/> <env key="HADOOP_NATIVE_SRCDIR" value="${native.src.dir}"/> </exec> <exec dir="${build.native}" executable="sh" failonerror="true"> <arg line="${build.native}/libtool 
--mode=install cp ${build.native}/lib/libhadoop.la ${build.native}/lib"/> </exec> </target> <target name="compile-core" depends="clover,compile-core-classes,compile-mapred-classes, compile-hdfs-classes,compile-core-native,compile-c++" description="Compile core only"> </target> <target name="compile-contrib" depends="compile-core,compile-c++-libhdfs"> <subant target="compile"> <property name="version" value="${version}"/> <fileset file="${contrib.dir}/build.xml"/> </subant> </target> <target name="compile" depends="compile-core, compile-contrib, compile-ant-tasks, compile-tools" description="Compile core, contrib"> </target> <target name="compile-examples" depends="compile-core,compile-tools,compile-c++-examples"> <javac encoding="${build.encoding}" srcdir="${examples.dir}" includes="org/apache/hadoop/**/*.java" destdir="${build.examples}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath> <path refid="classpath"/> <pathelement location="${build.tools}"/> </classpath> </javac> </target> <!-- ================================================================== --> <!-- Make hadoop.jar --> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> <target name="jar" depends="compile-core" description="Make hadoop.jar"> <tar compression="gzip" destfile="${build.classes}/bin.tgz"> <tarfileset dir="bin" mode="755"/> </tar> <jar jarfile="${build.dir}/${final.name}-core.jar" basedir="${build.classes}"> <manifest> <section name="org/apache/hadoop"> <attribute name="Implementation-Title" value="Hadoop"/> <attribute name="Implementation-Version" value="${version}"/> <attribute name="Implementation-Vendor" value="Yahoo!"/> </section> </manifest> <fileset file="${conf.dir}/commons-logging.properties"/> <fileset 
file="${conf.dir}/log4j.properties"/> <fileset file="${conf.dir}/hadoop-metrics.properties"/> <zipfileset dir="${build.webapps}" prefix="webapps"/> </jar> </target> <!-- ================================================================== --> <!-- Make the Hadoop examples jar. --> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> <target name="examples" depends="jar, compile-examples" description="Make the Hadoop examples jar."> <jar jarfile="${build.dir}/${final.name}-examples.jar" basedir="${build.examples}"> <manifest> <attribute name="Main-Class" value="org/apache/hadoop/examples/ExampleDriver"/> </manifest> </jar> </target> <target name="tools-jar" depends="jar, compile-tools" description="Make the Hadoop tools jar."> <jar jarfile="${build.dir}/${final.name}-tools.jar" basedir="${build.tools}"> <manifest> <attribute name="Main-Class" value="org/apache/hadoop/examples/ExampleDriver"/> </manifest> </jar> </target> <!-- ================================================================== --> <!-- Make the Hadoop metrics jar. (for use outside Hadoop) --> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> <target name="metrics.jar" depends="compile-core" description="Make the Hadoop metrics jar. 
(for use outside Hadoop)"> <jar jarfile="${build.dir}/hadoop-metrics-${version}.jar" basedir="${build.classes}"> <include name="**/metrics/**" /> <exclude name="**/package.html" /> </jar> </target> <target name="generate-test-records" depends="compile-rcc-compiler"> <recordcc destdir="${test.generated.dir}"> <fileset dir="${test.src.dir}" includes="**/*.jr" /> </recordcc> </target> <!-- ================================================================== --> <!-- Compile test code --> <!-- ================================================================== --> <target name="compile-core-test" depends="compile-examples, compile-tools, generate-test-records"> <javac encoding="${build.encoding}" srcdir="${test.generated.dir}" includes="org/apache/hadoop/**/*.java" destdir="${test.build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args}" /> <classpath refid="test.classpath"/> </javac> <javac encoding="${build.encoding}" srcdir="${test.src.dir}" includes="org/apache/hadoop/**/*.java" destdir="${test.build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="test.classpath"/> </javac> <javac encoding="${build.encoding}" srcdir="${test.src.dir}/testjar" includes="*.java" destdir="${test.build.testjar}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="test.classpath"/> </javac> <delete file="${test.build.testjar}/testjob.jar"/> <jar jarfile="${test.build.testjar}/testjob.jar" basedir="${test.build.testjar}"> </jar> <javac encoding="${build.encoding}" srcdir="${test.src.dir}/testshell" includes="*.java" 
destdir="${test.build.testshell}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}"/> <classpath refid="test.classpath"/> </javac> <delete file="${test.build.testshell}/testshell.jar"/> <jar jarfile="${test.build.testshell}/testshell.jar" basedir="${test.build.testshell}"> </jar> <delete dir="${test.cache.data}"/> <mkdir dir="${test.cache.data}"/> <delete dir="${test.debug.data}"/> <mkdir dir="${test.debug.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/testscript.txt" todir="${test.debug.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.txt" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.jar" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.zip" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.tar" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.tgz" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.tar.gz" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/hdfs/hadoop-14-dfs-dir.tgz" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/testConf.xml" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data15bytes" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data30bytes" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data60bytes" todir="${test.cache.data}"/> <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data120bytes" todir="${test.cache.data}"/> </target> <!-- 
================================================================== --> <!-- Make hadoop-test.jar --> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> <target name="jar-test" depends="compile-core-test" description="Make hadoop-test.jar"> <jar jarfile="${build.dir}/${final.name}-test.jar" basedir="${test.build.classes}"> <manifest> <attribute name="Main-Class" value="org/apache/hadoop/test/AllTestDriver"/> <section name="org/apache/hadoop"> <attribute name="Implementation-Title" value="Hadoop"/> <attribute name="Implementation-Version" value="${version}"/> <attribute name="Implementation-Vendor" value="Yahoo!"/> </section> </manifest> </jar> </target> <!-- ================================================================== --> <!-- Run unit tests --> <!-- ================================================================== --> <target name="test-core" depends="jar-test" description="Run core unit tests"> <delete dir="${test.build.data}"/> <mkdir dir="${test.build.data}"/> <delete dir="${test.log.dir}"/> <mkdir dir="${test.log.dir}"/> <copy file="${test.src.dir}/hadoop-policy.xml" todir="${test.build.extraconf}" /> <junit showoutput="${test.output}" printsummary="${test.junit.printsummary}" haltonfailure="${test.junit.haltonfailure}" fork="yes" forkmode="${test.junit.fork.mode}" maxmemory="${test.junit.maxmemory}" dir="${basedir}" timeout="${test.timeout}" errorProperty="tests.failed" failureProperty="tests.failed"> <sysproperty key="test.build.data" value="${test.build.data}"/> <sysproperty key="test.cache.data" value="${test.cache.data}"/> <sysproperty key="test.debug.data" value="${test.debug.data}"/> <sysproperty key="hadoop.log.dir" value="${test.log.dir}"/> <sysproperty key="test.src.dir" value="${test.src.dir}"/> + <sysproperty key="taskcontroller-path" value="${taskcontroller-path}"/> + <sysproperty key="taskcontroller-user" 
value="${taskcontroller-user}"/> <sysproperty key="test.build.extraconf" value="${test.build.extraconf}" /> <sysproperty key="hadoop.policy.file" value="hadoop-policy.xml"/> <sysproperty key="java.library.path" value="${build.native}/lib:${lib.dir}/native/${build.platform}"/> <sysproperty key="install.c++.examples" value="${install.c++.examples}"/> <!-- set io.compression.codec.lzo.class in the child jvm only if it is set --> <syspropertyset dynamic="no"> <propertyref name="io.compression.codec.lzo.class"/> </syspropertyset> <!-- set compile.c++ in the child jvm only if it is set --> <syspropertyset dynamic="no"> <propertyref name="compile.c++"/> </syspropertyset> <classpath refid="${test.classpath.id}"/> <formatter type="${test.junit.output.format}" /> <batchtest todir="${test.build.dir}" unless="testcase"> <fileset dir="${test.src.dir}" includes="**/${test.include}.java" excludes="**/${test.exclude}.java" /> </batchtest> <batchtest todir="${test.build.dir}" if="testcase"> <fileset dir="${test.src.dir}" includes="**/${testcase}.java"/> </batchtest> </junit> <fail if="tests.failed">Tests failed!</fail> </target> <target name="test-contrib" depends="compile, compile-core-test" description="Run contrib unit tests"> <subant target="test"> <property name="version" value="${version}"/> <fileset file="${contrib.dir}/build.xml"/> </subant> </target> <target name="test" depends="test-core, test-contrib" description="Run core, contrib unit tests"> </target> <!-- Run all unit tests, not just Test*, and use non-test configuration. 
--> <target name="test-cluster" description="Run all unit tests, not just Test*, and use non-test configuration."> <antcall target="test"> <param name="test.include" value="*"/> <param name="test.classpath.id" value="test.cluster.classpath"/> </antcall> </target> <target name="nightly" depends="test, tar"> </target> <!-- ================================================================== --> <!-- Run optional third-party tool targets --> <!-- ================================================================== --> <target name="checkstyle" depends="ivy-retrieve-checkstyle,check-for-checkstyle" if="checkstyle.present" description="Run optional third-party tool targets"> <taskdef resource="checkstyletask.properties"> <classpath refid="checkstyle-classpath"/> </taskdef> <mkdir dir="${test.build.dir}"/> <checkstyle config="${test.src.dir}/checkstyle.xml" failOnViolation="false"> <fileset dir="${core.src.dir}" includes="**/*.java" excludes="**/generated/**"/> <fileset dir="${mapred.src.dir}" includes="**/*.java" excludes="**/generated/**"/> <fileset dir="${hdfs.src.dir}" includes="**/*.java" excludes="**/generated/**"/> <formatter type="xml" toFile="${test.build.dir}/checkstyle-errors.xml"/> </checkstyle> <xslt style="${test.src.dir}/checkstyle-noframes-sorted.xsl" in="${test.build.dir}/checkstyle-errors.xml" out="${test.build.dir}/checkstyle-errors.html"/> </target> <target name="check-for-checkstyle"> <available property="checkstyle.present" resource="checkstyletask.properties"> <classpath refid="checkstyle-classpath"/> </available> </target> <property name="findbugs.home" value=""/> <target name="findbugs" depends="check-for-findbugs, tar" if="findbugs.present" description="Run findbugs if present"> <property name="findbugs.out.dir" value="${test.build.dir}/findbugs"/> <property name="findbugs.exclude.file" value="${test.src.dir}/findbugsExcludeFile.xml"/> <property name="findbugs.report.htmlfile" value="${findbugs.out.dir}/hadoop-findbugs-report.html"/> <property 
name="findbugs.report.xmlfile" value="${findbugs.out.dir}/hadoop-findbugs-report.xml"/> <taskdef name="findbugs" classname="edu.umd.cs.findbugs.anttask.FindBugsTask" classpath="${findbugs.home}/lib/findbugs-ant.jar" /> <mkdir dir="${findbugs.out.dir}"/> <findbugs home="${findbugs.home}" output="xml:withMessages" outputFile="${findbugs.report.xmlfile}" effort="max" excludeFilter="${findbugs.exclude.file}" jvmargs="-Xmx512M"> <auxClasspath> <fileset dir="${lib.dir}"> <include name="**/*.jar"/> </fileset> <fileset dir="${build.ivy.lib.dir}/${ant.project.name}/common"> <include name="**/*.jar"/> </fileset> </auxClasspath> <sourcePath path="${core.src.dir}"/> <sourcePath path="${mapred.src.dir}"/> <sourcePath path="${hdfs.src.dir}"/> <sourcePath path="${examples.dir}" /> <sourcePath path="${tools.src}" /> <sourcePath path="${basedir}/src/contrib/streaming/src/java" /> <class location="${basedir}/build/${final.name}-core.jar" /> <class location="${basedir}/build/${final.name}-examples.jar" /> <class location="${basedir}/build/${final.name}-tools.jar" /> <class location="${basedir}/build/contrib/streaming/${final.name}-streaming.jar" /> </findbugs> <xslt style="${findbugs.home}/src/xsl/default.xsl" in="${findbugs.report.xmlfile}" out="${findbugs.report.htmlfile}"/> </target> <target name="check-for-findbugs"> <available property="findbugs.present" file="${findbugs.home}/lib/findbugs.jar" /> </target> <!-- ================================================================== --> <!-- Documentation --> <!-- ================================================================== --> <target name="docs" depends="forrest.check" description="Generate forrest-based documentation. To use, specify -Dforrest.home=&lt;base of Apache Forrest installation&gt; on the command line." 
if="forrest.home"> <exec dir="${docs.src}" executable="${forrest.home}/bin/forrest" failonerror="true"> <env key="JAVA_HOME" value="${java5.home}"/> </exec> <copy todir="${build.docs}"> <fileset dir="${docs.src}/build/site/" /> </copy> <copy file="${docs.src}/releasenotes.html" todir="${build.docs}"/> <style basedir="${core.src.dir}" destdir="${build.docs}" includes="core-default.xml" style="conf/configuration.xsl"/> <style basedir="${hdfs.src.dir}" destdir="${build.docs}" includes="hdfs-default.xml" style="conf/configuration.xsl"/> <style basedir="${mapred.src.dir}" destdir="${build.docs}" includes="mapred-default.xml" style="conf/configuration.xsl"/> <antcall target="changes-to-html"/> <antcall target="cn-docs"/> </target> <target name="cn-docs" depends="forrest.check, init" description="Generate forrest-based Chinese documentation. To use, specify -Dforrest.home=&lt;base of Apache Forrest installation&gt; on the command line." if="forrest.home"> <exec dir="${src.docs.cn}" executable="${forrest.home}/bin/forrest" failonerror="true"> <env key="LANG" value="en_US.utf8"/> <env key="JAVA_HOME" value="${java5.home}"/> </exec> <copy todir="${build.docs.cn}"> <fileset dir="${src.docs.cn}/build/site/" /> </copy> <style basedir="${core.src.dir}" destdir="${build.docs.cn}" includes="core-default.xml" style="conf/configuration.xsl"/> <style basedir="${hdfs.src.dir}" destdir="${build.docs.cn}" includes="hdfs-default.xml" style="conf/configuration.xsl"/> <style basedir="${mapred.src.dir}" destdir="${build.docs.cn}" includes="mapred-default.xml" style="conf/configuration.xsl"/> <antcall target="changes-to-html"/> </target> <target name="forrest.check" unless="forrest.home" depends="java5.check"> <fail message="'forrest.home' is not defined. Please pass -Dforrest.home=&lt;base of Apache Forrest installation&gt; to Ant on the command-line." /> </target> <target name="java5.check" unless="java5.home"> <fail message="'java5.home' is not defined. Forrest requires Java 5. 
Please pass -Djava5.home=&lt;base of Java 5 distribution&gt; to Ant on the command-line." /> </target> <target name="javadoc-dev" description="Generate javadoc for hadoop developers"> <mkdir dir="${build.javadoc.dev}"/> <javadoc overview="${core.src.dir}/overview.html" packagenames="org.apache.hadoop.*" destdir="${build.javadoc.dev}" author="true" version="true" use="true" windowtitle="${Name} ${version} API" doctitle="${Name} ${version} Developer API" bottom="This release is based on the Yahoo! Distribution of Hadoop, powering the largest Hadoop clusters in the Universe!&lt;br>Copyright &amp;copy; ${year} The Apache Software Foundation." > <packageset dir="${core.src.dir}"/> <packageset dir="${mapred.src.dir}"/> <packageset dir="${hdfs.src.dir}"/> <packageset dir="${examples.dir}"/> <packageset dir="src/contrib/streaming/src/java"/> <packageset dir="src/contrib/data_join/src/java"/> <packageset dir="src/contrib/index/src/java"/> <link href="${javadoc.link.java}"/> <classpath > <path refid="classpath" /> <fileset dir="src/contrib/"> <include name="*/lib/*.jar" /> </fileset> <pathelement path="${java.class.path}"/> </classpath> <group title="Core" packages="org.apache.*"/> <group title="Examples" packages="org.apache.hadoop.examples*"/> <group title="contrib: Streaming" packages="org.apache.hadoop.streaming*"/> <group title="contrib: DataJoin" packages="org.apache.hadoop.contrib.utils.join*"/> <group title="contrib: Index" packages="org.apache.hadoop.contrib.index*"/> </javadoc> </target> <target name="javadoc" depends="compile, ivy-retrieve-javadoc" description="Generate javadoc"> <mkdir dir="${build.javadoc}"/> <javadoc overview="${core.src.dir}/overview.html" packagenames="org.apache.hadoop.*" destdir="${build.javadoc}" author="true" version="true" use="true" windowtitle="${Name} ${version} API" doctitle="${Name} ${version} API" bottom="This release is based on the Yahoo! 
Distribution of Hadoop, powering the largest Hadoop clusters in the Universe!&lt;br>Copyright &amp;copy; ${year} The Apache Software Foundation." > <packageset dir="${core.src.dir}"/> <packageset dir="${mapred.src.dir}"/> <packageset dir="${examples.dir}"/> <packageset dir="src/contrib/streaming/src/java"/> <packageset dir="src/contrib/data_join/src/java"/> <packageset dir="src/contrib/index/src/java"/> <packageset dir="src/contrib/failmon/src/java/"/> <link href="${javadoc.link.java}"/> <classpath > <path refid="classpath" /> <fileset dir="src/contrib/"> <include name="*/lib/*.jar" /> </fileset> <path refid="javadoc-classpath"/> <pathelement path="${java.class.path}"/> <pathelement location="${build.tools}"/> </classpath> <group title="Core" packages="org.apache.*"/> <group title="Examples" packages="org.apache.hadoop.examples*"/> <group title="contrib: Streaming" packages="org.apache.hadoop.streaming*"/> <group title="contrib: DataJoin" packages="org.apache.hadoop.contrib.utils.join*"/> <group title="contrib: Index" packages="org.apache.hadoop.contrib.index*"/> <group title="contrib: FailMon" packages="org.apache.hadoop.contrib.failmon*"/> </javadoc> </target> <target name="api-xml" depends="ivy-retrieve-jdiff,javadoc,write-null"> <javadoc> <doclet name="jdiff.JDiff" path="${jdiff.jar}:${xerces.jar}"> <param name="-apidir" value="${jdiff.xml.dir}"/> <param name="-apiname" value="hadoop ${version}"/> </doclet> <packageset dir="src/core"/> <packageset dir="src/mapred"/> <packageset dir="src/tools"/> <classpath > <path refid="classpath" /> <path refid="jdiff-classpath" /> <pathelement path="${java.class.path}"/> </classpath> </javadoc> </target> <target name="write-null"> <exec executable="touch"> <arg value="${jdiff.home}/Null.java"/> </exec> </target> <target name="api-report" depends="ivy-retrieve-jdiff,api-xml"> <mkdir dir="${jdiff.build.dir}"/> <javadoc sourcepath="src/core,src/hdfs,src,mapred,src/tools" destdir="${jdiff.build.dir}" 
sourceFiles="${jdiff.home}/Null.java"> <doclet name="jdiff.JDiff" path="${jdiff.jar}:${xerces.jar}"> <param name="-oldapi" value="hadoop ${jdiff.stable}"/> <param name="-newapi" value="hadoop ${version}"/> <param name="-oldapidir" value="${jdiff.xml.dir}"/> <param name="-newapidir" value="${jdiff.xml.dir}"/> <param name="-javadocold" value="${jdiff.stable.javadoc}"/> <param name="-javadocnew" value="../../api/"/> <param name="-stats"/> </doclet> <classpath > <path refid="classpath" /> <path refid="jdiff-classpath"/> <pathelement path="${java.class.path}"/> </classpath> </javadoc> </target> <target name="changes-to-html" description="Convert CHANGES.txt into an html file"> <mkdir dir="${build.docs}"/> <exec executable="perl" input="CHANGES.txt" output="${build.docs}/changes.html" failonerror="true"> <arg value="${changes.src}/changes2html.pl"/> </exec> <copy todir="${build.docs}"> <fileset dir="${changes.src}" includes="*.css"/> </copy> </target> <!-- ================================================================== --> <!-- D I S T R I B U T I O N --> <!-- ================================================================== --> <!-- --> <!-- ================================================================== --> <target name="package" depends="compile, jar, javadoc, docs, cn-docs, api-report, examples, tools-jar, jar-test, ant-tasks, package-librecordio" description="Build distribution"> <mkdir dir="${dist.dir}"/> <mkdir dir="${dist.dir}/lib"/> <mkdir dir="${dist.dir}/contrib"/> <mkdir dir="${dist.dir}/bin"/> <mkdir dir="${dist.dir}/docs"/> <mkdir dir="${dist.dir}/docs/api"/> <mkdir dir="${dist.dir}/docs/jdiff"/> <copy todir="${dist.dir}/lib" includeEmptyDirs="false" flatten="true"> <fileset dir="${common.ivy.lib.dir}"/> </copy> <copy todir="${dist.dir}/lib" includeEmptyDirs="false"> <fileset dir="lib"> <exclude name="**/native/**"/> </fileset> </copy> <exec dir="${dist.dir}" executable="sh" failonerror="true"> <env key="BASE_NATIVE_LIB_DIR" 
value="${lib.dir}/native"/> <env key="BUILD_NATIVE_DIR" value="${build.dir}/native"/> <env key="DIST_LIB_DIR" value="${dist.dir}/lib/native"/> <arg line="${native.src.dir}/packageNativeHadoop.sh"/> </exec> <subant target="package"> <!--Pass down the version in case its needed again and the target distribution directory so contribs know where to install to.--> <property name="version" value="${version}"/> <property name="dist.dir" value="${dist.dir}"/> <fileset file="${contrib.dir}/build.xml"/> </subant> <copy todir="${dist.dir}/webapps"> <fileset dir="${build.webapps}"/> </copy> <copy todir="${dist.dir}"> <fileset file="${build.dir}/${final.name}-*.jar"/> </copy> <copy todir="${dist.dir}/bin"> <fileset dir="bin"/> </copy> <copy todir="${dist.dir}/conf"> <fileset dir="${conf.dir}" excludes="**/*.template"/> </copy> <copy todir="${dist.dir}/docs"> <fileset dir="${build.docs}"/> </copy> <copy file="ivy.xml" tofile="${dist.dir}/ivy.xml"/> <copy todir="${dist.dir}/ivy"> <fileset dir="ivy"/> </copy> <copy todir="${dist.dir}"> <fileset dir="."> <include name="*.txt" /> </fileset> </copy> <copy todir="${dist.dir}/src" includeEmptyDirs="true"> <fileset dir="src" excludes="**/*.template **/docs/build/**/*"/> </copy> <copy todir="${dist.dir}/c++" includeEmptyDirs="false"> <fileset dir="${build.dir}/c++"/> </copy> <copy todir="${dist.dir}/" file="build.xml"/> <chmod perm="ugo+x" type="file" parallel="false"> <fileset dir="${dist.dir}/bin"/> <fileset dir="${dist.dir}/src/contrib/"> <include name="*/bin/*" /> </fileset> <fileset dir="${dist.dir}/src/contrib/ec2/bin/image"/> </chmod> <chmod perm="ugo+x" type="file"> <fileset dir="${dist.dir}/src/c++/pipes/debug"/> </chmod> </target> <!-- ================================================================== --> <!-- Make release tarball --> <!-- ================================================================== --> <target name="tar" depends="package" description="Make release tarball"> <macro_tar 
param.destfile="${build.dir}/${final.name}.tar.gz"> <param.listofitems> <tarfileset dir="${build.dir}" mode="664"> <exclude name="${final.name}/bin/*" /> <exclude name="${final.name}/contrib/*/bin/*" /> <exclude name="${final.name}/src/contrib/ec2/bin/*" /> <exclude name="${final.name}/src/contrib/ec2/bin/image/*" /> <include name="${final.name}/**" /> </tarfileset> <tarfileset dir="${build.dir}" mode="755"> <include name="${final.name}/bin/*" /> <include name="${final.name}/contrib/*/bin/*" /> <include name="${final.name}/src/contrib/ec2/bin/*" /> <include name="${final.name}/src/contrib/ec2/bin/image/*" /> </tarfileset> </param.listofitems> </macro_tar> </target> <target name="bin-package" depends="compile, jar, examples, tools-jar, jar-test, ant-tasks, package-librecordio" description="assembles artifacts for binary target"> <mkdir dir="${dist.dir}"/> <mkdir dir="${dist.dir}/lib"/> <mkdir dir="${dist.dir}/contrib"/> <mkdir dir="${dist.dir}/bin"/> <copy todir="${dist.dir}/lib" includeEmptyDirs="false" flatten="true"> <fileset dir="${common.ivy.lib.dir}"/> </copy> <copy todir="${dist.dir}/lib" includeEmptyDirs="false"> <fileset dir="lib"> <exclude name="**/native/**"/> </fileset> </copy> <exec dir="${dist.dir}" executable="sh" failonerror="true"> <env key="BASE_NATIVE_LIB_DIR" value="${lib.dir}/native"/> <env key="BUILD_NATIVE_DIR" value="${build.dir}/native"/> <env key="DIST_LIB_DIR" value="${dist.dir}/lib/native"/> <arg line="${native.src.dir}/packageNativeHadoop.sh"/> </exec> <subant target="package"> <!--Pass down the version in case its needed again and the target distribution directory so contribs know where to install to.--> <property name="version" value="${version}"/> <property name="dist.dir" value="${dist.dir}"/> <fileset file="${contrib.dir}/build.xml"/> </subant> <copy todir="${dist.dir}/webapps"> <fileset dir="${build.webapps}"/> </copy> <copy todir="${dist.dir}"> <fileset file="${build.dir}/${final.name}-*.jar"/> </copy> <copy 
todir="${dist.dir}/bin"> <fileset dir="bin"/> </copy> <copy todir="${dist.dir}/conf"> <fileset dir="${conf.dir}" excludes="**/*.template"/> </copy> <copy file="ivy.xml" tofile="${dist.dir}/ivy.xml"/> <copy todir="${dist.dir}/ivy"> <fileset dir="ivy"/> </copy> <copy todir="${dist.dir}"> <fileset dir="."> <include name="*.txt" /> </fileset> </copy> <copy todir="${dist.dir}/c++" includeEmptyDirs="false"> <fileset dir="${build.dir}/c++"/> </copy> <copy todir="${dist.dir}/" file="build.xml"/> <chmod perm="ugo+x" type="file" parallel="false"> <fileset dir="${dist.dir}/bin"/> </chmod> </target> <target name="binary" depends="bin-package" description="Make tarball without source and documentation"> <macro_tar param.destfile="${build.dir}/${final.name}-bin.tar.gz"> <param.listofitems> <tarfileset dir="${build.dir}" mode="664"> <exclude name="${final.name}/bin/*" /> <exclude name="${final.name}/src/**" /> <exclude name="${final.name}/docs/**" /> <include name="${final.name}/**" /> diff --git a/src/contrib/build-contrib.xml b/src/contrib/build-contrib.xml index ab69772..2f27f70 100644 --- a/src/contrib/build-contrib.xml +++ b/src/contrib/build-contrib.xml @@ -1,299 +1,301 @@ <?xml version="1.0"?> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
--> <!-- Imported by contrib/*/build.xml files to share generic targets. --> <project name="hadoopbuildcontrib" xmlns:ivy="antlib:org.apache.ivy.ant"> <property name="name" value="${ant.project.name}"/> <property name="root" value="${basedir}"/> <!-- Load all the default properties, and any the user wants --> <!-- to contribute (without having to type -D or edit this file --> <property file="${user.home}/${name}.build.properties" /> <property file="${root}/build.properties" /> <property name="hadoop.root" location="${root}/../../../"/> <property name="src.dir" location="${root}/src/java"/> <property name="src.test" location="${root}/src/test"/> <property name="src.examples" location="${root}/src/examples"/> <available file="${src.examples}" type="dir" property="examples.available"/> <available file="${src.test}" type="dir" property="test.available"/> <property name="conf.dir" location="${hadoop.root}/conf"/> <property name="test.junit.output.format" value="plain"/> <property name="test.output" value="no"/> <property name="test.timeout" value="900000"/> <property name="build.dir" location="${hadoop.root}/build/contrib/${name}"/> <property name="build.classes" location="${build.dir}/classes"/> <property name="build.test" location="${build.dir}/test"/> <property name="build.examples" location="${build.dir}/examples"/> <property name="hadoop.log.dir" location="${build.dir}/test/logs"/> <!-- all jars together --> <property name="javac.deprecation" value="off"/> <property name="javac.debug" value="on"/> <property name="build.ivy.lib.dir" value="${hadoop.root}/build/ivy/lib"/> <property name="javadoc.link" value="http://java.sun.com/j2se/1.4/docs/api/"/> <property name="build.encoding" value="ISO-8859-1"/> <fileset id="lib.jars" dir="${root}" includes="lib/*.jar"/> <!-- IVY properties set here --> <property name="ivy.dir" location="ivy" /> <property name="ivysettings.xml" location="${hadoop.root}/ivy/ivysettings.xml"/> <loadproperties 
srcfile="${ivy.dir}/libraries.properties"/> <loadproperties srcfile="${hadoop.root}/ivy/libraries.properties"/> <property name="ivy.jar" location="${hadoop.root}/ivy/ivy-${ivy.version}.jar"/> <property name="ivy_repo_url" value="http://repo2.maven.org/maven2/org/apache/ivy/ivy/${ivy.version}/ivy-${ivy.version}.jar" /> <property name="build.dir" location="build" /> <property name="build.ivy.dir" location="${build.dir}/ivy" /> <property name="build.ivy.lib.dir" location="${build.ivy.dir}/lib" /> <property name="build.ivy.report.dir" location="${build.ivy.dir}/report" /> <property name="common.ivy.lib.dir" location="${build.ivy.lib.dir}/${ant.project.name}/common"/> <!--this is the naming policy for artifacts we want pulled down--> <property name="ivy.artifact.retrieve.pattern" value="${ant.project.name}/[conf]/[artifact]-[revision].[ext]"/> <!-- the normal classpath --> <path id="contrib-classpath"> <pathelement location="${build.classes}"/> <fileset refid="lib.jars"/> <pathelement location="${hadoop.root}/build/classes"/> <fileset dir="${hadoop.root}/lib"> <include name="**/*.jar" /> </fileset> <path refid="${ant.project.name}.common-classpath"/> </path> <!-- the unit test classpath --> <path id="test.classpath"> <pathelement location="${build.test}" /> <pathelement location="${hadoop.root}/build/test/classes"/> <pathelement location="${hadoop.root}/src/contrib/test"/> <pathelement location="${conf.dir}"/> <pathelement location="${hadoop.root}/build"/> <pathelement location="${build.examples}"/> <pathelement location="${hadoop.root}/build/examples"/> <path refid="contrib-classpath"/> </path> <!-- to be overridden by sub-projects --> <target name="check-contrib"/> <target name="init-contrib"/> <!-- ====================================================== --> <!-- Stuff needed by all targets --> <!-- ====================================================== --> <target name="init" depends="check-contrib" unless="skip.contrib"> <echo message="contrib: ${name}"/> <mkdir 
dir="${build.dir}"/> <mkdir dir="${build.classes}"/> <mkdir dir="${build.test}"/> <mkdir dir="${build.examples}"/> <mkdir dir="${hadoop.log.dir}"/> <antcall target="init-contrib"/> </target> <!-- ====================================================== --> <!-- Compile a Hadoop contrib's files --> <!-- ====================================================== --> <target name="compile" depends="init, ivy-retrieve-common" unless="skip.contrib"> <echo message="contrib: ${name}"/> <javac encoding="${build.encoding}" srcdir="${src.dir}" includes="**/*.java" destdir="${build.classes}" debug="${javac.debug}" deprecation="${javac.deprecation}"> <classpath refid="contrib-classpath"/> </javac> </target> <!-- ======================================================= --> <!-- Compile a Hadoop contrib's example files (if available) --> <!-- ======================================================= --> <target name="compile-examples" depends="compile" if="examples.available"> <echo message="contrib: ${name}"/> <javac encoding="${build.encoding}" srcdir="${src.examples}" includes="**/*.java" destdir="${build.examples}" debug="${javac.debug}"> <classpath refid="contrib-classpath"/> </javac> </target> <!-- ================================================================== --> <!-- Compile test code --> <!-- ================================================================== --> <target name="compile-test" depends="compile-examples" if="test.available"> <echo message="contrib: ${name}"/> <javac encoding="${build.encoding}" srcdir="${src.test}" includes="**/*.java" destdir="${build.test}" debug="${javac.debug}"> <classpath refid="test.classpath"/> </javac> </target> <!-- ====================================================== --> <!-- Make a Hadoop contrib's jar --> <!-- ====================================================== --> <target name="jar" depends="compile" unless="skip.contrib"> <echo message="contrib: ${name}"/> <jar jarfile="${build.dir}/hadoop-${version}-${name}.jar" 
basedir="${build.classes}" /> </target> <!-- ====================================================== --> <!-- Make a Hadoop contrib's examples jar --> <!-- ====================================================== --> <target name="jar-examples" depends="compile-examples" if="examples.available" unless="skip.contrib"> <echo message="contrib: ${name}"/> <jar jarfile="${build.dir}/hadoop-${version}-${name}-examples.jar"> <fileset dir="${build.classes}"> </fileset> <fileset dir="${build.examples}"> </fileset> </jar> </target> <!-- ====================================================== --> <!-- Package a Hadoop contrib --> <!-- ====================================================== --> <target name="package" depends="jar, jar-examples" unless="skip.contrib"> <mkdir dir="${dist.dir}/contrib/${name}"/> <copy todir="${dist.dir}/contrib/${name}" includeEmptyDirs="false" flatten="true"> <fileset dir="${build.dir}"> <include name="hadoop-${version}-${name}.jar" /> </fileset> </copy> </target> <!-- ================================================================== --> <!-- Run unit tests --> <!-- ================================================================== --> <target name="test" depends="compile-test, compile" if="test.available"> <echo message="contrib: ${name}"/> <delete dir="${hadoop.log.dir}"/> <mkdir dir="${hadoop.log.dir}"/> <junit printsummary="yes" showoutput="${test.output}" haltonfailure="no" fork="yes" maxmemory="256m" errorProperty="tests.failed" failureProperty="tests.failed" timeout="${test.timeout}"> <sysproperty key="test.build.data" value="${build.test}/data"/> <sysproperty key="build.test" value="${build.test}"/> <sysproperty key="contrib.name" value="${name}"/> <!-- requires fork=yes for: relative File paths to use the specified user.dir classpath to use build/contrib/*.jar --> <sysproperty key="user.dir" value="${build.test}/data"/> <sysproperty key="fs.default.name" value="${fs.default.name}"/> <sysproperty key="hadoop.test.localoutputfile" 
value="${hadoop.test.localoutputfile}"/> <sysproperty key="hadoop.log.dir" value="${hadoop.log.dir}"/> + <sysproperty key="taskcontroller-path" value="${taskcontroller-path}"/> + <sysproperty key="taskcontroller-user" value="${taskcontroller-user}"/> <classpath refid="test.classpath"/> <formatter type="${test.junit.output.format}" /> <batchtest todir="${build.test}" unless="testcase"> <fileset dir="${src.test}" includes="**/Test*.java" excludes="**/${test.exclude}.java" /> </batchtest> <batchtest todir="${build.test}" if="testcase"> <fileset dir="${src.test}" includes="**/${testcase}.java"/> </batchtest> </junit> <fail if="tests.failed">Tests failed!</fail> </target> <!-- ================================================================== --> <!-- Clean. Delete the build files, and their directories --> <!-- ================================================================== --> <target name="clean"> <echo message="contrib: ${name}"/> <delete dir="${build.dir}"/> </target> <target name="ivy-probe-antlib" > <condition property="ivy.found"> <typefound uri="antlib:org.apache.ivy.ant" name="cleancache"/> </condition> </target> <target name="ivy-download" description="To download ivy " unless="offline"> <get src="${ivy_repo_url}" dest="${ivy.jar}" usetimestamp="true"/> </target> <target name="ivy-init-antlib" depends="ivy-download,ivy-probe-antlib" unless="ivy.found"> <typedef uri="antlib:org.apache.ivy.ant" onerror="fail" loaderRef="ivyLoader"> <classpath> <pathelement location="${ivy.jar}"/> </classpath> </typedef> <fail > <condition > <not> <typefound uri="antlib:org.apache.ivy.ant" name="cleancache"/> </not> </condition> You need Apache Ivy 2.0 or later from http://ant.apache.org/ It could not be loaded from ${ivy_repo_url} </fail> </target> <target name="ivy-init" depends="ivy-init-antlib"> <ivy:configure settingsid="${ant.project.name}.ivy.settings" file="${ivysettings.xml}"/> </target> <target name="ivy-resolve-common" depends="ivy-init"> <ivy:resolve 
settingsRef="${ant.project.name}.ivy.settings" conf="common" /> </target> <target name="ivy-retrieve-common" depends="ivy-resolve-common" description="Retrieve Ivy-managed artifacts for the compile/test configurations"> <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings" pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}" sync="true" /> <ivy:cachepath pathid="${ant.project.name}.common-classpath" conf="common" /> </target> </project> diff --git a/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java b/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java new file mode 100644 index 0000000..8e6842e --- /dev/null +++ b/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingAsDifferentUser.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.streaming; + +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.ClusterWithLinuxTaskController; +import org.apache.hadoop.mapred.JobConf; + +/** + * Test Streaming with LinuxTaskController running the jobs as a user different + * from the user running the cluster. See {@link ClusterWithLinuxTaskController} + */ +public class TestStreamingAsDifferentUser extends + ClusterWithLinuxTaskController { + + private Path inputPath = new Path("input"); + private Path outputPath = new Path("output"); + private String input = "roses.are.red\nviolets.are.blue\nbunnies.are.pink\n"; + private String map = + StreamUtil.makeJavaCommand(TrApp.class, new String[] { ".", "\\n" }); + private String reduce = + StreamUtil.makeJavaCommand(UniqApp.class, new String[] { "R" }); + + public void testStreaming() + throws Exception { + if (!shouldRun()) { + return; + } + startCluster(); + JobConf myConf = getClusterConf(); + FileSystem inFs = inputPath.getFileSystem(myConf); + FileSystem outFs = outputPath.getFileSystem(myConf); + outFs.delete(outputPath, true); + if (!inFs.mkdirs(inputPath)) { + throw new IOException("Mkdirs failed to create " + inFs.toString()); + } + DataOutputStream file = inFs.create(new Path(inputPath, "part-0")); + file.writeBytes(input); + file.close(); + String[] args = + new String[] { "-input", inputPath.makeQualified(inFs).toString(), + "-output", outputPath.makeQualified(outFs).toString(), "-mapper", + map, "-reducer", reduce, "-jobconf", + "keep.failed.task.files=true", "-jobconf", + "stream.tmpdir=" + System.getProperty("test.build.data", "/tmp") }; + StreamJob streamJob = new StreamJob(args, true); + streamJob.setConf(myConf); + streamJob.go(); + assertOwnerShip(outputPath); + } +} diff --git a/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java 
b/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java index 1f51e56..2b96aaf 100644 --- a/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java +++ b/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java @@ -1,116 +1,113 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.streaming; import java.io.*; import org.apache.hadoop.streaming.Environment; /** A minimal Java implementation of /usr/bin/tr. Used to test the usage of external applications without adding platform-specific dependencies. 
*/ public class TrApp { public TrApp(char find, char replace) { this.find = find; this.replace = replace; } void testParentJobConfToEnvVars() throws IOException { env = new Environment(); // test that some JobConf properties are exposed as expected // Note the dots translated to underscore: // property names have been escaped in PipeMapRed.safeEnvVarName() - expect("mapred_job_tracker", "local"); - //expect("mapred_local_dir", "build/test/mapred/local"); expectDefined("mapred_local_dir"); expect("mapred_output_format_class", "org.apache.hadoop.mapred.TextOutputFormat"); expect("mapred_output_key_class", "org.apache.hadoop.io.Text"); expect("mapred_output_value_class", "org.apache.hadoop.io.Text"); expect("mapred_task_is_map", "true"); expectDefined("mapred_task_id"); expectDefined("map_input_file"); - expect("map_input_start", "0"); expectDefined("map_input_length"); expectDefined("io_sort_factor"); // the FileSplit context properties are not available in local hadoop.. // so can't check them in this test. } // this runs in a subprocess; won't use JUnit's assertTrue() void expect(String evName, String evVal) throws IOException { String got = env.getProperty(evName); if (!evVal.equals(got)) { String msg = "FAIL evName=" + evName + " got=" + got + " expect=" + evVal; throw new IOException(msg); } } void expectDefined(String evName) throws IOException { String got = env.getProperty(evName); if (got == null) { String msg = "FAIL evName=" + evName + " is undefined. 
Expect defined."; throw new IOException(msg); } } public void go() throws IOException { testParentJobConfToEnvVars(); BufferedReader in = new BufferedReader(new InputStreamReader(System.in)); String line; while ((line = in.readLine()) != null) { String out = line.replace(find, replace); System.out.println(out); System.err.println("reporter:counter:UserCounters,InputLines,1"); } } public static void main(String[] args) throws IOException { args[0] = CUnescape(args[0]); args[1] = CUnescape(args[1]); TrApp app = new TrApp(args[0].charAt(0), args[1].charAt(0)); app.go(); } public static String CUnescape(String s) { if (s.equals("\\n")) { return "\n"; } else { return s; } } char find; char replace; Environment env; } diff --git a/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java b/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java index 77c6fba..4da98a9 100644 --- a/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java +++ b/src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java @@ -1,599 +1,602 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.io.PrintWriter; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Map.Entry; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.mapred.JvmManager.JvmEnv; import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Shell.ShellCommandExecutor; /** * A {@link TaskController} that runs the task JVMs as the user * who submits the job. * * This class executes a setuid executable to implement methods * of the {@link TaskController}, including launching the task * JVM and killing it when needed, and also initializing and * finalizing the task environment. * <p> The setuid executable is launched using the command line:</p> * <p>task-controller user-name command command-args, where</p> * <p>user-name is the name of the owner who submits the job</p> * <p>command is one of the cardinal value of the * {@link LinuxTaskController.TaskCommands} enumeration</p> * <p>command-args depends on the command being launched.</p> * * In addition to running and killing tasks, the class also * sets up appropriate access for the directories and files * that will be used by the tasks. */ class LinuxTaskController extends TaskController { private static final Log LOG = LogFactory.getLog(LinuxTaskController.class); // Name of the executable script that will contain the child // JVM command line. See writeCommand for details. private static final String COMMAND_FILE = "taskjvm.sh"; // Path to the setuid executable. private static String taskControllerExe; static { // the task-controller is expected to be under the $HADOOP_HOME/bin // directory. 
File hadoopBin = new File(System.getenv("HADOOP_HOME"), "bin"); taskControllerExe = new File(hadoopBin, "task-controller").getAbsolutePath(); } // The list of directory paths specified in the // variable mapred.local.dir. This is used to determine // which among the list of directories is picked up // for storing data for a particular task. private String[] mapredLocalDirs; // permissions to set on files and directories created. // When localized files are handled securely, this string // will change to something more restrictive. Until then, // it opens up the permissions for all, so that the tasktracker // and job owners can access files together. private static final String FILE_PERMISSIONS = "ugo+rwx"; // permissions to set on components of the path leading to // localized files and directories. Read and execute permissions // are required for different users to be able to access the // files. private static final String PATH_PERMISSIONS = "go+rx"; public LinuxTaskController() { super(); } @Override public void setConf(Configuration conf) { super.setConf(conf); mapredLocalDirs = conf.getStrings("mapred.local.dir"); //Setting of the permissions of the local directory is done in //setup() } /** * List of commands that the setuid script will execute. */ enum TaskCommands { LAUNCH_TASK_JVM, TERMINATE_TASK_JVM, KILL_TASK_JVM, ENABLE_TASK_FOR_CLEANUP } /** * Launch a task JVM that will run as the owner of the job. * * This method launches a task JVM by executing a setuid * executable that will switch to the user and run the * task. */ @Override void launchTaskJVM(TaskController.TaskControllerContext context) throws IOException { JvmEnv env = context.env; // get the JVM command line. 
String cmdLine = TaskLog.buildCommandLine(env.setup, env.vargs, env.stdout, env.stderr, env.logSize, true); StringBuffer sb = new StringBuffer(); //export out all the environment variable before child command as //the setuid/setgid binaries would not be getting, any environmental //variables which begin with LD_*. for(Entry<String, String> entry : env.env.entrySet()) { sb.append("export "); sb.append(entry.getKey()); sb.append("="); sb.append(entry.getValue()); sb.append("\n"); } sb.append(cmdLine); // write the command to a file in the // task specific cache directory writeCommand(sb.toString(), getTaskCacheDirectory(context)); // Call the taskcontroller with the right parameters. List<String> launchTaskJVMArgs = buildLaunchTaskArgs(context); ShellCommandExecutor shExec = buildTaskControllerExecutor( TaskCommands.LAUNCH_TASK_JVM, env.conf.getUser(), launchTaskJVMArgs, env.workDir, env.env); context.shExec = shExec; try { shExec.execute(); } catch (Exception e) { LOG.warn("Exception thrown while launching task JVM : " + StringUtils.stringifyException(e)); LOG.warn("Exit code from task is : " + shExec.getExitCode()); LOG.warn("Output from task-contoller is : " + shExec.getOutput()); throw new IOException(e); } if(LOG.isDebugEnabled()) { LOG.debug("output after executing task jvm = " + shExec.getOutput()); } } /** * Helper method that runs a LinuxTaskController command * * @param taskCommand * @param user * @param cmdArgs * @param env * @throws IOException */ private void runCommand(TaskCommands taskCommand, String user, List<String> cmdArgs, File workDir, Map<String, String> env) throws IOException { ShellCommandExecutor shExec = buildTaskControllerExecutor(taskCommand, user, cmdArgs, workDir, env); try { shExec.execute(); } catch (Exception e) { LOG.warn("Exit code from " + taskCommand.toString() + " is : " + shExec.getExitCode()); LOG.warn("Exception thrown by " + taskCommand.toString() + " : " + StringUtils.stringifyException(e)); LOG.info("Output from 
LinuxTaskController's " + taskCommand.toString() + " follows:"); logOutput(shExec.getOutput()); throw new IOException(e); } if (LOG.isDebugEnabled()) { LOG.info("Output from LinuxTaskController's " + taskCommand.toString() + " follows:"); logOutput(shExec.getOutput()); } } /** * Returns list of arguments to be passed while launching task VM. * See {@code buildTaskControllerExecutor(TaskCommands, * String, List<String>, JvmEnv)} documentation. * @param context * @return Argument to be used while launching Task VM */ private List<String> buildLaunchTaskArgs(TaskControllerContext context) { List<String> commandArgs = new ArrayList<String>(3); String taskId = context.task.getTaskID().toString(); String jobId = getJobId(context); LOG.debug("getting the task directory as: " + getTaskCacheDirectory(context)); commandArgs.add(getDirectoryChosenForTask( new File(getTaskCacheDirectory(context)), context)); commandArgs.add(jobId); if(!context.task.isTaskCleanupTask()) { commandArgs.add(taskId); }else { commandArgs.add(taskId + TaskTracker.TASK_CLEANUP_SUFFIX); } return commandArgs; } private List<String> buildTaskCleanupArgs( TaskControllerPathDeletionContext context) { List<String> commandArgs = new ArrayList<String>(3); commandArgs.add(context.mapredLocalDir.toUri().getPath()); commandArgs.add(context.task.getJobID().toString()); String workDir = ""; if (context.isWorkDir) { workDir = "/work"; } if (context.task.isTaskCleanupTask()) { commandArgs.add(context.task.getTaskID() + TaskTracker.TASK_CLEANUP_SUFFIX + workDir); } else { commandArgs.add(context.task.getTaskID() + workDir); } return commandArgs; } /** * Enables the task for cleanup by changing permissions of the specified path * in the local filesystem */ @Override void enableTaskForCleanup(PathDeletionContext context) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Going to do " + TaskCommands.ENABLE_TASK_FOR_CLEANUP.toString() + " for " + context.fullPath); } if (context instanceof 
TaskControllerPathDeletionContext) { TaskControllerPathDeletionContext tContext = (TaskControllerPathDeletionContext) context; if (tContext.task.getUser() != null && tContext.fs instanceof LocalFileSystem) { runCommand(TaskCommands.ENABLE_TASK_FOR_CLEANUP, tContext.task.getUser(), buildTaskCleanupArgs(tContext), null, null); } else { throw new IllegalArgumentException("Either user is null or the " + "file system is not local file system."); } } else { throw new IllegalArgumentException("PathDeletionContext provided is not " + "TaskControllerPathDeletionContext."); } } private void logOutput(String output) { String shExecOutput = output; if (shExecOutput != null) { for (String str : shExecOutput.split("\n")) { LOG.info(str); } } } // get the Job ID from the information in the TaskControllerContext private String getJobId(TaskControllerContext context) { String taskId = context.task.getTaskID().toString(); TaskAttemptID tId = TaskAttemptID.forName(taskId); String jobId = tId.getJobID().toString(); return jobId; } // Get the directory from the list of directories configured // in mapred.local.dir chosen for storing data pertaining to // this task. private String getDirectoryChosenForTask(File directory, TaskControllerContext context) { String jobId = getJobId(context); String taskId = context.task.getTaskID().toString(); for (String dir : mapredLocalDirs) { File mapredDir = new File(dir); File taskDir = new File(mapredDir, TaskTracker.getLocalTaskDir( jobId, taskId, context.task.isTaskCleanupTask())); if (directory.equals(taskDir)) { return dir; } } LOG.error("Couldn't parse task cache directory correctly"); throw new IllegalArgumentException("invalid task cache directory " + directory.getAbsolutePath()); } /** * Setup appropriate permissions for directories and files that * are used by the task. 
* * As the LinuxTaskController launches tasks as a user, different * from the daemon, all directories and files that are potentially * used by the tasks are setup with appropriate permissions that * will allow access. * * Until secure data handling is implemented (see HADOOP-4491 and * HADOOP-4493, for e.g.), the permissions are set up to allow * read, write and execute access for everyone. This will be * changed to restricted access as data is handled securely. */ void initializeTask(TaskControllerContext context) { // Setup permissions for the job and task cache directories. setupTaskCacheFileAccess(context); // setup permissions for task log directory setupTaskLogFileAccess(context); } // Allows access for the task to create log files under // the task log directory private void setupTaskLogFileAccess(TaskControllerContext context) { TaskAttemptID taskId = context.task.getTaskID(); File f = TaskLog.getTaskLogFile(taskId, TaskLog.LogName.SYSLOG); String taskAttemptLogDir = f.getParentFile().getAbsolutePath(); changeDirectoryPermissions(taskAttemptLogDir, FILE_PERMISSIONS, false); } // Allows access for the task to read, write and execute // the files under the job and task cache directories private void setupTaskCacheFileAccess(TaskControllerContext context) { String taskId = context.task.getTaskID().toString(); JobID jobId = JobID.forName(getJobId(context)); //Change permission for the task across all the disks for(String localDir : mapredLocalDirs) { File f = new File(localDir); File taskCacheDir = new File(f,TaskTracker.getLocalTaskDir( jobId.toString(), taskId, context.task.isTaskCleanupTask())); if(taskCacheDir.exists()) { changeDirectoryPermissions(taskCacheDir.getPath(), FILE_PERMISSIONS, true); } }//end of local directory Iteration } // convenience method to execute chmod. 
private void changeDirectoryPermissions(String dir, String mode, boolean isRecursive) { int ret = 0; try { ret = FileUtil.chmod(dir, mode, isRecursive); } catch (Exception e) { LOG.warn("Exception in changing permissions for directory " + dir + ". Exception: " + e.getMessage()); } if (ret != 0) { LOG.warn("Could not change permissions for directory " + dir); } } /** * Builds the command line for launching/terminating/killing task JVM. * Following is the format for launching/terminating/killing task JVM * <br/> * For launching following is command line argument: * <br/> * {@code user-name command tt-root job_id task_id} * <br/> * For terminating/killing task jvm. * {@code user-name command tt-root task-pid} * * @param command command to be executed. * @param userName user name * @param cmdArgs list of extra arguments * @param env JVM environment variables. * @return {@link ShellCommandExecutor} * @throws IOException */ private ShellCommandExecutor buildTaskControllerExecutor( TaskCommands command, String userName, List<String> cmdArgs, File workDir, Map<String, String> env) throws IOException { String[] taskControllerCmd = new String[3 + cmdArgs.size()]; - taskControllerCmd[0] = taskControllerExe; + taskControllerCmd[0] = getTaskControllerExecutablePath(); taskControllerCmd[1] = userName; taskControllerCmd[2] = String.valueOf(command.ordinal()); int i = 3; for (String cmdArg : cmdArgs) { taskControllerCmd[i++] = cmdArg; } if (LOG.isDebugEnabled()) { for (String cmd : taskControllerCmd) { LOG.debug("taskctrl command = " + cmd); } } ShellCommandExecutor shExec = null; if(workDir != null && workDir.exists()) { shExec = new ShellCommandExecutor(taskControllerCmd, workDir, env); } else { shExec = new ShellCommandExecutor(taskControllerCmd); } return shExec; } // Return the task specific directory under the cache. 
private String getTaskCacheDirectory(TaskControllerContext context) { // In the case of JVM reuse, the task specific directory // is different from what is set with respect with // env.workDir. Hence building this from the taskId everytime. String taskId = context.task.getTaskID().toString(); File cacheDirForJob = context.env.workDir.getParentFile().getParentFile(); if(context.task.isTaskCleanupTask()) { taskId = taskId + TaskTracker.TASK_CLEANUP_SUFFIX; } return new File(cacheDirForJob, taskId).getAbsolutePath(); } // Write the JVM command line to a file under the specified directory // Note that the JVM will be launched using a setuid executable, and // could potentially contain strings defined by a user. Hence, to // prevent special character attacks, we write the command line to // a file and execute it. private void writeCommand(String cmdLine, String directory) throws IOException { PrintWriter pw = null; String commandFile = directory + File.separator + COMMAND_FILE; LOG.info("Writing commands to " + commandFile); try { FileWriter fw = new FileWriter(commandFile); BufferedWriter bw = new BufferedWriter(fw); pw = new PrintWriter(bw); pw.write(cmdLine); } catch (IOException ioe) { LOG.error("Caught IOException while writing JVM command line to file. " + ioe.getMessage()); } finally { if (pw != null) { pw.close(); } // set execute permissions for all on the file. 
File f = new File(commandFile); if (f.exists()) { f.setReadable(true, false); f.setExecutable(true, false); } } } - + + protected String getTaskControllerExecutablePath() { + return taskControllerExe; + } /** * Sets up the permissions of the following directories: * * Job cache directory * Archive directory * Hadoop log directories * */ @Override void setup() { //set up job cache directory and associated permissions String localDirs[] = this.mapredLocalDirs; for(String localDir : localDirs) { //Cache root File cacheDirectory = new File(localDir,TaskTracker.getCacheSubdir()); File jobCacheDirectory = new File(localDir,TaskTracker.getJobCacheSubdir()); if(!cacheDirectory.exists()) { if(!cacheDirectory.mkdirs()) { LOG.warn("Unable to create cache directory : " + cacheDirectory.getPath()); } } if(!jobCacheDirectory.exists()) { if(!jobCacheDirectory.mkdirs()) { LOG.warn("Unable to create job cache directory : " + jobCacheDirectory.getPath()); } } //Give world writable permission for every directory under //mapred-local-dir. //Child tries to write files under it when executing. changeDirectoryPermissions(localDir, FILE_PERMISSIONS, true); }//end of local directory manipulations //setting up perms for user logs File taskLog = TaskLog.getUserLogDir(); changeDirectoryPermissions(taskLog.getPath(), FILE_PERMISSIONS,false); } /* * Create Job directories across disks and set their permissions to 777 * This way when tasks are run we just need to setup permissions for * task folder. 
*/ @Override void initializeJob(JobID jobid) { for(String localDir : this.mapredLocalDirs) { File jobDirectory = new File(localDir, TaskTracker.getLocalJobDir(jobid.toString())); if(!jobDirectory.exists()) { if(!jobDirectory.mkdir()) { LOG.warn("Unable to create job cache directory : " + jobDirectory.getPath()); continue; } } //Should be recursive because the jar and work folders might be //present under the job cache directory changeDirectoryPermissions( jobDirectory.getPath(), FILE_PERMISSIONS, true); } } /** * API which builds the command line to be pass to LinuxTaskController * binary to terminate/kill the task. See * {@code buildTaskControllerExecutor(TaskCommands, * String, List<String>, JvmEnv)} documentation. * * * @param context context of task which has to be passed kill signal. * */ private List<String> buildKillTaskCommandArgs(TaskControllerContext context){ List<String> killTaskJVMArgs = new ArrayList<String>(); killTaskJVMArgs.add(context.pid); return killTaskJVMArgs; } /** * Convenience method used to sending appropriate Kill signal to the task * VM * @param context * @param command * @throws IOException */ private void finishTask(TaskControllerContext context, TaskCommands command) throws IOException{ if(context.task == null) { LOG.info("Context task null not killing the JVM"); return; } ShellCommandExecutor shExec = buildTaskControllerExecutor( command, context.env.conf.getUser(), buildKillTaskCommandArgs(context), context.env.workDir, context.env.env); try { shExec.execute(); } catch (Exception e) { LOG.warn("Output from task-contoller is : " + shExec.getOutput()); throw new IOException(e); } } @Override void terminateTask(TaskControllerContext context) { try { finishTask(context, TaskCommands.TERMINATE_TASK_JVM); } catch (Exception e) { LOG.warn("Exception thrown while sending kill to the Task VM " + StringUtils.stringifyException(e)); } } @Override void killTask(TaskControllerContext context) { try { finishTask(context, 
TaskCommands.KILL_TASK_JVM); } catch (Exception e) { LOG.warn("Exception thrown while sending destroy to the Task VM " + StringUtils.stringifyException(e)); } } } diff --git a/src/test/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java b/src/test/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java new file mode 100644 index 0000000..521c758 --- /dev/null +++ b/src/test/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java @@ -0,0 +1,241 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.PrintWriter; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.security.UnixUserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation; + +import junit.framework.TestCase; + +/** + * The base class which starts up a cluster with LinuxTaskController as the task + * controller. 
+ * + * In order to run test cases utilizing LinuxTaskController please follow the + * following steps: + * <ol> + * <li>Build LinuxTaskController by not passing any + * <code>-Dhadoop.conf.dir</code></li> + * <li>Make the built binary to setuid executable</li> + * <li>Execute following targets: + * <code>ant test -Dcompile.c++=true -Dtaskcontroller-path=<em>path to built binary</em> + * -Dtaskcontroller-user=<em>user,group</em></code></li> + * </ol> + * + */ +public class ClusterWithLinuxTaskController extends TestCase { + private static final Log LOG = + LogFactory.getLog(ClusterWithLinuxTaskController.class); + + /** + * The wrapper class around LinuxTaskController which allows modification of + * the custom path to task-controller which we can use for task management. + * + **/ + public static class MyLinuxTaskController extends LinuxTaskController { + String taskControllerExePath; + + @Override + protected String getTaskControllerExecutablePath() { + return taskControllerExePath; + } + + void setTaskControllerExe(String execPath) { + this.taskControllerExePath = execPath; + } + } + + // cluster instances which sub classes can use + protected MiniMRCluster mrCluster = null; + protected MiniDFSCluster dfsCluster = null; + + private JobConf clusterConf = null; + protected Path homeDirectory; + + private static final int NUMBER_OF_NODES = 1; + + private File configurationFile = null; + + private UserGroupInformation taskControllerUser; + + /* + * Utility method which subclasses use to start and configure the MR Cluster + * so they can directly submit a job. 
+ */ + protected void startCluster() + throws IOException { + JobConf conf = new JobConf(); + dfsCluster = new MiniDFSCluster(conf, NUMBER_OF_NODES, true, null); + conf.set("mapred.task.tracker.task-controller", + MyLinuxTaskController.class.getName()); + mrCluster = + new MiniMRCluster(NUMBER_OF_NODES, dfsCluster.getFileSystem().getUri() + .toString(), 1, null, null, conf); + + // Get the configured taskcontroller-path + String path = System.getProperty("taskcontroller-path"); + createTaskControllerConf(path); + String execPath = path + "/task-controller"; + TaskTracker tracker = mrCluster.getTaskTrackerRunner(0).tt; + // TypeCasting the parent to our TaskController instance as we + // know that that would be instance which should be present in TT. + ((MyLinuxTaskController) tracker.getTaskController()) + .setTaskControllerExe(execPath); + String ugi = System.getProperty("taskcontroller-user"); + clusterConf = mrCluster.createJobConf(); + String[] splits = ugi.split(","); + taskControllerUser = new UnixUserGroupInformation(splits); + clusterConf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi); + createHomeDirectory(clusterConf); + } + + private void createHomeDirectory(JobConf conf) + throws IOException { + FileSystem fs = dfsCluster.getFileSystem(); + String path = "/user/" + taskControllerUser.getUserName(); + homeDirectory = new Path(path); + LOG.info("Creating Home directory : " + homeDirectory); + fs.mkdirs(homeDirectory); + changePermission(conf, homeDirectory); + } + + private void changePermission(JobConf conf, Path p) + throws IOException { + FileSystem fs = dfsCluster.getFileSystem(); + fs.setOwner(homeDirectory, taskControllerUser.getUserName(), + taskControllerUser.getGroupNames()[0]); + } + + private void createTaskControllerConf(String path) + throws IOException { + File confDirectory = new File(path, "../conf"); + if (!confDirectory.exists()) { + confDirectory.mkdirs(); + } + configurationFile = new File(confDirectory, "taskcontroller.cfg"); + 
PrintWriter writer = + new PrintWriter(new FileOutputStream(configurationFile)); + + writer.println(String.format("mapred.local.dir=%s", mrCluster + .getTaskTrackerLocalDir(0))); + + writer.flush(); + writer.close(); + } + + /** + * Can we run the tests with LinuxTaskController? + * + * @return boolean + */ + protected boolean shouldRun() { + return isTaskExecPathPassed() && isUserPassed(); + } + + private boolean isTaskExecPathPassed() { + String path = System.getProperty("taskcontroller-path"); + if (path == null || path.isEmpty() + || path.equals("${taskcontroller-path}")) { + return false; + } + return true; + } + + private boolean isUserPassed() { + String ugi = System.getProperty("taskcontroller-user"); + if (ugi != null && !(ugi.equals("${taskcontroller-user}")) + && !ugi.isEmpty()) { + if (ugi.indexOf(",") > 1) { + return true; + } + return false; + } + return false; + } + + protected JobConf getClusterConf() { + return new JobConf(clusterConf); + } + + @Override + protected void tearDown() + throws Exception { + if (mrCluster != null) { + mrCluster.shutdown(); + } + + if (dfsCluster != null) { + dfsCluster.shutdown(); + } + + if (configurationFile != null) { + configurationFile.delete(); + } + + super.tearDown(); + } + + /** + * Assert that the job is actually run by the specified user by verifying the + * permissions of the output part-files. + * + * @param outDir + * @throws IOException + */ + protected void assertOwnerShip(Path outDir) + throws IOException { + FileSystem fs = outDir.getFileSystem(clusterConf); + assertOwnerShip(outDir, fs); + } + + /** + * Assert that the job is actually run by the specified user by verifying the + * permissions of the output part-files. 
+ * + * @param outDir + * @param fs + * @throws IOException + */ + protected void assertOwnerShip(Path outDir, FileSystem fs) + throws IOException { + for (FileStatus status : fs.listStatus(outDir, new OutputLogFilter())) { + String owner = status.getOwner(); + String group = status.getGroup(); + LOG.info("Ownership of the file is " + status.getPath() + " is " + owner + + "," + group); + assertTrue("Output part-file's owner is not correct. Expected : " + + taskControllerUser.getUserName() + " Found : " + owner, owner + .equals(taskControllerUser.getUserName())); + assertTrue("Output part-file's group is not correct. Expected : " + + taskControllerUser.getGroupNames()[0] + " Found : " + group, group + .equals(taskControllerUser.getGroupNames()[0])); + } + } +} diff --git a/src/test/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java b/src/test/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java new file mode 100644 index 0000000..73645b7 --- /dev/null +++ b/src/test/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.Text; + +/** + * Test a java-based mapred job with LinuxTaskController running the jobs as a + * user different from the user running the cluster. See + * {@link ClusterWithLinuxTaskController} + */ +public class TestJobExecutionAsDifferentUser extends + ClusterWithLinuxTaskController { + + public void testJobExecution() + throws Exception { + if (!shouldRun()) { + return; + } + startCluster(); + submitWordCount(getClusterConf()); + } + + private void submitWordCount(JobConf clientConf) throws IOException { + Path inDir = new Path("testing/wc/input"); + Path outDir = new Path("testing/wc/output"); + JobConf conf = new JobConf(clientConf); + FileSystem fs = FileSystem.get(conf); + fs.delete(outDir, true); + if (!fs.mkdirs(inDir)) { + throw new IOException("Mkdirs failed to create " + inDir.toString()); + } + + DataOutputStream file = fs.create(new Path(inDir, "part-0")); + file.writeBytes("a b c d e f g h"); + file.close(); + + conf.setJobName("wordcount"); + conf.setInputFormat(TextInputFormat.class); + + // the keys are words (strings) + conf.setOutputKeyClass(Text.class); + // the values are counts (ints) + conf.setOutputValueClass(IntWritable.class); + + conf.setMapperClass(WordCount.MapClass.class); + conf.setCombinerClass(WordCount.Reduce.class); + conf.setReducerClass(WordCount.Reduce.class); + + FileInputFormat.setInputPaths(conf, inDir); + FileOutputFormat.setOutputPath(conf, outDir); + conf.setNumMapTasks(1); + conf.setNumReduceTasks(1); + RunningJob rj = JobClient.runJob(conf); + assertTrue("Job Failed", rj.isSuccessful()); + assertOwnerShip(outDir); + } + + public void testEnvironment() throws IOException { + if (!shouldRun()) { + 
return; + } + startCluster(); + TestMiniMRChildTask childTask = new TestMiniMRChildTask(); + Path inDir = new Path("input1"); + Path outDir = new Path("output1"); + try { + childTask.runTestTaskEnv(getClusterConf(), inDir, outDir); + } catch (IOException e) { + fail("IOException thrown while running enviroment test." + + e.getMessage()); + } finally { + FileSystem outFs = outDir.getFileSystem(getClusterConf()); + if (outFs.exists(outDir)) { + assertOwnerShip(outDir); + outFs.delete(outDir, true); + } else { + fail("Output directory does not exist" + outDir.toString()); + } + } + } +} diff --git a/src/test/org/apache/hadoop/mapred/TestMiniMRChildTask.java b/src/test/org/apache/hadoop/mapred/TestMiniMRChildTask.java index 31ac57e..bab0d82 100644 --- a/src/test/org/apache/hadoop/mapred/TestMiniMRChildTask.java +++ b/src/test/org/apache/hadoop/mapred/TestMiniMRChildTask.java @@ -1,418 +1,437 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; import java.io.*; import java.util.Iterator; import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.lib.IdentityReducer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.MiniDFSCluster; /** * Class to test mapred task's * - temp directory * - child env */ public class TestMiniMRChildTask extends TestCase { private static final Log LOG = LogFactory.getLog(TestMiniMRChildTask.class.getName()); private final static String OLD_CONFIGS = "test.old.configs"; private final static String TASK_OPTS_VAL = "-Xmx200m"; private final static String MAP_OPTS_VAL = "-Xmx200m"; private final static String REDUCE_OPTS_VAL = "-Xmx300m"; private MiniMRCluster mr; private MiniDFSCluster dfs; private FileSystem fileSys; /** * Map class which checks whether temp directory exists * and check the value of java.io.tmpdir * Creates a tempfile and checks whether that is created in * temp directory specified. */ public static class MapClass extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> { Path tmpDir; FileSystem localFs; public void map (LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException { String tmp = null; if (localFs.exists(tmpDir)) { tmp = tmpDir.makeQualified(localFs).toString(); assertEquals(tmp, new Path(System.getProperty("java.io.tmpdir")). makeQualified(localFs).toString()); } else { fail("Temp directory "+tmpDir +" doesnt exist."); } File tmpFile = File.createTempFile("test", ".tmp"); assertEquals(tmp, new Path(tmpFile.getParent()). 
makeQualified(localFs).toString()); } public void configure(JobConf job) { tmpDir = new Path(job.get("mapred.child.tmp", "./tmp")); try { localFs = FileSystem.getLocal(job); } catch (IOException ioe) { ioe.printStackTrace(); fail("IOException in getting localFS"); } } } // configure a job private void configure(JobConf conf, Path inDir, Path outDir, String input, Class<? extends Mapper> map, Class<? extends Reducer> reduce) throws IOException { // set up the input file system and write input text. FileSystem inFs = inDir.getFileSystem(conf); FileSystem outFs = outDir.getFileSystem(conf); outFs.delete(outDir, true); if (!inFs.mkdirs(inDir)) { throw new IOException("Mkdirs failed to create " + inDir.toString()); } { // write input into input file DataOutputStream file = inFs.create(new Path(inDir, "part-0")); file.writeBytes(input); file.close(); } // configure the mapred Job which creates a tempfile in map. conf.setJobName("testmap"); conf.setMapperClass(map); conf.setReducerClass(reduce); conf.setNumMapTasks(1); conf.setNumReduceTasks(0); FileInputFormat.setInputPaths(conf, inDir); FileOutputFormat.setOutputPath(conf, outDir); String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", "/tmp")).toString().replace(' ', '+'); conf.set("test.build.data", TEST_ROOT_DIR); } /** * Launch tests * @param conf Configuration of the mapreduce job. * @param inDir input path * @param outDir output path * @param input Input text * @throws IOException */ public void launchTest(JobConf conf, Path inDir, Path outDir, String input) throws IOException { configure(conf, inDir, outDir, input, MapClass.class, IdentityReducer.class); FileSystem outFs = outDir.getFileSystem(conf); // Launch job with default option for temp dir. // i.e. temp dir is ./tmp JobClient.runJob(conf); outFs.delete(outDir, true); // Launch job by giving relative path to temp dir. 
conf.set("mapred.child.tmp", "../temp"); JobClient.runJob(conf); outFs.delete(outDir, true); // Launch job by giving absolute path to temp dir conf.set("mapred.child.tmp", "/tmp"); JobClient.runJob(conf); outFs.delete(outDir, true); } private static void checkEnv(String envName, String expValue, String mode) { String envValue = System.getenv(envName).trim(); if ("append".equals(mode)) { if (envValue == null || !envValue.contains(":")) { throw new RuntimeException("Missing env variable"); } else { String parts[] = envValue.split(":"); // check if the value is appended if (!parts[parts.length - 1].equals(expValue)) { throw new RuntimeException("Wrong env variable in append mode"); } } } else { if (envValue == null || !envValue.equals(expValue)) { throw new RuntimeException("Wrong env variable in noappend mode"); } } } // Mappers that simply checks if the desired user env are present or not static class EnvCheckMapper extends MapReduceBase implements Mapper<WritableComparable, Writable, WritableComparable, Writable> { public void configure(JobConf job) { boolean oldConfigs = job.getBoolean(OLD_CONFIGS, false); if (oldConfigs) { String javaOpts = job.get(JobConf.MAPRED_TASK_JAVA_OPTS); assertNotNull(JobConf.MAPRED_TASK_JAVA_OPTS + " is null!", javaOpts); assertEquals(JobConf.MAPRED_TASK_JAVA_OPTS + " has value of: " + javaOpts, javaOpts, TASK_OPTS_VAL); } else { String mapJavaOpts = job.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS); assertNotNull(JobConf.MAPRED_MAP_TASK_JAVA_OPTS + " is null!", mapJavaOpts); assertEquals(JobConf.MAPRED_MAP_TASK_JAVA_OPTS + " has value of: " + mapJavaOpts, mapJavaOpts, MAP_OPTS_VAL); } String path = job.get("path"); // check if the pwd is there in LD_LIBRARY_PATH String pwd = System.getenv("PWD"); assertTrue("LD doesnt contain pwd", System.getenv("LD_LIBRARY_PATH").contains(pwd)); // check if X=$X:/abc works for LD_LIBRARY_PATH checkEnv("LD_LIBRARY_PATH", "/tmp", "append"); // check if X=/tmp works for an already existing parameter 
checkEnv("HOME", "/tmp", "noappend"); // check if X=/tmp for a new env variable checkEnv("MY_PATH", "/tmp", "noappend"); // check if X=$X:/tmp works for a new env var and results into :/tmp checkEnv("NEW_PATH", ":/tmp", "noappend"); // check if X=$(tt's X var):/tmp for an old env variable inherited from // the tt checkEnv("PATH", path + ":/tmp", "noappend"); } public void map(WritableComparable key, Writable value, OutputCollector<WritableComparable, Writable> out, Reporter reporter) throws IOException { } } static class EnvCheckReducer extends MapReduceBase implements Reducer<WritableComparable, Writable, WritableComparable, Writable> { @Override public void configure(JobConf job) { boolean oldConfigs = job.getBoolean(OLD_CONFIGS, false); if (oldConfigs) { String javaOpts = job.get(JobConf.MAPRED_TASK_JAVA_OPTS); assertNotNull(JobConf.MAPRED_TASK_JAVA_OPTS + " is null!", javaOpts); assertEquals(JobConf.MAPRED_TASK_JAVA_OPTS + " has value of: " + javaOpts, javaOpts, TASK_OPTS_VAL); } else { String reduceJavaOpts = job.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS); assertNotNull(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + " is null!", reduceJavaOpts); assertEquals(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + " has value of: " + reduceJavaOpts, reduceJavaOpts, REDUCE_OPTS_VAL); } String path = job.get("path"); // check if the pwd is there in LD_LIBRARY_PATH String pwd = System.getenv("PWD"); assertTrue("LD doesnt contain pwd", System.getenv("LD_LIBRARY_PATH").contains(pwd)); // check if X=$X:/abc works for LD_LIBRARY_PATH checkEnv("LD_LIBRARY_PATH", "/tmp", "append"); // check if X=/tmp works for an already existing parameter checkEnv("HOME", "/tmp", "noappend"); // check if X=/tmp for a new env variable checkEnv("MY_PATH", "/tmp", "noappend"); // check if X=$X:/tmp works for a new env var and results into :/tmp checkEnv("NEW_PATH", ":/tmp", "noappend"); // check if X=$(tt's X var):/tmp for an old env variable inherited from // the tt checkEnv("PATH", path + ":/tmp", "noappend"); } 
@Override public void reduce(WritableComparable key, Iterator<Writable> values, OutputCollector<WritableComparable, Writable> output, Reporter reporter) throws IOException { } } @Override public void setUp() { try { // create configuration, dfs, file system and mapred cluster dfs = new MiniDFSCluster(new Configuration(), 1, true, null); fileSys = dfs.getFileSystem(); mr = new MiniMRCluster(2, fileSys.getUri().toString(), 1); } catch (IOException ioe) { tearDown(); } } @Override public void tearDown() { // close file system and shut down dfs and mapred cluster try { if (fileSys != null) { fileSys.close(); } if (dfs != null) { dfs.shutdown(); } if (mr != null) { mr.shutdown(); } } catch (IOException ioe) { LOG.info("IO exception in closing file system)" ); ioe.printStackTrace(); } } /** * Tests task's temp directory. * * In this test, we give different values to mapred.child.tmp * both relative and absolute. And check whether the temp directory * is created. We also check whether java.io.tmpdir value is same as * the directory specified. We create a temp file and check if is is * created in the directory specified. */ public void testTaskTempDir(){ try { JobConf conf = mr.createJobConf(); // intialize input, output directories Path inDir = new Path("testing/wc/input"); Path outDir = new Path("testing/wc/output"); String input = "The input"; launchTest(conf, inDir, outDir, input); } catch(Exception e) { e.printStackTrace(); fail("Exception in testing temp dir"); tearDown(); } } /** * Test to test if the user set env variables reflect in the child * processes. 
Mainly * - x=y (x can be a already existing env variable or a new variable) * - x=$x:y (replace $x with the current value of x) */ public void testTaskEnv(){ try { JobConf conf = mr.createJobConf(); // initialize input, output directories Path inDir = new Path("testing/wc/input1"); Path outDir = new Path("testing/wc/output1"); FileSystem outFs = outDir.getFileSystem(conf); runTestTaskEnv(conf, inDir, outDir, false); outFs.delete(outDir, true); } catch(Exception e) { e.printStackTrace(); fail("Exception in testing child env"); tearDown(); } } /** * Test to test if the user set *old* env variables reflect in the child * processes. Mainly * - x=y (x can be a already existing env variable or a new variable) * - x=$x:y (replace $x with the current value of x) */ public void testTaskOldEnv(){ try { JobConf conf = mr.createJobConf(); // initialize input, output directories Path inDir = new Path("testing/wc/input1"); Path outDir = new Path("testing/wc/output1"); FileSystem outFs = outDir.getFileSystem(conf); runTestTaskEnv(conf, inDir, outDir, true); outFs.delete(outDir, true); } catch(Exception e) { e.printStackTrace(); fail("Exception in testing child env"); tearDown(); } } void runTestTaskEnv(JobConf conf, Path inDir, Path outDir, boolean oldConfigs) throws IOException { String input = "The input"; configure(conf, inDir, outDir, input, EnvCheckMapper.class, EnvCheckReducer.class); // test // - new SET of new var (MY_PATH) // - set of old var (HOME) // - append to an old var from modified env (LD_LIBRARY_PATH) // - append to an old var from tt's env (PATH) // - append to a new var (NEW_PATH) String mapTaskEnvKey = JobConf.MAPRED_MAP_TASK_ENV; String reduceTaskEnvKey = JobConf.MAPRED_MAP_TASK_ENV; String mapTaskJavaOptsKey = JobConf.MAPRED_MAP_TASK_JAVA_OPTS; String reduceTaskJavaOptsKey = JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS; String mapTaskJavaOpts = MAP_OPTS_VAL; String reduceTaskJavaOpts = REDUCE_OPTS_VAL; conf.setBoolean(OLD_CONFIGS, oldConfigs); if (oldConfigs) { 
mapTaskEnvKey = reduceTaskEnvKey = JobConf.MAPRED_TASK_ENV; mapTaskJavaOptsKey = reduceTaskJavaOptsKey = JobConf.MAPRED_TASK_JAVA_OPTS; mapTaskJavaOpts = reduceTaskJavaOpts = TASK_OPTS_VAL; } conf.set(mapTaskEnvKey, "MY_PATH=/tmp,HOME=/tmp,LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp," + "PATH=$PATH:/tmp,NEW_PATH=$NEW_PATH:/tmp"); conf.set(reduceTaskEnvKey, "MY_PATH=/tmp,HOME=/tmp,LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp," + "PATH=$PATH:/tmp,NEW_PATH=$NEW_PATH:/tmp"); conf.set("path", System.getenv("PATH")); conf.set(mapTaskJavaOptsKey, mapTaskJavaOpts); conf.set(reduceTaskJavaOptsKey, reduceTaskJavaOpts); RunningJob job = JobClient.runJob(conf); assertTrue("The environment checker job failed.", job.isSuccessful()); } + + void runTestTaskEnv(JobConf conf, Path inDir, Path outDir) throws IOException { + String input = "The input"; + configure(conf, inDir, outDir, input, EnvCheckMapper.class, + IdentityReducer.class); + // test + // - new SET of new var (MY_PATH) + // - set of old var (HOME) + // - append to an old var from modified env (LD_LIBRARY_PATH) + // - append to an old var from tt's env (PATH) + // - append to a new var (NEW_PATH) + conf.set("mapred.child.env", + "MY_PATH=/tmp,HOME=/tmp,LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp," + + "PATH=$PATH:/tmp,NEW_PATH=$NEW_PATH:/tmp"); + conf.set("path", System.getenv("PATH")); + RunningJob job = JobClient.runJob(conf); + assertTrue("The environment checker job failed.", job.isSuccessful()); + } + } diff --git a/src/test/org/apache/hadoop/mapred/pipes/TestPipes.java b/src/test/org/apache/hadoop/mapred/pipes/TestPipes.java index 604bc20..86d6524 100644 --- a/src/test/org/apache/hadoop/mapred/pipes/TestPipes.java +++ b/src/test/org/apache/hadoop/mapred/pipes/TestPipes.java @@ -1,254 +1,277 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. 
The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapred.pipes; import java.io.DataOutputStream; import java.io.IOException; import java.util.ArrayList; import java.util.List; import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.mapred.Counters; import org.apache.hadoop.mapred.FileInputFormat; import org.apache.hadoop.mapred.FileOutputFormat; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MiniMRCluster; import org.apache.hadoop.mapred.RunningJob; import org.apache.hadoop.mapred.TestMiniMRWithDFS; import org.apache.hadoop.mapred.Utils; import org.apache.hadoop.mapred.Counters.Counter; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; public class TestPipes extends TestCase { private static final Log LOG = LogFactory.getLog(TestPipes.class.getName()); - + + private static Path cppExamples = + new Path(System.getProperty("install.c++.examples")); + static Path wordCountSimple = + new Path(cppExamples, "bin/wordcount-simple"); + static Path wordCountPart = + new Path(cppExamples, 
"bin/wordcount-part"); + static Path wordCountNoPipes = + new Path(cppExamples,"bin/wordcount-nopipe"); + + static Path nonPipedOutDir; + static void cleanup(FileSystem fs, Path p) throws IOException { fs.delete(p, true); assertFalse("output not cleaned up", fs.exists(p)); } public void testPipes() throws IOException { if (System.getProperty("compile.c++") == null) { LOG.info("compile.c++ is not defined, so skipping TestPipes"); return; } MiniDFSCluster dfs = null; MiniMRCluster mr = null; - Path cppExamples = new Path(System.getProperty("install.c++.examples")); Path inputPath = new Path("/testing/in"); Path outputPath = new Path("/testing/out"); try { final int numSlaves = 2; Configuration conf = new Configuration(); dfs = new MiniDFSCluster(conf, numSlaves, true, null); mr = new MiniMRCluster(numSlaves, dfs.getFileSystem().getName(), 1); writeInputFile(dfs.getFileSystem(), inputPath); - runProgram(mr, dfs, new Path(cppExamples, "bin/wordcount-simple"), - inputPath, outputPath, 3, 2, twoSplitOutput); + runProgram(mr, dfs, wordCountSimple, + inputPath, outputPath, 3, 2, twoSplitOutput, null); cleanup(dfs.getFileSystem(), outputPath); - - runProgram(mr, dfs, new Path(cppExamples, "bin/wordcount-simple"), - inputPath, outputPath, 3, 0, noSortOutput); + runProgram(mr, dfs, wordCountSimple, + inputPath, outputPath, 3, 0, noSortOutput, null); cleanup(dfs.getFileSystem(), outputPath); - - runProgram(mr, dfs, new Path(cppExamples, "bin/wordcount-part"), - inputPath, outputPath, 3, 2, fixedPartitionOutput); - runNonPipedProgram(mr, dfs, new Path(cppExamples,"bin/wordcount-nopipe")); + runProgram(mr, dfs, wordCountPart, + inputPath, outputPath, 3, 2, fixedPartitionOutput, null); + runNonPipedProgram(mr, dfs, wordCountNoPipes, null); mr.waitUntilIdle(); } finally { mr.shutdown(); dfs.shutdown(); } } + final static String[] twoSplitOutput = new String[] { "`and\t1\na\t1\nand\t1\nbeginning\t1\nbook\t1\nbut\t1\nby\t1\n" + 
"conversation?'\t1\ndo:\t1\nhad\t2\nhaving\t1\nher\t2\nin\t1\nit\t1\n"+ "it,\t1\nno\t1\nnothing\t1\nof\t3\non\t1\nonce\t1\nor\t3\npeeped\t1\n"+ "pictures\t2\nthe\t3\nthought\t1\nto\t2\nuse\t1\nwas\t2\n", "Alice\t2\n`without\t1\nbank,\t1\nbook,'\t1\nconversations\t1\nget\t1\n" + "into\t1\nis\t1\nreading,\t1\nshe\t1\nsister\t2\nsitting\t1\ntired\t1\n" + "twice\t1\nvery\t1\nwhat\t1\n" }; final static String[] noSortOutput = new String[] { "it,\t1\n`and\t1\nwhat\t1\nis\t1\nthe\t1\nuse\t1\nof\t1\na\t1\n" + "book,'\t1\nthought\t1\nAlice\t1\n`without\t1\npictures\t1\nor\t1\n"+ "conversation?'\t1\n", "Alice\t1\nwas\t1\nbeginning\t1\nto\t1\nget\t1\nvery\t1\ntired\t1\n"+ "of\t1\nsitting\t1\nby\t1\nher\t1\nsister\t1\non\t1\nthe\t1\nbank,\t1\n"+ "and\t1\nof\t1\nhaving\t1\nnothing\t1\nto\t1\ndo:\t1\nonce\t1\n", "or\t1\ntwice\t1\nshe\t1\nhad\t1\npeeped\t1\ninto\t1\nthe\t1\nbook\t1\n"+ "her\t1\nsister\t1\nwas\t1\nreading,\t1\nbut\t1\nit\t1\nhad\t1\nno\t1\n"+ "pictures\t1\nor\t1\nconversations\t1\nin\t1\n" }; final static String[] fixedPartitionOutput = new String[] { "Alice\t2\n`and\t1\n`without\t1\na\t1\nand\t1\nbank,\t1\nbeginning\t1\n" + "book\t1\nbook,'\t1\nbut\t1\nby\t1\nconversation?'\t1\nconversations\t1\n"+ "do:\t1\nget\t1\nhad\t2\nhaving\t1\nher\t2\nin\t1\ninto\t1\nis\t1\n" + "it\t1\nit,\t1\nno\t1\nnothing\t1\nof\t3\non\t1\nonce\t1\nor\t3\n" + "peeped\t1\npictures\t2\nreading,\t1\nshe\t1\nsister\t2\nsitting\t1\n" + "the\t3\nthought\t1\ntired\t1\nto\t2\ntwice\t1\nuse\t1\n" + "very\t1\nwas\t2\nwhat\t1\n", "" }; - private void writeInputFile(FileSystem fs, Path dir) throws IOException { + static void writeInputFile(FileSystem fs, Path dir) throws IOException { DataOutputStream out = fs.create(new Path(dir, "part0")); out.writeBytes("Alice was beginning to get very tired of sitting by her\n"); out.writeBytes("sister on the bank, and of having nothing to do: once\n"); out.writeBytes("or twice she had peeped into the book her sister was\n"); out.writeBytes("reading, but it had 
no pictures or conversations in\n"); out.writeBytes("it, `and what is the use of a book,' thought Alice\n"); out.writeBytes("`without pictures or conversation?'\n"); out.close(); } - private void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, + static void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, Path program, Path inputPath, Path outputPath, - int numMaps, int numReduces, String[] expectedResults + int numMaps, int numReduces, String[] expectedResults, + JobConf conf ) throws IOException { Path wordExec = new Path("/testing/bin/application"); - JobConf job = mr.createJobConf(); + JobConf job = null; + if(conf == null) { + job = mr.createJobConf(); + }else { + job = new JobConf(conf); + } job.setNumMapTasks(numMaps); job.setNumReduceTasks(numReduces); { FileSystem fs = dfs.getFileSystem(); fs.delete(wordExec.getParent(), true); fs.copyFromLocalFile(program, wordExec); Submitter.setExecutable(job, fs.makeQualified(wordExec).toString()); Submitter.setIsJavaRecordReader(job, true); Submitter.setIsJavaRecordWriter(job, true); FileInputFormat.setInputPaths(job, inputPath); FileOutputFormat.setOutputPath(job, outputPath); RunningJob rJob = null; if (numReduces == 0) { rJob = Submitter.jobSubmit(job); while (!rJob.isComplete()) { try { Thread.sleep(1000); } catch (InterruptedException ie) { throw new RuntimeException(ie); } } } else { rJob = Submitter.runJob(job); } assertTrue("pipes job failed", rJob.isSuccessful()); Counters counters = rJob.getCounters(); Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT"); int numCounters = 0; for (Counter c : wordCountCounters) { System.out.println(c); ++numCounters; } assertTrue("No counters found!", (numCounters > 0)); } List<String> results = new ArrayList<String>(); for (Path p:FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath, new Utils.OutputFileUtils.OutputFilesFilter()))) { results.add(TestMiniMRWithDFS.readOutput(p, job)); } assertEquals("number of reduces is wrong", expectedResults.length, 
results.size()); for(int i=0; i < results.size(); i++) { assertEquals("pipes program " + program + " output " + i + " wrong", expectedResults[i], results.get(i)); } } /** * Run a map/reduce word count that does all of the map input and reduce * output directly rather than sending it back up to Java. * @param mr The mini mr cluster * @param dfs the dfs cluster * @param program the program to run * @throws IOException */ - private void runNonPipedProgram(MiniMRCluster mr, MiniDFSCluster dfs, - Path program) throws IOException { - JobConf job = mr.createJobConf(); + static void runNonPipedProgram(MiniMRCluster mr, MiniDFSCluster dfs, + Path program, JobConf conf) throws IOException { + JobConf job; + if(conf == null) { + job = mr.createJobConf(); + }else { + job = new JobConf(conf); + } + job.setInputFormat(WordCountInputFormat.class); FileSystem local = FileSystem.getLocal(job); Path testDir = new Path("file:" + System.getProperty("test.build.data"), "pipes"); Path inDir = new Path(testDir, "input"); - Path outDir = new Path(testDir, "output"); + nonPipedOutDir = new Path(testDir, "output"); Path wordExec = new Path("/testing/bin/application"); Path jobXml = new Path(testDir, "job.xml"); { FileSystem fs = dfs.getFileSystem(); fs.delete(wordExec.getParent(), true); fs.copyFromLocalFile(program, wordExec); } DataOutputStream out = local.create(new Path(inDir, "part0")); out.writeBytes("i am a silly test\n"); out.writeBytes("you are silly\n"); out.writeBytes("i am a cat test\n"); out.writeBytes("you is silly\n"); out.writeBytes("i am a billy test\n"); out.writeBytes("hello are silly\n"); out.close(); out = local.create(new Path(inDir, "part1")); out.writeBytes("mall world things drink java\n"); out.writeBytes("hall silly cats drink java\n"); out.writeBytes("all dogs bow wow\n"); out.writeBytes("hello drink java\n"); + local.delete(nonPipedOutDir, true); + local.mkdirs(nonPipedOutDir, new FsPermission(FsAction.ALL, FsAction.ALL, + FsAction.ALL)); out.close(); - 
local.delete(outDir, true); - local.mkdirs(outDir); out = local.create(jobXml); job.writeXml(out); out.close(); - System.err.println("About to run: Submitter -conf " + jobXml + - " -input " + inDir + " -output " + outDir + - " -program " + - dfs.getFileSystem().makeQualified(wordExec)); + System.err.println("About to run: Submitter -conf " + jobXml + " -input " + + inDir + " -output " + nonPipedOutDir + " -program " + + dfs.getFileSystem().makeQualified(wordExec)); try { int ret = ToolRunner.run(new Submitter(), new String[]{"-conf", jobXml.toString(), "-input", inDir.toString(), - "-output", outDir.toString(), + "-output", nonPipedOutDir.toString(), "-program", dfs.getFileSystem().makeQualified(wordExec).toString(), "-reduces", "2"}); assertEquals(0, ret); } catch (Exception e) { assertTrue("got exception: " + StringUtils.stringifyException(e), false); } } } diff --git a/src/test/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java b/src/test/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java new file mode 100644 index 0000000..761d9e2 --- /dev/null +++ b/src/test/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred.pipes; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.ClusterWithLinuxTaskController; +import org.apache.hadoop.mapred.JobConf; + +/** + * Test Pipes jobs with LinuxTaskController running the jobs as a user different + * from the user running the cluster. See {@link ClusterWithLinuxTaskController} + */ +public class TestPipesAsDifferentUser extends ClusterWithLinuxTaskController { + + private static final Log LOG = + LogFactory.getLog(TestPipesAsDifferentUser.class); + + public void testPipes() + throws Exception { + if (System.getProperty("compile.c++") == null) { + LOG.info("compile.c++ is not defined, so skipping TestPipes"); + return; + } + + if (!shouldRun()) { + return; + } + + super.startCluster(); + JobConf clusterConf = getClusterConf(); + Path inputPath = new Path(homeDirectory, "in"); + Path outputPath = new Path(homeDirectory, "out"); + + TestPipes.writeInputFile(FileSystem.get(clusterConf), inputPath); + TestPipes.runProgram(mrCluster, dfsCluster, TestPipes.wordCountSimple, + inputPath, outputPath, 3, 2, TestPipes.twoSplitOutput, clusterConf); + assertOwnerShip(outputPath); + TestPipes.cleanup(dfsCluster.getFileSystem(), outputPath); + + TestPipes.runProgram(mrCluster, dfsCluster, TestPipes.wordCountSimple, + inputPath, outputPath, 3, 0, TestPipes.noSortOutput, clusterConf); + assertOwnerShip(outputPath); + TestPipes.cleanup(dfsCluster.getFileSystem(), outputPath); + + TestPipes.runProgram(mrCluster, dfsCluster, TestPipes.wordCountPart, + inputPath, outputPath, 3, 2, TestPipes.fixedPartitionOutput, + clusterConf); + assertOwnerShip(outputPath); + TestPipes.cleanup(dfsCluster.getFileSystem(), outputPath); + + TestPipes.runNonPipedProgram(mrCluster, dfsCluster, + TestPipes.wordCountNoPipes, clusterConf); + assertOwnerShip(TestPipes.nonPipedOutDir, 
FileSystem.getLocal(clusterConf)); + } +}
jaxlaw/hadoop-common
cf1b5bb4c1965683d2259bfde703be11b2bf2fba
MAPREDUCE:754 from https://issues.apache.org/jira/secure/attachment/12427347/mapreduce-754-v2.2.1-yahoo.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 5931aa8..bea7b3e 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,439 +1,442 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383001 HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. (cos) HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) + MAPREDUCE-754. Fix NPE in expiry thread when a TT is lost. (Amar Kamat + via sharad) + yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. 
Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. 
(Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. 
(Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. 
Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. 
Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. 
Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. 
HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. 
Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. 
(Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/mapred/org/apache/hadoop/mapred/JobTracker.java b/src/mapred/org/apache/hadoop/mapred/JobTracker.java index eeb9dbe..d52b2bd 100644 --- a/src/mapred/org/apache/hadoop/mapred/JobTracker.java +++ b/src/mapred/org/apache/hadoop/mapred/JobTracker.java @@ -1,1402 +1,1396 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.PrintWriter; import java.io.InputStreamReader; import java.io.OutputStreamWriter; import java.io.Writer; import java.net.BindException; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; import java.util.Vector; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import javax.security.auth.login.LoginException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalDirAllocator; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.http.HttpServer; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.RPC.VersionMismatch; import org.apache.hadoop.mapred.JobHistory.Keys; import org.apache.hadoop.mapred.JobHistory.Listener; import org.apache.hadoop.mapred.JobHistory.Values; import 
org.apache.hadoop.mapred.JobInProgress.KillInterruptedException; import org.apache.hadoop.mapred.JobStatusChangeEvent.EventType; import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerHealthStatus; import org.apache.hadoop.net.DNSToSwitchMapping; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.net.ScriptBasedMapping; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.PermissionChecker; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UnixUserGroupInformation; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.authorize.ConfiguredPolicy; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; import org.apache.hadoop.util.HostsFileReader; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.mapreduce.ClusterMetrics; import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; /******************************************************* * JobTracker is the central location for submitting and * tracking MR jobs in a network environment. 
* *******************************************************/ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmissionProtocol, TaskTrackerManager, RefreshAuthorizationPolicyProtocol, AdminOperationsProtocol { static{ Configuration.addDefaultResource("mapred-default.xml"); Configuration.addDefaultResource("mapred-site.xml"); } static long TASKTRACKER_EXPIRY_INTERVAL = 10 * 60 * 1000; static long RETIRE_JOB_INTERVAL; static long RETIRE_JOB_CHECK_INTERVAL; // The interval after which one fault of a tracker will be discarded, // if there are no faults during this. private static long UPDATE_FAULTY_TRACKER_INTERVAL = 24 * 60 * 60 * 1000; // The maximum percentage of trackers in cluster added // to the 'blacklist' across all the jobs. private static double MAX_BLACKLIST_PERCENT = 0.50; // A tracker is blacklisted across jobs only if number of // blacklists are X% above the average number of blacklists. // X is the blacklist threshold here. private double AVERAGE_BLACKLIST_THRESHOLD = 0.50; // The maximum number of blacklists for a tracker after which the // tracker could be blacklisted across all jobs private int MAX_BLACKLISTS_PER_TRACKER = 4; // Approximate number of heartbeats that could arrive JobTracker // in a second static final String JT_HEARTBEATS_IN_SECOND = "mapred.heartbeats.in.second"; private int NUM_HEARTBEATS_IN_SECOND; private final int DEFAULT_NUM_HEARTBEATS_IN_SECOND = 100; private final int MIN_NUM_HEARTBEATS_IN_SECOND = 1; // Scaling factor for heartbeats, used for testing only static final String JT_HEARTBEATS_SCALING_FACTOR = "mapreduce.jobtracker.heartbeats.scaling.factor"; private float HEARTBEATS_SCALING_FACTOR; private final float MIN_HEARTBEATS_SCALING_FACTOR = 0.01f; private final float DEFAULT_HEARTBEATS_SCALING_FACTOR = 1.0f; public static enum State { INITIALIZING, RUNNING } State state = State.INITIALIZING; private static final int FS_ACCESS_RETRY_PERIOD = 10000; private DNSToSwitchMapping dnsToSwitchMapping; 
private NetworkTopology clusterMap = new NetworkTopology(); private int numTaskCacheLevels; // the max level to which we cache tasks private Set<Node> nodesAtMaxLevel = new HashSet<Node>(); private final TaskScheduler taskScheduler; private final List<JobInProgressListener> jobInProgressListeners = new CopyOnWriteArrayList<JobInProgressListener>(); private static final LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir"); // system directories are world-wide readable and owner readable final static FsPermission SYSTEM_DIR_PERMISSION = FsPermission.createImmutable((short) 0733); // rwx-wx-wx // system files should have 700 permission final static FsPermission SYSTEM_FILE_PERMISSION = FsPermission.createImmutable((short) 0700); // rwx------ /** * A client tried to submit a job before the Job Tracker was ready. */ public static class IllegalStateException extends IOException { public IllegalStateException(String msg) { super(msg); } } /** * The maximum no. of 'completed' (successful/failed/killed) * jobs kept in memory per-user. */ final int MAX_COMPLETE_USER_JOBS_IN_MEMORY; /** * The minimum time (in ms) that a job's information has to remain * in the JobTracker's memory before it is retired. */ static final int MIN_TIME_BEFORE_RETIRE = 0; private int nextJobId = 1; public static final Log LOG = LogFactory.getLog(JobTracker.class); /** * Start the JobTracker with given configuration. * * The conf will be modified to reflect the actual ports on which * the JobTracker is up and running if the user passes the port as * <code>zero</code>. * * @param conf configuration for the JobTracker. 
* @throws IOException */ public static JobTracker startTracker(JobConf conf ) throws IOException, InterruptedException { return startTracker(conf, generateNewIdentifier()); } public static JobTracker startTracker(JobConf conf, String identifier) throws IOException, InterruptedException { JobTracker result = null; while (true) { try { result = new JobTracker(conf, identifier); result.taskScheduler.setTaskTrackerManager(result); break; } catch (VersionMismatch e) { throw e; } catch (BindException e) { throw e; } catch (UnknownHostException e) { throw e; } catch (AccessControlException ace) { // in case of jobtracker not having right access // bail out throw ace; } catch (IOException e) { LOG.warn("Error starting tracker: " + StringUtils.stringifyException(e)); } Thread.sleep(1000); } if (result != null) { JobEndNotifier.startNotifier(); } return result; } public void stopTracker() throws IOException { JobEndNotifier.stopNotifier(); close(); } public long getProtocolVersion(String protocol, long clientVersion) throws IOException { if (protocol.equals(InterTrackerProtocol.class.getName())) { return InterTrackerProtocol.versionID; } else if (protocol.equals(JobSubmissionProtocol.class.getName())){ return JobSubmissionProtocol.versionID; } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){ return RefreshAuthorizationPolicyProtocol.versionID; } else if (protocol.equals(AdminOperationsProtocol.class.getName())){ return AdminOperationsProtocol.versionID; } else { throw new IOException("Unknown protocol to job tracker: " + protocol); } } /** * A thread to timeout tasks that have been assigned to task trackers, * but that haven't reported back yet. * Note that I included a stop() method, even though there is no place * where JobTrackers are cleaned up. */ private class ExpireLaunchingTasks implements Runnable { /** * This is a map of the tasks that have been assigned to task trackers, * but that have not yet been seen in a status report. 
* map: task-id -> time-assigned */ private Map<TaskAttemptID, Long> launchingTasks = new LinkedHashMap<TaskAttemptID, Long>(); public void run() { while (true) { try { // Every 3 minutes check for any tasks that are overdue Thread.sleep(TASKTRACKER_EXPIRY_INTERVAL/3); long now = System.currentTimeMillis(); LOG.debug("Starting launching task sweep"); synchronized (JobTracker.this) { synchronized (launchingTasks) { Iterator<Map.Entry<TaskAttemptID, Long>> itr = launchingTasks.entrySet().iterator(); while (itr.hasNext()) { Map.Entry<TaskAttemptID, Long> pair = itr.next(); TaskAttemptID taskId = pair.getKey(); long age = now - (pair.getValue()).longValue(); LOG.info(taskId + " is " + age + " ms debug."); if (age > TASKTRACKER_EXPIRY_INTERVAL) { LOG.info("Launching task " + taskId + " timed out."); TaskInProgress tip = null; tip = taskidToTIPMap.get(taskId); if (tip != null) { JobInProgress job = tip.getJob(); String trackerName = getAssignedTracker(taskId); TaskTrackerStatus trackerStatus = getTaskTrackerStatus(trackerName); // This might happen when the tasktracker has already // expired and this thread tries to call failedtask // again. expire tasktracker should have called failed // task! if (trackerStatus != null) job.failedTask(tip, taskId, "Error launching task", tip.isMapTask()? TaskStatus.Phase.MAP: TaskStatus.Phase.STARTING, TaskStatus.State.FAILED, trackerName); } itr.remove(); } else { // the tasks are sorted by start time, so once we find // one that we want to keep, we are done for this cycle. 
break; } } } } } catch (InterruptedException ie) { // all done break; } catch (Exception e) { LOG.error("Expire Launching Task Thread got exception: " + StringUtils.stringifyException(e)); } } } public void addNewTask(TaskAttemptID taskName) { synchronized (launchingTasks) { launchingTasks.put(taskName, System.currentTimeMillis()); } } public void removeTask(TaskAttemptID taskName) { synchronized (launchingTasks) { launchingTasks.remove(taskName); } } } /////////////////////////////////////////////////////// // Used to expire TaskTrackers that have gone down /////////////////////////////////////////////////////// class ExpireTrackers implements Runnable { public ExpireTrackers() { } /** * The run method lives for the life of the JobTracker, and removes TaskTrackers * that have not checked in for some time. */ public void run() { while (true) { try { // // Thread runs periodically to check whether trackers should be expired. // The sleep interval must be no more than half the maximum expiry time // for a task tracker. 
// Thread.sleep(TASKTRACKER_EXPIRY_INTERVAL / 3); // // Loop through all expired items in the queue // // Need to lock the JobTracker here since we are // manipulating it's data-structures via // ExpireTrackers.run -> JobTracker.lostTaskTracker -> // JobInProgress.failedTask -> JobTracker.markCompleteTaskAttempt // Also need to lock JobTracker before locking 'taskTracker' & // 'trackerExpiryQueue' to prevent deadlock: // @see {@link JobTracker.processHeartbeat(TaskTrackerStatus, boolean)} synchronized (JobTracker.this) { synchronized (taskTrackers) { synchronized (trackerExpiryQueue) { long now = System.currentTimeMillis(); TaskTrackerStatus leastRecent = null; while ((trackerExpiryQueue.size() > 0) && (leastRecent = trackerExpiryQueue.first()) != null && ((now - leastRecent.getLastSeen()) > TASKTRACKER_EXPIRY_INTERVAL)) { // Remove profile from head of queue trackerExpiryQueue.remove(leastRecent); String trackerName = leastRecent.getTrackerName(); // Figure out if last-seen time should be updated, or if tracker is dead TaskTracker current = getTaskTracker(trackerName); TaskTrackerStatus newProfile = (current == null ) ? null : current.getStatus(); // Items might leave the taskTracker set through other means; the // status stored in 'taskTrackers' might be null, which means the // tracker has already been destroyed. 
if (newProfile != null) { if ((now - newProfile.getLastSeen()) > TASKTRACKER_EXPIRY_INTERVAL) { - // Remove completely after marking the tasks as 'KILLED' - lostTaskTracker(current); - // tracker is lost, and if it is blacklisted, remove - // it from the count of blacklisted trackers in the cluster - if (isBlacklisted(trackerName)) { - faultyTrackers.decrBlackListedTrackers(1); - } - updateTaskTrackerStatus(trackerName, null); - statistics.taskTrackerRemoved(trackerName); - getInstrumentation().decTrackers(1); + removeTracker(current); // remove the mapping from the hosts list String hostname = newProfile.getHost(); hostnameToTaskTracker.get(hostname).remove(trackerName); } else { // Update time by inserting latest profile trackerExpiryQueue.add(newProfile); } } } } } } } catch (InterruptedException iex) { break; } catch (Exception t) { LOG.error("Tracker Expiry Thread got exception: " + StringUtils.stringifyException(t)); } } } } synchronized void historyFileCopied(JobID jobid, String historyFile) { JobInProgress job = getJob(jobid); if (job != null) { //found in main cache job.setHistoryFileCopied(); if (historyFile != null) { job.setHistoryFile(historyFile); } return; } RetireJobInfo jobInfo = retireJobs.get(jobid); if (jobInfo != null) { //found in retired cache if (historyFile != null) { jobInfo.setHistoryFile(historyFile); } } } static class RetireJobInfo { final JobStatus status; final JobProfile profile; final long finishTime; private String historyFile; RetireJobInfo(JobStatus status, JobProfile profile, long finishTime, String historyFile) { this.status = status; this.profile = profile; this.finishTime = finishTime; this.historyFile = historyFile; } void setHistoryFile(String file) { this.historyFile = file; } String getHistoryFile() { return historyFile; } } /////////////////////////////////////////////////////// // Used to remove old finished Jobs that have been around for too long /////////////////////////////////////////////////////// class RetireJobs 
implements Runnable { private final Map<JobID, RetireJobInfo> jobIDStatusMap = new HashMap<JobID, RetireJobInfo>(); private final LinkedList<RetireJobInfo> jobRetireInfoQ = new LinkedList<RetireJobInfo>(); public RetireJobs() { } synchronized void addToCache(JobInProgress job) { RetireJobInfo info = new RetireJobInfo(job.getStatus(), job.getProfile(), job.getFinishTime(), job.getHistoryFile()); jobRetireInfoQ.add(info); jobIDStatusMap.put(info.status.getJobID(), info); if (jobRetireInfoQ.size() > retiredJobsCacheSize) { RetireJobInfo removed = jobRetireInfoQ.remove(); jobIDStatusMap.remove(removed.status.getJobID()); LOG.info("Retired job removed from cache " + removed.status.getJobID()); } } synchronized RetireJobInfo get(JobID jobId) { return jobIDStatusMap.get(jobId); } @SuppressWarnings("unchecked") synchronized LinkedList<RetireJobInfo> getAll() { return (LinkedList<RetireJobInfo>) jobRetireInfoQ.clone(); } synchronized LinkedList<JobStatus> getAllJobStatus() { LinkedList<JobStatus> list = new LinkedList<JobStatus>(); for (RetireJobInfo info : jobRetireInfoQ) { list.add(info.status); } return list; } private boolean minConditionToRetire(JobInProgress job, long now) { return job.getStatus().getRunState() != JobStatus.RUNNING && job.getStatus().getRunState() != JobStatus.PREP && (job.getFinishTime() + MIN_TIME_BEFORE_RETIRE < now) && job.isHistoryFileCopied(); } /** * The run method lives for the life of the JobTracker, * and removes Jobs that are not still running, but which * finished a long time ago. 
*/ public void run() { while (true) { try { Thread.sleep(RETIRE_JOB_CHECK_INTERVAL); List<JobInProgress> retiredJobs = new ArrayList<JobInProgress>(); long now = System.currentTimeMillis(); long retireBefore = now - RETIRE_JOB_INTERVAL; synchronized (jobs) { for(JobInProgress job: jobs.values()) { if (minConditionToRetire(job, now) && (job.getFinishTime() < retireBefore)) { retiredJobs.add(job); } } } synchronized (userToJobsMap) { Iterator<Map.Entry<String, ArrayList<JobInProgress>>> userToJobsMapIt = userToJobsMap.entrySet().iterator(); while (userToJobsMapIt.hasNext()) { Map.Entry<String, ArrayList<JobInProgress>> entry = userToJobsMapIt.next(); ArrayList<JobInProgress> userJobs = entry.getValue(); Iterator<JobInProgress> it = userJobs.iterator(); while (it.hasNext() && userJobs.size() > MAX_COMPLETE_USER_JOBS_IN_MEMORY) { JobInProgress jobUser = it.next(); if (retiredJobs.contains(jobUser)) { LOG.info("Removing from userToJobsMap: " + jobUser.getJobID()); it.remove(); } else if (minConditionToRetire(jobUser, now)) { LOG.info("User limit exceeded. 
Marking job: " + jobUser.getJobID() + " for retire."); retiredJobs.add(jobUser); it.remove(); } } if (userJobs.isEmpty()) { userToJobsMapIt.remove(); } } } if (!retiredJobs.isEmpty()) { synchronized (JobTracker.this) { synchronized (jobs) { synchronized (taskScheduler) { for (JobInProgress job: retiredJobs) { removeJobTasks(job); jobs.remove(job.getProfile().getJobID()); for (JobInProgressListener l : jobInProgressListeners) { l.jobRemoved(job); } String jobUser = job.getProfile().getUser(); LOG.info("Retired job with id: '" + job.getProfile().getJobID() + "' of user '" + jobUser + "'"); // clean up job files from the local disk JobHistory.JobInfo.cleanupJob(job.getProfile().getJobID()); addToCache(job); } } } } } } catch (InterruptedException t) { break; } catch (Throwable t) { LOG.error("Error in retiring job:\n" + StringUtils.stringifyException(t)); } } } } enum ReasonForBlackListing { EXCEEDING_FAILURES, NODE_UNHEALTHY } // The FaultInfo which indicates the number of faults of a tracker // and when the last fault occurred // and whether the tracker is blacklisted across all jobs or not private static class FaultInfo { static final String FAULT_FORMAT_STRING = "%d failures on the tracker"; int numFaults = 0; long lastUpdated; boolean blacklisted; private boolean isHealthy; private HashMap<ReasonForBlackListing, String>rfbMap; FaultInfo() { numFaults = 0; lastUpdated = System.currentTimeMillis(); blacklisted = false; rfbMap = new HashMap<ReasonForBlackListing, String>(); } void setFaultCount(int num) { numFaults = num; } void setLastUpdated(long timeStamp) { lastUpdated = timeStamp; } int getFaultCount() { return numFaults; } long getLastUpdated() { return lastUpdated; } boolean isBlacklisted() { return blacklisted; } void setBlacklist(ReasonForBlackListing rfb, String trackerFaultReport) { blacklisted = true; this.rfbMap.put(rfb, trackerFaultReport); } public void setHealthy(boolean isHealthy) { this.isHealthy = isHealthy; } public boolean isHealthy() { return 
isHealthy; } public String getTrackerFaultReport() { StringBuffer sb = new StringBuffer(); for(String reasons : rfbMap.values()) { sb.append(reasons); sb.append("\n"); } return sb.toString(); } Set<ReasonForBlackListing> getReasonforblacklisting() { return this.rfbMap.keySet(); } public void unBlacklist() { this.blacklisted = false; this.rfbMap.clear(); } public boolean removeBlackListedReason(ReasonForBlackListing rfb) { String str = rfbMap.remove(rfb); return str!=null; } public void addBlackListedReason(ReasonForBlackListing rfb, String reason) { this.rfbMap.put(rfb, reason); } } private class FaultyTrackersInfo { // A map from hostName to its faults private Map<String, FaultInfo> potentiallyFaultyTrackers = new HashMap<String, FaultInfo>(); // This count gives the number of blacklisted trackers in the cluster // at any time. This is maintained to avoid iteration over // the potentiallyFaultyTrackers to get blacklisted trackers. And also // this count doesn't include blacklisted trackers which are lost, // although the fault info is maintained for lost trackers. private volatile int numBlacklistedTrackers = 0; /** * Increments faults(blacklist by job) for the tracker by one. * * Adds the tracker to the potentially faulty list. 
* * @param hostName */ void incrementFaults(String hostName) { synchronized (potentiallyFaultyTrackers) { FaultInfo fi = getFaultInfo(hostName, true); int numFaults = fi.getFaultCount(); ++numFaults; fi.setFaultCount(numFaults); fi.setLastUpdated(System.currentTimeMillis()); if (exceedsFaults(fi)) { LOG.info("Adding " + hostName + " to the blacklist" + " across all jobs"); String reason = String.format(FaultInfo.FAULT_FORMAT_STRING, numFaults); blackListTracker(hostName, reason, ReasonForBlackListing.EXCEEDING_FAILURES); } } } private void incrBlackListedTrackers(int count) { numBlacklistedTrackers += count; getInstrumentation().addBlackListedTrackers(count); } private void decrBlackListedTrackers(int count) { numBlacklistedTrackers -= count; getInstrumentation().decBlackListedTrackers(count); } private void blackListTracker(String hostName, String reason, ReasonForBlackListing rfb) { FaultInfo fi = getFaultInfo(hostName, true); boolean blackListed = fi.isBlacklisted(); if(blackListed) { if (LOG.isDebugEnabled()) { LOG.debug("Adding blacklisted reason for tracker : " + hostName + " Reason for blacklisting is : " + rfb); } if (!fi.getReasonforblacklisting().contains(rfb)) { LOG.info("Adding blacklisted reason for tracker : " + hostName + " Reason for blacklisting is : " + rfb); } fi.addBlackListedReason(rfb, reason); } else { LOG.info("Blacklisting tracker : " + hostName + " Reason for blacklisting is : " + rfb); Set<TaskTracker> trackers = hostnameToTaskTracker.get(hostName); synchronized (trackers) { for (TaskTracker tracker : trackers) { tracker.cancelAllReservations(); } } removeHostCapacity(hostName); fi.setBlacklist(rfb, reason); } } private boolean canUnBlackListTracker(String hostName, ReasonForBlackListing rfb) { FaultInfo fi = getFaultInfo(hostName, false); if(fi == null) { return false; } Set<ReasonForBlackListing> rfbSet = fi.getReasonforblacklisting(); return fi.isBlacklisted() && rfbSet.contains(rfb); } private void unBlackListTracker(String hostName, 
ReasonForBlackListing rfb) { // check if you can black list the tracker then call this methods FaultInfo fi = getFaultInfo(hostName, false); if(fi.removeBlackListedReason(rfb)) { if(fi.getReasonforblacklisting().isEmpty()) { addHostCapacity(hostName); LOG.info("Unblacklisting tracker : " + hostName); fi.unBlacklist(); //We have unBlackListed tracker, so tracker should //definitely be healthy. Check fault count if fault count //is zero don't keep it memory. if(fi.numFaults == 0) { potentiallyFaultyTrackers.remove(hostName); } } } } private FaultInfo getFaultInfo(String hostName, boolean createIfNeccessary) { FaultInfo fi = potentiallyFaultyTrackers.get(hostName); if (fi == null && createIfNeccessary) { fi = new FaultInfo(); potentiallyFaultyTrackers.put(hostName, fi); } return fi; } /** * Blacklists the tracker across all jobs if * <ol> * <li>#faults are more than * MAX_BLACKLISTS_PER_TRACKER (configurable) blacklists</li> * <li>#faults is 50% (configurable) above the average #faults</li> * <li>50% the cluster is not blacklisted yet </li> * </ol> */ private boolean exceedsFaults(FaultInfo fi) { int faultCount = fi.getFaultCount(); if (faultCount >= MAX_BLACKLISTS_PER_TRACKER) { // calculate avgBlackLists long clusterSize = getClusterStatus().getTaskTrackers(); long sum = 0; for (FaultInfo f : potentiallyFaultyTrackers.values()) { sum += f.getFaultCount(); } double avg = (double) sum / clusterSize; long totalCluster = clusterSize + numBlacklistedTrackers; if ((faultCount - avg) > (AVERAGE_BLACKLIST_THRESHOLD * avg) && numBlacklistedTrackers < (totalCluster * MAX_BLACKLIST_PERCENT)) { return true; } } return false; } /** * Removes the tracker from blacklist and * from potentially faulty list, when it is restarted. 
* * @param hostName */ void markTrackerHealthy(String hostName) { synchronized (potentiallyFaultyTrackers) { FaultInfo fi = potentiallyFaultyTrackers.remove(hostName); if (fi != null && fi.isBlacklisted()) { LOG.info("Removing " + hostName + " from blacklist"); addHostCapacity(hostName); } } } /** * Check whether tasks can be assigned to the tracker. * * One fault of the tracker is discarded if there * are no faults during one day. So, the tracker will get a * chance again to run tasks of a job. * * @param hostName The tracker name * @param now The current time * * @return true if the tracker is blacklisted * false otherwise */ boolean shouldAssignTasksToTracker(String hostName, long now) { synchronized (potentiallyFaultyTrackers) { FaultInfo fi = potentiallyFaultyTrackers.get(hostName); if (fi != null && (now - fi.getLastUpdated()) > UPDATE_FAULTY_TRACKER_INTERVAL) { int numFaults = fi.getFaultCount() - 1; fi.setFaultCount(numFaults); fi.setLastUpdated(now); if (canUnBlackListTracker(hostName, ReasonForBlackListing.EXCEEDING_FAILURES)) { unBlackListTracker(hostName, ReasonForBlackListing.EXCEEDING_FAILURES); } } return (fi != null && fi.isBlacklisted()); } } private void removeHostCapacity(String hostName) { synchronized (taskTrackers) { // remove the capacity of trackers on this host + int numTrackersOnHost = 0; for (TaskTrackerStatus status : getStatusesOnHost(hostName)) { int mapSlots = status.getMaxMapSlots(); totalMapTaskCapacity -= mapSlots; int reduceSlots = status.getMaxReduceSlots(); totalReduceTaskCapacity -= reduceSlots; + ++numTrackersOnHost; getInstrumentation().addBlackListedMapSlots( mapSlots); getInstrumentation().addBlackListedReduceSlots( reduceSlots); } - incrBlackListedTrackers(uniqueHostsMap.remove(hostName)); + uniqueHostsMap.remove(hostName); + incrBlackListedTrackers(numTrackersOnHost); } } // This is called on tracker's restart or after a day of blacklist. 
private void addHostCapacity(String hostName) { synchronized (taskTrackers) { int numTrackersOnHost = 0; // add the capacity of trackers on the host for (TaskTrackerStatus status : getStatusesOnHost(hostName)) { int mapSlots = status.getMaxMapSlots(); totalMapTaskCapacity += mapSlots; int reduceSlots = status.getMaxReduceSlots(); totalReduceTaskCapacity += reduceSlots; numTrackersOnHost++; getInstrumentation().decBlackListedMapSlots(mapSlots); getInstrumentation().decBlackListedReduceSlots(reduceSlots); } uniqueHostsMap.put(hostName, numTrackersOnHost); decrBlackListedTrackers(numTrackersOnHost); } } /** * Whether a host is blacklisted across all the jobs. * * @param hostName * @return */ boolean isBlacklisted(String hostName) { synchronized (potentiallyFaultyTrackers) { FaultInfo fi = null; if ((fi = potentiallyFaultyTrackers.get(hostName)) != null) { return fi.isBlacklisted(); } } return false; } int getFaultCount(String hostName) { synchronized (potentiallyFaultyTrackers) { FaultInfo fi = null; if ((fi = potentiallyFaultyTrackers.get(hostName)) != null) { return fi.getFaultCount(); } } return 0; } Set<ReasonForBlackListing> getReasonForBlackListing(String hostName) { synchronized (potentiallyFaultyTrackers) { FaultInfo fi = null; if ((fi = potentiallyFaultyTrackers.get(hostName)) != null) { return fi.getReasonforblacklisting(); } } return null; } void setNodeHealthStatus(String hostName, boolean isHealthy, String reason) { FaultInfo fi = null; // If tracker is not healthy, create a fault info object // blacklist it. 
if (!isHealthy) { fi = getFaultInfo(hostName, true); fi.setHealthy(isHealthy); synchronized (potentiallyFaultyTrackers) { blackListTracker(hostName, reason, ReasonForBlackListing.NODE_UNHEALTHY); } } else { fi = getFaultInfo(hostName, false); if (fi == null) { return; } else { if (canUnBlackListTracker(hostName, ReasonForBlackListing.NODE_UNHEALTHY)) { unBlackListTracker(hostName, ReasonForBlackListing.NODE_UNHEALTHY); } } } } } /** * Get all task tracker statuses on given host * * @param hostName * @return {@link java.util.List} of {@link TaskTrackerStatus} */ private List<TaskTrackerStatus> getStatusesOnHost(String hostName) { List<TaskTrackerStatus> statuses = new ArrayList<TaskTrackerStatus>(); synchronized (taskTrackers) { for (TaskTracker tt : taskTrackers.values()) { TaskTrackerStatus status = tt.getStatus(); if (hostName.equals(status.getHost())) { statuses.add(status); } } } return statuses; } /////////////////////////////////////////////////////// // Used to recover the jobs upon restart /////////////////////////////////////////////////////// class RecoveryManager { Set<JobID> jobsToRecover; // set of jobs to be recovered private int totalEventsRecovered = 0; private int restartCount = 0; private boolean shouldRecover = false; Set<String> recoveredTrackers = Collections.synchronizedSet(new HashSet<String>()); /** A custom listener that replays the events in the order in which the * events (task attempts) occurred. 
*/ class JobRecoveryListener implements Listener { // The owner job private JobInProgress jip; private JobHistory.JobInfo job; // current job's info object // Maintain the count of the (attempt) events recovered private int numEventsRecovered = 0; // Maintains open transactions private Map<String, String> hangingAttempts = new HashMap<String, String>(); // Whether there are any updates for this job private boolean hasUpdates = false; public JobRecoveryListener(JobInProgress jip) { this.jip = jip; this.job = new JobHistory.JobInfo(jip.getJobID().toString()); } /** * Process a task. Note that a task might commit a previously pending * transaction. */ private void processTask(String taskId, JobHistory.Task task) { // Any TASK info commits the previous transaction boolean hasHanging = hangingAttempts.remove(taskId) != null; if (hasHanging) { numEventsRecovered += 2; } TaskID id = TaskID.forName(taskId); TaskInProgress tip = getTip(id); updateTip(tip, task); } /** * Adds a task-attempt in the listener */ private void processTaskAttempt(String taskAttemptId, JobHistory.TaskAttempt attempt) { TaskAttemptID id = TaskAttemptID.forName(taskAttemptId); // Check if the transaction for this attempt can be committed String taskStatus = attempt.get(Keys.TASK_STATUS); TaskAttemptID taskID = TaskAttemptID.forName(taskAttemptId); JobInProgress jip = getJob(taskID.getJobID()); JobStatus prevStatus = (JobStatus)jip.getStatus().clone(); if (taskStatus.length() > 0) { // This means this is an update event if (taskStatus.equals(Values.SUCCESS.name())) { // Mark this attempt as hanging hangingAttempts.put(id.getTaskID().toString(), taskAttemptId); addSuccessfulAttempt(jip, id, attempt); } else { addUnsuccessfulAttempt(jip, id, attempt); numEventsRecovered += 2; } } else { createTaskAttempt(jip, id, attempt); } JobStatus newStatus = (JobStatus)jip.getStatus().clone(); if (prevStatus.getRunState() != newStatus.getRunState()) { if(LOG.isDebugEnabled()) LOG.debug("Status changed hence 
informing prevStatus" + prevStatus + " currentStatus "+ newStatus); JobStatusChangeEvent event = new JobStatusChangeEvent(jip, EventType.RUN_STATE_CHANGED, prevStatus, newStatus); updateJobInProgressListeners(event); } } public void handle(JobHistory.RecordTypes recType, Map<Keys, String> values) throws IOException { if (recType == JobHistory.RecordTypes.Job) { // Update the meta-level job information job.handle(values); // Forcefully init the job as we have some updates for it checkAndInit(); } else if (recType.equals(JobHistory.RecordTypes.Task)) { String taskId = values.get(Keys.TASKID); // Create a task JobHistory.Task task = new JobHistory.Task(); task.handle(values); // Ignore if its a cleanup task if (isCleanup(task)) { return; } // Process the task i.e update the tip state processTask(taskId, task); } else if (recType.equals(JobHistory.RecordTypes.MapAttempt)) { String attemptId = values.get(Keys.TASK_ATTEMPT_ID); // Create a task attempt JobHistory.MapAttempt attempt = new JobHistory.MapAttempt(); attempt.handle(values); // Ignore if its a cleanup task if (isCleanup(attempt)) { return; } // Process the attempt i.e update the attempt state via job processTaskAttempt(attemptId, attempt); } else if (recType.equals(JobHistory.RecordTypes.ReduceAttempt)) { String attemptId = values.get(Keys.TASK_ATTEMPT_ID); // Create a task attempt JobHistory.ReduceAttempt attempt = new JobHistory.ReduceAttempt(); attempt.handle(values); // Ignore if its a cleanup task if (isCleanup(attempt)) { return; } // Process the attempt i.e update the job state via job processTaskAttempt(attemptId, attempt); } } // Check if the task is of type CLEANUP private boolean isCleanup(JobHistory.Task task) { String taskType = task.get(Keys.TASK_TYPE); return Values.CLEANUP.name().equals(taskType); } // Init the job if its ready for init. 
Also make sure that the scheduler // is updated private void checkAndInit() throws IOException { String jobStatus = this.job.get(Keys.JOB_STATUS); if (Values.PREP.name().equals(jobStatus)) { hasUpdates = true; LOG.info("Calling init from RM for job " + jip.getJobID().toString()); try { initJob(jip); } catch (Throwable t) { LOG.error("Job initialization failed : \n" + StringUtils.stringifyException(t)); failJob(jip); throw new IOException(t); } } } void close() { if (hasUpdates) { // Apply the final (job-level) updates JobStatusChangeEvent event = updateJob(jip, job); synchronized (JobTracker.this) { // Update the job listeners updateJobInProgressListeners(event); } } } public int getNumEventsRecovered() { return numEventsRecovered; } } public RecoveryManager() { jobsToRecover = new TreeSet<JobID>(); } public boolean contains(JobID id) { return jobsToRecover.contains(id); } void addJobForRecovery(JobID id) { jobsToRecover.add(id); } public boolean shouldRecover() { return shouldRecover; } public boolean shouldSchedule() { return recoveredTrackers.isEmpty(); } private void markTracker(String trackerName) { recoveredTrackers.add(trackerName); } void unMarkTracker(String trackerName) { recoveredTrackers.remove(trackerName); } Set<JobID> getJobsToRecover() { return jobsToRecover; } /** Check if the given string represents a job-id or not */ private boolean isJobNameValid(String str) { if(str == null) { return false; } String[] parts = str.split("_"); if(parts.length == 3) { if(parts[0].equals("job")) { // other 2 parts should be parseable return JobTracker.validateIdentifier(parts[1]) && JobTracker.validateJobNumber(parts[2]); } } return false; } // checks if the job dir has the required files public void checkAndAddJob(FileStatus status) throws IOException { String fileName = status.getPath().getName(); if (isJobNameValid(fileName)) { if (JobClient.isJobDirValid(status.getPath(), fs)) { recoveryManager.addJobForRecovery(JobID.forName(fileName)); shouldRecover = true; 
// enable actual recovery if num-files > 1 } else { LOG.info("Found an incomplete job directory " + fileName + "." + " Deleting it!!"); fs.delete(status.getPath(), true); } } } private JobStatusChangeEvent updateJob(JobInProgress jip, JobHistory.JobInfo job) { // Change the job priority String jobpriority = job.get(Keys.JOB_PRIORITY); JobPriority priority = JobPriority.valueOf(jobpriority); // It's important to update this via the jobtracker's api as it will // take care of updating the event listeners too setJobPriority(jip.getJobID(), priority); // Save the previous job status JobStatus oldStatus = (JobStatus)jip.getStatus().clone(); // Set the start/launch time only if there are recovered tasks // Increment the job's restart count jip.updateJobInfo(job.getLong(JobHistory.Keys.SUBMIT_TIME), job.getLong(JobHistory.Keys.LAUNCH_TIME)); // Save the new job status JobStatus newStatus = (JobStatus)jip.getStatus().clone(); return new JobStatusChangeEvent(jip, EventType.START_TIME_CHANGED, oldStatus, newStatus); } private void updateTip(TaskInProgress tip, JobHistory.Task task) { long startTime = task.getLong(Keys.START_TIME); if (startTime != 0) { tip.setExecStartTime(startTime); } long finishTime = task.getLong(Keys.FINISH_TIME); // For failed tasks finish-time will be missing if (finishTime != 0) { tip.setExecFinishTime(finishTime); } String cause = task.get(Keys.TASK_ATTEMPT_ID); if (cause.length() > 0) { // This means that the this is a FAILED events TaskAttemptID id = TaskAttemptID.forName(cause); TaskStatus status = tip.getTaskStatus(id); synchronized (JobTracker.this) { // This will add the tip failed event in the new log tip.getJob().failedTask(tip, id, status.getDiagnosticInfo(), status.getPhase(), status.getRunState(), status.getTaskTracker()); } } } private void createTaskAttempt(JobInProgress job, TaskAttemptID attemptId, JobHistory.TaskAttempt attempt) { TaskID id = attemptId.getTaskID(); String type = attempt.get(Keys.TASK_TYPE); TaskInProgress tip = 
job.getTaskInProgress(id); // I. Get the required info TaskStatus taskStatus = null; String trackerName = attempt.get(Keys.TRACKER_NAME); String trackerHostName = JobInProgress.convertTrackerNameToHostName(trackerName); // recover the port information. int port = 0; // default to 0 String hport = attempt.get(Keys.HTTP_PORT); if (hport != null && hport.length() > 0) { port = attempt.getInt(Keys.HTTP_PORT); } long attemptStartTime = attempt.getLong(Keys.START_TIME); // II. Create the (appropriate) task status if (type.equals(Values.MAP.name())) { taskStatus = new MapTaskStatus(attemptId, 0.0f, job.getNumSlotsPerTask(TaskType.MAP), TaskStatus.State.RUNNING, "", "", trackerName, TaskStatus.Phase.MAP, new Counters()); } else { taskStatus = new ReduceTaskStatus(attemptId, 0.0f, job.getNumSlotsPerTask(TaskType.REDUCE), TaskStatus.State.RUNNING, "", "", trackerName, TaskStatus.Phase.REDUCE, new Counters()); } // Set the start time taskStatus.setStartTime(attemptStartTime); List<TaskStatus> ttStatusList = new ArrayList<TaskStatus>(); ttStatusList.add(taskStatus); // III. Create the dummy tasktracker status TaskTrackerStatus ttStatus = new TaskTrackerStatus(trackerName, trackerHostName, port, ttStatusList, 0 , 0, 0); ttStatus.setLastSeen(System.currentTimeMillis()); synchronized (JobTracker.this) { synchronized (taskTrackers) { synchronized (trackerExpiryQueue) { // IV. Register a new tracker TaskTracker taskTracker = getTaskTracker(trackerName); boolean isTrackerRegistered = (taskTracker != null); if (!isTrackerRegistered) { markTracker(trackerName); // add the tracker to recovery-manager taskTracker = new TaskTracker(trackerName); taskTracker.setStatus(ttStatus); addNewTracker(taskTracker); } // V. Update the tracker status // This will update the meta info of the jobtracker and also add the // tracker status if missing i.e register it updateTaskTrackerStatus(trackerName, ttStatus); } } // Register the attempt with job and tip, under JobTracker lock. 
// Since, as of today they are atomic through heartbeat. // VI. Register the attempt // a) In the job job.addRunningTaskToTIP(tip, attemptId, ttStatus, false); // b) In the tip tip.updateStatus(taskStatus); } // VII. Make an entry in the launched tasks expireLaunchingTasks.addNewTask(attemptId); } private void addSuccessfulAttempt(JobInProgress job, TaskAttemptID attemptId, JobHistory.TaskAttempt attempt) { // I. Get the required info TaskID taskId = attemptId.getTaskID(); String type = attempt.get(Keys.TASK_TYPE); TaskInProgress tip = job.getTaskInProgress(taskId); long attemptFinishTime = attempt.getLong(Keys.FINISH_TIME); // Get the task status and the tracker name and make a copy of it TaskStatus taskStatus = (TaskStatus)tip.getTaskStatus(attemptId).clone(); taskStatus.setFinishTime(attemptFinishTime); String stateString = attempt.get(Keys.STATE_STRING); // Update the basic values taskStatus.setStateString(stateString); taskStatus.setProgress(1.0f); taskStatus.setRunState(TaskStatus.State.SUCCEEDED); // Set the shuffle/sort finished times if (type.equals(Values.REDUCE.name())) { long shuffleTime = Long.parseLong(attempt.get(Keys.SHUFFLE_FINISHED)); long sortTime = Long.parseLong(attempt.get(Keys.SORT_FINISHED)); taskStatus.setShuffleFinishTime(shuffleTime); taskStatus.setSortFinishTime(sortTime); @@ -2586,1030 +2580,1032 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, } /** * Version that is called from a timer thread, and therefore needs to be * careful to synchronize. 
*/ public synchronized List<JobInProgress> getRunningJobs() { synchronized (jobs) { return runningJobs(); } } public Vector<JobInProgress> failedJobs() { Vector<JobInProgress> v = new Vector<JobInProgress>(); for (Iterator it = jobs.values().iterator(); it.hasNext();) { JobInProgress jip = (JobInProgress) it.next(); JobStatus status = jip.getStatus(); if ((status.getRunState() == JobStatus.FAILED) || (status.getRunState() == JobStatus.KILLED)) { v.add(jip); } } return v; } public Vector<JobInProgress> completedJobs() { Vector<JobInProgress> v = new Vector<JobInProgress>(); for (Iterator it = jobs.values().iterator(); it.hasNext();) { JobInProgress jip = (JobInProgress) it.next(); JobStatus status = jip.getStatus(); if (status.getRunState() == JobStatus.SUCCEEDED) { v.add(jip); } } return v; } /** * Get all the task trackers in the cluster * * @return {@link Collection} of {@link TaskTrackerStatus} */ public Collection<TaskTrackerStatus> taskTrackers() { Collection<TaskTrackerStatus> ttStatuses; synchronized (taskTrackers) { ttStatuses = new ArrayList<TaskTrackerStatus>(taskTrackers.values().size()); for (TaskTracker tt : taskTrackers.values()) { ttStatuses.add(tt.getStatus()); } } return ttStatuses; } /** * Get the active task tracker statuses in the cluster * * @return {@link Collection} of active {@link TaskTrackerStatus} */ public Collection<TaskTrackerStatus> activeTaskTrackers() { Collection<TaskTrackerStatus> activeTrackers = new ArrayList<TaskTrackerStatus>(); synchronized (taskTrackers) { for ( TaskTracker tt : taskTrackers.values()) { TaskTrackerStatus status = tt.getStatus(); if (!faultyTrackers.isBlacklisted(status.getHost())) { activeTrackers.add(status); } } } return activeTrackers; } /** * Get the active and blacklisted task tracker names in the cluster. The first * element in the returned list contains the list of active tracker names. * The second element in the returned list contains the list of blacklisted * tracker names. 
*/ public List<List<String>> taskTrackerNames() { List<String> activeTrackers = new ArrayList<String>(); List<String> blacklistedTrackers = new ArrayList<String>(); synchronized (taskTrackers) { for (TaskTracker tt : taskTrackers.values()) { TaskTrackerStatus status = tt.getStatus(); if (!faultyTrackers.isBlacklisted(status.getHost())) { activeTrackers.add(status.getTrackerName()); } else { blacklistedTrackers.add(status.getTrackerName()); } } } List<List<String>> result = new ArrayList<List<String>>(2); result.add(activeTrackers); result.add(blacklistedTrackers); return result; } /** * Get the blacklisted task tracker statuses in the cluster * * @return {@link Collection} of blacklisted {@link TaskTrackerStatus} */ public Collection<TaskTrackerStatus> blacklistedTaskTrackers() { Collection<TaskTrackerStatus> blacklistedTrackers = new ArrayList<TaskTrackerStatus>(); synchronized (taskTrackers) { for (TaskTracker tt : taskTrackers.values()) { TaskTrackerStatus status = tt.getStatus(); if (faultyTrackers.isBlacklisted(status.getHost())) { blacklistedTrackers.add(status); } } } return blacklistedTrackers; } int getFaultCount(String hostName) { return faultyTrackers.getFaultCount(hostName); } /** * Get the number of blacklisted trackers across all the jobs * * @return */ int getBlacklistedTrackerCount() { return faultyTrackers.numBlacklistedTrackers; } /** * Whether the tracker is blacklisted or not * * @param trackerID * * @return true if blacklisted, false otherwise */ public boolean isBlacklisted(String trackerID) { TaskTrackerStatus status = getTaskTrackerStatus(trackerID); if (status != null) { return faultyTrackers.isBlacklisted(status.getHost()); } return false; } public TaskTrackerStatus getTaskTrackerStatus(String trackerID) { TaskTracker taskTracker; synchronized (taskTrackers) { taskTracker = taskTrackers.get(trackerID); } return (taskTracker == null) ? 
null : taskTracker.getStatus(); } public TaskTracker getTaskTracker(String trackerID) { synchronized (taskTrackers) { return taskTrackers.get(trackerID); } } JobTrackerStatistics getStatistics() { return statistics; } /** * Adds a new node to the jobtracker. It involves adding it to the expiry * thread and adding it for resolution * * Assuming trackerExpiryQueue is locked on entry * * @param status Task Tracker's status */ private void addNewTracker(TaskTracker taskTracker) { TaskTrackerStatus status = taskTracker.getStatus(); trackerExpiryQueue.add(status); // Register the tracker if its not registered String hostname = status.getHost(); if (getNode(status.getTrackerName()) == null) { // Making the network location resolution inline .. resolveAndAddToTopology(hostname); } // add it to the set of tracker per host Set<TaskTracker> trackers = hostnameToTaskTracker.get(hostname); if (trackers == null) { trackers = Collections.synchronizedSet(new HashSet<TaskTracker>()); hostnameToTaskTracker.put(hostname, trackers); } statistics.taskTrackerAdded(status.getTrackerName()); getInstrumentation().addTrackers(1); LOG.info("Adding tracker " + status.getTrackerName() + " to host " + hostname); trackers.add(taskTracker); } public Node resolveAndAddToTopology(String name) { List <String> tmpList = new ArrayList<String>(1); tmpList.add(name); List <String> rNameList = dnsToSwitchMapping.resolve(tmpList); String rName = rNameList.get(0); String networkLoc = NodeBase.normalize(rName); return addHostToNodeMapping(name, networkLoc); } private Node addHostToNodeMapping(String host, String networkLoc) { Node node; if ((node = clusterMap.getNode(networkLoc+"/"+host)) == null) { node = new NodeBase(host, networkLoc); clusterMap.add(node); if (node.getLevel() < getNumTaskCacheLevels()) { LOG.fatal("Got a host whose level is: " + node.getLevel() + "." 
+ " Should get at least a level of value: " + getNumTaskCacheLevels()); try { stopTracker(); } catch (IOException ie) { LOG.warn("Exception encountered during shutdown: " + StringUtils.stringifyException(ie)); System.exit(-1); } } hostnameToNodeMap.put(host, node); // Make an entry for the node at the max level in the cache nodesAtMaxLevel.add(getParentNode(node, getNumTaskCacheLevels() - 1)); } return node; } /** * Returns a collection of nodes at the max level */ public Collection<Node> getNodesAtMaxLevel() { return nodesAtMaxLevel; } public static Node getParentNode(Node node, int level) { for (int i = 0; i < level; ++i) { node = node.getParent(); } return node; } /** * Return the Node in the network topology that corresponds to the hostname */ public Node getNode(String name) { return hostnameToNodeMap.get(name); } public int getNumTaskCacheLevels() { return numTaskCacheLevels; } public int getNumResolvedTaskTrackers() { return numResolved; } public int getNumberOfUniqueHosts() { return uniqueHostsMap.size(); } public void addJobInProgressListener(JobInProgressListener listener) { jobInProgressListeners.add(listener); } public void removeJobInProgressListener(JobInProgressListener listener) { jobInProgressListeners.remove(listener); } // Update the listeners about the job // Assuming JobTracker is locked on entry. private void updateJobInProgressListeners(JobChangeEvent event) { for (JobInProgressListener listener : jobInProgressListeners) { listener.jobUpdated(event); } } /** * Return the {@link QueueManager} associated with the JobTracker. */ public QueueManager getQueueManager() { return queueManager; } //////////////////////////////////////////////////// // InterTrackerProtocol //////////////////////////////////////////////////// public String getBuildVersion() throws IOException{ return VersionInfo.getBuildVersion(); } /** * The periodic heartbeat mechanism between the {@link TaskTracker} and * the {@link JobTracker}. 
* * The {@link JobTracker} processes the status information sent by the * {@link TaskTracker} and responds with instructions to start/stop * tasks or jobs, and also 'reset' instructions during contingencies. */ public synchronized HeartbeatResponse heartbeat(TaskTrackerStatus status, boolean restarted, boolean initialContact, boolean acceptNewTasks, short responseId) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Got heartbeat from: " + status.getTrackerName() + " (restarted: " + restarted + " initialContact: " + initialContact + " acceptNewTasks: " + acceptNewTasks + ")" + " with responseId: " + responseId); } // Make sure heartbeat is from a tasktracker allowed by the jobtracker. if (!acceptTaskTracker(status)) { throw new DisallowedTaskTrackerException(status); } // First check if the last heartbeat response got through String trackerName = status.getTrackerName(); long now = System.currentTimeMillis(); boolean isBlacklisted = false; if (restarted) { faultyTrackers.markTrackerHealthy(status.getHost()); } else { isBlacklisted = faultyTrackers.shouldAssignTasksToTracker(status.getHost(), now); } HeartbeatResponse prevHeartbeatResponse = trackerToHeartbeatResponseMap.get(trackerName); boolean addRestartInfo = false; if (initialContact != true) { // If this isn't the 'initial contact' from the tasktracker, // there is something seriously wrong if the JobTracker has // no record of the 'previous heartbeat'; if so, ask the // tasktracker to re-initialize itself. 
if (prevHeartbeatResponse == null) { // This is the first heartbeat from the old tracker to the newly // started JobTracker if (hasRestarted()) { addRestartInfo = true; // inform the recovery manager about this tracker joining back recoveryManager.unMarkTracker(trackerName); } else { // Jobtracker might have restarted but no recovery is needed // otherwise this code should not be reached LOG.warn("Serious problem, cannot find record of 'previous' " + "heartbeat for '" + trackerName + "'; reinitializing the tasktracker"); return new HeartbeatResponse(responseId, new TaskTrackerAction[] {new ReinitTrackerAction()}); } } else { // It is completely safe to not process a 'duplicate' heartbeat from a // {@link TaskTracker} since it resends the heartbeat when rpcs are // lost see {@link TaskTracker.transmitHeartbeat()}; // acknowledge it by re-sending the previous response to let the // {@link TaskTracker} go forward. if (prevHeartbeatResponse.getResponseId() != responseId) { LOG.info("Ignoring 'duplicate' heartbeat from '" + trackerName + "'; resending the previous 'lost' response"); return prevHeartbeatResponse; } } } // Process this heartbeat short newResponseId = (short)(responseId + 1); status.setLastSeen(now); if (!processHeartbeat(status, initialContact)) { if (prevHeartbeatResponse != null) { trackerToHeartbeatResponseMap.remove(trackerName); } return new HeartbeatResponse(newResponseId, new TaskTrackerAction[] {new ReinitTrackerAction()}); } // Initialize the response to be sent for the heartbeat HeartbeatResponse response = new HeartbeatResponse(newResponseId, null); List<TaskTrackerAction> actions = new ArrayList<TaskTrackerAction>(); isBlacklisted = faultyTrackers.isBlacklisted(status.getHost()); // Check for new tasks to be executed on the tasktracker if (recoveryManager.shouldSchedule() && acceptNewTasks && !isBlacklisted) { TaskTrackerStatus taskTrackerStatus = getTaskTrackerStatus(trackerName) ; if (taskTrackerStatus == null) { LOG.warn("Unknown task 
tracker polling; ignoring: " + trackerName); } else { List<Task> tasks = getSetupAndCleanupTasks(taskTrackerStatus); if (tasks == null ) { tasks = taskScheduler.assignTasks(taskTrackers.get(trackerName)); } if (tasks != null) { for (Task task : tasks) { expireLaunchingTasks.addNewTask(task.getTaskID()); LOG.debug(trackerName + " -> LaunchTask: " + task.getTaskID()); actions.add(new LaunchTaskAction(task)); } } } } // Check for tasks to be killed List<TaskTrackerAction> killTasksList = getTasksToKill(trackerName); if (killTasksList != null) { actions.addAll(killTasksList); } // Check for jobs to be killed/cleanedup List<TaskTrackerAction> killJobsList = getJobsForCleanup(trackerName); if (killJobsList != null) { actions.addAll(killJobsList); } // Check for tasks whose outputs can be saved List<TaskTrackerAction> commitTasksList = getTasksToSave(status); if (commitTasksList != null) { actions.addAll(commitTasksList); } // calculate next heartbeat interval and put in heartbeat response int nextInterval = getNextHeartbeatInterval(); response.setHeartbeatInterval(nextInterval); response.setActions( actions.toArray(new TaskTrackerAction[actions.size()])); // check if the restart info is req if (addRestartInfo) { response.setRecoveredJobs(recoveryManager.getJobsToRecover()); } // Update the trackerToHeartbeatResponseMap trackerToHeartbeatResponseMap.put(trackerName, response); // Done processing the hearbeat, now remove 'marked' tasks removeMarkedTasks(trackerName); return response; } /** * Calculates next heartbeat interval using cluster size. * Heartbeat interval is incremented by 1 second for every 100 nodes by default. * @return next heartbeat interval. 
*/ public int getNextHeartbeatInterval() { // get the no of task trackers int clusterSize = getClusterStatus().getTaskTrackers(); int heartbeatInterval = Math.max( (int)(1000 * HEARTBEATS_SCALING_FACTOR * Math.ceil((double)clusterSize / NUM_HEARTBEATS_IN_SECOND)), HEARTBEAT_INTERVAL_MIN) ; return heartbeatInterval; } /** * Return if the specified tasktracker is in the hosts list, * if one was configured. If none was configured, then this * returns true. */ private boolean inHostsList(TaskTrackerStatus status) { Set<String> hostsList = hostsReader.getHosts(); return (hostsList.isEmpty() || hostsList.contains(status.getHost())); } /** * Return if the specified tasktracker is in the exclude list. */ private boolean inExcludedHostsList(TaskTrackerStatus status) { Set<String> excludeList = hostsReader.getExcludedHosts(); return excludeList.contains(status.getHost()); } /** * Returns true if the tasktracker is in the hosts list and * not in the exclude list. */ private boolean acceptTaskTracker(TaskTrackerStatus status) { return (inHostsList(status) && !inExcludedHostsList(status)); } /** * Update the last recorded status for the given task tracker. * It assumes that the taskTrackers are locked on entry. * @param trackerName The name of the tracker * @param status The new status for the task tracker * @return Was an old status found? */ private boolean updateTaskTrackerStatus(String trackerName, TaskTrackerStatus status) { TaskTracker tt = getTaskTracker(trackerName); TaskTrackerStatus oldStatus = (tt == null) ? 
null : tt.getStatus(); if (oldStatus != null) { totalMaps -= oldStatus.countMapTasks(); totalReduces -= oldStatus.countReduceTasks(); occupiedMapSlots -= oldStatus.countOccupiedMapSlots(); occupiedReduceSlots -= oldStatus.countOccupiedReduceSlots(); getInstrumentation().decRunningMaps(oldStatus.countMapTasks()); getInstrumentation().decRunningReduces(oldStatus.countReduceTasks()); getInstrumentation().decOccupiedMapSlots(oldStatus.countOccupiedMapSlots()); getInstrumentation().decOccupiedReduceSlots(oldStatus.countOccupiedReduceSlots()); if (!faultyTrackers.isBlacklisted(oldStatus.getHost())) { int mapSlots = oldStatus.getMaxMapSlots(); totalMapTaskCapacity -= mapSlots; int reduceSlots = oldStatus.getMaxReduceSlots(); totalReduceTaskCapacity -= reduceSlots; } if (status == null) { taskTrackers.remove(trackerName); Integer numTaskTrackersInHost = uniqueHostsMap.get(oldStatus.getHost()); - numTaskTrackersInHost --; - if (numTaskTrackersInHost > 0) { - uniqueHostsMap.put(oldStatus.getHost(), numTaskTrackersInHost); - } - else { - uniqueHostsMap.remove(oldStatus.getHost()); + if (numTaskTrackersInHost != null) { + numTaskTrackersInHost --; + if (numTaskTrackersInHost > 0) { + uniqueHostsMap.put(oldStatus.getHost(), numTaskTrackersInHost); + } + else { + uniqueHostsMap.remove(oldStatus.getHost()); + } } } } if (status != null) { totalMaps += status.countMapTasks(); totalReduces += status.countReduceTasks(); occupiedMapSlots += status.countOccupiedMapSlots(); occupiedReduceSlots += status.countOccupiedReduceSlots(); getInstrumentation().addRunningMaps(status.countMapTasks()); getInstrumentation().addRunningReduces(status.countReduceTasks()); getInstrumentation().addOccupiedMapSlots(status.countOccupiedMapSlots()); getInstrumentation().addOccupiedReduceSlots(status.countOccupiedReduceSlots()); if (!faultyTrackers.isBlacklisted(status.getHost())) { int mapSlots = status.getMaxMapSlots(); totalMapTaskCapacity += mapSlots; int reduceSlots = status.getMaxReduceSlots(); 
totalReduceTaskCapacity += reduceSlots; } boolean alreadyPresent = false; TaskTracker taskTracker = taskTrackers.get(trackerName); if (taskTracker != null) { alreadyPresent = true; } else { taskTracker = new TaskTracker(trackerName); } taskTracker.setStatus(status); taskTrackers.put(trackerName, taskTracker); if (LOG.isDebugEnabled()) { int runningMaps = 0, runningReduces = 0; int commitPendingMaps = 0, commitPendingReduces = 0; int unassignedMaps = 0, unassignedReduces = 0; int miscMaps = 0, miscReduces = 0; List<TaskStatus> taskReports = status.getTaskReports(); for (Iterator<TaskStatus> it = taskReports.iterator(); it.hasNext();) { TaskStatus ts = (TaskStatus) it.next(); boolean isMap = ts.getIsMap(); TaskStatus.State state = ts.getRunState(); if (state == TaskStatus.State.RUNNING) { if (isMap) { ++runningMaps; } else { ++runningReduces; } } else if (state == TaskStatus.State.UNASSIGNED) { if (isMap) { ++unassignedMaps; } else { ++unassignedReduces; } } else if (state == TaskStatus.State.COMMIT_PENDING) { if (isMap) { ++commitPendingMaps; } else { ++commitPendingReduces; } } else { if (isMap) { ++miscMaps; } else { ++miscReduces; } } } LOG.debug(trackerName + ": Status -" + " running(m) = " + runningMaps + " unassigned(m) = " + unassignedMaps + " commit_pending(m) = " + commitPendingMaps + " misc(m) = " + miscMaps + " running(r) = " + runningReduces + " unassigned(r) = " + unassignedReduces + " commit_pending(r) = " + commitPendingReduces + " misc(r) = " + miscReduces); } if (!alreadyPresent) { Integer numTaskTrackersInHost = uniqueHostsMap.get(status.getHost()); if (numTaskTrackersInHost == null) { numTaskTrackersInHost = 0; } numTaskTrackersInHost ++; uniqueHostsMap.put(status.getHost(), numTaskTrackersInHost); } } getInstrumentation().setMapSlots(totalMapTaskCapacity); getInstrumentation().setReduceSlots(totalReduceTaskCapacity); return oldStatus != null; } // Increment the number of reserved slots in the cluster. 
// This method assumes the caller has JobTracker lock. void incrementReservations(TaskType type, int reservedSlots) { if (type.equals(TaskType.MAP)) { reservedMapSlots += reservedSlots; } else if (type.equals(TaskType.REDUCE)) { reservedReduceSlots += reservedSlots; } } // Decrement the number of reserved slots in the cluster. // This method assumes the caller has JobTracker lock. void decrementReservations(TaskType type, int reservedSlots) { if (type.equals(TaskType.MAP)) { reservedMapSlots -= reservedSlots; } else if (type.equals(TaskType.REDUCE)) { reservedReduceSlots -= reservedSlots; } } private void updateNodeHealthStatus(TaskTrackerStatus trackerStatus) { TaskTrackerHealthStatus status = trackerStatus.getHealthStatus(); synchronized (faultyTrackers) { faultyTrackers.setNodeHealthStatus(trackerStatus.getHost(), status.isNodeHealthy(), status.getHealthReport()); } } /** * Process incoming heartbeat messages from the task trackers. */ private synchronized boolean processHeartbeat( TaskTrackerStatus trackerStatus, boolean initialContact) { String trackerName = trackerStatus.getTrackerName(); synchronized (taskTrackers) { synchronized (trackerExpiryQueue) { boolean seenBefore = updateTaskTrackerStatus(trackerName, trackerStatus); TaskTracker taskTracker = getTaskTracker(trackerName); if (initialContact) { // If it's first contact, then clear out // any state hanging around if (seenBefore) { lostTaskTracker(taskTracker); } } else { // If not first contact, there should be some record of the tracker if (!seenBefore) { LOG.warn("Status from unknown Tracker : " + trackerName); updateTaskTrackerStatus(trackerName, null); return false; } } if (initialContact) { // if this is lost tracker that came back now, and if it blacklisted // increment the count of blacklisted trackers in the cluster if (isBlacklisted(trackerName)) { faultyTrackers.incrBlackListedTrackers(1); } addNewTracker(taskTracker); } } } updateTaskStatuses(trackerStatus); 
updateNodeHealthStatus(trackerStatus); return true; } /** * A tracker wants to know if any of its Tasks have been * closed (because the job completed, whether successfully or not) */ private synchronized List<TaskTrackerAction> getTasksToKill( String taskTracker) { Set<TaskAttemptID> taskIds = trackerToTaskMap.get(taskTracker); List<TaskTrackerAction> killList = new ArrayList<TaskTrackerAction>(); if (taskIds != null) { for (TaskAttemptID killTaskId : taskIds) { TaskInProgress tip = taskidToTIPMap.get(killTaskId); if (tip == null) { continue; } if (tip.shouldClose(killTaskId)) { // // This is how the JobTracker ends a task at the TaskTracker. // It may be successfully completed, or may be killed in // mid-execution. // if (!tip.getJob().isComplete()) { killList.add(new KillTaskAction(killTaskId)); LOG.debug(taskTracker + " -> KillTaskAction: " + killTaskId); } } } } // add the stray attempts for uninited jobs synchronized (trackerToTasksToCleanup) { Set<TaskAttemptID> set = trackerToTasksToCleanup.remove(taskTracker); if (set != null) { for (TaskAttemptID id : set) { killList.add(new KillTaskAction(id)); } } } return killList; } /** * Add a job to cleanup for the tracker. */ private void addJobForCleanup(JobID id) { for (String taskTracker : taskTrackers.keySet()) { LOG.debug("Marking job " + id + " for cleanup by tracker " + taskTracker); synchronized (trackerToJobsToCleanup) { Set<JobID> jobsToKill = trackerToJobsToCleanup.get(taskTracker); if (jobsToKill == null) { jobsToKill = new HashSet<JobID>(); trackerToJobsToCleanup.put(taskTracker, jobsToKill); } jobsToKill.add(id); } } } /** * A tracker wants to know if any job needs cleanup because the job completed. 
*/ private List<TaskTrackerAction> getJobsForCleanup(String taskTracker) { Set<JobID> jobs = null; synchronized (trackerToJobsToCleanup) { jobs = trackerToJobsToCleanup.remove(taskTracker); } if (jobs != null) { // prepare the actions list List<TaskTrackerAction> killList = new ArrayList<TaskTrackerAction>(); for (JobID killJobId : jobs) { killList.add(new KillJobAction(killJobId)); LOG.debug(taskTracker + " -> KillJobAction: " + killJobId); } return killList; } return null; } /** * A tracker wants to know if any of its Tasks can be committed */ private synchronized List<TaskTrackerAction> getTasksToSave( TaskTrackerStatus tts) { List<TaskStatus> taskStatuses = tts.getTaskReports(); if (taskStatuses != null) { List<TaskTrackerAction> saveList = new ArrayList<TaskTrackerAction>(); for (TaskStatus taskStatus : taskStatuses) { if (taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING) { TaskAttemptID taskId = taskStatus.getTaskID(); TaskInProgress tip = taskidToTIPMap.get(taskId); if (tip == null) { continue; } if (tip.shouldCommit(taskId)) { saveList.add(new CommitTaskAction(taskId)); LOG.debug(tts.getTrackerName() + " -> CommitTaskAction: " + taskId); } } } return saveList; } return null; } // returns cleanup tasks first, then setup tasks. 
/**
 * Pick at most one setup/cleanup task to hand to the given tracker.
 * Job-cleanup tasks are offered first, then task-cleanup attempts,
 * then job-setup tasks; map-side slots are considered before
 * reduce-side slots.
 *
 * @param taskTracker last reported status of the candidate tracker
 * @return a singleton list holding the chosen task, or null if none
 */
synchronized List<Task> getSetupAndCleanupTasks(TaskTrackerStatus taskTracker)
    throws IOException {
  int maxMapTasks = taskTracker.getMaxMapSlots();
  int maxReduceTasks = taskTracker.getMaxReduceSlots();
  int numMaps = taskTracker.countOccupiedMapSlots();
  int numReduces = taskTracker.countOccupiedReduceSlots();
  int numTaskTrackers = getClusterStatus().getTaskTrackers();
  int numUniqueHosts = getNumberOfUniqueHosts();
  synchronized (jobs) {
    if (numMaps < maxMapTasks) {
      Task t = obtainSetupOrCleanupTask(taskTracker, numTaskTrackers,
                                        numUniqueHosts, true);
      if (t != null) {
        return Collections.singletonList(t);
      }
    }
    if (numReduces < maxReduceTasks) {
      Task t = obtainSetupOrCleanupTask(taskTracker, numTaskTrackers,
                                        numUniqueHosts, false);
      if (t != null) {
        return Collections.singletonList(t);
      }
    }
  }
  return null;
}

/**
 * Scan all jobs for a runnable auxiliary task of the given kind
 * ({@code isMap} selects the map-slot or reduce-slot variant).
 * Callers must hold the lock on {@code jobs}.
 */
private Task obtainSetupOrCleanupTask(TaskTrackerStatus taskTracker,
    int numTaskTrackers, int numUniqueHosts, boolean isMap)
    throws IOException {
  // Order matters: job cleanup beats task cleanup beats job setup.
  for (JobInProgress job : jobs.values()) {
    Task t = job.obtainJobCleanupTask(taskTracker, numTaskTrackers,
                                      numUniqueHosts, isMap);
    if (t != null) {
      return t;
    }
  }
  for (JobInProgress job : jobs.values()) {
    Task t = job.obtainTaskCleanupTask(taskTracker, isMap);
    if (t != null) {
      return t;
    }
  }
  for (JobInProgress job : jobs.values()) {
    Task t = job.obtainJobSetupTask(taskTracker, numTaskTrackers,
                                    numUniqueHosts, isMap);
    if (t != null) {
      return t;
    }
  }
  return null;
}

/**
 * Grab the local fs name
 */
public synchronized String getFilesystemName() throws IOException {
  if (fs == null) {
    // The filesystem handle is set asynchronously during startup;
    // callers that poll too early get an ISE instead of a null name.
    throw new IllegalStateException("FileSystem object not available yet");
  }
  return fs.getUri().toString();
}

// Record an error reported by a tasktracker. Currently only logged;
// errorClass is accepted but unused here.
public void reportTaskTrackerError(String taskTracker,
                                   String errorClass,
                                   String errorMessage) throws IOException {
  LOG.warn("Report from " + taskTracker + ": " + errorMessage);
}

/**
 * Remove the job_ from jobids to get the unique string.
 */
static String getJobUniqueString(String jobid) {
  // Strips the leading "job_" prefix (4 characters).
  return jobid.substring(4);
}

////////////////////////////////////////////////////
// JobSubmissionProtocol
////////////////////////////////////////////////////

/**
 * Allocates a new JobId string.
 */
public synchronized JobID getNewJobId() throws IOException {
  // Ids are sequential per tracker instance; nextJobId is only
  // mutated here, under the JobTracker lock.
  JobID id = new JobID(getTrackerIdentifier(), nextJobId++);
  // get the user group info
  UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
  // mark the user for this id, so submitJob can verify ownership later
  jobToUserMap.put(id, ugi.getUserName());
  LOG.info("Job id " + id + " assigned to user " + ugi.getUserName());
  return id;
}

/**
 * JobTracker.submitJob() kicks off a new job.
 *
 * Create a 'JobInProgress' object, which contains both JobProfile
 * and JobStatus. Those two sub-objects are sometimes shipped outside
 * of the JobTracker. But JobInProgress adds info that's useful for
 * the JobTracker alone.
*/ public synchronized JobStatus submitJob(JobID jobId) throws IOException { if(jobs.containsKey(jobId)) { //job already running, don't start twice return jobs.get(jobId).getStatus(); } // check if the owner is uploding the splits or not // get the user group info UserGroupInformation ugi = UserGroupInformation.getCurrentUGI(); // check if the user invoking this api is the owner of this job if (!jobToUserMap.get(jobId).equals(ugi.getUserName())) { throw new IOException("User " + ugi.getUserName() + " is not the owner of the job " + jobId); } jobToUserMap.remove(jobId); // persist File userFileForJob = new File(lDirAlloc.getLocalPathForWrite(SUBDIR + "/" + jobId, conf).toString()); if (userFileForJob == null) { LOG.info("Failed to create job-id file for job " + jobId + " at " + userFileForJob); } else { FileOutputStream fout = new FileOutputStream(userFileForJob); BufferedWriter writer = null; try { writer = new BufferedWriter(new OutputStreamWriter(fout)); writer.write(ugi.getUserName() + "\n"); } finally { if (writer != null) { writer.close(); } fout.close(); } LOG.info("Job " + jobId + " user info persisted to file : " + userFileForJob); } JobInProgress job = null; try { job = new JobInProgress(jobId, this, this.conf, ugi.getUserName(), 0); } catch (Exception e) { if (userFileForJob != null) { userFileForJob.delete(); } throw new IOException(e); } String queue = job.getProfile().getQueueName(); if(!(queueManager.getQueues().contains(queue))) { new CleanupQueue().addToQueue(conf,getSystemDirectoryForJob(jobId)); job.fail(); if (userFileForJob != null) { userFileForJob.delete(); } throw new IOException("Queue \"" + queue + "\" does not exist"); } // check for access try { checkAccess(job, QueueManager.QueueOperation.SUBMIT_JOB); } catch (IOException ioe) { LOG.warn("Access denied for user " + job.getJobConf().getUser() + ". 
Ignoring job " + jobId, ioe); job.fail(); if (userFileForJob != null) { userFileForJob.delete(); } new CleanupQueue().addToQueue(conf, getSystemDirectoryForJob(jobId)); throw ioe; } // Check the job if it cannot run in the cluster because of invalid memory // requirements. try { checkMemoryRequirements(job); } catch (IOException ioe) { new CleanupQueue().addToQueue(conf, getSystemDirectoryForJob(jobId)); throw ioe; } return addJob(jobId, job); } /** * Adds a job to the jobtracker. Make sure that the checks are inplace before * adding a job. This is the core job submission logic * @param jobId The id for the job submitted which needs to be added */ private synchronized JobStatus addJob(JobID jobId, JobInProgress job) { totalSubmissions++; synchronized (jobs) { synchronized (taskScheduler) { jobs.put(job.getProfile().getJobID(), job); for (JobInProgressListener listener : jobInProgressListeners) { try { listener.jobAdded(job); } catch (IOException ioe) { LOG.warn("Failed to add and so skipping the job : " + job.getJobID() + ". Exception : " + ioe); } } } } myInstrumentation.submitJob(job.getJobConf(), jobId); return job.getStatus(); } // Check whether the specified operation can be performed // related to the job. 
/**
 * Check whether the calling user may perform the given queue
 * operation on the job's queue.
 */
private void checkAccess(JobInProgress job,
                         QueueManager.QueueOperation oper)
    throws IOException {
  // get the user group info
  UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
  checkAccess(job, oper, ugi);
}

// use the passed ugi for checking the access
private void checkAccess(JobInProgress job, QueueManager.QueueOperation oper,
                         UserGroupInformation ugi) throws IOException {
  // get the queue
  String queue = job.getProfile().getQueueName();
  if (!queueManager.hasAccess(queue, job, oper, ugi)) {
    // NOTE(review): the original statement was truncated by an unresolved
    // diff hunk header ("@@ -3782,790 +3778,803 @@ ...") embedded in this
    // copy of the file; the message below is reconstructed — confirm it
    // against the upstream JobTracker source.
    throw new AccessControlException("User " + ugi.getUserName()
        + " cannot perform operation " + oper + " on queue " + queue);
  }
}

// NOTE(review): the diff hunk header removed above suggests a block of
// methods between checkAccess and storeCompletedJob may be missing from
// this copy of the file — verify against the full source before relying
// on this region.

void storeCompletedJob(JobInProgress job) {
  // persists the job info in DFS
  completedJobStatusStore.store(job);
}

/**
 * Profile for a live job, a recently retired job, or (failing both)
 * whatever the completed-job status store has persisted.
 */
public JobProfile getJobProfile(JobID jobid) {
  synchronized (this) {
    JobInProgress job = jobs.get(jobid);
    if (job != null) {
      return job.getProfile();
    } else {
      RetireJobInfo info = retireJobs.get(jobid);
      if (info != null) {
        return info.profile;
      }
    }
  }
  // Not in memory: fall back to the persisted store.
  return completedJobStatusStore.readJobProfile(jobid);
}

/**
 * Status for a live job, a recently retired job, or the persisted
 * store; null for a null jobid.
 */
public JobStatus getJobStatus(JobID jobid) {
  if (null == jobid) {
    LOG.warn("JobTracker.getJobStatus() cannot get status for null jobid");
    return null;
  }
  synchronized (this) {
    JobInProgress job = jobs.get(jobid);
    if (job != null) {
      return job.getStatus();
    } else {
      RetireJobInfo info = retireJobs.get(jobid);
      if (info != null) {
        return info.status;
      }
    }
  }
  return completedJobStatusStore.readJobStatus(jobid);
}

public Counters getJobCounters(JobID jobid) {
  synchronized (this) {
    JobInProgress job = jobs.get(jobid);
    if (job != null) {
      return job.getCounters();
    }
  }
  return completedJobStatusStore.readCounters(jobid);
}

/**
 * Reports for all complete and incomplete map tasks of a job;
 * empty when the job is unknown.
 */
public synchronized TaskReport[] getMapTaskReports(JobID jobid) {
  JobInProgress job = jobs.get(jobid);
  if (job == null) {
    return new TaskReport[0];
  } else {
    Vector<TaskReport> reports = new Vector<TaskReport>();
    Vector<TaskInProgress> completeMapTasks =
      job.reportTasksInProgress(true, true);
    for (Iterator it =
completeMapTasks.iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); reports.add(tip.generateSingleReport()); } Vector<TaskInProgress> incompleteMapTasks = job.reportTasksInProgress(true, false); for (Iterator it = incompleteMapTasks.iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); reports.add(tip.generateSingleReport()); } return reports.toArray(new TaskReport[reports.size()]); } } public synchronized TaskReport[] getReduceTaskReports(JobID jobid) { JobInProgress job = jobs.get(jobid); if (job == null) { return new TaskReport[0]; } else { Vector<TaskReport> reports = new Vector<TaskReport>(); Vector completeReduceTasks = job.reportTasksInProgress(false, true); for (Iterator it = completeReduceTasks.iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); reports.add(tip.generateSingleReport()); } Vector incompleteReduceTasks = job.reportTasksInProgress(false, false); for (Iterator it = incompleteReduceTasks.iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); reports.add(tip.generateSingleReport()); } return reports.toArray(new TaskReport[reports.size()]); } } public synchronized TaskReport[] getCleanupTaskReports(JobID jobid) { JobInProgress job = jobs.get(jobid); if (job == null) { return new TaskReport[0]; } else { Vector<TaskReport> reports = new Vector<TaskReport>(); Vector<TaskInProgress> completeTasks = job.reportCleanupTIPs(true); for (Iterator<TaskInProgress> it = completeTasks.iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); reports.add(tip.generateSingleReport()); } Vector<TaskInProgress> incompleteTasks = job.reportCleanupTIPs(false); for (Iterator<TaskInProgress> it = incompleteTasks.iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); reports.add(tip.generateSingleReport()); } return reports.toArray(new TaskReport[reports.size()]); } } public synchronized TaskReport[] getSetupTaskReports(JobID 
jobid) { JobInProgress job = jobs.get(jobid); if (job == null) { return new TaskReport[0]; } else { Vector<TaskReport> reports = new Vector<TaskReport>(); Vector<TaskInProgress> completeTasks = job.reportSetupTIPs(true); for (Iterator<TaskInProgress> it = completeTasks.iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); reports.add(tip.generateSingleReport()); } Vector<TaskInProgress> incompleteTasks = job.reportSetupTIPs(false); for (Iterator<TaskInProgress> it = incompleteTasks.iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); reports.add(tip.generateSingleReport()); } return reports.toArray(new TaskReport[reports.size()]); } } TaskCompletionEvent[] EMPTY_EVENTS = new TaskCompletionEvent[0]; static final String MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY = "mapred.cluster.map.memory.mb"; static final String MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY = "mapred.cluster.reduce.memory.mb"; static final String MAPRED_CLUSTER_MAX_MAP_MEMORY_MB_PROPERTY = "mapred.cluster.max.map.memory.mb"; static final String MAPRED_CLUSTER_MAX_REDUCE_MEMORY_MB_PROPERTY = "mapred.cluster.max.reduce.memory.mb"; /* * Returns a list of TaskCompletionEvent for the given job, * starting from fromEventId. 
* @see org.apache.hadoop.mapred.JobSubmissionProtocol#getTaskCompletionEvents(java.lang.String, int, int) */ public synchronized TaskCompletionEvent[] getTaskCompletionEvents( JobID jobid, int fromEventId, int maxEvents) throws IOException{ synchronized (this) { JobInProgress job = this.jobs.get(jobid); if (null != job) { if (job.inited()) { return job.getTaskCompletionEvents(fromEventId, maxEvents); } else { return EMPTY_EVENTS; } } } return completedJobStatusStore.readJobTaskCompletionEvents(jobid, fromEventId, maxEvents); } /** * Get the diagnostics for a given task * @param taskId the id of the task * @return an array of the diagnostic messages */ public synchronized String[] getTaskDiagnostics(TaskAttemptID taskId) throws IOException { List<String> taskDiagnosticInfo = null; JobID jobId = taskId.getJobID(); TaskID tipId = taskId.getTaskID(); JobInProgress job = jobs.get(jobId); if (job != null) { TaskInProgress tip = job.getTaskInProgress(tipId); if (tip != null) { taskDiagnosticInfo = tip.getDiagnosticInfo(taskId); } } return ((taskDiagnosticInfo == null) ? new String[0] : taskDiagnosticInfo.toArray(new String[0])); } /** Get all the TaskStatuses from the tipid. */ TaskStatus[] getTaskStatuses(TaskID tipid) { TaskInProgress tip = getTip(tipid); return (tip == null ? new TaskStatus[0] : tip.getTaskStatuses()); } /** Returns the TaskStatus for a particular taskid. */ TaskStatus getTaskStatus(TaskAttemptID taskid) { TaskInProgress tip = getTip(taskid.getTaskID()); return (tip == null ? null : tip.getTaskStatus(taskid)); } /** * Returns the counters for the specified task in progress. */ Counters getTipCounters(TaskID tipid) { TaskInProgress tip = getTip(tipid); return (tip == null ? null : tip.getCounters()); } /** * Returns the configured task scheduler for this job tracker. * @return the configured task scheduler */ TaskScheduler getTaskScheduler() { return taskScheduler; } /** * Returns specified TaskInProgress, or null. 
*/ public TaskInProgress getTip(TaskID tipid) { JobInProgress job = jobs.get(tipid.getJobID()); return (job == null ? null : job.getTaskInProgress(tipid)); } /** Mark a Task to be killed */ public synchronized boolean killTask(TaskAttemptID taskid, boolean shouldFail) throws IOException{ TaskInProgress tip = taskidToTIPMap.get(taskid); if(tip != null) { checkAccess(tip.getJob(), QueueManager.QueueOperation.ADMINISTER_JOBS); return tip.killTask(taskid, shouldFail); } else { LOG.info("Kill task attempt failed since task " + taskid + " was not found"); return false; } } /** * Get tracker name for a given task id. * @param taskId the name of the task * @return The name of the task tracker */ public synchronized String getAssignedTracker(TaskAttemptID taskId) { return taskidToTrackerMap.get(taskId); } public JobStatus[] jobsToComplete() { return getJobStatus(jobs.values(), true); } public JobStatus[] getAllJobs() { List<JobStatus> list = new ArrayList<JobStatus>(); list.addAll(Arrays.asList(getJobStatus(jobs.values(),false))); list.addAll(retireJobs.getAllJobStatus()); return list.toArray(new JobStatus[list.size()]); } /** * @see org.apache.hadoop.mapred.JobSubmissionProtocol#getSystemDir() */ public String getSystemDir() { Path sysDir = new Path(conf.get("mapred.system.dir", "/tmp/hadoop/mapred/system")); return fs.makeQualified(sysDir).toString(); } /////////////////////////////////////////////////////////////// // JobTracker methods /////////////////////////////////////////////////////////////// public JobInProgress getJob(JobID jobid) { return jobs.get(jobid); } // Get the job directory in system directory Path getSystemDirectoryForJob(JobID id) { return new Path(getSystemDir(), id.toString()); } /** * Change the run-time priority of the given job. 
* @param jobId job id * @param priority new {@link JobPriority} for the job */ synchronized void setJobPriority(JobID jobId, JobPriority priority) { JobInProgress job = jobs.get(jobId); if (job != null) { synchronized (taskScheduler) { JobStatus oldStatus = (JobStatus)job.getStatus().clone(); job.setPriority(priority); JobStatus newStatus = (JobStatus)job.getStatus().clone(); JobStatusChangeEvent event = new JobStatusChangeEvent(job, EventType.PRIORITY_CHANGED, oldStatus, newStatus); updateJobInProgressListeners(event); } } else { LOG.warn("Trying to change the priority of an unknown job: " + jobId); } } //////////////////////////////////////////////////// // Methods to track all the TaskTrackers //////////////////////////////////////////////////// /** * Accept and process a new TaskTracker profile. We might * have known about the TaskTracker previously, or it might * be brand-new. All task-tracker structures have already * been updated. Just process the contained tasks and any * jobs that might be affected. */ void updateTaskStatuses(TaskTrackerStatus status) { String trackerName = status.getTrackerName(); for (TaskStatus report : status.getTaskReports()) { report.setTaskTracker(trackerName); TaskAttemptID taskId = report.getTaskID(); // expire it expireLaunchingTasks.removeTask(taskId); JobInProgress job = getJob(taskId.getJobID()); if (job == null) { // if job is not there in the cleanup list ... add it synchronized (trackerToJobsToCleanup) { Set<JobID> jobs = trackerToJobsToCleanup.get(trackerName); if (jobs == null) { jobs = new HashSet<JobID>(); trackerToJobsToCleanup.put(trackerName, jobs); } jobs.add(taskId.getJobID()); } continue; } if (!job.inited()) { // if job is not yet initialized ... 
kill the attempt synchronized (trackerToTasksToCleanup) { Set<TaskAttemptID> tasks = trackerToTasksToCleanup.get(trackerName); if (tasks == null) { tasks = new HashSet<TaskAttemptID>(); trackerToTasksToCleanup.put(trackerName, tasks); } tasks.add(taskId); } continue; } TaskInProgress tip = taskidToTIPMap.get(taskId); // Check if the tip is known to the jobtracker. In case of a restarted // jt, some tasks might join in later if (tip != null || hasRestarted()) { if (tip == null) { tip = job.getTaskInProgress(taskId.getTaskID()); job.addRunningTaskToTIP(tip, taskId, status, false); } // Update the job and inform the listeners if necessary JobStatus prevStatus = (JobStatus)job.getStatus().clone(); // Clone TaskStatus object here, because JobInProgress // or TaskInProgress can modify this object and // the changes should not get reflected in TaskTrackerStatus. // An old TaskTrackerStatus is used later in countMapTasks, etc. job.updateTaskStatus(tip, (TaskStatus)report.clone()); JobStatus newStatus = (JobStatus)job.getStatus().clone(); // Update the listeners if an incomplete job completes if (prevStatus.getRunState() != newStatus.getRunState()) { JobStatusChangeEvent event = new JobStatusChangeEvent(job, EventType.RUN_STATE_CHANGED, prevStatus, newStatus); updateJobInProgressListeners(event); } } else { LOG.info("Serious problem. 
While updating status, cannot find taskid " + report.getTaskID()); } // Process 'failed fetch' notifications List<TaskAttemptID> failedFetchMaps = report.getFetchFailedMaps(); if (failedFetchMaps != null) { for (TaskAttemptID mapTaskId : failedFetchMaps) { TaskInProgress failedFetchMap = taskidToTIPMap.get(mapTaskId); if (failedFetchMap != null) { // Gather information about the map which has to be failed, if need be String failedFetchTrackerName = getAssignedTracker(mapTaskId); if (failedFetchTrackerName == null) { failedFetchTrackerName = "Lost task tracker"; } failedFetchMap.getJob().fetchFailureNotification(failedFetchMap, mapTaskId, failedFetchTrackerName); } } } } } /** * We lost the task tracker! All task-tracker structures have * already been updated. Just process the contained tasks and any * jobs that might be affected. */ void lostTaskTracker(TaskTracker taskTracker) { String trackerName = taskTracker.getTrackerName(); LOG.info("Lost tracker '" + trackerName + "'"); // remove the tracker from the local structures synchronized (trackerToJobsToCleanup) { trackerToJobsToCleanup.remove(trackerName); } synchronized (trackerToTasksToCleanup) { trackerToTasksToCleanup.remove(trackerName); } // Inform the recovery manager recoveryManager.unMarkTracker(trackerName); Set<TaskAttemptID> lostTasks = trackerToTaskMap.get(trackerName); trackerToTaskMap.remove(trackerName); if (lostTasks != null) { // List of jobs which had any of their tasks fail on this tracker Set<JobInProgress> jobsWithFailures = new HashSet<JobInProgress>(); for (TaskAttemptID taskId : lostTasks) { TaskInProgress tip = taskidToTIPMap.get(taskId); JobInProgress job = tip.getJob(); // Completed reduce tasks never need to be failed, because // their outputs go to dfs // And completed maps with zero reducers of the job // never need to be failed. 
if (!tip.isComplete() || (tip.isMapTask() && !tip.isJobSetupTask() && job.desiredReduces() != 0)) { // if the job is done, we don't want to change anything if (job.getStatus().getRunState() == JobStatus.RUNNING || job.getStatus().getRunState() == JobStatus.PREP) { // the state will be KILLED_UNCLEAN, if the task(map or reduce) // was RUNNING on the tracker TaskStatus.State killState = (tip.isRunningTask(taskId) && !tip.isJobSetupTask() && !tip.isJobCleanupTask()) ? TaskStatus.State.KILLED_UNCLEAN : TaskStatus.State.KILLED; job.failedTask(tip, taskId, ("Lost task tracker: " + trackerName), (tip.isMapTask() ? TaskStatus.Phase.MAP : TaskStatus.Phase.REDUCE), killState, trackerName); jobsWithFailures.add(job); } } else { // Completed 'reduce' task and completed 'maps' with zero // reducers of the job, not failed; // only removed from data-structures. markCompletedTaskAttempt(trackerName, taskId); } } // Penalize this tracker for each of the jobs which // had any tasks running on it when it was 'lost' // Also, remove any reserved slots on this tasktracker for (JobInProgress job : jobsWithFailures) { job.addTrackerTaskFailure(trackerName, taskTracker); } // Cleanup taskTracker.cancelAllReservations(); // Purge 'marked' tasks, needs to be done // here to prevent hanging references! removeMarkedTasks(trackerName); } } /** * Rereads the config to get hosts and exclude list file names. * Rereads the files to update the hosts and exclude lists. */ public synchronized void refreshNodes() throws IOException { // check access PermissionChecker.checkSuperuserPrivilege(mrOwner, supergroup); // call the actual api refreshHosts(); } private synchronized void refreshHosts() throws IOException { // Reread the config to get mapred.hosts and mapred.hosts.exclude filenames. 
// Update the file names and refresh internal includes and excludes list LOG.info("Refreshing hosts information"); Configuration conf = new Configuration(); hostsReader.updateFileNames(conf.get("mapred.hosts",""), conf.get("mapred.hosts.exclude", "")); hostsReader.refresh(); Set<String> excludeSet = new HashSet<String>(); for(Map.Entry<String, TaskTracker> eSet : taskTrackers.entrySet()) { String trackerName = eSet.getKey(); TaskTrackerStatus status = eSet.getValue().getStatus(); // Check if not include i.e not in host list or in hosts list but excluded if (!inHostsList(status) || inExcludedHostsList(status)) { excludeSet.add(status.getHost()); // add to rejected trackers } } decommissionNodes(excludeSet); } + // Remove a tracker from the system + private void removeTracker(TaskTracker tracker) { + String trackerName = tracker.getTrackerName(); + // Remove completely after marking the tasks as 'KILLED' + lostTaskTracker(tracker); + // tracker is lost, and if it is blacklisted, remove + // it from the count of blacklisted trackers in the cluster + if (isBlacklisted(trackerName)) { + faultyTrackers.decrBlackListedTrackers(1); + } + updateTaskTrackerStatus(trackerName, null); + statistics.taskTrackerRemoved(trackerName); + getInstrumentation().decTrackers(1); + } + // main decommission synchronized void decommissionNodes(Set<String> hosts) throws IOException { LOG.info("Decommissioning " + hosts.size() + " nodes"); // create a list of tracker hostnames synchronized (taskTrackers) { synchronized (trackerExpiryQueue) { int trackersDecommissioned = 0; for (String host : hosts) { LOG.info("Decommissioning host " + host); Set<TaskTracker> trackers = hostnameToTaskTracker.remove(host); if (trackers != null) { for (TaskTracker tracker : trackers) { - LOG.info("Decommission: Losing tracker " + tracker + + LOG.info("Decommission: Losing tracker " + tracker.getTrackerName() + " on host " + host); - lostTaskTracker(tracker); // lose the tracker - updateTaskTrackerStatus( - 
tracker.getStatus().getTrackerName(), null); + removeTracker(tracker); } trackersDecommissioned += trackers.size(); } LOG.info("Host " + host + " is ready for decommissioning"); } getInstrumentation().setDecommissionedTrackers(trackersDecommissioned); } } } /** * Returns a set of excluded nodes. */ Collection<String> getExcludedNodes() { return hostsReader.getExcludedHosts(); } /** * Get the localized job file path on the job trackers local file system * @param jobId id of the job * @return the path of the job conf file on the local file system */ public static String getLocalJobFilePath(JobID jobId){ return JobHistory.JobInfo.getLocalJobFilePath(jobId); } //////////////////////////////////////////////////////////// // main() //////////////////////////////////////////////////////////// /** * Start the JobTracker process. This is used only for debugging. As a rule, * JobTracker should be run as part of the DFS Namenode process. */ public static void main(String argv[] ) throws IOException, InterruptedException { StringUtils.startupShutdownMessage(JobTracker.class, argv, LOG); try { if(argv.length == 0) { JobTracker tracker = startTracker(new JobConf()); tracker.offerService(); } else { if ("-dumpConfiguration".equals(argv[0]) && argv.length == 1) { dumpConfiguration(new PrintWriter(System.out)); } else { System.out.println("usage: JobTracker [-dumpConfiguration]"); System.exit(-1); } } } catch (Throwable e) { LOG.fatal(StringUtils.stringifyException(e)); System.exit(-1); } } /** * Dumps the configuration properties in Json format * @param writer {@link}Writer object to which the output is written * @throws IOException */ private static void dumpConfiguration(Writer writer) throws IOException { Configuration.dumpConfiguration(new JobConf(), writer); writer.write("\n"); // get the QueueManager configuration properties QueueManager.dumpConfiguration(writer); writer.write("\n"); } @Override public JobQueueInfo[] getQueues() throws IOException { return 
queueManager.getJobQueueInfos(); } @Override public JobQueueInfo getQueueInfo(String queue) throws IOException { return queueManager.getJobQueueInfo(queue); } @Override public JobStatus[] getJobsFromQueue(String queue) throws IOException { Collection<JobInProgress> jips = taskScheduler.getJobs(queue); return getJobStatus(jips,false); } @Override public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException{ return queueManager.getQueueAcls( UserGroupInformation.getCurrentUGI()); } private synchronized JobStatus[] getJobStatus(Collection<JobInProgress> jips, boolean toComplete) { if(jips == null || jips.isEmpty()) { return new JobStatus[]{}; } ArrayList<JobStatus> jobStatusList = new ArrayList<JobStatus>(); for(JobInProgress jip : jips) { JobStatus status = jip.getStatus(); status.setStartTime(jip.getStartTime()); status.setUsername(jip.getProfile().getUser()); if(toComplete) { if(status.getRunState() == JobStatus.RUNNING || status.getRunState() == JobStatus.PREP) { jobStatusList.add(status); } }else { jobStatusList.add(status); } } return (JobStatus[]) jobStatusList.toArray( new JobStatus[jobStatusList.size()]); } /** * Returns the confgiured maximum number of tasks for a single job */ int getMaxTasksPerJob() { return conf.getInt("mapred.jobtracker.maxtasks.per.job", -1); } @Override public void refreshServiceAcl() throws IOException { if (!conf.getBoolean( ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) { throw new AuthorizationException("Service Level Authorization not enabled!"); } SecurityUtil.getPolicy().refresh(); } private void initializeTaskMemoryRelatedConfig() { memSizeForMapSlotOnJT = JobConf.normalizeMemoryConfigValue(conf.getLong( JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, JobConf.DISABLED_MEMORY_LIMIT)); memSizeForReduceSlotOnJT = JobConf.normalizeMemoryConfigValue(conf.getLong( JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, JobConf.DISABLED_MEMORY_LIMIT)); if (conf.get(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY) 
!= null) { LOG.warn( JobConf.deprecatedString( JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY)+ " instead use "+JobTracker.MAPRED_CLUSTER_MAX_MAP_MEMORY_MB_PROPERTY+ " and " + JobTracker.MAPRED_CLUSTER_MAX_REDUCE_MEMORY_MB_PROPERTY ); limitMaxMemForMapTasks = limitMaxMemForReduceTasks = JobConf.normalizeMemoryConfigValue( conf.getLong( JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY, JobConf.DISABLED_MEMORY_LIMIT)); if (limitMaxMemForMapTasks != JobConf.DISABLED_MEMORY_LIMIT && limitMaxMemForMapTasks >= 0) { limitMaxMemForMapTasks = limitMaxMemForReduceTasks = limitMaxMemForMapTasks / (1024 * 1024); //Converting old values in bytes to MB } } else { limitMaxMemForMapTasks = JobConf.normalizeMemoryConfigValue( conf.getLong( JobTracker.MAPRED_CLUSTER_MAX_MAP_MEMORY_MB_PROPERTY, JobConf.DISABLED_MEMORY_LIMIT)); limitMaxMemForReduceTasks = JobConf.normalizeMemoryConfigValue( conf.getLong( JobTracker.MAPRED_CLUSTER_MAX_REDUCE_MEMORY_MB_PROPERTY, JobConf.DISABLED_MEMORY_LIMIT)); } LOG.info(new StringBuilder().append("Scheduler configured with ").append( "(memSizeForMapSlotOnJT, memSizeForReduceSlotOnJT,").append( " limitMaxMemForMapTasks, limitMaxMemForReduceTasks) (").append( memSizeForMapSlotOnJT).append(", ").append(memSizeForReduceSlotOnJT) .append(", ").append(limitMaxMemForMapTasks).append(", ").append( limitMaxMemForReduceTasks).append(")")); } private boolean perTaskMemoryConfigurationSetOnJT() { if (limitMaxMemForMapTasks == JobConf.DISABLED_MEMORY_LIMIT || limitMaxMemForReduceTasks == JobConf.DISABLED_MEMORY_LIMIT || memSizeForMapSlotOnJT == JobConf.DISABLED_MEMORY_LIMIT || memSizeForReduceSlotOnJT == JobConf.DISABLED_MEMORY_LIMIT) { return false; } return true; } /** * Check the job if it has invalid requirements and throw and IOException if does have. * * @param job * @throws IOException */ private void checkMemoryRequirements(JobInProgress job) throws IOException { if (!perTaskMemoryConfigurationSetOnJT()) { LOG.debug("Per-Task memory configuration is not set on JT. 
" + "Not checking the job for invalid memory requirements."); return; } boolean invalidJob = false; String msg = ""; long maxMemForMapTask = job.getJobConf().getMemoryForMapTask(); long maxMemForReduceTask = job.getJobConf().getMemoryForReduceTask(); if (maxMemForMapTask == JobConf.DISABLED_MEMORY_LIMIT || maxMemForReduceTask == JobConf.DISABLED_MEMORY_LIMIT) { invalidJob = true; msg = "Invalid job requirements."; } if (maxMemForMapTask > limitMaxMemForMapTasks || maxMemForReduceTask > limitMaxMemForReduceTasks) { invalidJob = true; msg = "Exceeds the cluster's max-memory-limit."; } if (invalidJob) { StringBuilder jobStr = new StringBuilder().append(job.getJobID().toString()).append("(") .append(maxMemForMapTask).append(" memForMapTasks ").append( maxMemForReduceTask).append(" memForReduceTasks): "); LOG.warn(jobStr.toString() + msg); throw new IOException(jobStr.toString() + msg); } } @Override public void refreshQueueAcls() throws IOException{ LOG.info("Refreshing queue acls. requested by : " + UserGroupInformation.getCurrentUGI().getUserName()); this.queueManager.refreshAcls(new Configuration(this.conf)); } String getReasonsForBlacklisting(String host) { FaultInfo fi = faultyTrackers.getFaultInfo(host, false); if (fi == null) { return ""; } return fi.getTrackerFaultReport(); } /** Test Methods */ Set<ReasonForBlackListing> getReasonForBlackList(String host) { FaultInfo fi = faultyTrackers.getFaultInfo(host, false); if (fi == null) { return new HashSet<ReasonForBlackListing>(); } return fi.getReasonforblacklisting(); } void incrementFaults(String hostName) { faultyTrackers.incrementFaults(hostName); } } diff --git a/src/test/org/apache/hadoop/mapred/TestNodeBlacklisting.java b/src/test/org/apache/hadoop/mapred/TestNodeBlacklisting.java new file mode 100644 index 0000000..2629e01 --- /dev/null +++ b/src/test/org/apache/hadoop/mapred/TestNodeBlacklisting.java @@ -0,0 +1,144 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapred; + +import java.io.File; +import java.io.IOException; +import java.util.HashSet; + +import junit.framework.TestCase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.mapred.lib.IdentityReducer; + +/** + * Test node blacklisting. 
This testcase tests + * - node blacklisting along with node refresh + */ +public class TestNodeBlacklisting extends TestCase { + public static final Log LOG = LogFactory.getLog(TestNodeBlacklisting.class); + private static final Path TEST_DIR = + new Path(System.getProperty("test.build.data", "/tmp"), "node-bklisting"); + + // Mapper that fails once for the first time + static class FailOnceMapper extends MapReduceBase implements + Mapper<WritableComparable, Writable, WritableComparable, Writable> { + + private boolean shouldFail = false; + public void map(WritableComparable key, Writable value, + OutputCollector<WritableComparable, Writable> out, Reporter reporter) + throws IOException { + + if (shouldFail) { + throw new RuntimeException("failing map"); + } + } + + @Override + public void configure(JobConf conf) { + TaskAttemptID id = TaskAttemptID.forName(conf.get("mapred.task.id")); + shouldFail = id.getId() == 0 && id.getTaskID().getId() == 0; + } + } + + /** + * Check refreshNodes for decommissioning blacklisted nodes. 
+ */ + public void testBlacklistedNodeDecommissioning() throws Exception { + LOG.info("Testing blacklisted node decommissioning"); + MiniMRCluster mr = null; + JobTracker jt = null; + + try { + // start mini mr + JobConf jtConf = new JobConf(); + jtConf.set("mapred.max.tracker.blacklists", "1"); + mr = new MiniMRCluster(0, 0, 2, "file:///", 1, null, null, null, jtConf); + jt = mr.getJobTrackerRunner().getJobTracker(); + + assertEquals("Trackers not up", 2, jt.taskTrackers().size()); + // validate the total tracker count + assertEquals("Active tracker count mismatch", + 2, jt.getClusterStatus(false).getTaskTrackers()); + // validate blacklisted count + assertEquals("Blacklisted tracker count mismatch", + 0, jt.getClusterStatus(false).getBlacklistedTrackers()); + + // run a failing job to blacklist the tracker + JobConf jConf = mr.createJobConf(); + jConf.set("mapred.max.tracker.failures", "1"); + jConf.setJobName("test-job-fail-once"); + jConf.setMapperClass(FailOnceMapper.class); + jConf.setReducerClass(IdentityReducer.class); + jConf.setNumMapTasks(1); + jConf.setNumReduceTasks(0); + + RunningJob job = + UtilsForTests.runJob(jConf, new Path(TEST_DIR, "in"), + new Path(TEST_DIR, "out")); + job.waitForCompletion(); + + // validate the total tracker count + assertEquals("Active tracker count mismatch", + 1, jt.getClusterStatus(false).getTaskTrackers()); + // validate blacklisted count + assertEquals("Blacklisted tracker count mismatch", + 1, jt.getClusterStatus(false).getBlacklistedTrackers()); + + // find the blacklisted tracker + String trackerName = null; + for (TaskTrackerStatus status : jt.taskTrackers()) { + if (jt.isBlacklisted(status.getTrackerName())) { + trackerName = status.getTrackerName(); + break; + } + } + // get the hostname + String hostToDecommission = + JobInProgress.convertTrackerNameToHostName(trackerName); + LOG.info("Decommissioning tracker " + hostToDecommission); + + // decommission the node + HashSet<String> decom = new HashSet<String>(1); + 
decom.add(hostToDecommission); + jt.decommissionNodes(decom); + + // validate + // check the cluster status and tracker size + assertEquals("Tracker is not lost upon host decommissioning", + 1, jt.getClusterStatus(false).getTaskTrackers()); + assertEquals("Blacklisted tracker count incorrect in cluster status " + + "after decommissioning", + 0, jt.getClusterStatus(false).getBlacklistedTrackers()); + assertEquals("Tracker is not lost upon host decommissioning", + 1, jt.taskTrackers().size()); + } finally { + if (mr != null) { + mr.shutdown(); + mr = null; + jt = null; + FileUtil.fullyDelete(new File(TEST_DIR.toString())); + } + } + } +}
jaxlaw/hadoop-common
3245e2bd33f90aaa24cddbb3af5ec829fac30cb6
HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: IllegalArgumentException is thrown. Contributed by Konstantin Boudnik
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 6c4df94..5931aa8 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,436 +1,439 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383001 + HADOOP-6386. NameNode's HttpServer can't instantiate InetSocketAddress: + IllegalArgumentException is thrown. (cos) + HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) MAPREDUCE-1185. Redirect running job url to history url if job is already retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. 
Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. 
Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. 
Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. 
Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. 
http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. 
http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. 
http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. 
Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/core/org/apache/hadoop/http/HttpServer.java b/src/core/org/apache/hadoop/http/HttpServer.java index 2c1b9e6..47d0500 100644 --- a/src/core/org/apache/hadoop/http/HttpServer.java +++ b/src/core/org/apache/hadoop/http/HttpServer.java @@ -1,517 +1,545 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.http; import java.io.IOException; import java.io.PrintWriter; import java.net.BindException; import java.net.InetSocketAddress; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Random; import java.nio.channels.ServerSocketChannel; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.log.LogLevel; import org.apache.hadoop.util.ReflectionUtils; import org.mortbay.jetty.Connector; import org.mortbay.jetty.Handler; import org.mortbay.jetty.Server; import org.mortbay.jetty.handler.ContextHandlerCollection; import org.mortbay.jetty.nio.SelectChannelConnector; import org.mortbay.jetty.security.SslSocketConnector; import org.mortbay.jetty.servlet.Context; import org.mortbay.jetty.servlet.DefaultServlet; import org.mortbay.jetty.servlet.FilterHolder; import org.mortbay.jetty.servlet.FilterMapping; import org.mortbay.jetty.servlet.ServletHandler; import org.mortbay.jetty.servlet.ServletHolder; import org.mortbay.jetty.webapp.WebAppContext; import org.mortbay.thread.QueuedThreadPool; import org.mortbay.util.MultiException; /** * Create a Jetty embedded server to answer http requests. The primary goal * is to serve up status information for the server. 
* There are three contexts: * "/logs/" -> points to the log directory * "/static/" -> points to common static files (src/webapps/static) * "/" -> the jsp server code from (src/webapps/<name>) */ public class HttpServer implements FilterContainer { public static final Log LOG = LogFactory.getLog(HttpServer.class); static final String FILTER_INITIALIZER_PROPERTY = "hadoop.http.filter.initializers"; protected final Server webServer; protected final Connector listener; protected final WebAppContext webAppContext; protected final boolean findPort; protected final Map<Context, Boolean> defaultContexts = new HashMap<Context, Boolean>(); protected final List<String> filterNames = new ArrayList<String>(); private static final int MAX_RETRIES = 10; /** Same as this(name, bindAddress, port, findPort, null); */ public HttpServer(String name, String bindAddress, int port, boolean findPort ) throws IOException { this(name, bindAddress, port, findPort, new Configuration()); } /** * Create a status server on the given port. * The jsp scripts are taken from src/webapps/<name>. * @param name The name of the server * @param port The port to use on the server * @param findPort whether the server should start at the given port and * increment by 1 until it finds a free port. 
* @param conf Configuration */ public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf) throws IOException { webServer = new Server(); this.findPort = findPort; listener = createBaseListener(conf); listener.setHost(bindAddress); listener.setPort(port); webServer.addConnector(listener); webServer.setThreadPool(new QueuedThreadPool()); final String appDir = getWebAppsPath(); ContextHandlerCollection contexts = new ContextHandlerCollection(); webServer.setHandler(contexts); webAppContext = new WebAppContext(); webAppContext.setContextPath("/"); webAppContext.setWar(appDir + "/" + name); webServer.addHandler(webAppContext); addDefaultApps(contexts, appDir); final FilterInitializer[] initializers = getFilterInitializers(conf); if (initializers != null) { for(FilterInitializer c : initializers) { c.initFilter(this); } } addDefaultServlets(); } /** * Create a required listener for the Jetty instance listening on the port * provided. This wrapper and all subclasses must create at least one * listener. */ protected Connector createBaseListener(Configuration conf) throws IOException { SelectChannelConnector ret = new SelectChannelConnector(); ret.setLowResourceMaxIdleTime(10000); ret.setAcceptQueueSize(128); ret.setResolveNames(false); ret.setUseDirectBuffers(false); return ret; } /** Get an array of FilterConfiguration specified in the conf */ private static FilterInitializer[] getFilterInitializers(Configuration conf) { if (conf == null) { return null; } Class<?>[] classes = conf.getClasses(FILTER_INITIALIZER_PROPERTY); if (classes == null) { return null; } FilterInitializer[] initializers = new FilterInitializer[classes.length]; for(int i = 0; i < classes.length; i++) { initializers[i] = (FilterInitializer)ReflectionUtils.newInstance( classes[i], conf); } return initializers; } /** * Add default apps. 
* @param appDir The application directory * @throws IOException */ protected void addDefaultApps(ContextHandlerCollection parent, final String appDir) throws IOException { // set up the context for "/logs/" if "hadoop.log.dir" property is defined. String logDir = System.getProperty("hadoop.log.dir"); if (logDir != null) { Context logContext = new Context(parent, "/logs"); logContext.setResourceBase(logDir); logContext.addServlet(DefaultServlet.class, "/"); defaultContexts.put(logContext, true); } // set up the context for "/static/*" Context staticContext = new Context(parent, "/static"); staticContext.setResourceBase(appDir + "/static"); staticContext.addServlet(DefaultServlet.class, "/*"); defaultContexts.put(staticContext, true); } /** * Add default servlets. */ protected void addDefaultServlets() { // set up default servlets addServlet("stacks", "/stacks", StackServlet.class); addServlet("logLevel", "/logLevel", LogLevel.Servlet.class); } public void addContext(Context ctxt, boolean isFiltered) throws IOException { webServer.addHandler(ctxt); defaultContexts.put(ctxt, isFiltered); } /** * Add a context * @param pathSpec The path spec for the context * @param dir The directory containing the context * @param isFiltered if true, the servlet is added to the filter path mapping * @throws IOException */ protected void addContext(String pathSpec, String dir, boolean isFiltered) throws IOException { if (0 == webServer.getHandlers().length) { throw new RuntimeException("Couldn't find handler"); } WebAppContext webAppCtx = new WebAppContext(); webAppCtx.setContextPath(pathSpec); webAppCtx.setWar(dir); addContext(webAppCtx, true); } /** * Set a value in the webapp context. These values are available to the jsp * pages as "application.getAttribute(name)". * @param name The name of the attribute * @param value The value of the attribute */ public void setAttribute(String name, Object value) { webAppContext.setAttribute(name, value); } /** * Add a servlet in the server. 
* @param name The name of the servlet (can be passed as null) * @param pathSpec The path spec for the servlet * @param clazz The servlet class */ public void addServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz) { addInternalServlet(name, pathSpec, clazz); addFilterPathMapping(pathSpec, webAppContext); } /** * Add an internal servlet in the server. * @param name The name of the servlet (can be passed as null) * @param pathSpec The path spec for the servlet * @param clazz The servlet class * @deprecated this is a temporary method */ @Deprecated public void addInternalServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz) { ServletHolder holder = new ServletHolder(clazz); if (name != null) { holder.setName(name); } webAppContext.addServlet(holder, pathSpec); } /** {@inheritDoc} */ public void addFilter(String name, String classname, Map<String, String> parameters) { final String[] USER_FACING_URLS = { "*.html", "*.jsp" }; defineFilter(webAppContext, name, classname, parameters, USER_FACING_URLS); final String[] ALL_URLS = { "/*" }; for (Map.Entry<Context, Boolean> e : defaultContexts.entrySet()) { if (e.getValue()) { Context ctx = e.getKey(); defineFilter(ctx, name, classname, parameters, ALL_URLS); LOG.info("Added filter " + name + " (class=" + classname + ") to context " + ctx.getDisplayName()); } } filterNames.add(name); } /** {@inheritDoc} */ public void addGlobalFilter(String name, String classname, Map<String, String> parameters) { final String[] ALL_URLS = { "/*" }; defineFilter(webAppContext, name, classname, parameters, ALL_URLS); for (Context ctx : defaultContexts.keySet()) { defineFilter(ctx, name, classname, parameters, ALL_URLS); } LOG.info("Added global filter" + name + " (class=" + classname + ")"); } /** * Define a filter for a context and set up default url mappings. 
*/ protected void defineFilter(Context ctx, String name, String classname, Map<String,String> parameters, String[] urls) { FilterHolder holder = new FilterHolder(); holder.setName(name); holder.setClassName(classname); holder.setInitParameters(parameters); FilterMapping fmap = new FilterMapping(); fmap.setPathSpecs(urls); fmap.setDispatches(Handler.ALL); fmap.setFilterName(name); ServletHandler handler = ctx.getServletHandler(); handler.addFilter(holder, fmap); } /** * Add the path spec to the filter path mapping. * @param pathSpec The path spec * @param webAppCtx The WebApplicationContext to add to */ protected void addFilterPathMapping(String pathSpec, Context webAppCtx) { ServletHandler handler = webAppCtx.getServletHandler(); for(String name : filterNames) { FilterMapping fmap = new FilterMapping(); fmap.setPathSpec(pathSpec); fmap.setFilterName(name); fmap.setDispatches(Handler.ALL); handler.addFilterMapping(fmap); } } /** * Get the value in the webapp context. * @param name The name of the attribute * @return The value of the attribute */ public Object getAttribute(String name) { return webAppContext.getAttribute(name); } /** * Get the pathname to the webapps files. * @return the pathname as a URL * @throws IOException if 'webapps' directory cannot be found on CLASSPATH. */ protected String getWebAppsPath() throws IOException { URL url = getClass().getClassLoader().getResource("webapps"); if (url == null) throw new IOException("webapps not found in CLASSPATH"); return url.toString(); } /** * Get the port that the server is on * @return the port */ public int getPort() { return webServer.getConnectors()[0].getLocalPort(); } /** * Set the min, max number of worker threads (simultaneous connections). */ public void setThreads(int min, int max) { QueuedThreadPool pool = (QueuedThreadPool) webServer.getThreadPool() ; pool.setMinThreads(min); pool.setMaxThreads(max); } /** * Configure an ssl listener on the server. 
* @param addr address to listen on * @param keystore location of the keystore * @param storPass password for the keystore * @param keyPass password for the key * @deprecated Use {@link #addSslListener(InetSocketAddress, Configuration, boolean)} */ @Deprecated public void addSslListener(InetSocketAddress addr, String keystore, String storPass, String keyPass) throws IOException { if (webServer.isStarted()) { throw new IOException("Failed to add ssl listener"); } SslSocketConnector sslListener = new SslSocketConnector(); sslListener.setHost(addr.getHostName()); sslListener.setPort(addr.getPort()); sslListener.setKeystore(keystore); sslListener.setPassword(storPass); sslListener.setKeyPassword(keyPass); webServer.addConnector(sslListener); } /** * Configure an ssl listener on the server. * @param addr address to listen on * @param sslConf conf to retrieve ssl options * @param needClientAuth whether client authentication is required */ public void addSslListener(InetSocketAddress addr, Configuration sslConf, boolean needClientAuth) throws IOException { if (webServer.isStarted()) { throw new IOException("Failed to add ssl listener"); } if (needClientAuth) { // setting up SSL truststore for authenticating clients System.setProperty("javax.net.ssl.trustStore", sslConf.get( "ssl.server.truststore.location", "")); System.setProperty("javax.net.ssl.trustStorePassword", sslConf.get( "ssl.server.truststore.password", "")); System.setProperty("javax.net.ssl.trustStoreType", sslConf.get( "ssl.server.truststore.type", "jks")); } SslSocketConnector sslListener = new SslSocketConnector(); sslListener.setHost(addr.getHostName()); sslListener.setPort(addr.getPort()); sslListener.setKeystore(sslConf.get("ssl.server.keystore.location")); sslListener.setPassword(sslConf.get("ssl.server.keystore.password", "")); sslListener.setKeyPassword(sslConf.get("ssl.server.keystore.keypassword", "")); sslListener.setKeystoreType(sslConf.get("ssl.server.keystore.type", "jks")); 
sslListener.setNeedClientAuth(needClientAuth); webServer.addConnector(sslListener); } /** * Start the server. Does not wait for the server to start. */ public void start() throws IOException { try { int port = 0; int oriPort = listener.getPort(); // The original requested port while (true) { try { port = webServer.getConnectors()[0].getLocalPort(); LOG.info("Port returned by webServer.getConnectors()[0]." + "getLocalPort() before open() is "+ port + ". Opening the listener on " + oriPort); listener.open(); port = listener.getLocalPort(); LOG.info("listener.getLocalPort() returned " + listener.getLocalPort() + " webServer.getConnectors()[0].getLocalPort() returned " + webServer.getConnectors()[0].getLocalPort()); //Workaround to handle the problem reported in HADOOP-4744 if (port < 0) { Thread.sleep(100); int numRetries = 1; while (port < 0) { LOG.warn("listener.getLocalPort returned " + port); if (numRetries++ > MAX_RETRIES) { throw new Exception(" listener.getLocalPort is returning " + "less than 0 even after " +numRetries+" resets"); } for (int i = 0; i < 2; i++) { LOG.info("Retrying listener.getLocalPort()"); port = listener.getLocalPort(); if (port > 0) { break; } Thread.sleep(200); } if (port > 0) { break; } LOG.info("Bouncing the listener"); listener.close(); Thread.sleep(1000); listener.setPort(oriPort == 0 ? 0 : (oriPort += 1)); listener.open(); Thread.sleep(100); port = listener.getLocalPort(); } } //Workaround end LOG.info("Jetty bound to port " + port); webServer.start(); + // Workaround for HADOOP-6386 + port = listener.getLocalPort(); + if (port < 0) { + LOG.warn("Bounds port is " + port + " after webserver start"); + Random r = new Random(1000); + for (int i = 0; i < MAX_RETRIES/2; i++) { + try { + webServer.stop(); + } catch (Exception e) { + LOG.warn("Can't stop web-server", e); + } + Thread.sleep(r.nextInt()); + + listener.setPort(oriPort == 0 ? 
0 : (oriPort += 1)); + listener.open(); + Thread.sleep(100); + webServer.start(); + LOG.info(i + "attempts to restart webserver"); + port = listener.getLocalPort(); + if (port > 0) + break; + } + if (port < 0) + throw new Exception("listener.getLocalPort() is returning " + + "less than 0 even after " +MAX_RETRIES+" resets"); + } + // End of HADOOP-6386 workaround break; } catch (IOException ex) { // if this is a bind exception, // then try the next port number. if (ex instanceof BindException) { if (!findPort) { throw (BindException) ex; } } else { LOG.info("HttpServer.start() threw a non Bind IOException"); throw ex; } } catch (MultiException ex) { LOG.info("HttpServer.start() threw a MultiException"); throw ex; } listener.setPort((oriPort += 1)); } } catch (IOException e) { throw e; } catch (Exception e) { throw new IOException("Problem starting http server", e); } } /** * stop the server */ public void stop() throws Exception { listener.close(); webServer.stop(); } public void join() throws InterruptedException { webServer.join(); } /** * A very simple servlet to serve up a text representation of the current * stack traces. It both returns the stacks to the caller and logs them. * Currently the stack traces are done sequentially rather than exactly the * same data. */ public static class StackServlet extends HttpServlet { private static final long serialVersionUID = -6284183679759467039L; @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { PrintWriter out = new PrintWriter(response.getOutputStream()); ReflectionUtils.printThreadInfo(out, ""); out.close(); ReflectionUtils.logThreadInfo(LOG, "jsp requested", 1); } } }
jaxlaw/hadoop-common
5c58d39f129dc8158e2e4a1ee27f25c10b7b6ed8
MAPREDUCE:1185 from https://issues.apache.org/jira/secure/attachment/12426630/patch-1185-3-ydist.txt
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 0933533..6c4df94 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,433 +1,436 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383001 HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) + MAPREDUCE-1185. Redirect running job url to history url if job is already + retired. (Amareshwari Sriramadasu and Sharad Agarwal via sharad) + yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. 
HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. 
Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. 
Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. 
yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. 
http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. 
http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. 
http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. 
Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/mapred/org/apache/hadoop/mapred/JobHistory.java b/src/mapred/org/apache/hadoop/mapred/JobHistory.java index 2269f14..9b49725 100644 --- a/src/mapred/org/apache/hadoop/mapred/JobHistory.java +++ b/src/mapred/org/apache/hadoop/mapred/JobHistory.java @@ -1,752 +1,783 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; import java.io.BufferedReader; import java.io.File; import java.io.FileFilter; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStreamReader; import java.io.PrintWriter; import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.net.URLEncoder; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; +import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.StringUtils; /** * Provides methods for writing to and reading from job history. * Job History works in an append mode, JobHistory and its inner classes provide methods * to log job events. * * JobHistory is split into multiple files, format of each file is plain text where each line * is of the format [type (key=value)*], where type identifies the type of the record. * Type maps to UID of one of the inner classes of this class. * * Job history is maintained in a master index which contains star/stop times of all jobs with * a few other job level properties. Apart from this each job's history is maintained in a seperate history * file. 
name of job history files follows the format jobtrackerId_jobid * * For parsing the job history it supports a listener based interface where each line is parsed * and passed to listener. The listener can create an object model of history or look for specific * events and discard rest of the history. * * CHANGE LOG : * Version 0 : The history has the following format : * TAG KEY1="VALUE1" KEY2="VALUE2" and so on. TAG can be Job, Task, MapAttempt or ReduceAttempt. Note that a '"' is the line delimiter. * Version 1 : Changes the line delimiter to '.' Values are now escaped for unambiguous parsing. Added the Meta tag to store version info. */ public class JobHistory { static final long VERSION = 1L; public static final Log LOG = LogFactory.getLog(JobHistory.class); private static final String DELIMITER = " "; static final char LINE_DELIMITER_CHAR = '.'; static final char[] charsToEscape = new char[] {'"', '=', LINE_DELIMITER_CHAR}; static final String DIGITS = "[0-9]+"; static final String KEY = "(\\w+)"; // value is any character other than quote, but escaped quotes can be there static final String VALUE = "[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*"; static final Pattern pattern = Pattern.compile(KEY + "=" + "\"" + VALUE + "\""); public static final int JOB_NAME_TRIM_LENGTH = 50; private static String JOBTRACKER_UNIQUE_STRING = null; private static String LOG_DIR = null; private static boolean disableHistory = true; private static final String SECONDARY_FILE_SUFFIX = ".recover"; private static long jobHistoryBlockSize = 0; private static String jobtrackerHostname; private static JobHistoryFilesManager fileManager = null; final static FsPermission HISTORY_DIR_PERMISSION = FsPermission.createImmutable((short) 0755); // rwxr-xr-x final static FsPermission HISTORY_FILE_PERMISSION = FsPermission.createImmutable((short) 0744); // rwxr--r-- private static FileSystem LOGDIR_FS; // log dir filesystem private static FileSystem DONEDIR_FS; // Done dir filesystem private static JobConf 
jtConf; private static Path DONE = null; // folder for completed jobs /** * A filter for conf files */ private static final PathFilter CONF_FILTER = new PathFilter() { public boolean accept(Path path) { return path.getName().endsWith("_conf.xml"); } }; + private static Map<JobID, MovedFileInfo> jobHistoryFileMap = + Collections.<JobID,MovedFileInfo>synchronizedMap( + new LinkedHashMap<JobID, MovedFileInfo>()); + + private static class MovedFileInfo { + private final String historyFile; + private final long timestamp; + public MovedFileInfo(String historyFile, long timestamp) { + this.historyFile = historyFile; + this.timestamp = timestamp; + } + } + + /** + * Given the job id, return the history file path from the cache + */ + public static String getHistoryFilePath(JobID jobId) { + MovedFileInfo info = jobHistoryFileMap.get(jobId); + if (info == null) { + return null; + } + return info.historyFile; + } + /** * A class that manages all the files related to a job. For now * - writers : list of open files * - job history filename * - job conf filename */ private static class JobHistoryFilesManager { // a private (virtual) folder for all the files related to a running job private static class FilesHolder { ArrayList<PrintWriter> writers = new ArrayList<PrintWriter>(); Path historyFilename; // path of job history file Path confFilename; // path of job's conf } private ThreadPoolExecutor executor = null; private final Configuration conf; private final JobTracker jobTracker; // cache from job-key to files associated with it. 
private Map<JobID, FilesHolder> fileCache = new ConcurrentHashMap<JobID, FilesHolder>(); JobHistoryFilesManager(Configuration conf, JobTracker jobTracker) throws IOException { this.conf = conf; this.jobTracker = jobTracker; } void start() { executor = new ThreadPoolExecutor(1, 3, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>()); } private FilesHolder getFileHolder(JobID id) { FilesHolder holder = fileCache.get(id); if (holder == null) { holder = new FilesHolder(); fileCache.put(id, holder); } return holder; } void addWriter(JobID id, PrintWriter writer) { FilesHolder holder = getFileHolder(id); holder.writers.add(writer); } void setHistoryFile(JobID id, Path file) { FilesHolder holder = getFileHolder(id); holder.historyFilename = file; } void setConfFile(JobID id, Path file) { FilesHolder holder = getFileHolder(id); holder.confFilename = file; } ArrayList<PrintWriter> getWriters(JobID id) { FilesHolder holder = fileCache.get(id); return holder == null ? null : holder.writers; } Path getHistoryFile(JobID id) { FilesHolder holder = fileCache.get(id); return holder == null ? null : holder.historyFilename; } Path getConfFileWriters(JobID id) { FilesHolder holder = fileCache.get(id); return holder == null ? 
null : holder.confFilename; } void purgeJob(JobID id) { fileCache.remove(id); } void moveToDone(final JobID id) { if (disableHistory) { return; } final List<Path> paths = new ArrayList<Path>(); final Path historyFile = fileManager.getHistoryFile(id); if (historyFile == null) { LOG.info("No file for job-history with " + id + " found in cache!"); } else { paths.add(historyFile); } final Path confPath = fileManager.getConfFileWriters(id); if (confPath == null) { LOG.info("No file for jobconf with " + id + " found in cache!"); } else { paths.add(confPath); } executor.execute(new Runnable() { public void run() { //move the files to DONE folder try { for (Path path : paths) { //check if path exists, in case of retries it may not exist if (LOGDIR_FS.exists(path)) { LOG.info("Moving " + path.toString() + " to " + DONE.toString()); DONEDIR_FS.moveFromLocalFile(path, DONE); DONEDIR_FS.setPermission(new Path(DONE, path.getName()), new FsPermission(HISTORY_FILE_PERMISSION)); } } } catch (Throwable e) { LOG.error("Unable to move history file to DONE folder.", e); } String historyFileDonePath = null; if (historyFile != null) { historyFileDonePath = new Path(DONE, historyFile.getName()).toString(); } + + jobHistoryFileMap.put(id, new MovedFileInfo(historyFileDonePath, + System.currentTimeMillis())); jobTracker.historyFileCopied(id, historyFileDonePath); //purge the job from the cache fileManager.purgeJob(id); } }); } } /** * Record types are identifiers for each line of log in history files. * A record type appears as the first token in a single line of log. */ public static enum RecordTypes { Jobtracker, Job, Task, MapAttempt, ReduceAttempt, Meta } /** * Job history files contain key="value" pairs, where keys belong to this enum. * It acts as a global namespace for all keys. 
*/ public static enum Keys { JOBTRACKERID, START_TIME, FINISH_TIME, JOBID, JOBNAME, USER, JOBCONF, SUBMIT_TIME, LAUNCH_TIME, TOTAL_MAPS, TOTAL_REDUCES, FAILED_MAPS, FAILED_REDUCES, FINISHED_MAPS, FINISHED_REDUCES, JOB_STATUS, TASKID, HOSTNAME, TASK_TYPE, ERROR, TASK_ATTEMPT_ID, TASK_STATUS, COPY_PHASE, SORT_PHASE, REDUCE_PHASE, SHUFFLE_FINISHED, SORT_FINISHED, COUNTERS, SPLITS, JOB_PRIORITY, HTTP_PORT, TRACKER_NAME, STATE_STRING, VERSION, MAP_COUNTERS, REDUCE_COUNTERS } /** * This enum contains some of the values commonly used by history log events. * since values in history can only be strings - Values.name() is used in * most places in history file. */ public static enum Values { SUCCESS, FAILED, KILLED, MAP, REDUCE, CLEANUP, RUNNING, PREP, SETUP } /** * Initialize JobHistory files. * @param conf Jobconf of the job tracker. * @param hostname jobtracker's hostname * @param jobTrackerStartTime jobtracker's start time * @return true if intialized properly * false otherwise */ public static boolean init(JobTracker jobTracker, JobConf conf, String hostname, long jobTrackerStartTime){ try { LOG_DIR = conf.get("hadoop.job.history.location" , "file:///" + new File( System.getProperty("hadoop.log.dir")).getAbsolutePath() + File.separator + "history"); JOBTRACKER_UNIQUE_STRING = hostname + "_" + String.valueOf(jobTrackerStartTime) + "_"; jobtrackerHostname = hostname; Path logDir = new Path(LOG_DIR); LOGDIR_FS = logDir.getFileSystem(conf); if (!LOGDIR_FS.exists(logDir)){ if (!LOGDIR_FS.mkdirs(logDir, new FsPermission(HISTORY_DIR_PERMISSION))) { throw new IOException("Mkdirs failed to create " + logDir.toString()); } } conf.set("hadoop.job.history.location", LOG_DIR); disableHistory = false; // set the job history block size (default is 3MB) jobHistoryBlockSize = conf.getLong("mapred.jobtracker.job.history.block.size", 3 * 1024 * 1024); jtConf = conf; // initialize the file manager fileManager = new JobHistoryFilesManager(conf, jobTracker); } catch(IOException e) { 
LOG.error("Failed to initialize JobHistory log file", e); disableHistory = true; } return !(disableHistory); } static boolean initDone(JobConf conf, FileSystem fs){ try { //if completed job history location is set, use that String doneLocation = conf. get("mapred.job.tracker.history.completed.location"); if (doneLocation != null) { DONE = fs.makeQualified(new Path(doneLocation)); DONEDIR_FS = fs; } else { DONE = new Path(LOG_DIR, "done"); DONEDIR_FS = LOGDIR_FS; } //If not already present create the done folder with appropriate //permission if (!DONEDIR_FS.exists(DONE)) { LOG.info("Creating DONE folder at "+ DONE); if (! DONEDIR_FS.mkdirs(DONE, new FsPermission(HISTORY_DIR_PERMISSION))) { throw new IOException("Mkdirs failed to create " + DONE.toString()); } } fileManager.start(); } catch(IOException e) { LOG.error("Failed to initialize JobHistory log file", e); disableHistory = true; } return !(disableHistory); } /** * Manages job-history's meta information such as version etc. * Helps in logging version information to the job-history and recover * version information from the history. */ static class MetaInfoManager implements Listener { private long version = 0L; private KeyValuePair pairs = new KeyValuePair(); // Extract the version of the history that was used to write the history public MetaInfoManager(String line) throws IOException { if (null != line) { // Parse the line parseLine(line, this, false); } } // Get the line delimiter char getLineDelim() { if (version == 0) { return '"'; } else { return LINE_DELIMITER_CHAR; } } // Checks if the values are escaped or not boolean isValueEscaped() { // Note that the values are not escaped in version 0 return version != 0; } public void handle(RecordTypes recType, Map<Keys, String> values) throws IOException { // Check if the record is of type META if (RecordTypes.Meta == recType) { pairs.handle(values); version = pairs.getLong(Keys.VERSION); // defaults to 0 } } /** * Logs history meta-info to the history file. 
This needs to be called once * per history file. * @param jobId job id, assigned by jobtracker. */ static void logMetaInfo(ArrayList<PrintWriter> writers){ if (!disableHistory){ if (null != writers){ JobHistory.log(writers, RecordTypes.Meta, new Keys[] {Keys.VERSION}, new String[] {String.valueOf(VERSION)}); } } } } /** Escapes the string especially for {@link JobHistory} */ static String escapeString(String data) { return StringUtils.escapeString(data, StringUtils.ESCAPE_CHAR, charsToEscape); } /** * Parses history file and invokes Listener.handle() for * each line of history. It can be used for looking through history * files for specific items without having to keep whole history in memory. * @param path path to history file * @param l Listener for history events * @param fs FileSystem where history file is present * @throws IOException */ public static void parseHistoryFromFS(String path, Listener l, FileSystem fs) throws IOException{ FSDataInputStream in = fs.open(new Path(path)); BufferedReader reader = new BufferedReader(new InputStreamReader (in)); try { String line = null; StringBuffer buf = new StringBuffer(); // Read the meta-info line. Note that this might a jobinfo line for files // written with older format line = reader.readLine(); // Check if the file is empty if (line == null) { return; } // Get the information required for further processing MetaInfoManager mgr = new MetaInfoManager(line); boolean isEscaped = mgr.isValueEscaped(); String lineDelim = String.valueOf(mgr.getLineDelim()); String escapedLineDelim = StringUtils.escapeString(lineDelim, StringUtils.ESCAPE_CHAR, mgr.getLineDelim()); do { buf.append(line); if (!line.trim().endsWith(lineDelim) || line.trim().endsWith(escapedLineDelim)) { buf.append("\n"); continue; } parseLine(buf.toString(), l, isEscaped); buf = new StringBuffer(); } while ((line = reader.readLine())!= null); } finally { try { reader.close(); } catch (IOException ex) {} } } /** * Parse a single line of history. 
* @param line * @param l * @throws IOException */ private static void parseLine(String line, Listener l, boolean isEscaped) throws IOException{ // extract the record type int idx = line.indexOf(' '); String recType = line.substring(0, idx); String data = line.substring(idx+1, line.length()); Matcher matcher = pattern.matcher(data); Map<Keys,String> parseBuffer = new HashMap<Keys, String>(); while(matcher.find()){ String tuple = matcher.group(0); String []parts = StringUtils.split(tuple, StringUtils.ESCAPE_CHAR, '='); String value = parts[1].substring(1, parts[1].length() -1); if (isEscaped) { value = StringUtils.unEscapeString(value, StringUtils.ESCAPE_CHAR, charsToEscape); } parseBuffer.put(Keys.valueOf(parts[0]), value); } l.handle(RecordTypes.valueOf(recType), parseBuffer); parseBuffer.clear(); } /** * Log a raw record type with keys and values. This is method is generally not used directly. * @param recordType type of log event * @param key key * @param value value */ static void log(PrintWriter out, RecordTypes recordType, Keys key, String value){ value = escapeString(value); out.println(recordType.name() + DELIMITER + key + "=\"" + value + "\"" + DELIMITER + LINE_DELIMITER_CHAR); } /** * Log a number of keys and values with record. the array length of keys and values * should be same. * @param recordType type of log event * @param keys type of log event * @param values type of log event */ static void log(ArrayList<PrintWriter> writers, RecordTypes recordType, Keys[] keys, String[] values) { StringBuffer buf = new StringBuffer(recordType.name()); buf.append(DELIMITER); for(int i =0; i< keys.length; i++){ buf.append(keys[i]); buf.append("=\""); values[i] = escapeString(values[i]); buf.append(values[i]); buf.append("\""); buf.append(DELIMITER); } buf.append(LINE_DELIMITER_CHAR); for (PrintWriter out : writers) { out.println(buf.toString()); } } /** * Returns history disable status. by default history is enabled so this * method returns false. 
* @return true if history logging is disabled, false otherwise. */ public static boolean isDisableHistory() { return disableHistory; } /** * Enable/disable history logging. Default value is false, so history * is enabled by default. * @param disableHistory true if history should be disabled, false otherwise. */ public static void setDisableHistory(boolean disableHistory) { JobHistory.disableHistory = disableHistory; } /** * Get the history location */ static Path getJobHistoryLocation() { return new Path(LOG_DIR); } /** * Get the history location for completed jobs */ static Path getCompletedJobHistoryLocation() { return DONE; } /** * Base class contais utility stuff to manage types key value pairs with enums. */ static class KeyValuePair{ private Map<Keys, String> values = new HashMap<Keys, String>(); /** * Get 'String' value for given key. Most of the places use Strings as * values so the default get' method returns 'String'. This method never returns * null to ease on GUIs. if no value is found it returns empty string "" * @param k * @return if null it returns empty string - "" */ public String get(Keys k){ String s = values.get(k); return s == null ? "" : s; } /** * Convert value from history to int and return. * if no value is found it returns 0. * @param k key */ public int getInt(Keys k){ String s = values.get(k); if (null != s){ return Integer.parseInt(s); } return 0; } /** * Convert value from history to int and return. * if no value is found it returns 0. * @param k */ public long getLong(Keys k){ String s = values.get(k); if (null != s){ return Long.parseLong(s); } return 0; } /** * Set value for the key. * @param k * @param s */ public void set(Keys k, String s){ values.put(k, s); } /** * Adds all values in the Map argument to its own values. * @param m */ public void set(Map<Keys, String> m){ values.putAll(m); } /** * Reads values back from the history, input is same Map as passed to Listener by parseHistory(). 
* @param values */ public synchronized void handle(Map<Keys, String> values){ set(values); } /** * Returns Map containing all key-values. */ public Map<Keys, String> getValues(){ return values; } } /** * Helper class for logging or reading back events related to job start, finish or failure. */ public static class JobInfo extends KeyValuePair{ private Map<String, Task> allTasks = new TreeMap<String, Task>(); /** Create new JobInfo */ public JobInfo(String jobId){ set(Keys.JOBID, jobId); } /** * Returns all map and reduce tasks <taskid-Task>. */ public Map<String, Task> getAllTasks() { return allTasks; } /** * Get the path of the locally stored job file * @param jobId id of the job * @return the path of the job file on the local file system */ public static String getLocalJobFilePath(JobID jobId){ return System.getProperty("hadoop.log.dir") + File.separator + jobId + "_conf.xml"; } /** * Helper function to encode the URL of the path of the job-history * log file. * * @param logFile path of the job-history file * @return URL encoded path * @throws IOException */ public static String encodeJobHistoryFilePath(String logFile) throws IOException { Path rawPath = new Path(logFile); String encodedFileName = null; try { encodedFileName = URLEncoder.encode(rawPath.getName(), "UTF-8"); } catch (UnsupportedEncodingException uee) { IOException ioe = new IOException(); ioe.initCause(uee); ioe.setStackTrace(uee.getStackTrace()); throw ioe; } Path encodedPath = new Path(rawPath.getParent(), encodedFileName); return encodedPath.toString(); } /** * Helper function to encode the URL of the filename of the job-history * log file. 
* * @param logFileName file name of the job-history file * @return URL encoded filename * @throws IOException */ public static String encodeJobHistoryFileName(String logFileName) throws IOException { String encodedFileName = null; try { encodedFileName = URLEncoder.encode(logFileName, "UTF-8"); } catch (UnsupportedEncodingException uee) { IOException ioe = new IOException(); ioe.initCause(uee); ioe.setStackTrace(uee.getStackTrace()); throw ioe; } return encodedFileName; } /** * Helper function to decode the URL of the filename of the job-history * log file. * * @param logFileName file name of the job-history file * @return URL decoded filename * @throws IOException */ public static String decodeJobHistoryFileName(String logFileName) throws IOException { String decodedFileName = null; try { decodedFileName = URLDecoder.decode(logFileName, "UTF-8"); } catch (UnsupportedEncodingException uee) { IOException ioe = new IOException(); ioe.initCause(uee); ioe.setStackTrace(uee.getStackTrace()); throw ioe; } return decodedFileName; } /** * Get the job name from the job conf */ static String getJobName(JobConf jobConf) { String jobName = jobConf.getJobName(); if (jobName == null || jobName.length() == 0) { jobName = "NA"; } return jobName; } /** @@ -1527,545 +1558,561 @@ public class JobHistory { if (null != writer){ JobHistory.log(writer, RecordTypes.Task, new Keys[]{Keys.TASKID, Keys.FINISH_TIME}, new String[]{ taskId.toString(), String.valueOf(finishTime)}); } } } /** * Log job failed event. * @param taskId task id * @param taskType MAP or REDUCE. * @param time timestamp when job failed detected. * @param error error message for failure. 
*/ public static void logFailed(TaskID taskId, String taskType, long time, String error){ logFailed(taskId, taskType, time, error, null); } /** * @param failedDueToAttempt The attempt that caused the failure, if any */ public static void logFailed(TaskID taskId, String taskType, long time, String error, TaskAttemptID failedDueToAttempt){ if (!disableHistory){ JobID id = taskId.getJobID(); ArrayList<PrintWriter> writer = fileManager.getWriters(id); if (null != writer){ String failedAttempt = failedDueToAttempt == null ? "" : failedDueToAttempt.toString(); JobHistory.log(writer, RecordTypes.Task, new Keys[]{Keys.TASKID, Keys.TASK_TYPE, Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.ERROR, Keys.TASK_ATTEMPT_ID}, new String[]{ taskId.toString(), taskType, Values.FAILED.name(), String.valueOf(time) , error, failedAttempt}); } } } /** * Returns all task attempts for this task. <task attempt id - TaskAttempt> */ public Map<String, TaskAttempt> getTaskAttempts(){ return this.taskAttempts; } } /** * Base class for Map and Reduce TaskAttempts. */ public static class TaskAttempt extends Task{} /** * Helper class for logging or reading back events related to start, finish or failure of * a Map Attempt on a node. */ public static class MapAttempt extends TaskAttempt{ /** * Log start time of this map task attempt. * @param taskAttemptId task attempt id * @param startTime start time of task attempt as reported by task tracker. * @param hostName host name of the task attempt. * @deprecated Use * {@link #logStarted(TaskAttemptID, long, String, int, String)} */ @Deprecated public static void logStarted(TaskAttemptID taskAttemptId, long startTime, String hostName){ logStarted(taskAttemptId, startTime, hostName, -1, Values.MAP.name()); } /** * Log start time of this map task attempt. * * @param taskAttemptId task attempt id * @param startTime start time of task attempt as reported by task tracker. * @param trackerName name of the tracker executing the task attempt. 
* @param httpPort http port of the task tracker executing the task attempt * @param taskType Whether the attempt is cleanup or setup or map */ public static void logStarted(TaskAttemptID taskAttemptId, long startTime, String trackerName, int httpPort, String taskType) { if (!disableHistory){ JobID id = taskAttemptId.getJobID(); ArrayList<PrintWriter> writer = fileManager.getWriters(id); if (null != writer){ JobHistory.log(writer, RecordTypes.MapAttempt, new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.START_TIME, Keys.TRACKER_NAME, Keys.HTTP_PORT}, new String[]{taskType, taskAttemptId.getTaskID().toString(), taskAttemptId.toString(), String.valueOf(startTime), trackerName, httpPort == -1 ? "" : String.valueOf(httpPort)}); } } } /** * Log finish time of map task attempt. * @param taskAttemptId task attempt id * @param finishTime finish time * @param hostName host name * @deprecated Use * {@link #logFinished(TaskAttemptID, long, String, String, String, Counters)} */ @Deprecated public static void logFinished(TaskAttemptID taskAttemptId, long finishTime, String hostName){ logFinished(taskAttemptId, finishTime, hostName, Values.MAP.name(), "", new Counters()); } /** * Log finish time of map task attempt. 
* * @param taskAttemptId task attempt id * @param finishTime finish time * @param hostName host name * @param taskType Whether the attempt is cleanup or setup or map * @param stateString state string of the task attempt * @param counter counters of the task attempt */ public static void logFinished(TaskAttemptID taskAttemptId, long finishTime, String hostName, String taskType, String stateString, Counters counter) { if (!disableHistory){ JobID id = taskAttemptId.getJobID(); ArrayList<PrintWriter> writer = fileManager.getWriters(id); if (null != writer){ JobHistory.log(writer, RecordTypes.MapAttempt, new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.HOSTNAME, Keys.STATE_STRING, Keys.COUNTERS}, new String[]{taskType, taskAttemptId.getTaskID().toString(), taskAttemptId.toString(), Values.SUCCESS.name(), String.valueOf(finishTime), hostName, stateString, counter.makeEscapedCompactString()}); } } } /** * Log task attempt failed event. * @param taskAttemptId task attempt id * @param timestamp timestamp * @param hostName hostname of this task attempt. * @param error error message if any for this task attempt. * @deprecated Use * {@link #logFailed(TaskAttemptID, long, String, String, String)} */ @Deprecated public static void logFailed(TaskAttemptID taskAttemptId, long timestamp, String hostName, String error) { logFailed(taskAttemptId, timestamp, hostName, error, Values.MAP.name()); } /** * Log task attempt failed event. * * @param taskAttemptId task attempt id * @param timestamp timestamp * @param hostName hostname of this task attempt. * @param error error message if any for this task attempt. 
* @param taskType Whether the attempt is cleanup or setup or map */ public static void logFailed(TaskAttemptID taskAttemptId, long timestamp, String hostName, String error, String taskType) { if (!disableHistory){ JobID id = taskAttemptId.getJobID(); ArrayList<PrintWriter> writer = fileManager.getWriters(id); if (null != writer){ JobHistory.log(writer, RecordTypes.MapAttempt, new Keys[]{Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR}, new String[]{ taskType, taskAttemptId.getTaskID().toString(), taskAttemptId.toString(), Values.FAILED.name(), String.valueOf(timestamp), hostName, error}); } } } /** * Log task attempt killed event. * @param taskAttemptId task attempt id * @param timestamp timestamp * @param hostName hostname of this task attempt. * @param error error message if any for this task attempt. * @deprecated Use * {@link #logKilled(TaskAttemptID, long, String, String, String)} */ @Deprecated public static void logKilled(TaskAttemptID taskAttemptId, long timestamp, String hostName, String error){ logKilled(taskAttemptId, timestamp, hostName, error, Values.MAP.name()); } /** * Log task attempt killed event. * * @param taskAttemptId task attempt id * @param timestamp timestamp * @param hostName hostname of this task attempt. * @param error error message if any for this task attempt. 
* @param taskType Whether the attempt is cleanup or setup or map */ public static void logKilled(TaskAttemptID taskAttemptId, long timestamp, String hostName, String error, String taskType) { if (!disableHistory){ JobID id = taskAttemptId.getJobID(); ArrayList<PrintWriter> writer = fileManager.getWriters(id); if (null != writer){ JobHistory.log(writer, RecordTypes.MapAttempt, new Keys[]{Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR}, new String[]{ taskType, taskAttemptId.getTaskID().toString(), taskAttemptId.toString(), Values.KILLED.name(), String.valueOf(timestamp), hostName, error}); } } } } /** * Helper class for logging or reading back events related to start, finish or failure of * a Map Attempt on a node. */ public static class ReduceAttempt extends TaskAttempt{ /** * Log start time of Reduce task attempt. * @param taskAttemptId task attempt id * @param startTime start time * @param hostName host name * @deprecated Use * {@link #logStarted(TaskAttemptID, long, String, int, String)} */ @Deprecated public static void logStarted(TaskAttemptID taskAttemptId, long startTime, String hostName){ logStarted(taskAttemptId, startTime, hostName, -1, Values.REDUCE.name()); } /** * Log start time of Reduce task attempt. 
* * @param taskAttemptId task attempt id * @param startTime start time * @param trackerName tracker name * @param httpPort the http port of the tracker executing the task attempt * @param taskType Whether the attempt is cleanup or setup or reduce */ public static void logStarted(TaskAttemptID taskAttemptId, long startTime, String trackerName, int httpPort, String taskType) { if (!disableHistory){ JobID id = taskAttemptId.getJobID(); ArrayList<PrintWriter> writer = fileManager.getWriters(id); if (null != writer){ JobHistory.log(writer, RecordTypes.ReduceAttempt, new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.START_TIME, Keys.TRACKER_NAME, Keys.HTTP_PORT}, new String[]{taskType, taskAttemptId.getTaskID().toString(), taskAttemptId.toString(), String.valueOf(startTime), trackerName, httpPort == -1 ? "" : String.valueOf(httpPort)}); } } } /** * Log finished event of this task. * @param taskAttemptId task attempt id * @param shuffleFinished shuffle finish time * @param sortFinished sort finish time * @param finishTime finish time of task * @param hostName host name where task attempt executed * @deprecated Use * {@link #logFinished(TaskAttemptID, long, long, long, String, String, String, Counters)} */ @Deprecated public static void logFinished(TaskAttemptID taskAttemptId, long shuffleFinished, long sortFinished, long finishTime, String hostName){ logFinished(taskAttemptId, shuffleFinished, sortFinished, finishTime, hostName, Values.REDUCE.name(), "", new Counters()); } /** * Log finished event of this task. 
* * @param taskAttemptId task attempt id * @param shuffleFinished shuffle finish time * @param sortFinished sort finish time * @param finishTime finish time of task * @param hostName host name where task attempt executed * @param taskType Whether the attempt is cleanup or setup or reduce * @param stateString the state string of the attempt * @param counter counters of the attempt */ public static void logFinished(TaskAttemptID taskAttemptId, long shuffleFinished, long sortFinished, long finishTime, String hostName, String taskType, String stateString, Counters counter) { if (!disableHistory){ JobID id = taskAttemptId.getJobID(); ArrayList<PrintWriter> writer = fileManager.getWriters(id); if (null != writer){ JobHistory.log(writer, RecordTypes.ReduceAttempt, new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, Keys.SHUFFLE_FINISHED, Keys.SORT_FINISHED, Keys.FINISH_TIME, Keys.HOSTNAME, Keys.STATE_STRING, Keys.COUNTERS}, new String[]{taskType, taskAttemptId.getTaskID().toString(), taskAttemptId.toString(), Values.SUCCESS.name(), String.valueOf(shuffleFinished), String.valueOf(sortFinished), String.valueOf(finishTime), hostName, stateString, counter.makeEscapedCompactString()}); } } } /** * Log failed reduce task attempt. * @param taskAttemptId task attempt id * @param timestamp time stamp when task failed * @param hostName host name of the task attempt. * @param error error message of the task. * @deprecated Use * {@link #logFailed(TaskAttemptID, long, String, String, String)} */ @Deprecated public static void logFailed(TaskAttemptID taskAttemptId, long timestamp, String hostName, String error){ logFailed(taskAttemptId, timestamp, hostName, error, Values.REDUCE.name()); } /** * Log failed reduce task attempt. * * @param taskAttemptId task attempt id * @param timestamp time stamp when task failed * @param hostName host name of the task attempt. * @param error error message of the task. 
* @param taskType Whether the attempt is cleanup or setup or reduce */ public static void logFailed(TaskAttemptID taskAttemptId, long timestamp, String hostName, String error, String taskType) { if (!disableHistory){ JobID id = taskAttemptId.getJobID(); ArrayList<PrintWriter> writer = fileManager.getWriters(id); if (null != writer){ JobHistory.log(writer, RecordTypes.ReduceAttempt, new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR }, new String[]{ taskType, taskAttemptId.getTaskID().toString(), taskAttemptId.toString(), Values.FAILED.name(), String.valueOf(timestamp), hostName, error }); } } } /** * Log killed reduce task attempt. * @param taskAttemptId task attempt id * @param timestamp time stamp when task failed * @param hostName host name of the task attempt. * @param error error message of the task. * @deprecated Use * {@link #logKilled(TaskAttemptID, long, String, String, String)} */ @Deprecated public static void logKilled(TaskAttemptID taskAttemptId, long timestamp, String hostName, String error) { logKilled(taskAttemptId, timestamp, hostName, error, Values.REDUCE.name()); } /** * Log killed reduce task attempt. * * @param taskAttemptId task attempt id * @param timestamp time stamp when task failed * @param hostName host name of the task attempt. * @param error error message of the task. 
* @param taskType Whether the attempt is cleanup or setup or reduce */ public static void logKilled(TaskAttemptID taskAttemptId, long timestamp, String hostName, String error, String taskType) { if (!disableHistory){ JobID id = taskAttemptId.getJobID(); ArrayList<PrintWriter> writer = fileManager.getWriters(id); if (null != writer){ JobHistory.log(writer, RecordTypes.ReduceAttempt, new Keys[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR }, new String[]{ taskType, taskAttemptId.getTaskID().toString(), taskAttemptId.toString(), Values.KILLED.name(), String.valueOf(timestamp), hostName, error }); } } } } /** * Callback interface for reading back log events from JobHistory. This interface * should be implemented and passed to JobHistory.parseHistory() * */ public static interface Listener{ /** * Callback method for history parser. * @param recType type of record, which is the first entry in the line. * @param values a map of key-value pairs as thry appear in history. * @throws IOException */ public void handle(RecordTypes recType, Map<Keys, String> values) throws IOException; } /** * Delete history files older than one month. Update master index and remove all * jobs older than one month. Also if a job tracker has no jobs in last one month * remove reference to the job tracker. * */ public static class HistoryCleaner implements Runnable{ static final long ONE_DAY_IN_MS = 24 * 60 * 60 * 1000L; static final long THIRTY_DAYS_IN_MS = 30 * ONE_DAY_IN_MS; private long now; private static boolean isRunning = false; private static long lastRan = 0; /** * Cleans up history data. 
*/ public void run(){ if (isRunning){ return; } now = System.currentTimeMillis(); // clean history only once a day at max if (lastRan != 0 && (now - lastRan) < ONE_DAY_IN_MS) { return; } lastRan = now; isRunning = true; try { FileStatus[] historyFiles = DONEDIR_FS.listStatus(DONE); // delete if older than 30 days if (historyFiles != null) { for (FileStatus f : historyFiles) { if (now - f.getModificationTime() > THIRTY_DAYS_IN_MS) { DONEDIR_FS.delete(f.getPath(), true); LOG.info("Deleting old history file : " + f.getPath()); } } } + + //walking over the map to purge entries from jobHistoryFileMap + synchronized (jobHistoryFileMap) { + Iterator<Entry<JobID, MovedFileInfo>> it = + jobHistoryFileMap.entrySet().iterator(); + while (it.hasNext()) { + MovedFileInfo info = it.next().getValue(); + if (now - info.timestamp > THIRTY_DAYS_IN_MS) { + it.remove(); + } else { + //since entries are in sorted timestamp order, no more entries + //are required to be checked + break; + } + } + } } catch (IOException ie) { LOG.info("Error cleaning up history directory" + StringUtils.stringifyException(ie)); } isRunning = false; } static long getLastRan() { return lastRan; } } /** * Return the TaskLogsUrl of a particular TaskAttempt * * @param attempt * @return the taskLogsUrl. null if http-port or tracker-name or * task-attempt-id are unavailable. 
*/ public static String getTaskLogsUrl(JobHistory.TaskAttempt attempt) { if (attempt.get(Keys.HTTP_PORT).equals("") || attempt.get(Keys.TRACKER_NAME).equals("") || attempt.get(Keys.TASK_ATTEMPT_ID).equals("")) { return null; } String taskTrackerName = JobInProgress.convertTrackerNameToHostName( attempt.get(Keys.TRACKER_NAME)); return TaskLogServlet.getTaskLogUrl(taskTrackerName, attempt .get(Keys.HTTP_PORT), attempt.get(Keys.TASK_ATTEMPT_ID)); } } diff --git a/src/test/findbugsExcludeFile.xml b/src/test/findbugsExcludeFile.xml index 0f76bd5..35f1665 100644 --- a/src/test/findbugsExcludeFile.xml +++ b/src/test/findbugsExcludeFile.xml @@ -1,79 +1,83 @@ <FindBugsFilter> <Match> <Package name="org.apache.hadoop.record.compiler.generated" /> </Match> <Match> <Bug pattern="EI_EXPOSE_REP" /> </Match> <Match> <Bug pattern="EI_EXPOSE_REP2" /> </Match> <Match> <Bug pattern="SE_COMPARATOR_SHOULD_BE_SERIALIZABLE" /> </Match> <Match> <Class name="~.*_jsp" /> <Bug pattern="DLS_DEAD_LOCAL_STORE" /> </Match> + <Match> + <Class name="org.apache.hadoop.mapred.jobdetails_jsp"/> + <Bug pattern="HRS_REQUEST_PARAMETER_TO_HTTP_HEADER"/> + </Match> <Match> <Field name="_jspx_dependants" /> <Bug pattern="UWF_UNWRITTEN_FIELD" /> </Match> <!-- Inconsistent synchronization for Client.Connection.out is is intentional to make a connection to be closed instantly. --> <Match> <Class name="org.apache.hadoop.ipc.Client$Connection" /> <Field name="out" /> <Bug pattern="IS2_INCONSISTENT_SYNC" /> </Match> <Match> <Class name="org.apache.hadoop.mapred.OutputCommitter" /> <Or> <Method name="abortJob" /> <Method name="commitJob" /> <Method name="cleanupJob" /> </Or> <Bug pattern="NM_WRONG_PACKAGE_INTENTIONAL" /> </Match> <!-- TFile --> <Match> <Class name="org.apache.hadoop.io.file.tfile.Chunk$ChunkDecoder" /> <Method name="close" /> <Bug pattern="SR_NOT_CHECKED" /> </Match> <!-- The purpose of skip() is to drain remaining bytes of the chunk-encoded stream (one chunk at a time). 
The termination condition is checked by checkEOF(). --> <Match> <Class name="org.apache.hadoop.io.file.tfile.Utils" /> <Method name="writeVLong" /> <Bug pattern="SF_SWITCH_FALLTHROUGH" /> </Match> <!-- The switch condition fall through is intentional and for performance purposes. --> <Match> <Class name="org.apache.hadoop.mapred.Task" /> <Method name="reportFatalError" /> <Bug pattern="DM_EXIT" /> </Match> <!-- We need to cast objects between old and new api objects --> <Match> <Class name="org.apache.hadoop.mapred.OutputCommitter" /> <Bug pattern="BC_UNCONFIRMED_CAST" /> </Match> <Match> <Class name="org.apache.hadoop.mapred.FileOutputCommitter" /> <Bug pattern="NM_WRONG_PACKAGE_INTENTIONAL" /> </Match> </FindBugsFilter> diff --git a/src/test/org/apache/hadoop/mapred/TestJobRetire.java b/src/test/org/apache/hadoop/mapred/TestJobRetire.java new file mode 100644 index 0000000..b2a879d --- /dev/null +++ b/src/test/org/apache/hadoop/mapred/TestJobRetire.java @@ -0,0 +1,111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapred; + +import java.io.File; +import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.URL; + +import junit.framework.TestCase; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.JobTracker.RetireJobInfo; + +/** + * Test if the job retire works fine. + */ +public class TestJobRetire extends TestCase { + static final Path testDir = + new Path(System.getProperty("test.build.data","/tmp"), + "job-expiry-testing"); + + public void testJobRetire() throws Exception { + MiniMRCluster mr = null; + try { + JobConf conf = new JobConf(); + + conf.setLong("mapred.job.tracker.retiredjobs.cache.size", 1); + conf.setLong("mapred.jobtracker.retirejob.interval", 0); + conf.setLong("mapred.jobtracker.retirejob.check", 0); + conf.getLong("mapred.jobtracker.completeuserjobs.maximum", 0); + mr = new MiniMRCluster(0, 0, 1, "file:///", 1, null, null, null, conf, 0); + JobConf jobConf = mr.createJobConf(); + JobTracker jobtracker = mr.getJobTrackerRunner().getJobTracker(); + + Path inDir = new Path(testDir, "input1"); + Path outDir = new Path(testDir, "output1"); + + JobID id1 = validateJobRetire(jobConf, inDir, outDir, jobtracker); + + outDir = new Path(testDir, "output2"); + JobID id2 = validateJobRetire(jobConf, inDir, outDir, jobtracker); + + assertNull("Job not removed from cache", jobtracker.getJobStatus(id1)); + + assertEquals("Total job in cache not correct", + 1, jobtracker.getAllJobs().length); + } finally { + if (mr != null) { mr.shutdown();} + } + } + + private JobID validateJobRetire(JobConf jobConf, Path inDir, Path outDir, + JobTracker jobtracker) throws IOException { + + RunningJob rj = UtilsForTests.runJob(jobConf, inDir, outDir, 0, 0); + rj.waitForCompletion(); + assertTrue(rj.isSuccessful()); + JobID id = rj.getID(); + + JobInProgress job = jobtracker.getJob(id); + //wait for job to get retired + for (int i = 0; i < 10 && job != null; i++) { + UtilsForTests.waitFor(1000); + job = 
jobtracker.getJob(id); + } + assertNull("Job did not retire", job); + RetireJobInfo retired = jobtracker.retireJobs.get(id); + assertTrue("History url not set", retired.getHistoryFile() != null && + retired.getHistoryFile().length() > 0); + assertNotNull("Job is not in cache", jobtracker.getJobStatus(id)); + + // get the job conf filename + String name = jobtracker.getLocalJobFilePath(id); + File file = new File(name); + + assertFalse("JobConf file not deleted", file.exists()); + //test redirection + URL jobUrl = new URL(rj.getTrackingURL()); + HttpURLConnection conn = (HttpURLConnection) jobUrl.openConnection(); + conn.setInstanceFollowRedirects(false); + conn.connect(); + assertEquals(HttpURLConnection.HTTP_MOVED_TEMP, conn.getResponseCode()); + conn.disconnect(); + + URL redirectedUrl = new URL(conn.getHeaderField("Location")); + conn = (HttpURLConnection) redirectedUrl.openConnection(); + conn.connect(); + assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + conn.disconnect(); + + return id; + } + +} diff --git a/src/webapps/job/jobdetails.jsp b/src/webapps/job/jobdetails.jsp index 5546a4a..563c35e 100644 --- a/src/webapps/job/jobdetails.jsp +++ b/src/webapps/job/jobdetails.jsp @@ -1,385 +1,392 @@ <%@ page contentType="text/html; charset=UTF-8" import="javax.servlet.*" import="javax.servlet.http.*" import="java.io.*" import="java.text.*" import="java.util.*" import="java.text.DecimalFormat" import="org.apache.hadoop.mapred.*" import="org.apache.hadoop.util.*" %> <% JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); String trackerName = StringUtils.simpleHostname(tracker.getJobTrackerMachine()); %> <%! 
private static final String PRIVATE_ACTIONS_KEY = "webinterface.private.actions"; private void printTaskSummary(JspWriter out, String jobId, String kind, double completePercent, TaskInProgress[] tasks ) throws IOException { int totalTasks = tasks.length; int runningTasks = 0; int finishedTasks = 0; int killedTasks = 0; int failedTaskAttempts = 0; int killedTaskAttempts = 0; for(int i=0; i < totalTasks; ++i) { TaskInProgress task = tasks[i]; if (task.isComplete()) { finishedTasks += 1; } else if (task.isRunning()) { runningTasks += 1; } else if (task.wasKilled()) { killedTasks += 1; } failedTaskAttempts += task.numTaskFailures(); killedTaskAttempts += task.numKilledTasks(); } int pendingTasks = totalTasks - runningTasks - killedTasks - finishedTasks; out.print("<tr><th><a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind + "&pagenum=1\">" + kind + "</a></th><td align=\"right\">" + StringUtils.formatPercent(completePercent, 2) + ServletUtil.percentageGraph((int)(completePercent * 100), 80) + "</td><td align=\"right\">" + totalTasks + "</td><td align=\"right\">" + ((pendingTasks > 0) ? "<a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind + "&pagenum=1" + "&state=pending\">" + pendingTasks + "</a>" : "0") + "</td><td align=\"right\">" + ((runningTasks > 0) ? "<a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind + "&pagenum=1" + "&state=running\">" + runningTasks + "</a>" : "0") + "</td><td align=\"right\">" + ((finishedTasks > 0) ?"<a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind + "&pagenum=1" + "&state=completed\">" + finishedTasks + "</a>" : "0") + "</td><td align=\"right\">" + ((killedTasks > 0) ?"<a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind + "&pagenum=1" + "&state=killed\">" + killedTasks + "</a>" : "0") + "</td><td align=\"right\">" + ((failedTaskAttempts > 0) ? ("<a href=\"jobfailures.jsp?jobid=" + jobId + "&kind=" + kind + "&cause=failed\">" + failedTaskAttempts + "</a>") : "0" ) + " / " + ((killedTaskAttempts > 0) ? 
("<a href=\"jobfailures.jsp?jobid=" + jobId + "&kind=" + kind + "&cause=killed\">" + killedTaskAttempts + "</a>") : "0" ) + "</td></tr>\n"); } private void printJobLevelTaskSummary(JspWriter out, String jobId, String kind, TaskInProgress[] tasks ) throws IOException { int totalTasks = tasks.length; int runningTasks = 0; int finishedTasks = 0; int killedTasks = 0; for(int i=0; i < totalTasks; ++i) { TaskInProgress task = tasks[i]; if (task.isComplete()) { finishedTasks += 1; } else if (task.isRunning()) { runningTasks += 1; } else if (task.isFailed()) { killedTasks += 1; } } int pendingTasks = totalTasks - runningTasks - killedTasks - finishedTasks; out.print(((runningTasks > 0) ? "<a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind + "&pagenum=1" + "&state=running\">" + " Running" + "</a>" : ((pendingTasks > 0) ? " Pending" : ((finishedTasks > 0) ?"<a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind + "&pagenum=1" + "&state=completed\">" + " Successful" + "</a>" : ((killedTasks > 0) ?"<a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind + "&pagenum=1" + "&state=killed\">" + " Failed" + "</a>" : "None"))))); } private void printConfirm(JspWriter out, String jobId) throws IOException{ String url = "jobdetails.jsp?jobid=" + jobId; out.print("<html><head><META http-equiv=\"refresh\" content=\"15;URL=" + url+"\"></head>" + "<body><h3> Are you sure you want to kill " + jobId + " ?<h3><br><table border=\"0\"><tr><td width=\"100\">" + "<form action=\"" + url + "\" method=\"post\">" + "<input type=\"hidden\" name=\"action\" value=\"kill\" />" + "<input type=\"submit\" name=\"kill\" value=\"Kill\" />" + "</form>" + "</td><td width=\"100\"><form method=\"post\" action=\"" + url + "\"><input type=\"submit\" value=\"Cancel\" name=\"Cancel\"" + "/></form></td></tr></table></body></html>"); } %> <% String jobId = request.getParameter("jobid"); String refreshParam = request.getParameter("refresh"); if (jobId == null) { out.println("<h2>Missing 'jobid'!</h2>"); return; } 
int refresh = 60; // refresh every 60 seconds by default if (refreshParam != null) { try { refresh = Integer.parseInt(refreshParam); } catch (NumberFormatException ignored) { } } JobID jobIdObj = JobID.forName(jobId); JobInProgress job = (JobInProgress) tracker.getJob(jobIdObj); String action = request.getParameter("action"); if(JSPUtil.conf.getBoolean(PRIVATE_ACTIONS_KEY, false) && "changeprio".equalsIgnoreCase(action) && request.getMethod().equalsIgnoreCase("POST")) { tracker.setJobPriority(jobIdObj, JobPriority.valueOf(request.getParameter("prio"))); } if(JSPUtil.conf.getBoolean(PRIVATE_ACTIONS_KEY, false)) { action = request.getParameter("action"); if(action!=null && action.equalsIgnoreCase("confirm")) { printConfirm(out, jobId); return; } else if(action != null && action.equalsIgnoreCase("kill") && request.getMethod().equalsIgnoreCase("POST")) { tracker.killJob(jobIdObj); } } %> <%@page import="org.apache.hadoop.mapred.TaskGraphServlet"%> <html> <head> <% if (refresh != 0) { %> <meta http-equiv="refresh" content="<%=refresh%>"> <% } %> <title>Hadoop <%=jobId%> on <%=trackerName%></title> <link rel="stylesheet" type="text/css" href="/static/hadoop.css"> </head> <body> <h1>Hadoop <%=jobId%> on <a href="jobtracker.jsp"><%=trackerName%></a></h1> <% if (job == null) { - out.print("<b>Job " + jobId + " not found.</b><br>\n"); + String historyFile = JobHistory.getHistoryFilePath(jobIdObj); + if (historyFile == null) { + out.println("<h2>Job " + jobId + " not known!</h2>"); + return; + } + String historyUrl = "/jobdetailshistory.jsp?jobid=" + jobId + + "&logFile=" + JobHistory.JobInfo.encodeJobHistoryFilePath(historyFile); + response.sendRedirect(response.encodeRedirectURL(historyUrl)); return; } JobProfile profile = job.getProfile(); JobStatus status = job.getStatus(); int runState = status.getRunState(); int flakyTaskTrackers = job.getNoOfBlackListedTrackers(); out.print("<b>User:</b> " + profile.getUser() + "<br>\n"); out.print("<b>Job Name:</b> " + 
profile.getJobName() + "<br>\n"); out.print("<b>Job File:</b> <a href=\"jobconf.jsp?jobid=" + jobId + "\">" + profile.getJobFile() + "</a><br>\n"); out.print("<b>Job Setup:</b>"); printJobLevelTaskSummary(out, jobId, "setup", job.getSetupTasks()); out.print("<br>\n"); if (runState == JobStatus.RUNNING) { out.print("<b>Status:</b> Running<br>\n"); out.print("<b>Started at:</b> " + new Date(job.getStartTime()) + "<br>\n"); out.print("<b>Running for:</b> " + StringUtils.formatTimeDiff( System.currentTimeMillis(), job.getStartTime()) + "<br>\n"); } else { if (runState == JobStatus.SUCCEEDED) { out.print("<b>Status:</b> Succeeded<br>\n"); out.print("<b>Started at:</b> " + new Date(job.getStartTime()) + "<br>\n"); out.print("<b>Finished at:</b> " + new Date(job.getFinishTime()) + "<br>\n"); out.print("<b>Finished in:</b> " + StringUtils.formatTimeDiff( job.getFinishTime(), job.getStartTime()) + "<br>\n"); } else if (runState == JobStatus.FAILED) { out.print("<b>Status:</b> Failed<br>\n"); out.print("<b>Started at:</b> " + new Date(job.getStartTime()) + "<br>\n"); out.print("<b>Failed at:</b> " + new Date(job.getFinishTime()) + "<br>\n"); out.print("<b>Failed in:</b> " + StringUtils.formatTimeDiff( job.getFinishTime(), job.getStartTime()) + "<br>\n"); } else if (runState == JobStatus.KILLED) { out.print("<b>Status:</b> Killed<br>\n"); out.print("<b>Started at:</b> " + new Date(job.getStartTime()) + "<br>\n"); out.print("<b>Killed at:</b> " + new Date(job.getFinishTime()) + "<br>\n"); out.print("<b>Killed in:</b> " + StringUtils.formatTimeDiff( job.getFinishTime(), job.getStartTime()) + "<br>\n"); } } out.print("<b>Job Cleanup:</b>"); printJobLevelTaskSummary(out, jobId, "cleanup", job.getCleanupTasks()); out.print("<br>\n"); if (flakyTaskTrackers > 0) { out.print("<b>Black-listed TaskTrackers:</b> " + "<a href=\"jobblacklistedtrackers.jsp?jobid=" + jobId + "\">" + flakyTaskTrackers + "</a><br>\n"); } if (job.getSchedulingInfo() != null) { out.print("<b>Job Scheduling 
information: </b>" + job.getSchedulingInfo().toString() +"\n"); } out.print("<hr>\n"); out.print("<table border=2 cellpadding=\"5\" cellspacing=\"2\">"); out.print("<tr><th>Kind</th><th>% Complete</th><th>Num Tasks</th>" + "<th>Pending</th><th>Running</th><th>Complete</th>" + "<th>Killed</th>" + "<th><a href=\"jobfailures.jsp?jobid=" + jobId + "\">Failed/Killed<br>Task Attempts</a></th></tr>\n"); printTaskSummary(out, jobId, "map", status.mapProgress(), job.getMapTasks()); printTaskSummary(out, jobId, "reduce", status.reduceProgress(), job.getReduceTasks()); out.print("</table>\n"); %> <p/> <table border=2 cellpadding="5" cellspacing="2"> <tr> <th><br/></th> <th>Counter</th> <th>Map</th> <th>Reduce</th> <th>Total</th> </tr> <% Counters mapCounters = job.getMapCounters(); Counters reduceCounters = job.getReduceCounters(); Counters totalCounters = job.getCounters(); for (String groupName : totalCounters.getGroupNames()) { Counters.Group totalGroup = totalCounters.getGroup(groupName); Counters.Group mapGroup = mapCounters.getGroup(groupName); Counters.Group reduceGroup = reduceCounters.getGroup(groupName); Format decimal = new DecimalFormat(); boolean isFirst = true; for (Counters.Counter counter : totalGroup) { String name = counter.getDisplayName(); String mapValue = decimal.format(mapGroup.getCounter(name)); String reduceValue = decimal.format(reduceGroup.getCounter(name)); String totalValue = decimal.format(counter.getCounter()); %> <tr> <% if (isFirst) { isFirst = false; %> <td rowspan="<%=totalGroup.size()%>"><%=totalGroup.getDisplayName()%></td> <% } %> <td><%=name%></td> <td align="right"><%=mapValue%></td> <td align="right"><%=reduceValue%></td> <td align="right"><%=totalValue%></td> </tr> <% } } %> </table> <hr>Map Completion Graph - <% if("off".equals(request.getParameter("map.graph"))) { session.setAttribute("map.graph", "off"); } else if("on".equals(request.getParameter("map.graph"))){ session.setAttribute("map.graph", "on"); } 
if("off".equals(request.getParameter("reduce.graph"))) { session.setAttribute("reduce.graph", "off"); } else if("on".equals(request.getParameter("reduce.graph"))){ session.setAttribute("reduce.graph", "on"); } if("off".equals(session.getAttribute("map.graph"))) { %> <a href="/jobdetails.jsp?jobid=<%=jobId%>&refresh=<%=refresh%>&map.graph=on" > open </a> <%} else { %> <a href="/jobdetails.jsp?jobid=<%=jobId%>&refresh=<%=refresh%>&map.graph=off" > close </a> <br><embed src="/taskgraph?type=map&jobid=<%=jobId%>" width="<%=TaskGraphServlet.width + 2 * TaskGraphServlet.xmargin%>" height="<%=TaskGraphServlet.height + 3 * TaskGraphServlet.ymargin%>" style="width:100%" type="image/svg+xml" pluginspage="http://www.adobe.com/svg/viewer/install/" /> <%}%> <%if(job.getReduceTasks().length > 0) { %> <hr>Reduce Completion Graph - <%if("off".equals(session.getAttribute("reduce.graph"))) { %> <a href="/jobdetails.jsp?jobid=<%=jobId%>&refresh=<%=refresh%>&reduce.graph=on" > open </a> <%} else { %> <a href="/jobdetails.jsp?jobid=<%=jobId%>&refresh=<%=refresh%>&reduce.graph=off" > close </a> <br><embed src="/taskgraph?type=reduce&jobid=<%=jobId%>" width="<%=TaskGraphServlet.width + 2 * TaskGraphServlet.xmargin%>" height="<%=TaskGraphServlet.height + 3 * TaskGraphServlet.ymargin%>" style="width:100%" type="image/svg+xml" pluginspage="http://www.adobe.com/svg/viewer/install/" /> <%} }%> <hr> <% if(JSPUtil.conf.getBoolean(PRIVATE_ACTIONS_KEY, false)) { %> <table border="0"> <tr> <td> Change priority from <%=job.getPriority()%> to: <form action="jobdetails.jsp" method="post"> <input type="hidden" name="action" value="changeprio"/> <input type="hidden" name="jobid" value="<%=jobId%>"/> </td><td> <select name="prio"> <% JobPriority jobPrio = job.getPriority(); for (JobPriority prio : JobPriority.values()) { if(jobPrio != prio) { %> <option value=<%=prio%>><%=prio%></option> <% } } %> </select> </td><td><input type="submit" value="Submit"> </form></td></tr> </table> <% } %> <table 
border="0"> <tr> <% if(JSPUtil.conf.getBoolean(PRIVATE_ACTIONS_KEY, false) && runState == JobStatus.RUNNING) { %> <br/><a href="jobdetails.jsp?action=confirm&jobid=<%=jobId%>"> Kill this job </a> <% } %> <hr> <hr> <a href="jobtracker.jsp">Go back to JobTracker</a><br> <% out.println(ServletUtil.htmlFooter()); %>
jaxlaw/hadoop-common
463ceb0ed12359e289361ed49520375201eee13a
HDFS-781 from https://issues.apache.org/jira/secure/attachment/12426993/hdfs-781.rel20.1.patch.
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index e8504d4..0933533 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,431 +1,433 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383001 + HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. (suresh) + HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. 
Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. 
(Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. 
Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. 
yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. 
http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. 
http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. 
http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. 
Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index a37e313..48a4118 100644 --- a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2213,1024 +2213,1025 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean { /** * The given node has reported in. This method should: * 1) Record the heartbeat, so the datanode isn't timed out * 2) Adjust usage stats for future block allocation * * If a substantial amount of time passed since the last datanode * heartbeat then request an immediate block report. * * @return an array of datanode commands * @throws IOException */ DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, long capacity, long dfsUsed, long remaining, int xceiverCount, int xmitsInProgress) throws IOException { DatanodeCommand cmd = null; synchronized (heartbeats) { synchronized (datanodeMap) { DatanodeDescriptor nodeinfo = null; try { nodeinfo = getDatanode(nodeReg); } catch(UnregisteredDatanodeException e) { return new DatanodeCommand[]{DatanodeCommand.REGISTER}; } // Check if this datanode should actually be shutdown instead. 
if (nodeinfo != null && shouldNodeShutdown(nodeinfo)) { setDatanodeDead(nodeinfo); throw new DisallowedDatanodeException(nodeinfo); } if (nodeinfo == null || !nodeinfo.isAlive) { return new DatanodeCommand[]{DatanodeCommand.REGISTER}; } updateStats(nodeinfo, false); nodeinfo.updateHeartbeat(capacity, dfsUsed, remaining, xceiverCount); updateStats(nodeinfo, true); //check lease recovery cmd = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE); if (cmd != null) { return new DatanodeCommand[] {cmd}; } ArrayList<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>(2); //check pending replication cmd = nodeinfo.getReplicationCommand( maxReplicationStreams - xmitsInProgress); if (cmd != null) { cmds.add(cmd); } //check block invalidation cmd = nodeinfo.getInvalidateBlocks(blockInvalidateLimit); if (cmd != null) { cmds.add(cmd); } if (!cmds.isEmpty()) { return cmds.toArray(new DatanodeCommand[cmds.size()]); } } } //check distributed upgrade cmd = getDistributedUpgradeCommand(); if (cmd != null) { return new DatanodeCommand[] {cmd}; } return null; } private void updateStats(DatanodeDescriptor node, boolean isAdded) { // // The statistics are protected by the heartbeat lock // assert(Thread.holdsLock(heartbeats)); if (isAdded) { capacityTotal += node.getCapacity(); capacityUsed += node.getDfsUsed(); capacityRemaining += node.getRemaining(); totalLoad += node.getXceiverCount(); } else { capacityTotal -= node.getCapacity(); capacityUsed -= node.getDfsUsed(); capacityRemaining -= node.getRemaining(); totalLoad -= node.getXceiverCount(); } } /** * Periodically calls heartbeatCheck(). */ class HeartbeatMonitor implements Runnable { /** */ public void run() { while (fsRunning) { try { heartbeatCheck(); } catch (Exception e) { FSNamesystem.LOG.error(StringUtils.stringifyException(e)); } try { Thread.sleep(heartbeatRecheckInterval); } catch (InterruptedException ie) { } } } } /** * Periodically calls computeReplicationWork(). 
*/ class ReplicationMonitor implements Runnable { static final int INVALIDATE_WORK_PCT_PER_ITERATION = 32; static final float REPLICATION_WORK_MULTIPLIER_PER_ITERATION = 2; public void run() { while (fsRunning) { try { computeDatanodeWork(); processPendingReplications(); Thread.sleep(replicationRecheckInterval); } catch (InterruptedException ie) { LOG.warn("ReplicationMonitor thread received InterruptedException." + ie); break; } catch (IOException ie) { LOG.warn("ReplicationMonitor thread received exception. " + ie); } catch (Throwable t) { LOG.warn("ReplicationMonitor thread received Runtime exception. " + t); Runtime.getRuntime().exit(-1); } } } } ///////////////////////////////////////////////////////// // // These methods are called by the Namenode system, to see // if there is any work for registered datanodes. // ///////////////////////////////////////////////////////// /** * Compute block replication and block invalidation work * that can be scheduled on data-nodes. * The datanode will be informed of this work at the next heartbeat. * * @return number of blocks scheduled for replication or removal. 
*/ public int computeDatanodeWork() throws IOException { int workFound = 0; int blocksToProcess = 0; int nodesToProcess = 0; // blocks should not be replicated or removed if safe mode is on if (isInSafeMode()) return workFound; synchronized(heartbeats) { blocksToProcess = (int)(heartbeats.size() * ReplicationMonitor.REPLICATION_WORK_MULTIPLIER_PER_ITERATION); nodesToProcess = (int)Math.ceil((double)heartbeats.size() * ReplicationMonitor.INVALIDATE_WORK_PCT_PER_ITERATION / 100); } workFound = computeReplicationWork(blocksToProcess); // Update FSNamesystemMetrics counters synchronized (this) { pendingReplicationBlocksCount = pendingReplications.size(); underReplicatedBlocksCount = neededReplications.size(); scheduledReplicationBlocksCount = workFound; corruptReplicaBlocksCount = corruptReplicas.size(); } workFound += computeInvalidateWork(nodesToProcess); return workFound; } private int computeInvalidateWork(int nodesToProcess) { int blockCnt = 0; for(int nodeCnt = 0; nodeCnt < nodesToProcess; nodeCnt++ ) { int work = invalidateWorkForOneNode(); if(work == 0) break; blockCnt += work; } return blockCnt; } /** * Scan blocks in {@link #neededReplications} and assign replication * work to data-nodes they belong to. * * The number of process blocks equals either twice the number of live * data-nodes or the number of under-replicated blocks whichever is less. * * @return number of blocks scheduled for replication during this iteration. 
*/ private int computeReplicationWork( int blocksToProcess) throws IOException { // Choose the blocks to be replicated List<List<Block>> blocksToReplicate = chooseUnderReplicatedBlocks(blocksToProcess); // replicate blocks int scheduledReplicationCount = 0; for (int i=0; i<blocksToReplicate.size(); i++) { for(Block block : blocksToReplicate.get(i)) { if (computeReplicationWorkForBlock(block, i)) { scheduledReplicationCount++; } } } return scheduledReplicationCount; } /** Get a list of block lists to be replicated * The index of block lists represents the * * @param blocksToProcess * @return Return a list of block lists to be replicated. * The block list index represents its replication priority. */ synchronized List<List<Block>> chooseUnderReplicatedBlocks(int blocksToProcess) { // initialize data structure for the return value List<List<Block>> blocksToReplicate = new ArrayList<List<Block>>(UnderReplicatedBlocks.LEVEL); for (int i=0; i<UnderReplicatedBlocks.LEVEL; i++) { blocksToReplicate.add(new ArrayList<Block>()); } synchronized(neededReplications) { if (neededReplications.size() == 0) { missingBlocksInCurIter = 0; missingBlocksInPrevIter = 0; return blocksToReplicate; } // Go through all blocks that need replications. BlockIterator neededReplicationsIterator = neededReplications.iterator(); // skip to the first unprocessed block, which is at replIndex for(int i=0; i < replIndex && neededReplicationsIterator.hasNext(); i++) { neededReplicationsIterator.next(); } // # of blocks to process equals either twice the number of live // data-nodes or the number of under-replicated blocks whichever is less blocksToProcess = Math.min(blocksToProcess, neededReplications.size()); for (int blkCnt = 0; blkCnt < blocksToProcess; blkCnt++, replIndex++) { if( ! 
neededReplicationsIterator.hasNext()) { // start from the beginning replIndex = 0; missingBlocksInPrevIter = missingBlocksInCurIter; missingBlocksInCurIter = 0; blocksToProcess = Math.min(blocksToProcess, neededReplications.size()); if(blkCnt >= blocksToProcess) break; neededReplicationsIterator = neededReplications.iterator(); assert neededReplicationsIterator.hasNext() : "neededReplications should not be empty."; } Block block = neededReplicationsIterator.next(); int priority = neededReplicationsIterator.getPriority(); if (priority < 0 || priority >= blocksToReplicate.size()) { LOG.warn("Unexpected replication priority: " + priority + " " + block); } else { blocksToReplicate.get(priority).add(block); } } // end for } // end synchronized return blocksToReplicate; } /** Replicate a block * * @param block block to be replicated * @param priority a hint of its priority in the neededReplication queue * @return if the block gets replicated or not */ boolean computeReplicationWorkForBlock(Block block, int priority) { int requiredReplication, numEffectiveReplicas; List<DatanodeDescriptor> containingNodes; DatanodeDescriptor srcNode; synchronized (this) { synchronized (neededReplications) { // block should belong to a file INodeFile fileINode = blocksMap.getINode(block); // abandoned block or block reopened for append if(fileINode == null || fileINode.isUnderConstruction()) { neededReplications.remove(block, priority); // remove from neededReplications replIndex--; return false; } requiredReplication = fileINode.getReplication(); // get a source data-node containingNodes = new ArrayList<DatanodeDescriptor>(); NumberReplicas numReplicas = new NumberReplicas(); srcNode = chooseSourceDatanode(block, containingNodes, numReplicas); if ((numReplicas.liveReplicas() + numReplicas.decommissionedReplicas()) <= 0) { missingBlocksInCurIter++; } if(srcNode == null) // block can not be replicated from any node return false; // do not schedule more if enough replicas is already pending 
numEffectiveReplicas = numReplicas.liveReplicas() + pendingReplications.getNumReplicas(block); if(numEffectiveReplicas >= requiredReplication) { neededReplications.remove(block, priority); // remove from neededReplications replIndex--; NameNode.stateChangeLog.info("BLOCK* " + "Removing block " + block + " from neededReplications as it has enough replicas."); return false; } } } // choose replication targets: NOT HODING THE GLOBAL LOCK DatanodeDescriptor targets[] = replicator.chooseTarget( requiredReplication - numEffectiveReplicas, srcNode, containingNodes, null, block.getNumBytes()); if(targets.length == 0) return false; synchronized (this) { synchronized (neededReplications) { // Recheck since global lock was released // block should belong to a file INodeFile fileINode = blocksMap.getINode(block); // abandoned block or block reopened for append if(fileINode == null || fileINode.isUnderConstruction()) { neededReplications.remove(block, priority); // remove from neededReplications replIndex--; return false; } requiredReplication = fileINode.getReplication(); // do not schedule more if enough replicas is already pending NumberReplicas numReplicas = countNodes(block); numEffectiveReplicas = numReplicas.liveReplicas() + pendingReplications.getNumReplicas(block); if(numEffectiveReplicas >= requiredReplication) { neededReplications.remove(block, priority); // remove from neededReplications replIndex--; NameNode.stateChangeLog.info("BLOCK* " + "Removing block " + block + " from neededReplications as it has enough replicas."); return false; } // Add block to the to be replicated list srcNode.addBlockToBeReplicated(block, targets); for (DatanodeDescriptor dn : targets) { dn.incBlocksScheduled(); } // Move the block-replication into a "pending" state. // The reason we use 'pending' is so we can retry // replications that fail after an appropriate amount of time. 
pendingReplications.add(block, targets.length); NameNode.stateChangeLog.debug( "BLOCK* block " + block + " is moved from neededReplications to pendingReplications"); // remove from neededReplications if(numEffectiveReplicas + targets.length >= requiredReplication) { neededReplications.remove(block, priority); // remove from neededReplications replIndex--; } if (NameNode.stateChangeLog.isInfoEnabled()) { StringBuffer targetList = new StringBuffer("datanode(s)"); for (int k = 0; k < targets.length; k++) { targetList.append(' '); targetList.append(targets[k].getName()); } NameNode.stateChangeLog.info( "BLOCK* ask " + srcNode.getName() + " to replicate " + block + " to " + targetList); NameNode.stateChangeLog.debug( "BLOCK* neededReplications = " + neededReplications.size() + " pendingReplications = " + pendingReplications.size()); } } } return true; } /** * Parse the data-nodes the block belongs to and choose one, * which will be the replication source. * * We prefer nodes that are in DECOMMISSION_INPROGRESS state to other nodes * since the former do not have write traffic and hence are less busy. * We do not use already decommissioned nodes as a source. * Otherwise we choose a random node among those that did not reach their * replication limit. * * In addition form a list of all nodes containing the block * and calculate its replication numbers. 
*/ private DatanodeDescriptor chooseSourceDatanode( Block block, List<DatanodeDescriptor> containingNodes, NumberReplicas numReplicas) { containingNodes.clear(); DatanodeDescriptor srcNode = null; int live = 0; int decommissioned = 0; int corrupt = 0; int excess = 0; Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block); Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(block); while(it.hasNext()) { DatanodeDescriptor node = it.next(); Collection<Block> excessBlocks = excessReplicateMap.get(node.getStorageID()); if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) corrupt++; else if (node.isDecommissionInProgress() || node.isDecommissioned()) decommissioned++; else if (excessBlocks != null && excessBlocks.contains(block)) { excess++; } else { live++; } containingNodes.add(node); // Check if this replica is corrupt // If so, do not select the node as src node if ((nodesCorrupt != null) && nodesCorrupt.contains(node)) continue; if(node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams) continue; // already reached replication limit // the block must not be scheduled for removal on srcNode if(excessBlocks != null && excessBlocks.contains(block)) continue; // never use already decommissioned nodes if(node.isDecommissioned()) continue; // we prefer nodes that are in DECOMMISSION_INPROGRESS state if(node.isDecommissionInProgress() || srcNode == null) { srcNode = node; continue; } if(srcNode.isDecommissionInProgress()) continue; // switch to a different node randomly // this to prevent from deterministically selecting the same node even // if the node failed to replicate the block on previous iterations if(r.nextBoolean()) srcNode = node; } if(numReplicas != null) numReplicas.initialize(live, decommissioned, corrupt, excess); return srcNode; } /** * Get blocks to invalidate for the first node * in {@link #recentInvalidateSets}. * * @return number of blocks scheduled for removal during this iteration. 
*/ private synchronized int invalidateWorkForOneNode() { // blocks should not be replicated or removed if safe mode is on if (isInSafeMode()) return 0; if(recentInvalidateSets.isEmpty()) return 0; // get blocks to invalidate for the first node String firstNodeId = recentInvalidateSets.keySet().iterator().next(); assert firstNodeId != null; DatanodeDescriptor dn = datanodeMap.get(firstNodeId); if (dn == null) { removeFromInvalidates(firstNodeId); return 0; } Collection<Block> invalidateSet = recentInvalidateSets.get(firstNodeId); if(invalidateSet == null) return 0; ArrayList<Block> blocksToInvalidate = new ArrayList<Block>(blockInvalidateLimit); // # blocks that can be sent in one message is limited Iterator<Block> it = invalidateSet.iterator(); for(int blkCount = 0; blkCount < blockInvalidateLimit && it.hasNext(); blkCount++) { blocksToInvalidate.add(it.next()); it.remove(); } // If we send everything in this message, remove this node entry if (!it.hasNext()) { removeFromInvalidates(firstNodeId); } dn.addBlocksToBeInvalidated(blocksToInvalidate); if(NameNode.stateChangeLog.isInfoEnabled()) { StringBuffer blockList = new StringBuffer(); for(Block blk : blocksToInvalidate) { blockList.append(' '); blockList.append(blk); } NameNode.stateChangeLog.info("BLOCK* ask " + dn.getName() + " to delete " + blockList); } + pendingDeletionBlocksCount -= blocksToInvalidate.size(); return blocksToInvalidate.size(); } public void setNodeReplicationLimit(int limit) { this.maxReplicationStreams = limit; } /** * If there were any replication requests that timed out, reap them * and put them back into the neededReplication queue */ void processPendingReplications() { Block[] timedOutItems = pendingReplications.getTimedOutBlocks(); if (timedOutItems != null) { synchronized (this) { for (int i = 0; i < timedOutItems.length; i++) { NumberReplicas num = countNodes(timedOutItems[i]); neededReplications.add(timedOutItems[i], num.liveReplicas(), num.decommissionedReplicas(), 
getReplication(timedOutItems[i])); } } /* If we know the target datanodes where the replication timedout, * we could invoke decBlocksScheduled() on it. Its ok for now. */ } } /** * remove a datanode descriptor * @param nodeID datanode ID */ synchronized public void removeDatanode(DatanodeID nodeID) throws IOException { DatanodeDescriptor nodeInfo = getDatanode(nodeID); if (nodeInfo != null) { removeDatanode(nodeInfo); } else { NameNode.stateChangeLog.warn("BLOCK* NameSystem.removeDatanode: " + nodeID.getName() + " does not exist"); } } /** * remove a datanode descriptor * @param nodeInfo datanode descriptor */ private void removeDatanode(DatanodeDescriptor nodeInfo) { synchronized (heartbeats) { if (nodeInfo.isAlive) { updateStats(nodeInfo, false); heartbeats.remove(nodeInfo); nodeInfo.isAlive = false; } } for (Iterator<Block> it = nodeInfo.getBlockIterator(); it.hasNext();) { removeStoredBlock(it.next(), nodeInfo); } unprotectedRemoveDatanode(nodeInfo); clusterMap.remove(nodeInfo); } void unprotectedRemoveDatanode(DatanodeDescriptor nodeDescr) { nodeDescr.resetBlocks(); removeFromInvalidates(nodeDescr.getStorageID()); NameNode.stateChangeLog.debug( "BLOCK* NameSystem.unprotectedRemoveDatanode: " + nodeDescr.getName() + " is out of service now."); } void unprotectedAddDatanode(DatanodeDescriptor nodeDescr) { /* To keep host2DataNodeMap consistent with datanodeMap, remove from host2DataNodeMap the datanodeDescriptor removed from datanodeMap before adding nodeDescr to host2DataNodeMap. */ host2DataNodeMap.remove( datanodeMap.put(nodeDescr.getStorageID(), nodeDescr)); host2DataNodeMap.add(nodeDescr); NameNode.stateChangeLog.debug( "BLOCK* NameSystem.unprotectedAddDatanode: " + "node " + nodeDescr.getName() + " is added to datanodeMap."); } /** * Physically remove node from datanodeMap. 
* * @param nodeID node */ void wipeDatanode(DatanodeID nodeID) throws IOException { String key = nodeID.getStorageID(); host2DataNodeMap.remove(datanodeMap.remove(key)); NameNode.stateChangeLog.debug( "BLOCK* NameSystem.wipeDatanode: " + nodeID.getName() + " storage " + key + " is removed from datanodeMap."); } FSImage getFSImage() { return dir.fsImage; } FSEditLog getEditLog() { return getFSImage().getEditLog(); } /** * Check if there are any expired heartbeats, and if so, * whether any blocks have to be re-replicated. * While removing dead datanodes, make sure that only one datanode is marked * dead at a time within the synchronized section. Otherwise, a cascading * effect causes more datanodes to be declared dead. */ void heartbeatCheck() { boolean allAlive = false; while (!allAlive) { boolean foundDead = false; DatanodeID nodeID = null; // locate the first dead node. synchronized(heartbeats) { for (Iterator<DatanodeDescriptor> it = heartbeats.iterator(); it.hasNext();) { DatanodeDescriptor nodeInfo = it.next(); if (isDatanodeDead(nodeInfo)) { foundDead = true; nodeID = nodeInfo; break; } } } // acquire the fsnamesystem lock, and then remove the dead node. if (foundDead) { synchronized (this) { synchronized(heartbeats) { synchronized (datanodeMap) { DatanodeDescriptor nodeInfo = null; try { nodeInfo = getDatanode(nodeID); } catch (IOException e) { nodeInfo = null; } if (nodeInfo != null && isDatanodeDead(nodeInfo)) { NameNode.stateChangeLog.info("BLOCK* NameSystem.heartbeatCheck: " + "lost heartbeat from " + nodeInfo.getName()); removeDatanode(nodeInfo); } } } } } allAlive = !foundDead; } } /** * The given node is reporting all its blocks. Use this info to * update the (machine-->blocklist) and (block-->machinelist) tables. 
*/ public synchronized void processReport(DatanodeID nodeID, BlockListAsLongs newReport ) throws IOException { long startTime = now(); if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("BLOCK* NameSystem.processReport: " + "from " + nodeID.getName()+" " + newReport.getNumberOfBlocks()+" blocks"); } DatanodeDescriptor node = getDatanode(nodeID); if (node == null) { throw new IOException("ProcessReport from unregisterted node: " + nodeID.getName()); } // Check if this datanode should actually be shutdown instead. if (shouldNodeShutdown(node)) { setDatanodeDead(node); throw new DisallowedDatanodeException(node); } // // Modify the (block-->datanode) map, according to the difference // between the old and new block report. // Collection<Block> toAdd = new LinkedList<Block>(); Collection<Block> toRemove = new LinkedList<Block>(); Collection<Block> toInvalidate = new LinkedList<Block>(); node.reportDiff(blocksMap, newReport, toAdd, toRemove, toInvalidate); for (Block b : toRemove) { removeStoredBlock(b, node); } for (Block b : toAdd) { addStoredBlock(b, node, null); } for (Block b : toInvalidate) { NameNode.stateChangeLog.info("BLOCK* NameSystem.processReport: block " + b + " on " + node.getName() + " size " + b.getNumBytes() + " does not belong to any file."); addToInvalidates(b, node); } NameNode.getNameNodeMetrics().blockReport.inc((int) (now() - startTime)); } /** * Modify (block-->datanode) map. Remove block from set of * needed replications if this takes care of the problem. * @return the block that is stored in blockMap. */ synchronized Block addStoredBlock(Block block, DatanodeDescriptor node, DatanodeDescriptor delNodeHint) { BlockInfo storedBlock = blocksMap.getStoredBlock(block); if(storedBlock == null || storedBlock.getINode() == null) { // If this block does not belong to anyfile, then we are done. 
NameNode.stateChangeLog.info("BLOCK* NameSystem.addStoredBlock: " + "addStoredBlock request received for " + block + " on " + node.getName() + " size " + block.getNumBytes() + " But it does not belong to any file."); // we could add this block to invalidate set of this datanode. // it will happen in next block report otherwise. return block; } // add block to the data-node boolean added = node.addBlock(storedBlock); assert storedBlock != null : "Block must be stored by now"; if (block != storedBlock) { if (block.getNumBytes() >= 0) { long cursize = storedBlock.getNumBytes(); if (cursize == 0) { storedBlock.setNumBytes(block.getNumBytes()); } else if (cursize != block.getNumBytes()) { LOG.warn("Inconsistent size for block " + block + " reported from " + node.getName() + " current size is " + cursize + " reported size is " + block.getNumBytes()); try { if (cursize > block.getNumBytes()) { // new replica is smaller in size than existing block. // Mark the new replica as corrupt. LOG.warn("Mark new replica " + block + " from " + node.getName() + "as corrupt because its length is shorter than existing ones"); markBlockAsCorrupt(block, node); } else { // new replica is larger in size than existing block. // Mark pre-existing replicas as corrupt. int numNodes = blocksMap.numNodes(block); int count = 0; DatanodeDescriptor nodes[] = new DatanodeDescriptor[numNodes]; Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block); for (; it != null && it.hasNext(); ) { DatanodeDescriptor dd = it.next(); if (!dd.equals(node)) { nodes[count++] = dd; } } for (int j = 0; j < count; j++) { LOG.warn("Mark existing replica " + block + " from " + node.getName() + " as corrupt because its length is shorter than the new one"); markBlockAsCorrupt(block, nodes[j]); } // // change the size of block in blocksMap // storedBlock = blocksMap.getStoredBlock(block); //extra look up! 
if (storedBlock == null) { LOG.warn("Block " + block + " reported from " + node.getName() + " does not exist in blockMap. Surprise! Surprise!"); } else { storedBlock.setNumBytes(block.getNumBytes()); } } } catch (IOException e) { LOG.warn("Error in deleting bad block " + block + e); } } //Updated space consumed if required. INodeFile file = (storedBlock != null) ? storedBlock.getINode() : null; long diff = (file == null) ? 0 : (file.getPreferredBlockSize() - storedBlock.getNumBytes()); if (diff > 0 && file.isUnderConstruction() && cursize < storedBlock.getNumBytes()) { try { String path = /* For finding parents */ leaseManager.findPath((INodeFileUnderConstruction)file); dir.updateSpaceConsumed(path, 0, -diff*file.getReplication()); } catch (IOException e) { LOG.warn("Unexpected exception while updating disk space : " + e.getMessage()); } } } block = storedBlock; } assert storedBlock == block : "Block must be stored by now"; int curReplicaDelta = 0; if (added) { curReplicaDelta = 1; // // At startup time, because too many new blocks come in // they take up lots of space in the log file. // So, we log only when namenode is out of safemode. // if (!isInSafeMode()) { NameNode.stateChangeLog.info("BLOCK* NameSystem.addStoredBlock: " +"blockMap updated: "+node.getName()+" is added to "+block+" size "+block.getNumBytes()); } } else { NameNode.stateChangeLog.warn("BLOCK* NameSystem.addStoredBlock: " + "Redundant addStoredBlock request received for " + block + " on " + node.getName() + " size " + block.getNumBytes()); } // filter out containingNodes that are marked for decommission. NumberReplicas num = countNodes(storedBlock); int numLiveReplicas = num.liveReplicas(); int numCurrentReplica = numLiveReplicas + pendingReplications.getNumReplicas(block); // check whether safe replication is reached for the block incrementSafeBlockCount(numCurrentReplica); // // if file is being actively written to, then do not check // replication-factor here. 
It will be checked when the file is closed. // INodeFile fileINode = null; fileINode = storedBlock.getINode(); if (fileINode.isUnderConstruction()) { return block; } // do not handle mis-replicated blocks during startup if(isInSafeMode()) return block; // handle underReplication/overReplication short fileReplication = fileINode.getReplication(); if (numCurrentReplica >= fileReplication) { neededReplications.remove(block, numCurrentReplica, num.decommissionedReplicas, fileReplication); } else { updateNeededReplications(block, curReplicaDelta, 0); } if (numCurrentReplica > fileReplication) { processOverReplicatedBlock(block, fileReplication, node, delNodeHint); } // If the file replication has reached desired value // we can remove any corrupt replicas the block may have int corruptReplicasCount = corruptReplicas.numCorruptReplicas(block); int numCorruptNodes = num.corruptReplicas(); if ( numCorruptNodes != corruptReplicasCount) { LOG.warn("Inconsistent number of corrupt replicas for " + block + "blockMap has " + numCorruptNodes + " but corrupt replicas map has " + corruptReplicasCount); } if ((corruptReplicasCount > 0) && (numLiveReplicas >= fileReplication)) invalidateCorruptReplicas(block); return block; } /** * Invalidate corrupt replicas. * <p> * This will remove the replicas from the block's location list, * add them to {@link #recentInvalidateSets} so that they could be further * deleted from the respective data-nodes, * and remove the block from corruptReplicasMap. * <p> * This method should be called when the block has sufficient * number of live replicas. 
* * @param blk Block whose corrupt replicas need to be invalidated */ void invalidateCorruptReplicas(Block blk) { Collection<DatanodeDescriptor> nodes = corruptReplicas.getNodes(blk); boolean gotException = false; if (nodes == null) return; for (Iterator<DatanodeDescriptor> it = nodes.iterator(); it.hasNext(); ) { DatanodeDescriptor node = it.next(); try { invalidateBlock(blk, node); } catch (IOException e) { NameNode.stateChangeLog.info("NameNode.invalidateCorruptReplicas " + "error in deleting bad block " + blk + " on " + node + e); gotException = true; } } // Remove the block from corruptReplicasMap if (!gotException) corruptReplicas.removeFromCorruptReplicasMap(blk); } /** * For each block in the name-node verify whether it belongs to any file, * over or under replicated. Place it into the respective queue. */ private synchronized void processMisReplicatedBlocks() { long nrInvalid = 0, nrOverReplicated = 0, nrUnderReplicated = 0; neededReplications.clear(); for(BlocksMap.BlockInfo block : blocksMap.getBlocks()) { INodeFile fileINode = block.getINode(); if(fileINode == null) { // block does not belong to any file nrInvalid++; addToInvalidates(block); continue; } // calculate current replication short expectedReplication = fileINode.getReplication(); NumberReplicas num = countNodes(block); int numCurrentReplica = num.liveReplicas(); // add to under-replicated queue if need to be if (neededReplications.add(block, numCurrentReplica, num.decommissionedReplicas(), expectedReplication)) { nrUnderReplicated++; } if (numCurrentReplica > expectedReplication) { // over-replicated block nrOverReplicated++; processOverReplicatedBlock(block, expectedReplication, null, null); } } LOG.info("Total number of blocks = " + blocksMap.size()); LOG.info("Number of invalid blocks = " + nrInvalid); LOG.info("Number of under-replicated blocks = " + nrUnderReplicated); LOG.info("Number of over-replicated blocks = " + nrOverReplicated); } /** * Find how many of the containing nodes are 
"extra", if any. * If there are any extras, call chooseExcessReplicates() to * mark them in the excessReplicateMap. */ private void processOverReplicatedBlock(Block block, short replication, DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) { if(addedNode == delNodeHint) { delNodeHint = null; } Collection<DatanodeDescriptor> nonExcess = new ArrayList<DatanodeDescriptor>(); Collection<DatanodeDescriptor> corruptNodes = corruptReplicas.getNodes(block); for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block); it.hasNext();) { DatanodeDescriptor cur = it.next(); Collection<Block> excessBlocks = excessReplicateMap.get(cur.getStorageID()); if (excessBlocks == null || !excessBlocks.contains(block)) { if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) { // exclude corrupt replicas if (corruptNodes == null || !corruptNodes.contains(cur)) { nonExcess.add(cur); } } } } chooseExcessReplicates(nonExcess, block, replication, addedNode, delNodeHint); } /** * We want "replication" replicates for the block, but we now have too many. * In this method, copy enough nodes from 'srcNodes' into 'dstNodes' such that: * * srcNodes.size() - dstNodes.size() == replication * * We pick node that make sure that replicas are spread across racks and * also try hard to pick one with least free space. * The algorithm is first to pick a node with least free space from nodes * that are on a rack holding more than one replicas of the block. * So removing such a replica won't remove a rack. 
* If no such a node is available, * then pick a node with least free space */ void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess, Block b, short replication, DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) { // first form a rack to datanodes map and HashMap<String, ArrayList<DatanodeDescriptor>> rackMap = new HashMap<String, ArrayList<DatanodeDescriptor>>(); for (Iterator<DatanodeDescriptor> iter = nonExcess.iterator(); iter.hasNext();) { DatanodeDescriptor node = iter.next(); String rackName = node.getNetworkLocation(); ArrayList<DatanodeDescriptor> datanodeList = rackMap.get(rackName); if(datanodeList==null) { datanodeList = new ArrayList<DatanodeDescriptor>(); } datanodeList.add(node); rackMap.put(rackName, datanodeList); } // split nodes into two sets // priSet contains nodes on rack with more than one replica // remains contains the remaining nodes diff --git a/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java index 6fd7fc3..a79eb31 100644 --- a/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java +++ b/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java @@ -1,151 +1,168 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode.metrics; import java.io.IOException; import java.util.Random; import junit.framework.TestCase; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; /** * Test for metrics published by the Namenode */ public class TestNameNodeMetrics extends TestCase { private static final Configuration CONF = new Configuration(); + private static final int DFS_REPLICATION_INTERVAL = 1; + private static final Path TEST_ROOT_DIR_PATH = + new Path(System.getProperty("test.build.data", "build/test/data")); + + // Number of datanodes in the cluster + private static final int DATANODE_COUNT = 3; static { CONF.setLong("dfs.block.size", 100); CONF.setInt("io.bytes.per.checksum", 1); - CONF.setLong("dfs.heartbeat.interval", 1L); - CONF.setInt("dfs.replication.interval", 1); + CONF.setLong("dfs.heartbeat.interval", DFS_REPLICATION_INTERVAL); + CONF.setInt("dfs.replication.interval", DFS_REPLICATION_INTERVAL); } private MiniDFSCluster cluster; private FSNamesystemMetrics metrics; private DistributedFileSystem fs; private Random rand = new Random(); private FSNamesystem namesystem; + private static Path getTestPath(String fileName) { + return new Path(TEST_ROOT_DIR_PATH, fileName); + } + @Override protected void setUp() throws Exception { - cluster = new MiniDFSCluster(CONF, 3, true, null); + cluster = new MiniDFSCluster(CONF, DATANODE_COUNT, true, null); cluster.waitActive(); namesystem = cluster.getNameNode().getNamesystem(); fs = (DistributedFileSystem) cluster.getFileSystem(); metrics = namesystem.getFSNamesystemMetrics(); } @Override protected void tearDown() 
throws Exception { cluster.shutdown(); } /** create a file with a length of <code>fileLen</code> */ - private void createFile(String fileName, long fileLen, short replicas) throws IOException { - Path filePath = new Path(fileName); - DFSTestUtil.createFile(fs, filePath, fileLen, replicas, rand.nextLong()); + private void createFile(Path file, long fileLen, short replicas) throws IOException { + DFSTestUtil.createFile(fs, file, fileLen, replicas, rand.nextLong()); } private void updateMetrics() throws Exception { // Wait for metrics update (corresponds to dfs.replication.interval // for some block related metrics to get updated) Thread.sleep(1000); metrics.doUpdates(null); } /** Test metrics associated with addition of a file */ public void testFileAdd() throws Exception { // Add files with 100 blocks - final String file = "/tmp/t"; + final Path file = getTestPath("testFileAdd"); createFile(file, 3200, (short)3); final int blockCount = 32; int blockCapacity = namesystem.getBlockCapacity(); updateMetrics(); assertEquals(blockCapacity, metrics.blockCapacity.get()); // Blocks are stored in a hashmap. Compute its capacity, which // doubles every time the number of entries reach the threshold. int threshold = (int)(blockCapacity * FSNamesystem.DEFAULT_MAP_LOAD_FACTOR); while (threshold < blockCount) { blockCapacity <<= 1; } updateMetrics(); - assertEquals(3, metrics.filesTotal.get()); + int filesTotal = file.depth() + 1; // Add 1 for root + assertEquals(filesTotal, metrics.filesTotal.get()); assertEquals(blockCount, metrics.blocksTotal.get()); assertEquals(blockCapacity, metrics.blockCapacity.get()); - fs.delete(new Path(file), true); + fs.delete(file, true); + filesTotal--; // reduce the filecount for deleted file + + // Wait for more than DATANODE_COUNT replication intervals to ensure all + // the blocks pending deletion are sent for deletion to the datanodes. 
+ Thread.sleep(DFS_REPLICATION_INTERVAL * (DATANODE_COUNT + 1) * 1000); + updateMetrics(); + assertEquals(filesTotal, metrics.filesTotal.get()); + assertEquals(0, metrics.pendingDeletionBlocks.get()); } /** Corrupt a block and ensure metrics reflects it */ public void testCorruptBlock() throws Exception { // Create a file with single block with two replicas - String file = "/tmp/t"; + final Path file = getTestPath("testCorruptBlock"); createFile(file, 100, (short)2); // Corrupt first replica of the block - LocatedBlock block = namesystem.getBlockLocations(file, 0, 1).get(0); + LocatedBlock block = namesystem.getBlockLocations(file.toString(), 0, 1).get(0); namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]); updateMetrics(); assertEquals(1, metrics.corruptBlocks.get()); assertEquals(1, metrics.pendingReplicationBlocks.get()); assertEquals(1, metrics.scheduledReplicationBlocks.get()); - fs.delete(new Path(file), true); + fs.delete(file, true); updateMetrics(); assertEquals(0, metrics.corruptBlocks.get()); assertEquals(0, metrics.pendingReplicationBlocks.get()); assertEquals(0, metrics.scheduledReplicationBlocks.get()); } /** Create excess blocks by reducing the replication factor for * for a file and ensure metrics reflects it */ public void testExcessBlocks() throws Exception { - String file = "/tmp/t"; + Path file = getTestPath("testExcessBlocks"); createFile(file, 100, (short)2); int totalBlocks = 1; - namesystem.setReplication(file, (short)1); + namesystem.setReplication(file.toString(), (short)1); updateMetrics(); assertEquals(totalBlocks, metrics.excessBlocks.get()); - assertEquals(totalBlocks, metrics.pendingDeletionBlocks.get()); - fs.delete(new Path(file), true); + fs.delete(file, true); } /** Test to ensure metrics reflects missing blocks */ public void testMissingBlock() throws Exception { // Create a file with single block with two replicas - String file = "/tmp/t"; + Path file = getTestPath("testMissingBlocks"); createFile(file, 
100, (short)1); // Corrupt the only replica of the block to result in a missing block - LocatedBlock block = namesystem.getBlockLocations(file, 0, 1).get(0); + LocatedBlock block = namesystem.getBlockLocations(file.toString(), 0, 1).get(0); namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]); updateMetrics(); assertEquals(1, metrics.underReplicatedBlocks.get()); assertEquals(1, metrics.missingBlocks.get()); - fs.delete(new Path(file), true); + fs.delete(file, true); updateMetrics(); assertEquals(0, metrics.underReplicatedBlocks.get()); } }
jaxlaw/hadoop-common
35c1c28db59a2c268faab79d33dce18c70d739ca
Added version number to yahoo-changes.txt
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 341602a..e8504d4 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,429 +1,431 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. + +yahoo-hadoop-0.20.1-3195383001 HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for unreferenced files in error conditions. (Amareshwari Sriramadasu via yhemanth) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. 
Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. 
Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. 
yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. 
yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. 
http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. 
http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. 
http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. 
Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch
jaxlaw/hadoop-common
54c0a28a4bf83e84651dfe5ac4a7cd7dcaa06fc9
MAPREDUCE:1140 from https://issues.apache.org/jira/secure/attachment/12426383/patch-1140-2-ydist.txt
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index cef35fb..341602a 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,425 +1,429 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up when the history viewer is accessed concurrently. (Amar Kamat via ddas) + MAPREDUCE-1140. Fix DistributedCache to not decrement reference counts for + unreferenced files in error conditions. + (Amareshwari Sriramadasu via yhemanth) + yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. 
Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. 
Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. 
yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. 
yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. 
http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. 
http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. 
http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. 
Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/core/org/apache/hadoop/filecache/DistributedCache.java b/src/core/org/apache/hadoop/filecache/DistributedCache.java index f9e9c88..394c847 100644 --- a/src/core/org/apache/hadoop/filecache/DistributedCache.java +++ b/src/core/org/apache/hadoop/filecache/DistributedCache.java @@ -1,762 +1,773 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.filecache; import org.apache.commons.logging.*; import java.io.*; import java.util.*; import org.apache.hadoop.conf.*; import org.apache.hadoop.util.*; import org.apache.hadoop.fs.*; import java.net.URI; /** * Distribute application-specific large, read-only files efficiently. * * <p><code>DistributedCache</code> is a facility provided by the Map-Reduce * framework to cache files (text, archives, jars etc.) needed by applications. * </p> * * <p>Applications specify the files, via urls (hdfs:// or http://) to be cached * via the {@link org.apache.hadoop.mapred.JobConf}. 
* The <code>DistributedCache</code> assumes that the * files specified via hdfs:// urls are already present on the * {@link FileSystem} at the path specified by the url.</p> * * <p>The framework will copy the necessary files on to the slave node before * any tasks for the job are executed on that node. Its efficiency stems from * the fact that the files are only copied once per job and the ability to * cache archives which are un-archived on the slaves.</p> * * <p><code>DistributedCache</code> can be used to distribute simple, read-only * data/text files and/or more complex types such as archives, jars etc. * Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes. * Jars may be optionally added to the classpath of the tasks, a rudimentary * software distribution mechanism. Files have execution permissions. * Optionally users can also direct it to symlink the distributed cache file(s) * into the working directory of the task.</p> * * <p><code>DistributedCache</code> tracks modification timestamps of the cache * files. Clearly the cache files should not be modified by the application * or externally while the job is executing.</p> * * <p>Here is an illustrative example on how to use the * <code>DistributedCache</code>:</p> * <p><blockquote><pre> * // Setting up the cache for the application * * 1. Copy the requisite files to the <code>FileSystem</code>: * * $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat * $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip * $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar * $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar * $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz * $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz * * 2. 
Setup the application's <code>JobConf</code>: * * JobConf job = new JobConf(); * DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"), * job); * DistributedCache.addCacheArchive(new URI("/myapp/map.zip", job); * DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job); * DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar", job); * DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz", job); * DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz", job); * * 3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper} * or {@link org.apache.hadoop.mapred.Reducer}: * * public static class MapClass extends MapReduceBase * implements Mapper&lt;K, V, K, V&gt; { * * private Path[] localArchives; * private Path[] localFiles; * * public void configure(JobConf job) { * // Get the cached archives/files * localArchives = DistributedCache.getLocalCacheArchives(job); * localFiles = DistributedCache.getLocalCacheFiles(job); * } * * public void map(K key, V value, * OutputCollector&lt;K, V&gt; output, Reporter reporter) * throws IOException { * // Use data from the cached archives/files here * // ... * // ... * output.collect(k, v); * } * } * * </pre></blockquote></p> * * @see org.apache.hadoop.mapred.JobConf * @see org.apache.hadoop.mapred.JobClient */ public class DistributedCache { // cacheID to cacheStatus mapping private static TreeMap<String, CacheStatus> cachedArchives = new TreeMap<String, CacheStatus>(); private static TreeMap<Path, Long> baseDirSize = new TreeMap<Path, Long>(); // default total cache size private static final long DEFAULT_CACHE_SIZE = 10737418240L; private static final Log LOG = LogFactory.getLog(DistributedCache.class); private static Random random = new Random(); /** * Get the locally cached file or archive; it could either be * previously cached (and valid) or copy it from the {@link FileSystem} now. 
* * @param cache the cache to be localized, this should be specified as * new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema * or hostname:port is provided the file is assumed to be in the filesystem * being used in the Configuration * @param conf The Confguration file which contains the filesystem * @param baseDir The base cache Dir where you wnat to localize the files/archives * @param fileStatus The file status on the dfs. * @param isArchive if the cache is an archive or a file. In case it is an * archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will * be unzipped/unjarred/untarred automatically * and the directory where the archive is unzipped/unjarred/untarred is * returned as the Path. * In case of a file, the path to the file is returned * @param confFileStamp this is the hdfs file modification timestamp to verify that the * file to be cached hasn't changed since the job started * @param currentWorkDir this is the directory where you would want to create symlinks * for the locally cached files/archives * @return the path to directory where the archives are unjarred in case of archives, * the path to the file where the file is copied locally * @throws IOException */ public static Path getLocalCache(URI cache, Configuration conf, Path baseDir, FileStatus fileStatus, boolean isArchive, long confFileStamp, Path currentWorkDir) throws IOException { return getLocalCache(cache, conf, baseDir, fileStatus, isArchive, confFileStamp, currentWorkDir, true, new LocalDirAllocator("mapred.local.dir")); } /** * Get the locally cached file or archive; it could either be * previously cached (and valid) or copy it from the {@link FileSystem} now. * * @param cache the cache to be localized, this should be specified as * new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). 
If no schema * or hostname:port is provided the file is assumed to be in the filesystem * being used in the Configuration * @param conf The Confguration file which contains the filesystem * @param subDir The sub cache Dir where you want to localize the files/archives * @param fileStatus The file status on the dfs. * @param isArchive if the cache is an archive or a file. In case it is an * archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will * be unzipped/unjarred/untarred automatically * and the directory where the archive is unzipped/unjarred/untarred is * returned as the Path. * In case of a file, the path to the file is returned * @param confFileStamp this is the hdfs file modification timestamp to verify that the * file to be cached hasn't changed since the job started * @param currentWorkDir this is the directory where you would want to create symlinks * for the locally cached files/archives * @param honorSymLinkConf if this is false, then the symlinks are not * created even if conf says so (this is required for an optimization in task * launches * @param lDirAllocator LocalDirAllocator of the tracker * @return the path to directory where the archives are unjarred in case of archives, * the path to the file where the file is copied locally * @throws IOException */ public static Path getLocalCache(URI cache, Configuration conf, Path subDir, FileStatus fileStatus, boolean isArchive, long confFileStamp, Path currentWorkDir, boolean honorSymLinkConf, LocalDirAllocator lDirAllocator) throws IOException { String key = getKey(cache, conf, confFileStamp); CacheStatus lcacheStatus; Path localizedPath; synchronized (cachedArchives) { lcacheStatus = cachedArchives.get(key); if (lcacheStatus == null) { // was never localized Path uniqueParentDir = new Path(subDir, String.valueOf(random.nextLong())); String cachePath = new Path(uniqueParentDir, makeRelative(cache, conf)).toString(); Path localPath = lDirAllocator.getLocalPathForWrite(cachePath, 
fileStatus.getLen(), conf); lcacheStatus = new CacheStatus(new Path(localPath.toString().replace(cachePath, "")), localPath, uniqueParentDir); cachedArchives.put(key, lcacheStatus); } lcacheStatus.refcount++; } - synchronized (lcacheStatus) { - if (!lcacheStatus.isInited()) { - localizedPath = localizeCache(conf, cache, confFileStamp, lcacheStatus, - fileStatus, isArchive); - lcacheStatus.initComplete(); - } else { - localizedPath = checkCacheStatusValidity(conf, cache, confFileStamp, - lcacheStatus, fileStatus, isArchive); - } - createSymlink(conf, cache, lcacheStatus, isArchive, - currentWorkDir, honorSymLinkConf); - } - - // try deleting stuff if you can - long size = 0; - synchronized (lcacheStatus) { - synchronized (baseDirSize) { - Long get = baseDirSize.get(lcacheStatus.getBaseDir()); - if ( get != null ) { - size = get.longValue(); + boolean initSuccessful = false; + try { + synchronized (lcacheStatus) { + if (!lcacheStatus.isInited()) { + localizedPath = localizeCache(conf, cache, confFileStamp, + lcacheStatus, fileStatus, isArchive); + lcacheStatus.initComplete(); } else { - LOG.warn("Cannot find size of baseDir: " + lcacheStatus.getBaseDir()); + localizedPath = checkCacheStatusValidity(conf, cache, confFileStamp, + lcacheStatus, fileStatus, isArchive); + } + createSymlink(conf, cache, lcacheStatus, isArchive, currentWorkDir, + honorSymLinkConf); + } + + // try deleting stuff if you can + long size = 0; + synchronized (lcacheStatus) { + synchronized (baseDirSize) { + Long get = baseDirSize.get(lcacheStatus.getBaseDir()); + if (get != null) { + size = get.longValue(); + } else { + LOG.warn("Cannot find size of baseDir: " + + lcacheStatus.getBaseDir()); + } + } + } + // setting the cache size to a default of 10GB + long allowedSize = conf.getLong("local.cache.size", DEFAULT_CACHE_SIZE); + if (allowedSize < size) { + // try some cache deletions + deleteCache(conf); + } + initSuccessful = true; + return localizedPath; + } finally { + if (!initSuccessful) { + 
synchronized (cachedArchives) { + lcacheStatus.refcount--; } } } - // setting the cache size to a default of 10GB - long allowedSize = conf.getLong("local.cache.size", DEFAULT_CACHE_SIZE); - if (allowedSize < size) { - // try some cache deletions - deleteCache(conf); - } - return localizedPath; } /** * Get the locally cached file or archive; it could either be * previously cached (and valid) or copy it from the {@link FileSystem} now. * * @param cache the cache to be localized, this should be specified as * new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema * or hostname:port is provided the file is assumed to be in the filesystem * being used in the Configuration * @param conf The Confguration file which contains the filesystem * @param baseDir The base cache Dir where you wnat to localize the files/archives * @param isArchive if the cache is an archive or a file. In case it is an * archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will * be unzipped/unjarred/untarred automatically * and the directory where the archive is unzipped/unjarred/untarred * is returned as the Path. * In case of a file, the path to the file is returned * @param confFileStamp this is the hdfs file modification timestamp to verify that the * file to be cached hasn't changed since the job started * @param currentWorkDir this is the directory where you would want to create symlinks * for the locally cached files/archives * @return the path to directory where the archives are unjarred in case of archives, * the path to the file where the file is copied locally * @throws IOException */ public static Path getLocalCache(URI cache, Configuration conf, Path baseDir, boolean isArchive, long confFileStamp, Path currentWorkDir) throws IOException { return getLocalCache(cache, conf, baseDir, null, isArchive, confFileStamp, currentWorkDir); } /** * This is the opposite of getlocalcache. 
When you are done with * using the cache, you need to release the cache * @param cache The cache URI to be released * @param conf configuration which contains the filesystem the cache * is contained in. * @throws IOException */ public static void releaseCache(URI cache, Configuration conf, long timeStamp) throws IOException { String cacheId = getKey(cache, conf, timeStamp); synchronized (cachedArchives) { CacheStatus lcacheStatus = cachedArchives.get(cacheId); if (lcacheStatus == null) { LOG.warn("Cannot find localized cache: " + cache + " (key: " + cacheId + ") in releaseCache!"); return; } lcacheStatus.refcount--; } } // To delete the caches which have a refcount of zero private static void deleteCache(Configuration conf) throws IOException { Set<CacheStatus> deleteSet = new HashSet<CacheStatus>(); // try deleting cache Status with refcount of zero synchronized (cachedArchives) { for (Iterator it = cachedArchives.keySet().iterator(); it.hasNext();) { String cacheId = (String) it.next(); CacheStatus lcacheStatus = cachedArchives.get(cacheId); if (lcacheStatus.refcount == 0) { // delete this cache entry from the global list // and mark the localized file for deletion deleteSet.add(lcacheStatus); it.remove(); } } } // do the deletion, after releasing the global lock for (CacheStatus lcacheStatus : deleteSet) { synchronized (lcacheStatus) { FileSystem.getLocal(conf).delete(lcacheStatus.localizedLoadPath, true); LOG.info("Deleted path " + lcacheStatus.localizedLoadPath); // decrement the size of the cache from baseDirSize synchronized (baseDirSize) { Long dirSize = baseDirSize.get(lcacheStatus.localizedBaseDir); if ( dirSize != null ) { dirSize -= lcacheStatus.size; baseDirSize.put(lcacheStatus.localizedBaseDir, dirSize); } else { LOG.warn("Cannot find record of the baseDir: " + lcacheStatus.localizedBaseDir + " during delete!"); } } } } } /* * Returns the relative path of the dir this cache will be localized in * relative path that this cache will be localized in. 
For * hdfs://hostname:port/absolute_path -- the relative path is * hostname/absolute path -- if it is just /absolute_path -- then the * relative path is hostname of DFS this mapred cluster is running * on/absolute_path */ public static String makeRelative(URI cache, Configuration conf) throws IOException { String host = cache.getHost(); if (host == null) { host = cache.getScheme(); } if (host == null) { URI defaultUri = FileSystem.get(conf).getUri(); host = defaultUri.getHost(); if (host == null) { host = defaultUri.getScheme(); } } String path = host + cache.getPath(); path = path.replace(":/","/"); // remove windows device colon return path; } static String getKey(URI cache, Configuration conf, long timeStamp) throws IOException { return makeRelative(cache, conf) + String.valueOf(timeStamp); } private static Path checkCacheStatusValidity(Configuration conf, URI cache, long confFileStamp, CacheStatus cacheStatus, FileStatus fileStatus, boolean isArchive ) throws IOException { FileSystem fs = FileSystem.get(cache, conf); // Has to be if (!ifExistsAndFresh(conf, fs, cache, confFileStamp, cacheStatus, fileStatus)) { throw new IOException("Stale cache file: " + cacheStatus.localizedLoadPath + " for cache-file: " + cache); } LOG.info(String.format("Using existing cache of %s->%s", cache.toString(), cacheStatus.localizedLoadPath)); return cacheStatus.localizedLoadPath; } private static void createSymlink(Configuration conf, URI cache, CacheStatus cacheStatus, boolean isArchive, Path currentWorkDir, boolean honorSymLinkConf) throws IOException { boolean doSymlink = honorSymLinkConf && DistributedCache.getSymlink(conf); if(cache.getFragment() == null) { doSymlink = false; } String link = currentWorkDir.toString() + Path.SEPARATOR + cache.getFragment(); File flink = new File(link); if (doSymlink){ if (!flink.exists()) { FileUtil.symLink(cacheStatus.localizedLoadPath.toString(), link); } } } // the method which actually copies the caches locally and unjars/unzips them // 
and does chmod for the files private static Path localizeCache(Configuration conf, URI cache, long confFileStamp, CacheStatus cacheStatus, FileStatus fileStatus, boolean isArchive) throws IOException { FileSystem fs = getFileSystem(cache, conf); FileSystem localFs = FileSystem.getLocal(conf); Path parchive = null; if (isArchive) { parchive = new Path(cacheStatus.localizedLoadPath, new Path(cacheStatus.localizedLoadPath.getName())); } else { parchive = cacheStatus.localizedLoadPath; } if (!localFs.mkdirs(parchive.getParent())) { throw new IOException("Mkdirs failed to create directory " + cacheStatus.localizedLoadPath.toString()); } String cacheId = cache.getPath(); fs.copyToLocalFile(new Path(cacheId), parchive); if (isArchive) { String tmpArchive = parchive.toString().toLowerCase(); File srcFile = new File(parchive.toString()); File destDir = new File(parchive.getParent().toString()); if (tmpArchive.endsWith(".jar")) { RunJar.unJar(srcFile, destDir); } else if (tmpArchive.endsWith(".zip")) { FileUtil.unZip(srcFile, destDir); } else if (isTarFile(tmpArchive)) { FileUtil.unTar(srcFile, destDir); } // else will not do anyhting // and copy the file into the dir as it is } long cacheSize = FileUtil.getDU(new File(parchive.getParent().toString())); cacheStatus.size = cacheSize; synchronized (baseDirSize) { Long dirSize = baseDirSize.get(cacheStatus.localizedBaseDir); if (dirSize == null) { dirSize = Long.valueOf(cacheSize); } else { dirSize += cacheSize; } baseDirSize.put(cacheStatus.localizedBaseDir, dirSize); } // do chmod here try { //Setting recursive permission to grant everyone read and execute Path localDir = new Path(cacheStatus.localizedBaseDir, cacheStatus.uniqueParentDir); LOG.info("Doing chmod on localdir :" + localDir); FileUtil.chmod(localDir.toString(), "ugo+rx", true); } catch(InterruptedException e) { LOG.warn("Exception in chmod" + e.toString()); } // update cacheStatus to reflect the newly cached file cacheStatus.mtime = getTimestamp(conf, cache); 
return cacheStatus.localizedLoadPath; } private static boolean isTarFile(String filename) { return (filename.endsWith(".tgz") || filename.endsWith(".tar.gz") || filename.endsWith(".tar")); } // Checks if the cache has already been localized and is fresh private static boolean ifExistsAndFresh(Configuration conf, FileSystem fs, URI cache, long confFileStamp, CacheStatus lcacheStatus, FileStatus fileStatus) throws IOException { // check for existence of the cache long dfsFileStamp; if (fileStatus != null) { dfsFileStamp = fileStatus.getModificationTime(); } else { dfsFileStamp = getTimestamp(conf, cache); } // ensure that the file on hdfs hasn't been modified since the job started if (dfsFileStamp != confFileStamp) { LOG.fatal("File: " + cache + " has changed on HDFS since job started"); throw new IOException("File: " + cache + " has changed on HDFS since job started"); } if (dfsFileStamp != lcacheStatus.mtime) { return false; } return true; } /** * Returns mtime of a given cache file on hdfs. 
* @param conf configuration * @param cache cache file * @return mtime of a given cache file on hdfs * @throws IOException */ public static long getTimestamp(Configuration conf, URI cache) throws IOException { FileSystem fileSystem = FileSystem.get(cache, conf); Path filePath = new Path(cache.getPath()); return fileSystem.getFileStatus(filePath).getModificationTime(); } /** * This method create symlinks for all files in a given dir in another directory * @param conf the configuration * @param jobCacheDir the target directory for creating symlinks * @param workDir the directory in which the symlinks are created * @throws IOException */ public static void createAllSymlink(Configuration conf, File jobCacheDir, File workDir) throws IOException{ if ((jobCacheDir == null || !jobCacheDir.isDirectory()) || workDir == null || (!workDir.isDirectory())) { return; } boolean createSymlink = getSymlink(conf); if (createSymlink){ File[] list = jobCacheDir.listFiles(); for (int i=0; i < list.length; i++){ FileUtil.symLink(list[i].getAbsolutePath(), new File(workDir, list[i].getName()).toString()); } } } private static String getFileSysName(URI url) { String fsname = url.getScheme(); if ("hdfs".equals(fsname)) { String host = url.getHost(); int port = url.getPort(); return (port == (-1)) ? 
host : (host + ":" + port); } else { return null; } } private static FileSystem getFileSystem(URI cache, Configuration conf) throws IOException { String fileSysName = getFileSysName(cache); if (fileSysName != null) return FileSystem.getNamed(fileSysName, conf); else return FileSystem.get(conf); } /** * Set the configuration with the given set of archives * @param archives The list of archives that need to be localized * @param conf Configuration which will be changed */ public static void setCacheArchives(URI[] archives, Configuration conf) { String sarchives = StringUtils.uriToString(archives); conf.set("mapred.cache.archives", sarchives); } /** * Set the configuration with the given set of files * @param files The list of files that need to be localized * @param conf Configuration which will be changed */ public static void setCacheFiles(URI[] files, Configuration conf) { String sfiles = StringUtils.uriToString(files); conf.set("mapred.cache.files", sfiles); } /** * Get cache archives set in the Configuration * @param conf The configuration which contains the archives * @return A URI array of the caches set in the Configuration * @throws IOException */ public static URI[] getCacheArchives(Configuration conf) throws IOException { return StringUtils.stringToURI(conf.getStrings("mapred.cache.archives")); } /** * Get cache files set in the Configuration * @param conf The configuration which contains the files * @return A URI array of the files set in the Configuration * @throws IOException */ public static URI[] getCacheFiles(Configuration conf) throws IOException { return StringUtils.stringToURI(conf.getStrings("mapred.cache.files")); } /** * Return the path array of the localized caches * @param conf Configuration that contains the localized archives * @return A path array of localized caches * @throws IOException */ public static Path[] getLocalCacheArchives(Configuration conf) throws IOException { return StringUtils.stringToPath(conf 
.getStrings("mapred.cache.localArchives")); } /** * Return the path array of the localized files * @param conf Configuration that contains the localized files * @return A path array of localized files * @throws IOException */ public static Path[] getLocalCacheFiles(Configuration conf) throws IOException { return StringUtils.stringToPath(conf.getStrings("mapred.cache.localFiles")); } /** * Get the timestamps of the archives * @param conf The configuration which stored the timestamps * @return a string array of timestamps * @throws IOException */ public static String[] getArchiveTimestamps(Configuration conf) { return conf.getStrings("mapred.cache.archives.timestamps"); } /** * Get the timestamps of the files * @param conf The configuration which stored the timestamps * @return a string array of timestamps * @throws IOException */ public static String[] getFileTimestamps(Configuration conf) { return conf.getStrings("mapred.cache.files.timestamps"); } /** * This is to check the timestamp of the archives to be localized * @param conf Configuration which stores the timestamp's * @param timestamps comma separated list of timestamps of archives. * The order should be the same as the order in which the archives are added. */ public static void setArchiveTimestamps(Configuration conf, String timestamps) { conf.set("mapred.cache.archives.timestamps", timestamps); } /** * This is to check the timestamp of the files to be localized * @param conf Configuration which stores the timestamp's * @param timestamps comma separated list of timestamps of files. * The order should be the same as the order in which the files are added. 
*/ public static void setFileTimestamps(Configuration conf, String timestamps) { conf.set("mapred.cache.files.timestamps", timestamps); } /** * Set the conf to contain the location for localized archives * @param conf The conf to modify to contain the localized caches * @param str a comma separated list of local archives */ public static void setLocalArchives(Configuration conf, String str) { conf.set("mapred.cache.localArchives", str); } /** * Set the conf to contain the location for localized files * @param conf The conf to modify to contain the localized caches * @param str a comma separated list of local files */ public static void setLocalFiles(Configuration conf, String str) { conf.set("mapred.cache.localFiles", str); } /** * Add a archives to be localized to the conf * @param uri The uri of the cache to be localized * @param conf Configuration to add the cache to */ public static void addCacheArchive(URI uri, Configuration conf) { String archives = conf.get("mapred.cache.archives"); conf.set("mapred.cache.archives", archives == null ? uri.toString() : archives + "," + uri.toString()); } /** * Add a file to be localized to the conf * @param uri The uri of the cache to be localized * @param conf Configuration to add the cache to */ public static void addCacheFile(URI uri, Configuration conf) { String files = conf.get("mapred.cache.files"); conf.set("mapred.cache.files", files == null ? uri.toString() : files + "," + uri.toString()); } /** * Add an file path to the current set of classpath entries It adds the file * to cache as well. * * @param file Path of the file to be added * @param conf Configuration that contains the classpath setting */ public static void addFileToClassPath(Path file, Configuration conf) throws IOException { String classpath = conf.get("mapred.job.classpath.files"); conf.set("mapred.job.classpath.files", classpath == null ? 
file.toString() : classpath + System.getProperty("path.separator") + file.toString()); FileSystem fs = FileSystem.get(conf); URI uri = fs.makeQualified(file).toUri(); addCacheFile(uri, conf); } /** * Get the file entries in classpath as an array of Path * * @param conf Configuration that contains the classpath setting */ public static Path[] getFileClassPaths(Configuration conf) { String classpath = conf.get("mapred.job.classpath.files"); if (classpath == null) return null; ArrayList list = Collections.list(new StringTokenizer(classpath, System .getProperty("path.separator"))); Path[] paths = new Path[list.size()]; for (int i = 0; i < list.size(); i++) { paths[i] = new Path((String) list.get(i)); } return paths; } /** * Add an archive path to the current set of classpath entries. It adds the * archive to cache as well. * * @param archive Path of the archive to be added * @param conf Configuration that contains the classpath setting */ public static void addArchiveToClassPath(Path archive, Configuration conf) throws IOException { diff --git a/src/mapred/org/apache/hadoop/mapred/TaskRunner.java b/src/mapred/org/apache/hadoop/mapred/TaskRunner.java index d6fc669..90499b7 100644 --- a/src/mapred/org/apache/hadoop/mapred/TaskRunner.java +++ b/src/mapred/org/apache/hadoop/mapred/TaskRunner.java @@ -1,639 +1,639 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapred; import org.apache.commons.logging.*; import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.filecache.*; import org.apache.hadoop.util.*; import java.io.*; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Vector; import java.net.URI; /** Base class that runs a task in a separate process. Tasks are run in a * separate process in order to isolate the map/reduce system code from bugs in * user supplied map and reduce functions. 
*/ abstract class TaskRunner extends Thread { public static final Log LOG = LogFactory.getLog(TaskRunner.class); volatile boolean killed = false; private TaskTracker.TaskInProgress tip; private Task t; private Object lock = new Object(); private volatile boolean done = false; private int exitCode = -1; private boolean exitCodeSet = false; private TaskTracker tracker; protected JobConf conf; JvmManager jvmManager; /** * for cleaning up old map outputs */ protected MapOutputFile mapOutputFile; public TaskRunner(TaskTracker.TaskInProgress tip, TaskTracker tracker, JobConf conf) { this.tip = tip; this.t = tip.getTask(); this.tracker = tracker; this.conf = conf; this.mapOutputFile = new MapOutputFile(t.getJobID()); this.mapOutputFile.setConf(conf); this.jvmManager = tracker.getJvmManagerInstance(); } public Task getTask() { return t; } public TaskTracker.TaskInProgress getTaskInProgress() { return tip; } public TaskTracker getTracker() { return tracker; } /** Called to assemble this task's input. This method is run in the parent * process before the child is spawned. It should not execute user code, * only system code. */ public boolean prepare() throws IOException { return true; } /** Called when this task's output is no longer needed. * This method is run in the parent process after the child exits. It should * not execute user code, only system code. */ public void close() throws IOException {} private static String stringifyPathArray(Path[] p){ if (p == null){ return null; } StringBuffer str = new StringBuffer(p[0].toString()); for (int i = 1; i < p.length; i++){ str.append(","); str.append(p[i].toString()); } return str.toString(); } /** * Get the java command line options for the child map/reduce tasks. 
* @param jobConf job configuration * @param defaultValue default value * @return the java command line options for child map/reduce tasks * @deprecated Use command line options specific to map or reduce tasks set * via {@link JobConf#MAPRED_MAP_TASK_JAVA_OPTS} or * {@link JobConf#MAPRED_REDUCE_TASK_JAVA_OPTS} */ @Deprecated public String getChildJavaOpts(JobConf jobConf, String defaultValue) { return jobConf.get(JobConf.MAPRED_TASK_JAVA_OPTS, defaultValue); } /** * Get the maximum virtual memory of the child map/reduce tasks. * @param jobConf job configuration * @return the maximum virtual memory of the child task or <code>-1</code> if * none is specified * @deprecated Use limits specific to the map or reduce tasks set via * {@link JobConf#MAPRED_MAP_TASK_ULIMIT} or * {@link JobConf#MAPRED_REDUCE_TASK_ULIMIT} */ @Deprecated public int getChildUlimit(JobConf jobConf) { return jobConf.getInt(JobConf.MAPRED_TASK_ULIMIT, -1); } /** * Get the environment variables for the child map/reduce tasks. 
* @param jobConf job configuration * @return the environment variables for the child map/reduce tasks or * <code>null</code> if unspecified * @deprecated Use environment variables specific to the map or reduce tasks * set via {@link JobConf#MAPRED_MAP_TASK_ENV} or * {@link JobConf#MAPRED_REDUCE_TASK_ENV} */ public String getChildEnv(JobConf jobConf) { return jobConf.get(JobConf.MAPRED_TASK_ENV); } + private static class CacheFile { + URI uri; + long timeStamp; + CacheFile (URI uri, long timeStamp) { + this.uri = uri; + this.timeStamp = timeStamp; + } + } + @Override public final void run() { String errorInfo = "Child Error"; + List<CacheFile> localizedCacheFiles = new ArrayList<CacheFile>(); try { //before preparing the job localize //all the archives TaskAttemptID taskid = t.getTaskID(); LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir"); File jobCacheDir = null; if (conf.getJar() != null) { jobCacheDir = new File( new Path(conf.getJar()).getParent().toString()); } File workDir = new File(lDirAlloc.getLocalPathToRead( TaskTracker.getLocalTaskDir( t.getJobID().toString(), t.getTaskID().toString(), t.isTaskCleanupTask()) + Path.SEPARATOR + MRConstants.WORKDIR, conf). toString()); URI[] archives = DistributedCache.getCacheArchives(conf); URI[] files = DistributedCache.getCacheFiles(conf); FileStatus fileStatus; FileSystem fileSystem; Path localPath; String baseDir; if ((archives != null) || (files != null)) { if (archives != null) { String[] archivesTimestamps = DistributedCache.getArchiveTimestamps(conf); Path[] p = new Path[archives.length]; for (int i = 0; i < archives.length;i++){ fileSystem = FileSystem.get(archives[i], conf); fileStatus = fileSystem.getFileStatus( new Path(archives[i].getPath())); p[i] = DistributedCache.getLocalCache(archives[i], conf, new Path(TaskTracker.getCacheSubdir()), fileStatus, true, Long.parseLong( archivesTimestamps[i]), new Path(workDir. 
getAbsolutePath()), false, lDirAlloc); + localizedCacheFiles.add(new CacheFile(archives[i], Long + .parseLong(archivesTimestamps[i]))); } DistributedCache.setLocalArchives(conf, stringifyPathArray(p)); } if ((files != null)) { String[] fileTimestamps = DistributedCache.getFileTimestamps(conf); Path[] p = new Path[files.length]; for (int i = 0; i < files.length;i++){ fileSystem = FileSystem.get(files[i], conf); fileStatus = fileSystem.getFileStatus( new Path(files[i].getPath())); p[i] = DistributedCache.getLocalCache(files[i], conf, new Path(TaskTracker.getCacheSubdir()), fileStatus, false, Long.parseLong( fileTimestamps[i]), new Path(workDir. getAbsolutePath()), false, lDirAlloc); + localizedCacheFiles.add(new CacheFile(files[i], Long + .parseLong(fileTimestamps[i]))); } DistributedCache.setLocalFiles(conf, stringifyPathArray(p)); } Path localTaskFile = new Path(t.getJobFile()); FileSystem localFs = FileSystem.getLocal(conf); localFs.delete(localTaskFile, true); OutputStream out = localFs.create(localTaskFile); try { conf.writeXml(out); } finally { out.close(); } } if (!prepare()) { return; } String sep = System.getProperty("path.separator"); StringBuffer classPath = new StringBuffer(); // start with same classpath as parent process classPath.append(System.getProperty("java.class.path")); classPath.append(sep); if (!workDir.mkdirs()) { if (!workDir.isDirectory()) { LOG.fatal("Mkdirs failed to create " + workDir.toString()); } } String jar = conf.getJar(); if (jar != null) { // if jar exists, it into workDir File[] libs = new File(jobCacheDir, "lib").listFiles(); if (libs != null) { for (int i = 0; i < libs.length; i++) { classPath.append(sep); // add libs from jar to classpath classPath.append(libs[i]); } } classPath.append(sep); classPath.append(new File(jobCacheDir, "classes")); classPath.append(sep); classPath.append(jobCacheDir); } // include the user specified classpath //archive paths Path[] archiveClasspaths = DistributedCache.getArchiveClassPaths(conf); if 
(archiveClasspaths != null && archives != null) { Path[] localArchives = DistributedCache .getLocalCacheArchives(conf); if (localArchives != null){ for (int i=0;i<archives.length;i++){ for(int j=0;j<archiveClasspaths.length;j++){ if (archives[i].getPath().equals( archiveClasspaths[j].toString())){ classPath.append(sep); classPath.append(localArchives[i] .toString()); } } } } } //file paths Path[] fileClasspaths = DistributedCache.getFileClassPaths(conf); if (fileClasspaths!=null && files != null) { Path[] localFiles = DistributedCache .getLocalCacheFiles(conf); if (localFiles != null) { for (int i = 0; i < files.length; i++) { for (int j = 0; j < fileClasspaths.length; j++) { if (files[i].getPath().equals( fileClasspaths[j].toString())) { classPath.append(sep); classPath.append(localFiles[i].toString()); } } } } } classPath.append(sep); classPath.append(workDir); // Build exec child jmv args. Vector<String> vargs = new Vector<String>(8); File jvm = // use same jvm as parent new File(new File(System.getProperty("java.home"), "bin"), "java"); vargs.add(jvm.toString()); // Add child (task) java-vm options. // // The following symbols if present in mapred.{map|reduce}.child.java.opts // value are replaced: // + @taskid@ is interpolated with value of TaskID. // Other occurrences of @ will not be altered. // // Example with multiple arguments and substitutions, showing // jvm GC logging, and start of a passwordless JVM JMX agent so can // connect with jconsole and the likes to watch child memory, threads // and get thread dumps. 
// // <property> // <name>mapred.map.child.java.opts</name> // <value>-Xmx 512M -verbose:gc -Xloggc:/tmp/@[email protected] \ // -Dcom.sun.management.jmxremote.authenticate=false \ // -Dcom.sun.management.jmxremote.ssl=false \ // </value> // </property> // // <property> // <name>mapred.reduce.child.java.opts</name> // <value>-Xmx 1024M -verbose:gc -Xloggc:/tmp/@[email protected] \ // -Dcom.sun.management.jmxremote.authenticate=false \ // -Dcom.sun.management.jmxremote.ssl=false \ // </value> // </property> // String javaOpts = getChildJavaOpts(conf, JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS); javaOpts = javaOpts.replace("@taskid@", taskid.toString()); String [] javaOptsSplit = javaOpts.split(" "); // Add java.library.path; necessary for loading native libraries. // // 1. To support native-hadoop library i.e. libhadoop.so, we add the // parent processes' java.library.path to the child. // 2. We also add the 'cwd' of the task to it's java.library.path to help // users distribute native libraries via the DistributedCache. // 3. The user can also specify extra paths to be added to the // java.library.path via mapred.{map|reduce}.child.java.opts. // String libraryPath = System.getProperty("java.library.path"); if (libraryPath == null) { libraryPath = workDir.getAbsolutePath(); } else { libraryPath += sep + workDir; } boolean hasUserLDPath = false; for(int i=0; i<javaOptsSplit.length ;i++) { if(javaOptsSplit[i].startsWith("-Djava.library.path=")) { javaOptsSplit[i] += sep + libraryPath; hasUserLDPath = true; break; } } if(!hasUserLDPath) { vargs.add("-Djava.library.path=" + libraryPath); } for (int i = 0; i < javaOptsSplit.length; i++) { vargs.add(javaOptsSplit[i]); } // add java.io.tmpdir given by mapred.child.tmp String tmp = conf.get("mapred.child.tmp", "./tmp"); Path tmpDir = new Path(tmp); // if temp directory path is not absolute // prepend it with workDir. 
if (!tmpDir.isAbsolute()) { tmpDir = new Path(workDir.toString(), tmp); } FileSystem localFs = FileSystem.getLocal(conf); if (!localFs.mkdirs(tmpDir) && !localFs.getFileStatus(tmpDir).isDir()) { throw new IOException("Mkdirs failed to create " + tmpDir.toString()); } vargs.add("-Djava.io.tmpdir=" + tmpDir.toString()); // Add classpath. vargs.add("-classpath"); vargs.add(classPath.toString()); // Setup the log4j prop long logSize = TaskLog.getTaskLogLength(conf); vargs.add("-Dhadoop.log.dir=" + new File(System.getProperty("hadoop.log.dir") ).getAbsolutePath()); vargs.add("-Dhadoop.root.logger=INFO,TLA"); vargs.add("-Dhadoop.tasklog.taskid=" + taskid); vargs.add("-Dhadoop.tasklog.totalLogFileSize=" + logSize); if (conf.getProfileEnabled()) { if (conf.getProfileTaskRange(t.isMapTask() ).isIncluded(t.getPartition())) { File prof = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.PROFILE); vargs.add(String.format(conf.getProfileParams(), prof.toString())); } } // Add main class and its arguments vargs.add(Child.class.getName()); // main of Child // pass umbilical address InetSocketAddress address = tracker.getTaskTrackerReportAddress(); vargs.add(address.getAddress().getHostAddress()); vargs.add(Integer.toString(address.getPort())); vargs.add(taskid.toString()); // pass task identifier tracker.addToMemoryManager(t.getTaskID(), t.isMapTask(), conf); // set memory limit using ulimit if feasible and necessary ... 
String[] ulimitCmd = Shell.getUlimitMemoryCommand(getChildUlimit(conf)); List<String> setup = null; if (ulimitCmd != null) { setup = new ArrayList<String>(); for (String arg : ulimitCmd) { setup.add(arg); } } // Set up the redirection of the task's stdout and stderr streams File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT); File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR); stdout.getParentFile().mkdirs(); tracker.getTaskTrackerInstrumentation().reportTaskLaunch(taskid, stdout, stderr); Map<String, String> env = new HashMap<String, String>(); StringBuffer ldLibraryPath = new StringBuffer(); ldLibraryPath.append(workDir.toString()); String oldLdLibraryPath = null; oldLdLibraryPath = System.getenv("LD_LIBRARY_PATH"); if (oldLdLibraryPath != null) { ldLibraryPath.append(sep); ldLibraryPath.append(oldLdLibraryPath); } env.put("LD_LIBRARY_PATH", ldLibraryPath.toString()); // for the child of task jvm, set hadoop.root.logger env.put("HADOOP_ROOT_LOGGER","INFO,TLA"); String hadoopClientOpts = System.getenv("HADOOP_CLIENT_OPTS"); if (hadoopClientOpts == null) { hadoopClientOpts = ""; } else { hadoopClientOpts = hadoopClientOpts + " "; } hadoopClientOpts = hadoopClientOpts + "-Dhadoop.tasklog.taskid=" + taskid + " -Dhadoop.tasklog.totalLogFileSize=" + logSize; env.put("HADOOP_CLIENT_OPTS", "\"" + hadoopClientOpts + "\""); // add the env variables passed by the user String mapredChildEnv = getChildEnv(conf); if (mapredChildEnv != null && mapredChildEnv.length() > 0) { String childEnvs[] = mapredChildEnv.split(","); for (String cEnv : childEnvs) { try { String[] parts = cEnv.split("="); // split on '=' String value = env.get(parts[0]); if (value != null) { // replace $env with the child's env constructed by tt's // example LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp value = parts[1].replace("$" + parts[0], value); } else { // this key is not configured by the tt for the child .. 
get it // from the tt's env // example PATH=$PATH:/tmp value = System.getenv(parts[0]); if (value != null) { // the env key is present in the tt's env value = parts[1].replace("$" + parts[0], value); } else { // the env key is note present anywhere .. simply set it // example X=$X:/tmp or X=/tmp value = parts[1].replace("$" + parts[0], ""); } } env.put(parts[0], value); } catch (Throwable t) { // set the error msg errorInfo = "Invalid User environment settings : " + mapredChildEnv + ". Failed to parse user-passed environment param." + " Expecting : env1=value1,env2=value2..."; LOG.warn(errorInfo); throw t; } } } jvmManager.launchJvm(this, jvmManager.constructJvmEnv(setup,vargs,stdout,stderr,logSize, workDir, env, conf)); synchronized (lock) { while (!done) { lock.wait(); } } tracker.getTaskTrackerInstrumentation().reportTaskEnd(t.getTaskID()); if (exitCodeSet) { if (!killed && exitCode != 0) { if (exitCode == 65) { tracker.getTaskTrackerInstrumentation().taskFailedPing(t.getTaskID()); } throw new IOException("Task process exit with nonzero status of " + exitCode + "."); } } } catch (FSError e) { LOG.fatal("FSError", e); try { tracker.fsError(t.getTaskID(), e.getMessage()); } catch (IOException ie) { LOG.fatal(t.getTaskID()+" reporting FSError", ie); } } catch (Throwable throwable) { LOG.warn(t.getTaskID() + errorInfo, throwable); Throwable causeThrowable = new Throwable(errorInfo, throwable); ByteArrayOutputStream baos = new ByteArrayOutputStream(); causeThrowable.printStackTrace(new PrintStream(baos)); try { tracker.reportDiagnosticInfo(t.getTaskID(), baos.toString()); } catch (IOException e) { LOG.warn(t.getTaskID()+" Reporting Diagnostics", e); } } finally { try{ - URI[] archives = DistributedCache.getCacheArchives(conf); - URI[] files = DistributedCache.getCacheFiles(conf); - String[] archivesTimestamps = - DistributedCache.getArchiveTimestamps(conf); - String[] fileTimestamps = DistributedCache.getFileTimestamps(conf); - if (archives != null){ - for (int i = 
0; i < archives.length; i++){ - DistributedCache.releaseCache(archives[i], conf, - Long.parseLong(archivesTimestamps[i])); - } - } - if (files != null){ - for(int i = 0; i < files.length; i++){ - DistributedCache.releaseCache(files[i], conf, - Long.parseLong(fileTimestamps[i])); - } + for (CacheFile cf : localizedCacheFiles){ + DistributedCache.releaseCache(cf.uri, conf, cf.timeStamp); } }catch(IOException ie){ LOG.warn("Error releasing caches : Cache files might not have been cleaned up"); } // It is safe to call TaskTracker.TaskInProgress.reportTaskFinished with // *false* since the task has either // a) SUCCEEDED - which means commit has been done // b) FAILED - which means we do not need to commit tip.reportTaskFinished(false); } } //Mostly for setting up the symlinks. Note that when we setup the distributed //cache, we didn't create the symlinks. This is done on a per task basis //by the currently executing task. public static void setupWorkDir(JobConf conf) throws IOException { File workDir = new File(".").getAbsoluteFile(); FileUtil.fullyDelete(workDir); if (DistributedCache.getSymlink(conf)) { URI[] archives = DistributedCache.getCacheArchives(conf); URI[] files = DistributedCache.getCacheFiles(conf); Path[] localArchives = DistributedCache.getLocalCacheArchives(conf); Path[] localFiles = DistributedCache.getLocalCacheFiles(conf); if (archives != null) { for (int i = 0; i < archives.length; i++) { String link = archives[i].getFragment(); if (link != null) { link = workDir.toString() + Path.SEPARATOR + link; File flink = new File(link); if (!flink.exists()) { FileUtil.symLink(localArchives[i].toString(), link); } } } } if (files != null) { for (int i = 0; i < files.length; i++) { String link = files[i].getFragment(); if (link != null) { link = workDir.toString() + Path.SEPARATOR + link; File flink = new File(link); if (!flink.exists()) { FileUtil.symLink(localFiles[i].toString(), link); } } } } } File jobCacheDir = null; if (conf.getJar() != null) { 
jobCacheDir = new File( new Path(conf.getJar()).getParent().toString()); } // create symlinks for all the files in job cache dir in current // workingdir for streaming try{ DistributedCache.createAllSymlink(conf, jobCacheDir, workDir); } catch(IOException ie){ // Do not exit even if symlinks have not been created. LOG.warn(StringUtils.stringifyException(ie)); } // add java.io.tmpdir given by mapred.child.tmp String tmp = conf.get("mapred.child.tmp", "./tmp"); Path tmpDir = new Path(tmp); // if temp directory path is not absolute // prepend it with workDir. if (!tmpDir.isAbsolute()) { tmpDir = new Path(workDir.toString(), tmp); FileSystem localFs = FileSystem.getLocal(conf); if (!localFs.mkdirs(tmpDir) && !localFs.getFileStatus(tmpDir).isDir()){ throw new IOException("Mkdirs failed to create " + tmpDir.toString()); } } } /** * Kill the child process */ public void kill() { killed = true; jvmManager.taskKilled(this); signalDone(); } public void signalDone() { synchronized (lock) { done = true; lock.notify(); } } public void setExitCode(int exitCode) { this.exitCodeSet = true; this.exitCode = exitCode; } }
jaxlaw/hadoop-common
fbf7ed4976171f6020f76e6bef73d76cafb98055
HADOOP:4933 from http://issues.apache.org/jira/secure/attachment/12397116/HADOOP-4933-v1.1.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index fc61fcc..cef35fb 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,421 +1,425 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) + HADOOP-4933. Fixes a ConcurrentModificationException problem that shows up + when the history viewer is accessed concurrently. + (Amar Kamat via ddas) + yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. 
Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1231. Add an option to distcp to avoid checking checksums with the upgrade option. (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. 
Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. 
Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. 
Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. 
http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. 
http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. 
http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. 
Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/mapred/org/apache/hadoop/mapred/JobHistory.java b/src/mapred/org/apache/hadoop/mapred/JobHistory.java index 4dc7002..2269f14 100644 --- a/src/mapred/org/apache/hadoop/mapred/JobHistory.java +++ b/src/mapred/org/apache/hadoop/mapred/JobHistory.java @@ -1,996 +1,994 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; import java.io.BufferedReader; import java.io.File; import java.io.FileFilter; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStreamReader; import java.io.PrintWriter; import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.net.URLEncoder; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.StringUtils; /** * Provides methods for writing to and reading from job history. * Job History works in an append mode, JobHistory and its inner classes provide methods * to log job events. * * JobHistory is split into multiple files, format of each file is plain text where each line * is of the format [type (key=value)*], where type identifies the type of the record. * Type maps to UID of one of the inner classes of this class. * * Job history is maintained in a master index which contains star/stop times of all jobs with * a few other job level properties. Apart from this each job's history is maintained in a seperate history * file. 
name of job history files follows the format jobtrackerId_jobid * * For parsing the job history it supports a listener based interface where each line is parsed * and passed to listener. The listener can create an object model of history or look for specific * events and discard rest of the history. * * CHANGE LOG : * Version 0 : The history has the following format : * TAG KEY1="VALUE1" KEY2="VALUE2" and so on. TAG can be Job, Task, MapAttempt or ReduceAttempt. Note that a '"' is the line delimiter. * Version 1 : Changes the line delimiter to '.' Values are now escaped for unambiguous parsing. Added the Meta tag to store version info. */ public class JobHistory { static final long VERSION = 1L; public static final Log LOG = LogFactory.getLog(JobHistory.class); private static final String DELIMITER = " "; static final char LINE_DELIMITER_CHAR = '.'; static final char[] charsToEscape = new char[] {'"', '=', LINE_DELIMITER_CHAR}; static final String DIGITS = "[0-9]+"; static final String KEY = "(\\w+)"; // value is any character other than quote, but escaped quotes can be there static final String VALUE = "[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*"; static final Pattern pattern = Pattern.compile(KEY + "=" + "\"" + VALUE + "\""); public static final int JOB_NAME_TRIM_LENGTH = 50; private static String JOBTRACKER_UNIQUE_STRING = null; private static String LOG_DIR = null; private static boolean disableHistory = true; private static final String SECONDARY_FILE_SUFFIX = ".recover"; private static long jobHistoryBlockSize = 0; private static String jobtrackerHostname; private static JobHistoryFilesManager fileManager = null; final static FsPermission HISTORY_DIR_PERMISSION = FsPermission.createImmutable((short) 0755); // rwxr-xr-x final static FsPermission HISTORY_FILE_PERMISSION = FsPermission.createImmutable((short) 0744); // rwxr--r-- private static FileSystem LOGDIR_FS; // log dir filesystem private static FileSystem DONEDIR_FS; // Done dir filesystem private static JobConf 
jtConf; private static Path DONE = null; // folder for completed jobs /** * A filter for conf files */ private static final PathFilter CONF_FILTER = new PathFilter() { public boolean accept(Path path) { return path.getName().endsWith("_conf.xml"); } }; /** * A class that manages all the files related to a job. For now * - writers : list of open files * - job history filename * - job conf filename */ private static class JobHistoryFilesManager { // a private (virtual) folder for all the files related to a running job private static class FilesHolder { ArrayList<PrintWriter> writers = new ArrayList<PrintWriter>(); Path historyFilename; // path of job history file Path confFilename; // path of job's conf } private ThreadPoolExecutor executor = null; private final Configuration conf; private final JobTracker jobTracker; // cache from job-key to files associated with it. private Map<JobID, FilesHolder> fileCache = new ConcurrentHashMap<JobID, FilesHolder>(); JobHistoryFilesManager(Configuration conf, JobTracker jobTracker) throws IOException { this.conf = conf; this.jobTracker = jobTracker; } void start() { executor = new ThreadPoolExecutor(1, 3, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>()); } private FilesHolder getFileHolder(JobID id) { FilesHolder holder = fileCache.get(id); if (holder == null) { holder = new FilesHolder(); fileCache.put(id, holder); } return holder; } void addWriter(JobID id, PrintWriter writer) { FilesHolder holder = getFileHolder(id); holder.writers.add(writer); } void setHistoryFile(JobID id, Path file) { FilesHolder holder = getFileHolder(id); holder.historyFilename = file; } void setConfFile(JobID id, Path file) { FilesHolder holder = getFileHolder(id); holder.confFilename = file; } ArrayList<PrintWriter> getWriters(JobID id) { FilesHolder holder = fileCache.get(id); return holder == null ? null : holder.writers; } Path getHistoryFile(JobID id) { FilesHolder holder = fileCache.get(id); return holder == null ? 
null : holder.historyFilename; } Path getConfFileWriters(JobID id) { FilesHolder holder = fileCache.get(id); return holder == null ? null : holder.confFilename; } void purgeJob(JobID id) { fileCache.remove(id); } void moveToDone(final JobID id) { if (disableHistory) { return; } final List<Path> paths = new ArrayList<Path>(); final Path historyFile = fileManager.getHistoryFile(id); if (historyFile == null) { LOG.info("No file for job-history with " + id + " found in cache!"); } else { paths.add(historyFile); } final Path confPath = fileManager.getConfFileWriters(id); if (confPath == null) { LOG.info("No file for jobconf with " + id + " found in cache!"); } else { paths.add(confPath); } executor.execute(new Runnable() { public void run() { //move the files to DONE folder try { for (Path path : paths) { //check if path exists, in case of retries it may not exist if (LOGDIR_FS.exists(path)) { LOG.info("Moving " + path.toString() + " to " + DONE.toString()); DONEDIR_FS.moveFromLocalFile(path, DONE); DONEDIR_FS.setPermission(new Path(DONE, path.getName()), new FsPermission(HISTORY_FILE_PERMISSION)); } } } catch (Throwable e) { LOG.error("Unable to move history file to DONE folder.", e); } String historyFileDonePath = null; if (historyFile != null) { historyFileDonePath = new Path(DONE, historyFile.getName()).toString(); } jobTracker.historyFileCopied(id, historyFileDonePath); //purge the job from the cache fileManager.purgeJob(id); } }); } } /** * Record types are identifiers for each line of log in history files. * A record type appears as the first token in a single line of log. */ public static enum RecordTypes { Jobtracker, Job, Task, MapAttempt, ReduceAttempt, Meta } /** * Job history files contain key="value" pairs, where keys belong to this enum. * It acts as a global namespace for all keys. 
*/ public static enum Keys { JOBTRACKERID, START_TIME, FINISH_TIME, JOBID, JOBNAME, USER, JOBCONF, SUBMIT_TIME, LAUNCH_TIME, TOTAL_MAPS, TOTAL_REDUCES, FAILED_MAPS, FAILED_REDUCES, FINISHED_MAPS, FINISHED_REDUCES, JOB_STATUS, TASKID, HOSTNAME, TASK_TYPE, ERROR, TASK_ATTEMPT_ID, TASK_STATUS, COPY_PHASE, SORT_PHASE, REDUCE_PHASE, SHUFFLE_FINISHED, SORT_FINISHED, COUNTERS, SPLITS, JOB_PRIORITY, HTTP_PORT, TRACKER_NAME, STATE_STRING, VERSION, MAP_COUNTERS, REDUCE_COUNTERS } /** * This enum contains some of the values commonly used by history log events. * since values in history can only be strings - Values.name() is used in * most places in history file. */ public static enum Values { SUCCESS, FAILED, KILLED, MAP, REDUCE, CLEANUP, RUNNING, PREP, SETUP } - // temp buffer for parsed dataa - private static Map<Keys,String> parseBuffer = new HashMap<Keys, String>(); - /** * Initialize JobHistory files. * @param conf Jobconf of the job tracker. * @param hostname jobtracker's hostname * @param jobTrackerStartTime jobtracker's start time * @return true if intialized properly * false otherwise */ public static boolean init(JobTracker jobTracker, JobConf conf, String hostname, long jobTrackerStartTime){ try { LOG_DIR = conf.get("hadoop.job.history.location" , "file:///" + new File( System.getProperty("hadoop.log.dir")).getAbsolutePath() + File.separator + "history"); JOBTRACKER_UNIQUE_STRING = hostname + "_" + String.valueOf(jobTrackerStartTime) + "_"; jobtrackerHostname = hostname; Path logDir = new Path(LOG_DIR); LOGDIR_FS = logDir.getFileSystem(conf); if (!LOGDIR_FS.exists(logDir)){ if (!LOGDIR_FS.mkdirs(logDir, new FsPermission(HISTORY_DIR_PERMISSION))) { throw new IOException("Mkdirs failed to create " + logDir.toString()); } } conf.set("hadoop.job.history.location", LOG_DIR); disableHistory = false; // set the job history block size (default is 3MB) jobHistoryBlockSize = conf.getLong("mapred.jobtracker.job.history.block.size", 3 * 1024 * 1024); jtConf = conf; // 
initialize the file manager fileManager = new JobHistoryFilesManager(conf, jobTracker); } catch(IOException e) { LOG.error("Failed to initialize JobHistory log file", e); disableHistory = true; } return !(disableHistory); } static boolean initDone(JobConf conf, FileSystem fs){ try { //if completed job history location is set, use that String doneLocation = conf. get("mapred.job.tracker.history.completed.location"); if (doneLocation != null) { DONE = fs.makeQualified(new Path(doneLocation)); DONEDIR_FS = fs; } else { DONE = new Path(LOG_DIR, "done"); DONEDIR_FS = LOGDIR_FS; } //If not already present create the done folder with appropriate //permission if (!DONEDIR_FS.exists(DONE)) { LOG.info("Creating DONE folder at "+ DONE); if (! DONEDIR_FS.mkdirs(DONE, new FsPermission(HISTORY_DIR_PERMISSION))) { throw new IOException("Mkdirs failed to create " + DONE.toString()); } } fileManager.start(); } catch(IOException e) { LOG.error("Failed to initialize JobHistory log file", e); disableHistory = true; } return !(disableHistory); } /** * Manages job-history's meta information such as version etc. * Helps in logging version information to the job-history and recover * version information from the history. 
*/ static class MetaInfoManager implements Listener { private long version = 0L; private KeyValuePair pairs = new KeyValuePair(); // Extract the version of the history that was used to write the history public MetaInfoManager(String line) throws IOException { if (null != line) { // Parse the line parseLine(line, this, false); } } // Get the line delimiter char getLineDelim() { if (version == 0) { return '"'; } else { return LINE_DELIMITER_CHAR; } } // Checks if the values are escaped or not boolean isValueEscaped() { // Note that the values are not escaped in version 0 return version != 0; } public void handle(RecordTypes recType, Map<Keys, String> values) throws IOException { // Check if the record is of type META if (RecordTypes.Meta == recType) { pairs.handle(values); version = pairs.getLong(Keys.VERSION); // defaults to 0 } } /** * Logs history meta-info to the history file. This needs to be called once * per history file. * @param jobId job id, assigned by jobtracker. */ static void logMetaInfo(ArrayList<PrintWriter> writers){ if (!disableHistory){ if (null != writers){ JobHistory.log(writers, RecordTypes.Meta, new Keys[] {Keys.VERSION}, new String[] {String.valueOf(VERSION)}); } } } } /** Escapes the string especially for {@link JobHistory} */ static String escapeString(String data) { return StringUtils.escapeString(data, StringUtils.ESCAPE_CHAR, charsToEscape); } /** * Parses history file and invokes Listener.handle() for * each line of history. It can be used for looking through history * files for specific items without having to keep whole history in memory. 
* @param path path to history file * @param l Listener for history events * @param fs FileSystem where history file is present * @throws IOException */ public static void parseHistoryFromFS(String path, Listener l, FileSystem fs) throws IOException{ FSDataInputStream in = fs.open(new Path(path)); BufferedReader reader = new BufferedReader(new InputStreamReader (in)); try { String line = null; StringBuffer buf = new StringBuffer(); // Read the meta-info line. Note that this might a jobinfo line for files // written with older format line = reader.readLine(); // Check if the file is empty if (line == null) { return; } // Get the information required for further processing MetaInfoManager mgr = new MetaInfoManager(line); boolean isEscaped = mgr.isValueEscaped(); String lineDelim = String.valueOf(mgr.getLineDelim()); String escapedLineDelim = StringUtils.escapeString(lineDelim, StringUtils.ESCAPE_CHAR, mgr.getLineDelim()); do { buf.append(line); if (!line.trim().endsWith(lineDelim) || line.trim().endsWith(escapedLineDelim)) { buf.append("\n"); continue; } parseLine(buf.toString(), l, isEscaped); buf = new StringBuffer(); } while ((line = reader.readLine())!= null); } finally { try { reader.close(); } catch (IOException ex) {} } } /** * Parse a single line of history. 
* @param line * @param l * @throws IOException */ private static void parseLine(String line, Listener l, boolean isEscaped) throws IOException{ // extract the record type int idx = line.indexOf(' '); String recType = line.substring(0, idx); String data = line.substring(idx+1, line.length()); Matcher matcher = pattern.matcher(data); + Map<Keys,String> parseBuffer = new HashMap<Keys, String>(); while(matcher.find()){ String tuple = matcher.group(0); String []parts = StringUtils.split(tuple, StringUtils.ESCAPE_CHAR, '='); String value = parts[1].substring(1, parts[1].length() -1); if (isEscaped) { value = StringUtils.unEscapeString(value, StringUtils.ESCAPE_CHAR, charsToEscape); } parseBuffer.put(Keys.valueOf(parts[0]), value); } l.handle(RecordTypes.valueOf(recType), parseBuffer); parseBuffer.clear(); } /** * Log a raw record type with keys and values. This is method is generally not used directly. * @param recordType type of log event * @param key key * @param value value */ static void log(PrintWriter out, RecordTypes recordType, Keys key, String value){ value = escapeString(value); out.println(recordType.name() + DELIMITER + key + "=\"" + value + "\"" + DELIMITER + LINE_DELIMITER_CHAR); } /** * Log a number of keys and values with record. the array length of keys and values * should be same. * @param recordType type of log event * @param keys type of log event * @param values type of log event */ static void log(ArrayList<PrintWriter> writers, RecordTypes recordType, Keys[] keys, String[] values) { StringBuffer buf = new StringBuffer(recordType.name()); buf.append(DELIMITER); for(int i =0; i< keys.length; i++){ buf.append(keys[i]); buf.append("=\""); values[i] = escapeString(values[i]); buf.append(values[i]); buf.append("\""); buf.append(DELIMITER); } buf.append(LINE_DELIMITER_CHAR); for (PrintWriter out : writers) { out.println(buf.toString()); } } /** * Returns history disable status. by default history is enabled so this * method returns false. 
* @return true if history logging is disabled, false otherwise. */ public static boolean isDisableHistory() { return disableHistory; } /** * Enable/disable history logging. Default value is false, so history * is enabled by default. * @param disableHistory true if history should be disabled, false otherwise. */ public static void setDisableHistory(boolean disableHistory) { JobHistory.disableHistory = disableHistory; } /** * Get the history location */ static Path getJobHistoryLocation() { return new Path(LOG_DIR); } /** * Get the history location for completed jobs */ static Path getCompletedJobHistoryLocation() { return DONE; } /** * Base class contais utility stuff to manage types key value pairs with enums. */ static class KeyValuePair{ private Map<Keys, String> values = new HashMap<Keys, String>(); /** * Get 'String' value for given key. Most of the places use Strings as * values so the default get' method returns 'String'. This method never returns * null to ease on GUIs. if no value is found it returns empty string "" * @param k * @return if null it returns empty string - "" */ public String get(Keys k){ String s = values.get(k); return s == null ? "" : s; } /** * Convert value from history to int and return. * if no value is found it returns 0. * @param k key */ public int getInt(Keys k){ String s = values.get(k); if (null != s){ return Integer.parseInt(s); } return 0; } /** * Convert value from history to int and return. * if no value is found it returns 0. * @param k */ public long getLong(Keys k){ String s = values.get(k); if (null != s){ return Long.parseLong(s); } return 0; } /** * Set value for the key. * @param k * @param s */ public void set(Keys k, String s){ values.put(k, s); } /** * Adds all values in the Map argument to its own values. * @param m */ public void set(Map<Keys, String> m){ values.putAll(m); } /** * Reads values back from the history, input is same Map as passed to Listener by parseHistory(). 
* @param values */ public synchronized void handle(Map<Keys, String> values){ set(values); } /** * Returns Map containing all key-values. */ public Map<Keys, String> getValues(){ return values; } } /** * Helper class for logging or reading back events related to job start, finish or failure. */ public static class JobInfo extends KeyValuePair{ private Map<String, Task> allTasks = new TreeMap<String, Task>(); /** Create new JobInfo */ public JobInfo(String jobId){ set(Keys.JOBID, jobId); } /** * Returns all map and reduce tasks <taskid-Task>. */ public Map<String, Task> getAllTasks() { return allTasks; } /** * Get the path of the locally stored job file * @param jobId id of the job * @return the path of the job file on the local file system */ public static String getLocalJobFilePath(JobID jobId){ return System.getProperty("hadoop.log.dir") + File.separator + jobId + "_conf.xml"; } /** * Helper function to encode the URL of the path of the job-history * log file. * * @param logFile path of the job-history file * @return URL encoded path * @throws IOException */ public static String encodeJobHistoryFilePath(String logFile) throws IOException { Path rawPath = new Path(logFile); String encodedFileName = null; try { encodedFileName = URLEncoder.encode(rawPath.getName(), "UTF-8"); } catch (UnsupportedEncodingException uee) { IOException ioe = new IOException(); ioe.initCause(uee); ioe.setStackTrace(uee.getStackTrace()); throw ioe; } Path encodedPath = new Path(rawPath.getParent(), encodedFileName); return encodedPath.toString(); } /** * Helper function to encode the URL of the filename of the job-history * log file. 
* * @param logFileName file name of the job-history file * @return URL encoded filename * @throws IOException */ public static String encodeJobHistoryFileName(String logFileName) throws IOException { String encodedFileName = null; try { encodedFileName = URLEncoder.encode(logFileName, "UTF-8"); } catch (UnsupportedEncodingException uee) { IOException ioe = new IOException(); ioe.initCause(uee); ioe.setStackTrace(uee.getStackTrace()); throw ioe; } return encodedFileName; } /** * Helper function to decode the URL of the filename of the job-history * log file. * * @param logFileName file name of the job-history file * @return URL decoded filename * @throws IOException */ public static String decodeJobHistoryFileName(String logFileName) throws IOException { String decodedFileName = null; try { decodedFileName = URLDecoder.decode(logFileName, "UTF-8"); } catch (UnsupportedEncodingException uee) { IOException ioe = new IOException(); ioe.initCause(uee); ioe.setStackTrace(uee.getStackTrace()); throw ioe; } return decodedFileName; } /** * Get the job name from the job conf */ static String getJobName(JobConf jobConf) { String jobName = jobConf.getJobName(); if (jobName == null || jobName.length() == 0) { jobName = "NA"; } return jobName; } /** * Get the user name from the job conf */ public static String getUserName(JobConf jobConf) { String user = jobConf.getUser(); if (user == null || user.length() == 0) { user = "NA"; } return user; } /** * Get the job history file path given the history filename */ public static Path getJobHistoryLogLocation(String logFileName) { return LOG_DIR == null ? null : new Path(LOG_DIR, logFileName); } /** * Get the user job history file path */ public static Path getJobHistoryLogLocationForUser(String logFileName, JobConf jobConf) { // find user log directory Path userLogFile = null; Path outputPath = FileOutputFormat.getOutputPath(jobConf); String userLogDir = jobConf.get("hadoop.job.history.user.location", outputPath == null ? 
null : outputPath.toString()); if ("none".equals(userLogDir)) { userLogDir = null; } if (userLogDir != null) { userLogDir = userLogDir + Path.SEPARATOR + "_logs" + Path.SEPARATOR + "history"; userLogFile = new Path(userLogDir, logFileName); } return userLogFile; } /** * Generates the job history filename for a new job */ private static String getNewJobHistoryFileName(JobConf jobConf, JobID id) { return JOBTRACKER_UNIQUE_STRING + id.toString() + "_" + getUserName(jobConf) + "_" + trimJobName(getJobName(jobConf)); } /** * Trims the job-name if required */ private static String trimJobName(String jobName) { if (jobName.length() > JOB_NAME_TRIM_LENGTH) { jobName = jobName.substring(0, JOB_NAME_TRIM_LENGTH); } return jobName; } private static String escapeRegexChars( String string ) { return "\\Q"+string.replaceAll("\\\\E", "\\\\E\\\\\\\\E\\\\Q")+"\\E"; } /** * Recover the job history filename from the history folder. * Uses the following pattern * $jt-hostname_[0-9]*_$job-id_$user-$job-name* * @param jobConf the job conf * @param id job id */ public static synchronized String getJobHistoryFileName(JobConf jobConf, JobID id) throws IOException { return getJobHistoryFileName(jobConf, id, new Path(LOG_DIR), LOGDIR_FS); } static synchronized String getDoneJobHistoryFileName(JobConf jobConf, JobID id) throws IOException { if (DONE == null) { return null; } return getJobHistoryFileName(jobConf, id, DONE, DONEDIR_FS); } /** * @param dir The directory where to search. 
*/ private static synchronized String getJobHistoryFileName(JobConf jobConf, JobID id, Path dir, FileSystem fs) throws IOException { String user = getUserName(jobConf); String jobName = trimJobName(getJobName(jobConf)); if (LOG_DIR == null) { return null; } // Make the pattern matching the job's history file final Pattern historyFilePattern = Pattern.compile(jobtrackerHostname + "_" + DIGITS + "_" + id.toString() + "_" + user + "_" + escapeRegexChars(jobName) + "+"); // a path filter that matches 4 parts of the filenames namely // - jt-hostname // - job-id // - username // - jobname PathFilter filter = new PathFilter() { public boolean accept(Path path) { String fileName = path.getName(); try { fileName = decodeJobHistoryFileName(fileName); } catch (IOException ioe) { LOG.info("Error while decoding history file " + fileName + "." + " Ignoring file.", ioe); return false; } return historyFilePattern.matcher(fileName).find(); } }; FileStatus[] statuses = fs.listStatus(dir, filter); String filename = null; if (statuses.length == 0) { LOG.info("Nothing to recover for job " + id); } else { // return filename considering that fact the name can be a // secondary filename like filename.recover filename = getPrimaryFilename(statuses[0].getPath().getName(), jobName); LOG.info("Recovered job history filename for job " + id + " is " + filename); } return filename; } // removes all extra extensions from a filename and returns the core/primary // filename private static String getPrimaryFilename(String filename, String jobName) throws IOException{ filename = decodeJobHistoryFileName(filename); // Remove the '.recover' suffix if it exists if (filename.endsWith(jobName + SECONDARY_FILE_SUFFIX)) { int newLength = filename.length() - SECONDARY_FILE_SUFFIX.length(); filename = filename.substring(0, newLength); } return encodeJobHistoryFileName(filename); } /** Since there was a restart, there should be a master file and * a recovery file. 
Once the recovery is complete, the master should be * deleted as an indication that the recovery file should be treated as the * master upon completion or next restart. * @param fileName the history filename that needs checkpointing * @param conf Job conf * @throws IOException */ static synchronized void checkpointRecovery(String fileName, JobConf conf) throws IOException { Path logPath = JobHistory.JobInfo.getJobHistoryLogLocation(fileName); if (logPath != null) { LOG.info("Deleting job history file " + logPath.getName()); LOGDIR_FS.delete(logPath, false); } // do the same for the user file too logPath = JobHistory.JobInfo.getJobHistoryLogLocationForUser(fileName, conf); if (logPath != null) { FileSystem fs = logPath.getFileSystem(conf); fs.delete(logPath, false); } } static String getSecondaryJobHistoryFile(String filename) throws IOException { return encodeJobHistoryFileName( decodeJobHistoryFileName(filename) + SECONDARY_FILE_SUFFIX); } /** Selects one of the two files generated as a part of recovery. * The thumb rule is that always select the oldest file. * This call makes sure that only one file is left in the end. * @param conf job conf * @param logFilePath Path of the log file * @throws IOException */ public synchronized static Path recoverJobHistoryFile(JobConf conf, Path logFilePath) throws IOException { Path ret; String logFileName = logFilePath.getName(); String tmpFilename = getSecondaryJobHistoryFile(logFileName); Path logDir = logFilePath.getParent(); Path tmpFilePath = new Path(logDir, tmpFilename); if (LOGDIR_FS.exists(logFilePath)) { LOG.info(logFileName + " exists!"); if (LOGDIR_FS.exists(tmpFilePath)) { LOG.info("Deleting " + tmpFilename + " and using " + logFileName + " for recovery."); LOGDIR_FS.delete(tmpFilePath, false); } ret = tmpFilePath; } else { LOG.info(logFileName + " doesnt exist! 
Using " + tmpFilename + " for recovery."); if (LOGDIR_FS.exists(tmpFilePath)) { LOG.info("Renaming " + tmpFilename + " to " + logFileName); LOGDIR_FS.rename(tmpFilePath, logFilePath); ret = tmpFilePath; } else { ret = logFilePath; } } // do the same for the user files too logFilePath = getJobHistoryLogLocationForUser(logFileName, conf); if (logFilePath != null) { FileSystem fs = logFilePath.getFileSystem(conf); logDir = logFilePath.getParent(); tmpFilePath = new Path(logDir, tmpFilename); if (fs.exists(logFilePath)) { LOG.info(logFileName + " exists!"); if (fs.exists(tmpFilePath)) { LOG.info("Deleting " + tmpFilename + " and making " + logFileName + " as the master history file for user."); fs.delete(tmpFilePath, false); } } else { LOG.info(logFileName + " doesnt exist! Using " + tmpFilename + " as the master history file for user."); if (fs.exists(tmpFilePath)) { LOG.info("Renaming " + tmpFilename + " to " + logFileName + " in user directory"); fs.rename(tmpFilePath, logFilePath); } } } return ret; } /** Finalize the recovery and make one file in the end.
jaxlaw/hadoop-common
9a2e88bd49e2c11a7abc74f284b81b9cbfc6036e
MAPREDUCE:1231 from https://issues.apache.org/jira/secure/attachment/12426265/mapred-1231-y20-v4.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 995b19e..fc61fcc 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,417 +1,421 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. (Amareshwari Sriramadasu via acmurthy) + + MAPREDUCE-1231. 
Add an option to distcp to avoid checking checksums + with the upgrade option. + (Jothi Padmanabhan via yhemanth) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. 
(mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. 
Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. 
yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. 
http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. 
http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. 
http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. 
Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/test/org/apache/hadoop/fs/TestCopyFiles.java b/src/test/org/apache/hadoop/fs/TestCopyFiles.java index a64d24d..f103421 100644 --- a/src/test/org/apache/hadoop/fs/TestCopyFiles.java +++ b/src/test/org/apache/hadoop/fs/TestCopyFiles.java @@ -1,853 +1,928 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.fs; import java.io.ByteArrayOutputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.PrintStream; import java.net.URI; import java.util.ArrayList; import java.util.List; import java.util.Random; import java.util.StringTokenizer; import junit.framework.TestCase; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.mapred.MiniMRCluster; import org.apache.hadoop.security.UnixUserGroupInformation; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.tools.DistCp; import org.apache.hadoop.util.ToolRunner; import org.apache.log4j.Level; /** * A JUnit test for copying files recursively. */ public class TestCopyFiles extends TestCase { { ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange") ).getLogger().setLevel(Level.OFF); ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF); ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.OFF); ((Log4JLogger)DistCp.LOG).getLogger().setLevel(Level.ALL); } static final URI LOCAL_FS = URI.create("file:///"); private static final Random RAN = new Random(); private static final int NFILES = 20; private static String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data","/tmp")) .toString().replace(' ', '+'); /** class MyFile contains enough information to recreate the contents of * a single file. 
*/ private static class MyFile { private static Random gen = new Random(); private static final int MAX_LEVELS = 3; private static final int MAX_SIZE = 8*1024; private static String[] dirNames = { "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine" }; private final String name; private int size = 0; private long seed = 0L; MyFile() { this(gen.nextInt(MAX_LEVELS)); } MyFile(int nLevels) { String xname = ""; if (nLevels != 0) { int[] levels = new int[nLevels]; for (int idx = 0; idx < nLevels; idx++) { levels[idx] = gen.nextInt(10); } StringBuffer sb = new StringBuffer(); for (int idx = 0; idx < nLevels; idx++) { sb.append(dirNames[levels[idx]]); sb.append("/"); } xname = sb.toString(); } long fidx = gen.nextLong() & Long.MAX_VALUE; name = xname + Long.toString(fidx); reset(); } void reset() { final int oldsize = size; do { size = gen.nextInt(MAX_SIZE); } while (oldsize == size); final long oldseed = seed; do { seed = gen.nextLong() & Long.MAX_VALUE; } while (oldseed == seed); } String getName() { return name; } int getSize() { return size; } long getSeed() { return seed; } } private static MyFile[] createFiles(URI fsname, String topdir) throws IOException { return createFiles(FileSystem.get(fsname, new Configuration()), topdir); } /** create NFILES with random names and directory hierarchies * with random (but reproducible) data in them. */ private static MyFile[] createFiles(FileSystem fs, String topdir) throws IOException { Path root = new Path(topdir); MyFile[] files = new MyFile[NFILES]; for (int i = 0; i < NFILES; i++) { files[i] = createFile(root, fs); } return files; } static MyFile createFile(Path root, FileSystem fs, int levels) throws IOException { MyFile f = levels < 0 ? 
new MyFile() : new MyFile(levels); Path p = new Path(root, f.getName()); FSDataOutputStream out = fs.create(p); byte[] toWrite = new byte[f.getSize()]; new Random(f.getSeed()).nextBytes(toWrite); out.write(toWrite); out.close(); FileSystem.LOG.info("created: " + p + ", size=" + f.getSize()); return f; } static MyFile createFile(Path root, FileSystem fs) throws IOException { return createFile(root, fs, -1); } private static boolean checkFiles(FileSystem fs, String topdir, MyFile[] files ) throws IOException { return checkFiles(fs, topdir, files, false); } private static boolean checkFiles(FileSystem fs, String topdir, MyFile[] files, boolean existingOnly) throws IOException { Path root = new Path(topdir); for (int idx = 0; idx < files.length; idx++) { Path fPath = new Path(root, files[idx].getName()); try { fs.getFileStatus(fPath); FSDataInputStream in = fs.open(fPath); byte[] toRead = new byte[files[idx].getSize()]; byte[] toCompare = new byte[files[idx].getSize()]; Random rb = new Random(files[idx].getSeed()); rb.nextBytes(toCompare); assertEquals("Cannnot read file.", toRead.length, in.read(toRead)); in.close(); for (int i = 0; i < toRead.length; i++) { if (toRead[i] != toCompare[i]) { return false; } } toRead = null; toCompare = null; } catch(FileNotFoundException fnfe) { if (!existingOnly) { throw fnfe; } } } return true; } private static void updateFiles(FileSystem fs, String topdir, MyFile[] files, int nupdate) throws IOException { assert nupdate <= NFILES; Path root = new Path(topdir); for (int idx = 0; idx < nupdate; ++idx) { Path fPath = new Path(root, files[idx].getName()); // overwrite file assertTrue(fPath.toString() + " does not exist", fs.exists(fPath)); FSDataOutputStream out = fs.create(fPath); files[idx].reset(); byte[] toWrite = new byte[files[idx].getSize()]; Random rb = new Random(files[idx].getSeed()); rb.nextBytes(toWrite); out.write(toWrite); out.close(); } } private static FileStatus[] getFileStatus(FileSystem fs, String topdir, MyFile[] 
files) throws IOException { return getFileStatus(fs, topdir, files, false); } private static FileStatus[] getFileStatus(FileSystem fs, String topdir, MyFile[] files, boolean existingOnly) throws IOException { Path root = new Path(topdir); List<FileStatus> statuses = new ArrayList<FileStatus>(); for (int idx = 0; idx < NFILES; ++idx) { try { statuses.add(fs.getFileStatus(new Path(root, files[idx].getName()))); } catch(FileNotFoundException fnfe) { if (!existingOnly) { throw fnfe; } } } return statuses.toArray(new FileStatus[statuses.size()]); } private static boolean checkUpdate(FileSystem fs, FileStatus[] old, String topdir, MyFile[] upd, final int nupdate) throws IOException { Path root = new Path(topdir); // overwrote updated files for (int idx = 0; idx < nupdate; ++idx) { final FileStatus stat = fs.getFileStatus(new Path(root, upd[idx].getName())); if (stat.getModificationTime() <= old[idx].getModificationTime()) { return false; } } // did not overwrite files not updated for (int idx = nupdate; idx < NFILES; ++idx) { final FileStatus stat = fs.getFileStatus(new Path(root, upd[idx].getName())); if (stat.getModificationTime() != old[idx].getModificationTime()) { return false; } } return true; } /** delete directory and everything underneath it.*/ private static void deldir(FileSystem fs, String topdir) throws IOException { fs.delete(new Path(topdir), true); } /** copy files from local file system to local file system */ public void testCopyFromLocalToLocal() throws Exception { Configuration conf = new Configuration(); FileSystem localfs = FileSystem.get(LOCAL_FS, conf); MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat"); ToolRunner.run(new DistCp(new Configuration()), new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat", "file:///"+TEST_ROOT_DIR+"/destdat"}); assertTrue("Source and destination directories do not match.", checkFiles(localfs, TEST_ROOT_DIR+"/destdat", files)); deldir(localfs, TEST_ROOT_DIR+"/destdat"); deldir(localfs, 
TEST_ROOT_DIR+"/srcdat"); } /** copy files from dfs file system to dfs file system */ public void testCopyFromDfsToDfs() throws Exception { String namenode = null; MiniDFSCluster cluster = null; try { Configuration conf = new Configuration(); cluster = new MiniDFSCluster(conf, 2, true, null); final FileSystem hdfs = cluster.getFileSystem(); namenode = FileSystem.getDefaultUri(conf).toString(); if (namenode.startsWith("hdfs://")) { MyFile[] files = createFiles(URI.create(namenode), "/srcdat"); ToolRunner.run(new DistCp(conf), new String[] { "-log", namenode+"/logs", namenode+"/srcdat", namenode+"/destdat"}); assertTrue("Source and destination directories do not match.", checkFiles(hdfs, "/destdat", files)); FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf); assertTrue("Log directory does not exist.", fs.exists(new Path(namenode+"/logs"))); deldir(hdfs, "/destdat"); deldir(hdfs, "/srcdat"); deldir(hdfs, "/logs"); } } finally { if (cluster != null) { cluster.shutdown(); } } } /** copy files from local file system to dfs file system */ public void testCopyFromLocalToDfs() throws Exception { MiniDFSCluster cluster = null; try { Configuration conf = new Configuration(); cluster = new MiniDFSCluster(conf, 1, true, null); final FileSystem hdfs = cluster.getFileSystem(); final String namenode = hdfs.getUri().toString(); if (namenode.startsWith("hdfs://")) { MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat"); ToolRunner.run(new DistCp(conf), new String[] { "-log", namenode+"/logs", "file:///"+TEST_ROOT_DIR+"/srcdat", namenode+"/destdat"}); assertTrue("Source and destination directories do not match.", checkFiles(cluster.getFileSystem(), "/destdat", files)); assertTrue("Log directory does not exist.", hdfs.exists(new Path(namenode+"/logs"))); deldir(hdfs, "/destdat"); deldir(hdfs, "/logs"); deldir(FileSystem.get(LOCAL_FS, conf), TEST_ROOT_DIR+"/srcdat"); } } finally { if (cluster != null) { cluster.shutdown(); } } } /** copy files from dfs file 
system to local file system */ public void testCopyFromDfsToLocal() throws Exception { MiniDFSCluster cluster = null; try { Configuration conf = new Configuration(); final FileSystem localfs = FileSystem.get(LOCAL_FS, conf); cluster = new MiniDFSCluster(conf, 1, true, null); final FileSystem hdfs = cluster.getFileSystem(); final String namenode = FileSystem.getDefaultUri(conf).toString(); if (namenode.startsWith("hdfs://")) { MyFile[] files = createFiles(URI.create(namenode), "/srcdat"); ToolRunner.run(new DistCp(conf), new String[] { "-log", "/logs", namenode+"/srcdat", "file:///"+TEST_ROOT_DIR+"/destdat"}); assertTrue("Source and destination directories do not match.", checkFiles(localfs, TEST_ROOT_DIR+"/destdat", files)); assertTrue("Log directory does not exist.", hdfs.exists(new Path("/logs"))); deldir(localfs, TEST_ROOT_DIR+"/destdat"); deldir(hdfs, "/logs"); deldir(hdfs, "/srcdat"); } } finally { if (cluster != null) { cluster.shutdown(); } } } public void testCopyDfsToDfsUpdateOverwrite() throws Exception { MiniDFSCluster cluster = null; try { Configuration conf = new Configuration(); cluster = new MiniDFSCluster(conf, 2, true, null); final FileSystem hdfs = cluster.getFileSystem(); final String namenode = hdfs.getUri().toString(); if (namenode.startsWith("hdfs://")) { MyFile[] files = createFiles(URI.create(namenode), "/srcdat"); ToolRunner.run(new DistCp(conf), new String[] { "-p", "-log", namenode+"/logs", namenode+"/srcdat", namenode+"/destdat"}); assertTrue("Source and destination directories do not match.", checkFiles(hdfs, "/destdat", files)); FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf); assertTrue("Log directory does not exist.", fs.exists(new Path(namenode+"/logs"))); FileStatus[] dchkpoint = getFileStatus(hdfs, "/destdat", files); final int nupdate = NFILES>>2; updateFiles(cluster.getFileSystem(), "/srcdat", files, nupdate); deldir(hdfs, "/logs"); ToolRunner.run(new DistCp(conf), new String[] { "-p", "-update", "-log", 
namenode+"/logs", namenode+"/srcdat", namenode+"/destdat"}); assertTrue("Source and destination directories do not match.", checkFiles(hdfs, "/destdat", files)); assertTrue("Update failed to replicate all changes in src", checkUpdate(hdfs, dchkpoint, "/destdat", files, nupdate)); deldir(hdfs, "/logs"); ToolRunner.run(new DistCp(conf), new String[] { "-p", "-overwrite", "-log", namenode+"/logs", namenode+"/srcdat", namenode+"/destdat"}); assertTrue("Source and destination directories do not match.", checkFiles(hdfs, "/destdat", files)); assertTrue("-overwrite didn't.", checkUpdate(hdfs, dchkpoint, "/destdat", files, NFILES)); deldir(hdfs, "/destdat"); deldir(hdfs, "/srcdat"); deldir(hdfs, "/logs"); } } finally { if (cluster != null) { cluster.shutdown(); } } } + public void testCopyDfsToDfsUpdateWithSkipCRC() throws Exception { + MiniDFSCluster cluster = null; + try { + Configuration conf = new Configuration(); + cluster = new MiniDFSCluster(conf, 2, true, null); + final FileSystem hdfs = cluster.getFileSystem(); + final String namenode = hdfs.getUri().toString(); + + FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration()); + // Create two files of the same name, same length but different + // contents + final String testfilename = "test"; + final String srcData = "act act act"; + final String destData = "cat cat cat"; + + if (namenode.startsWith("hdfs://")) { + deldir(hdfs,"/logs"); + + Path srcPath = new Path("/srcdat", testfilename); + Path destPath = new Path("/destdat", testfilename); + FSDataOutputStream out = fs.create(srcPath, true); + out.writeUTF(srcData); + out.close(); + + out = fs.create(destPath, true); + out.writeUTF(destData); + out.close(); + + // Run with -skipcrccheck option + ToolRunner.run(new DistCp(conf), new String[] { + "-p", + "-update", + "-skipcrccheck", + "-log", + namenode+"/logs", + namenode+"/srcdat", + namenode+"/destdat"}); + + // File should not be overwritten + FSDataInputStream in = hdfs.open(destPath); + String 
s = in.readUTF(); + System.out.println("Dest had: " + s); + assertTrue("Dest got over written even with skip crc", + s.equalsIgnoreCase(destData)); + in.close(); + + deldir(hdfs, "/logs"); + + // Run without the option + ToolRunner.run(new DistCp(conf), new String[] { + "-p", + "-update", + "-log", + namenode+"/logs", + namenode+"/srcdat", + namenode+"/destdat"}); + + // File should be overwritten + in = hdfs.open(destPath); + s = in.readUTF(); + System.out.println("Dest had: " + s); + + assertTrue("Dest did not get overwritten without skip crc", + s.equalsIgnoreCase(srcData)); + in.close(); + + deldir(hdfs, "/destdat"); + deldir(hdfs, "/srcdat"); + deldir(hdfs, "/logs"); + } + } finally { + if (cluster != null) { cluster.shutdown(); } + } + } + public void testCopyDuplication() throws Exception { final FileSystem localfs = FileSystem.get(LOCAL_FS, new Configuration()); try { MyFile[] files = createFiles(localfs, TEST_ROOT_DIR+"/srcdat"); ToolRunner.run(new DistCp(new Configuration()), new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat", "file:///"+TEST_ROOT_DIR+"/src2/srcdat"}); assertTrue("Source and destination directories do not match.", checkFiles(localfs, TEST_ROOT_DIR+"/src2/srcdat", files)); assertEquals(DistCp.DuplicationException.ERROR_CODE, ToolRunner.run(new DistCp(new Configuration()), new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat", "file:///"+TEST_ROOT_DIR+"/src2/srcdat", "file:///"+TEST_ROOT_DIR+"/destdat",})); } finally { deldir(localfs, TEST_ROOT_DIR+"/destdat"); deldir(localfs, TEST_ROOT_DIR+"/srcdat"); deldir(localfs, TEST_ROOT_DIR+"/src2"); } } public void testCopySingleFile() throws Exception { FileSystem fs = FileSystem.get(LOCAL_FS, new Configuration()); Path root = new Path(TEST_ROOT_DIR+"/srcdat"); try { MyFile[] files = {createFile(root, fs)}; //copy a dir with a single file ToolRunner.run(new DistCp(new Configuration()), new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat", "file:///"+TEST_ROOT_DIR+"/destdat"}); assertTrue("Source and 
destination directories do not match.", checkFiles(fs, TEST_ROOT_DIR+"/destdat", files)); //copy a single file String fname = files[0].getName(); Path p = new Path(root, fname); FileSystem.LOG.info("fname=" + fname + ", exists? " + fs.exists(p)); ToolRunner.run(new DistCp(new Configuration()), new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat/"+fname, "file:///"+TEST_ROOT_DIR+"/dest2/"+fname}); assertTrue("Source and destination directories do not match.", checkFiles(fs, TEST_ROOT_DIR+"/dest2", files)); //copy single file to existing dir deldir(fs, TEST_ROOT_DIR+"/dest2"); fs.mkdirs(new Path(TEST_ROOT_DIR+"/dest2")); MyFile[] files2 = {createFile(root, fs, 0)}; String sname = files2[0].getName(); ToolRunner.run(new DistCp(new Configuration()), new String[] {"-update", "file:///"+TEST_ROOT_DIR+"/srcdat/"+sname, "file:///"+TEST_ROOT_DIR+"/dest2/"}); assertTrue("Source and destination directories do not match.", checkFiles(fs, TEST_ROOT_DIR+"/dest2", files2)); updateFiles(fs, TEST_ROOT_DIR+"/srcdat", files2, 1); //copy single file to existing dir w/ dst name conflict ToolRunner.run(new DistCp(new Configuration()), new String[] {"-update", "file:///"+TEST_ROOT_DIR+"/srcdat/"+sname, "file:///"+TEST_ROOT_DIR+"/dest2/"}); assertTrue("Source and destination directories do not match.", checkFiles(fs, TEST_ROOT_DIR+"/dest2", files2)); } finally { deldir(fs, TEST_ROOT_DIR+"/destdat"); deldir(fs, TEST_ROOT_DIR+"/dest2"); deldir(fs, TEST_ROOT_DIR+"/srcdat"); } } public void testPreserveOption() throws Exception { Configuration conf = new Configuration(); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster(conf, 2, true, null); String nnUri = FileSystem.getDefaultUri(conf).toString(); FileSystem fs = FileSystem.get(URI.create(nnUri), conf); {//test preserving user MyFile[] files = createFiles(URI.create(nnUri), "/srcdat"); FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files); for(int i = 0; i < srcstat.length; i++) { fs.setOwner(srcstat[i].getPath(), "u" + i, 
null); } ToolRunner.run(new DistCp(conf), new String[]{"-pu", nnUri+"/srcdat", nnUri+"/destdat"}); assertTrue("Source and destination directories do not match.", checkFiles(fs, "/destdat", files)); FileStatus[] dststat = getFileStatus(fs, "/destdat", files); for(int i = 0; i < dststat.length; i++) { assertEquals("i=" + i, "u" + i, dststat[i].getOwner()); } deldir(fs, "/destdat"); deldir(fs, "/srcdat"); } {//test preserving group MyFile[] files = createFiles(URI.create(nnUri), "/srcdat"); FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files); for(int i = 0; i < srcstat.length; i++) { fs.setOwner(srcstat[i].getPath(), null, "g" + i); } ToolRunner.run(new DistCp(conf), new String[]{"-pg", nnUri+"/srcdat", nnUri+"/destdat"}); assertTrue("Source and destination directories do not match.", checkFiles(fs, "/destdat", files)); FileStatus[] dststat = getFileStatus(fs, "/destdat", files); for(int i = 0; i < dststat.length; i++) { assertEquals("i=" + i, "g" + i, dststat[i].getGroup()); } deldir(fs, "/destdat"); deldir(fs, "/srcdat"); } {//test preserving mode MyFile[] files = createFiles(URI.create(nnUri), "/srcdat"); FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files); FsPermission[] permissions = new FsPermission[srcstat.length]; for(int i = 0; i < srcstat.length; i++) { permissions[i] = new FsPermission((short)(i & 0666)); fs.setPermission(srcstat[i].getPath(), permissions[i]); } ToolRunner.run(new DistCp(conf), new String[]{"-pp", nnUri+"/srcdat", nnUri+"/destdat"}); assertTrue("Source and destination directories do not match.", checkFiles(fs, "/destdat", files)); FileStatus[] dststat = getFileStatus(fs, "/destdat", files); for(int i = 0; i < dststat.length; i++) { assertEquals("i=" + i, permissions[i], dststat[i].getPermission()); } deldir(fs, "/destdat"); deldir(fs, "/srcdat"); } } finally { if (cluster != null) { cluster.shutdown(); } } } public void testMapCount() throws Exception { String namenode = null; MiniDFSCluster dfs = null; MiniMRCluster mr = 
null; try { Configuration conf = new Configuration(); dfs = new MiniDFSCluster(conf, 3, true, null); FileSystem fs = dfs.getFileSystem(); final FsShell shell = new FsShell(conf); namenode = fs.getUri().toString(); mr = new MiniMRCluster(3, namenode, 1); MyFile[] files = createFiles(fs.getUri(), "/srcdat"); long totsize = 0; for (MyFile f : files) { totsize += f.getSize(); } Configuration job = mr.createJobConf(); job.setLong("distcp.bytes.per.map", totsize / 3); ToolRunner.run(new DistCp(job), new String[] {"-m", "100", "-log", namenode+"/logs", namenode+"/srcdat", namenode+"/destdat"}); assertTrue("Source and destination directories do not match.", checkFiles(fs, "/destdat", files)); String logdir = namenode + "/logs"; System.out.println(execCmd(shell, "-lsr", logdir)); FileStatus[] logs = fs.listStatus(new Path(logdir)); // rare case where splits are exact, logs.length can be 4 assertTrue("Unexpected map count, logs.length=" + logs.length, logs.length == 5 || logs.length == 4); deldir(fs, "/destdat"); deldir(fs, "/logs"); ToolRunner.run(new DistCp(job), new String[] {"-m", "1", "-log", namenode+"/logs", namenode+"/srcdat", namenode+"/destdat"}); System.out.println(execCmd(shell, "-lsr", logdir)); logs = fs.listStatus(new Path(namenode+"/logs")); assertTrue("Unexpected map count, logs.length=" + logs.length, logs.length == 2); } finally { if (dfs != null) { dfs.shutdown(); } if (mr != null) { mr.shutdown(); } } } public void testLimits() throws Exception { Configuration conf = new Configuration(); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster(conf, 2, true, null); final String nnUri = FileSystem.getDefaultUri(conf).toString(); final FileSystem fs = FileSystem.get(URI.create(nnUri), conf); final DistCp distcp = new DistCp(conf); final FsShell shell = new FsShell(conf); final String srcrootdir = "/src_root"; final Path srcrootpath = new Path(srcrootdir); final String dstrootdir = "/dst_root"; final Path dstrootpath = new Path(dstrootdir); {//test 
-filelimit MyFile[] files = createFiles(URI.create(nnUri), srcrootdir); int filelimit = files.length / 2; System.out.println("filelimit=" + filelimit); ToolRunner.run(distcp, new String[]{"-filelimit", ""+filelimit, nnUri+srcrootdir, nnUri+dstrootdir}); String results = execCmd(shell, "-lsr", dstrootdir); results = removePrefix(results, dstrootdir); System.out.println("results=" + results); FileStatus[] dststat = getFileStatus(fs, dstrootdir, files, true); assertEquals(filelimit, dststat.length); deldir(fs, dstrootdir); deldir(fs, srcrootdir); } {//test -sizelimit createFiles(URI.create(nnUri), srcrootdir); long sizelimit = fs.getContentSummary(srcrootpath).getLength()/2; System.out.println("sizelimit=" + sizelimit); ToolRunner.run(distcp, new String[]{"-sizelimit", ""+sizelimit, nnUri+srcrootdir, nnUri+dstrootdir}); ContentSummary summary = fs.getContentSummary(dstrootpath); System.out.println("summary=" + summary); assertTrue(summary.getLength() <= sizelimit); deldir(fs, dstrootdir); deldir(fs, srcrootdir); } {//test update final MyFile[] srcs = createFiles(URI.create(nnUri), srcrootdir); final long totalsize = fs.getContentSummary(srcrootpath).getLength(); System.out.println("src.length=" + srcs.length); System.out.println("totalsize =" + totalsize); fs.mkdirs(dstrootpath); final int parts = RAN.nextInt(NFILES/3 - 1) + 2; final int filelimit = srcs.length/parts; final long sizelimit = totalsize/parts; System.out.println("filelimit=" + filelimit); System.out.println("sizelimit=" + sizelimit); System.out.println("parts =" + parts); final String[] args = {"-filelimit", ""+filelimit, "-sizelimit", ""+sizelimit, "-update", nnUri+srcrootdir, nnUri+dstrootdir}; int dstfilecount = 0; long dstsize = 0; for(int i = 0; i <= parts; i++) { ToolRunner.run(distcp, args); FileStatus[] dststat = getFileStatus(fs, dstrootdir, srcs, true); System.out.println(i + ") dststat.length=" + dststat.length); assertTrue(dststat.length - dstfilecount <= filelimit); ContentSummary summary = 
fs.getContentSummary(dstrootpath); System.out.println(i + ") summary.getLength()=" + summary.getLength()); assertTrue(summary.getLength() - dstsize <= sizelimit); assertTrue(checkFiles(fs, dstrootdir, srcs, true)); dstfilecount = dststat.length; dstsize = summary.getLength(); } deldir(fs, dstrootdir); deldir(fs, srcrootdir); } } finally { if (cluster != null) { cluster.shutdown(); } } } static final long now = System.currentTimeMillis(); static UnixUserGroupInformation createUGI(String name, boolean issuper) { String username = name + now; String group = issuper? "supergroup": username; return UnixUserGroupInformation.createImmutable( new String[]{username, group}); } static Path createHomeDirectory(FileSystem fs, UserGroupInformation ugi ) throws IOException { final Path home = new Path("/user/" + ugi.getUserName()); fs.mkdirs(home); fs.setOwner(home, ugi.getUserName(), ugi.getGroupNames()[0]); fs.setPermission(home, new FsPermission((short)0700)); return home; } public void testHftpAccessControl() throws Exception { MiniDFSCluster cluster = null; try { final UnixUserGroupInformation DFS_UGI = createUGI("dfs", true); final UnixUserGroupInformation USER_UGI = createUGI("user", false); //start cluster by DFS_UGI final Configuration dfsConf = new Configuration(); UnixUserGroupInformation.saveToConf(dfsConf, UnixUserGroupInformation.UGI_PROPERTY_NAME, DFS_UGI); cluster = new MiniDFSCluster(dfsConf, 2, true, null); cluster.waitActive(); final String httpAdd = dfsConf.get("dfs.http.address"); final URI nnURI = FileSystem.getDefaultUri(dfsConf); final String nnUri = nnURI.toString(); final Path home = createHomeDirectory(FileSystem.get(nnURI, dfsConf), USER_UGI); //now, login as USER_UGI final Configuration userConf = new Configuration(); UnixUserGroupInformation.saveToConf(userConf, UnixUserGroupInformation.UGI_PROPERTY_NAME, USER_UGI); final FileSystem fs = FileSystem.get(nnURI, userConf); final Path srcrootpath = new Path(home, "src_root"); final String srcrootdir = 
srcrootpath.toString(); final Path dstrootpath = new Path(home, "dst_root"); final String dstrootdir = dstrootpath.toString(); final DistCp distcp = new DistCp(userConf); FileSystem.mkdirs(fs, srcrootpath, new FsPermission((short)0700)); final String[] args = {"hftp://"+httpAdd+srcrootdir, nnUri+dstrootdir}; { //copy with permission 000, should fail fs.setPermission(srcrootpath, new FsPermission((short)0)); assertEquals(-3, ToolRunner.run(distcp, args)); } } finally { if (cluster != null) { cluster.shutdown(); } } } /** test -delete */ public void testDelete() throws Exception { final Configuration conf = new Configuration(); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster(conf, 2, true, null); final URI nnURI = FileSystem.getDefaultUri(conf); final String nnUri = nnURI.toString(); final FileSystem fs = FileSystem.get(URI.create(nnUri), conf); final DistCp distcp = new DistCp(conf); final FsShell shell = new FsShell(conf); final String srcrootdir = "/src_root"; final String dstrootdir = "/dst_root"; { //create source files createFiles(nnURI, srcrootdir); String srcresults = execCmd(shell, "-lsr", srcrootdir); srcresults = removePrefix(srcresults, srcrootdir); System.out.println("srcresults=" + srcresults); //create some files in dst createFiles(nnURI, dstrootdir); System.out.println("dstrootdir=" + dstrootdir); shell.run(new String[]{"-lsr", dstrootdir}); //run distcp ToolRunner.run(distcp, new String[]{"-delete", "-update", "-log", "/log", nnUri+srcrootdir, nnUri+dstrootdir}); //make sure src and dst contains the same files String dstresults = execCmd(shell, "-lsr", dstrootdir); dstresults = removePrefix(dstresults, dstrootdir); System.out.println("first dstresults=" + dstresults); assertEquals(srcresults, dstresults); //create additional file in dst create(fs, new Path(dstrootdir, "foo")); create(fs, new Path(dstrootdir, "foobar")); //run distcp again ToolRunner.run(distcp, new String[]{"-delete", "-update", "-log", "/log2", nnUri+srcrootdir, 
nnUri+dstrootdir}); //make sure src and dst contains the same files dstresults = execCmd(shell, "-lsr", dstrootdir); dstresults = removePrefix(dstresults, dstrootdir); System.out.println("second dstresults=" + dstresults); assertEquals(srcresults, dstresults); //cleanup deldir(fs, dstrootdir); deldir(fs, srcrootdir); } } finally { if (cluster != null) { cluster.shutdown(); } } } static void create(FileSystem fs, Path f) throws IOException { FSDataOutputStream out = fs.create(f); try { byte[] b = new byte[1024 + RAN.nextInt(1024)]; RAN.nextBytes(b); out.write(b); } finally { if (out != null) out.close(); } } static String execCmd(FsShell shell, String... args) throws Exception { ByteArrayOutputStream baout = new ByteArrayOutputStream(); PrintStream out = new PrintStream(baout, true); PrintStream old = System.out; System.setOut(out); shell.run(args); out.close(); System.setOut(old); return baout.toString(); } private static String removePrefix(String lines, String prefix) { final int prefixlen = prefix.length(); final StringTokenizer t = new StringTokenizer(lines, "\n"); final StringBuffer results = new StringBuffer(); for(; t.hasMoreTokens(); ) { String s = t.nextToken(); results.append(s.substring(s.indexOf(prefix) + prefixlen) + "\n"); } return results.toString(); } -} \ No newline at end of file +} diff --git a/src/tools/org/apache/hadoop/tools/DistCp.java b/src/tools/org/apache/hadoop/tools/DistCp.java index 0a332b1..c431151 100644 --- a/src/tools/org/apache/hadoop/tools/DistCp.java +++ b/src/tools/org/apache/hadoop/tools/DistCp.java @@ -1,1355 +1,1377 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.tools; import java.io.BufferedReader; import java.io.DataInput; import java.io.DataOutput; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStreamReader; import java.util.ArrayList; import java.util.EnumSet; import java.util.Iterator; import java.util.List; import java.util.Random; import java.util.Stack; import java.util.StringTokenizer; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.mapred.FileOutputFormat; import org.apache.hadoop.mapred.FileSplit; import org.apache.hadoop.mapred.InputFormat; import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.InvalidInputException; import org.apache.hadoop.mapred.JobClient; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.Mapper; import 
org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.RecordReader; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapred.SequenceFileRecordReader; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; /** * A Map-reduce program to recursively copy directories between * different file-systems. */ public class DistCp implements Tool { public static final Log LOG = LogFactory.getLog(DistCp.class); private static final String NAME = "distcp"; private static final String usage = NAME + " [OPTIONS] <srcurl>* <desturl>" + "\n\nOPTIONS:" + "\n-p[rbugp] Preserve status" + "\n r: replication number" + "\n b: block size" + "\n u: user" + "\n g: group" + "\n p: permission" + "\n -p alone is equivalent to -prbugp" + "\n-i Ignore failures" + "\n-log <logdir> Write logs to <logdir>" + "\n-m <num_maps> Maximum number of simultaneous copies" + "\n-overwrite Overwrite destination" + "\n-update Overwrite if src size different from dst size" + + "\n-skipcrccheck Do not use CRC check to determine if src is " + + "\n different from dest. Relevant only if -update" + + "\n is specified" + "\n-f <urilist_uri> Use list at <urilist_uri> as src list" + "\n-filelimit <n> Limit the total number of files to be <= n" + "\n-sizelimit <n> Limit the total size to be <= n bytes" + "\n-delete Delete the files existing in the dst but not in src" + "\n-mapredSslConf <f> Filename of SSL configuration for mapper task" + "\n\nNOTE 1: if -overwrite or -update are set, each source URI is " + "\n interpreted as an isomorphic update to an existing directory." 
+ "\nFor example:" + "\nhadoop " + NAME + " -p -update \"hdfs://A:8020/user/foo/bar\" " + "\"hdfs://B:8020/user/foo/baz\"\n" + "\n would update all descendants of 'baz' also in 'bar'; it would " + "\n *not* update /user/foo/baz/bar" + "\n\nNOTE 2: The parameter <n> in -filelimit and -sizelimit can be " + "\n specified with symbolic representation. For examples," + "\n 1230k = 1230 * 1024 = 1259520" + "\n 891g = 891 * 1024^3 = 956703965184" + "\n"; private static final long BYTES_PER_MAP = 256 * 1024 * 1024; private static final int MAX_MAPS_PER_NODE = 20; private static final int SYNC_FILE_MAX = 10; static enum Counter { COPY, SKIP, FAIL, BYTESCOPIED, BYTESEXPECTED } static enum Options { DELETE("-delete", NAME + ".delete"), FILE_LIMIT("-filelimit", NAME + ".limit.file"), SIZE_LIMIT("-sizelimit", NAME + ".limit.size"), IGNORE_READ_FAILURES("-i", NAME + ".ignore.read.failures"), PRESERVE_STATUS("-p", NAME + ".preserve.status"), OVERWRITE("-overwrite", NAME + ".overwrite.always"), - UPDATE("-update", NAME + ".overwrite.ifnewer"); + UPDATE("-update", NAME + ".overwrite.ifnewer"), + SKIPCRC("-skipcrccheck", NAME + ".skip.crc.check"); final String cmd, propertyname; private Options(String cmd, String propertyname) { this.cmd = cmd; this.propertyname = propertyname; } private long parseLong(String[] args, int offset) { if (offset == args.length) { throw new IllegalArgumentException("<n> not specified in " + cmd); } long n = StringUtils.TraditionalBinaryPrefix.string2long(args[offset]); if (n <= 0) { throw new IllegalArgumentException("n = " + n + " <= 0 in " + cmd); } return n; } } static enum FileAttribute { BLOCK_SIZE, REPLICATION, USER, GROUP, PERMISSION; final char symbol; private FileAttribute() {symbol = toString().toLowerCase().charAt(0);} static EnumSet<FileAttribute> parse(String s) { if (s == null || s.length() == 0) { return EnumSet.allOf(FileAttribute.class); } EnumSet<FileAttribute> set = EnumSet.noneOf(FileAttribute.class); FileAttribute[] attributes = 
values(); for(char c : s.toCharArray()) { int i = 0; for(; i < attributes.length && c != attributes[i].symbol; i++); if (i < attributes.length) { if (!set.contains(attributes[i])) { set.add(attributes[i]); } else { throw new IllegalArgumentException("There are more than one '" + attributes[i].symbol + "' in " + s); } } else { throw new IllegalArgumentException("'" + c + "' in " + s + " is undefined."); } } return set; } } static final String TMP_DIR_LABEL = NAME + ".tmp.dir"; static final String DST_DIR_LABEL = NAME + ".dest.path"; static final String JOB_DIR_LABEL = NAME + ".job.dir"; static final String MAX_MAPS_LABEL = NAME + ".max.map.tasks"; static final String SRC_LIST_LABEL = NAME + ".src.list"; static final String SRC_COUNT_LABEL = NAME + ".src.count"; static final String TOTAL_SIZE_LABEL = NAME + ".total.size"; static final String DST_DIR_LIST_LABEL = NAME + ".dst.dir.list"; static final String BYTES_PER_MAP_LABEL = NAME + ".bytes.per.map"; static final String PRESERVE_STATUS_LABEL = Options.PRESERVE_STATUS.propertyname + ".value"; private JobConf conf; public void setConf(Configuration conf) { if (conf instanceof JobConf) { this.conf = (JobConf) conf; } else { this.conf = new JobConf(conf); } } public Configuration getConf() { return conf; } public DistCp(Configuration conf) { setConf(conf); } /** * An input/output pair of filenames. */ static class FilePair implements Writable { FileStatus input = new FileStatus(); String output; FilePair() { } FilePair(FileStatus input, String output) { this.input = input; this.output = output; } public void readFields(DataInput in) throws IOException { input.readFields(in); output = Text.readString(in); } public void write(DataOutput out) throws IOException { input.write(out); Text.writeString(out, output); } public String toString() { return input + " : " + output; } } /** * InputFormat of a distcp job responsible for generating splits of the src * file list. 
*/ static class CopyInputFormat implements InputFormat<Text, Text> { /** * Produce splits such that each is no greater than the quotient of the * total size and the number of splits requested. * @param job The handle to the JobConf object * @param numSplits Number of splits requested */ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { int cnfiles = job.getInt(SRC_COUNT_LABEL, -1); long cbsize = job.getLong(TOTAL_SIZE_LABEL, -1); String srcfilelist = job.get(SRC_LIST_LABEL, ""); if (cnfiles < 0 || cbsize < 0 || "".equals(srcfilelist)) { throw new RuntimeException("Invalid metadata: #files(" + cnfiles + ") total_size(" + cbsize + ") listuri(" + srcfilelist + ")"); } Path src = new Path(srcfilelist); FileSystem fs = src.getFileSystem(job); FileStatus srcst = fs.getFileStatus(src); ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits); LongWritable key = new LongWritable(); FilePair value = new FilePair(); final long targetsize = cbsize / numSplits; long pos = 0L; long last = 0L; long acc = 0L; long cbrem = srcst.getLen(); SequenceFile.Reader sl = null; try { sl = new SequenceFile.Reader(fs, src, job); for (; sl.next(key, value); last = sl.getPosition()) { // if adding this split would put this split past the target size, // cut the last split and put this next file in the next split. if (acc + key.get() > targetsize && acc != 0) { long splitsize = last - pos; splits.add(new FileSplit(src, pos, splitsize, (String[])null)); cbrem -= splitsize; pos = last; acc = 0L; } acc += key.get(); } } finally { checkAndClose(sl); } if (cbrem != 0) { splits.add(new FileSplit(src, pos, cbrem, (String[])null)); } return splits.toArray(new FileSplit[splits.size()]); } /** * Returns a reader for this split of the src file list. 
*/ public RecordReader<Text, Text> getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException { return new SequenceFileRecordReader<Text, Text>(job, (FileSplit)split); } } /** * FSCopyFilesMapper: The mapper for copying files between FileSystems. */ static class CopyFilesMapper implements Mapper<LongWritable, FilePair, WritableComparable<?>, Text> { // config private int sizeBuf = 128 * 1024; private FileSystem destFileSys = null; private boolean ignoreReadFailures; private boolean preserve_status; private EnumSet<FileAttribute> preseved; private boolean overwrite; private boolean update; private Path destPath = null; private byte[] buffer = null; private JobConf job; + private boolean skipCRCCheck = false; // stats private int failcount = 0; private int skipcount = 0; private int copycount = 0; private String getCountString() { return "Copied: " + copycount + " Skipped: " + skipcount + " Failed: " + failcount; } private void updateStatus(Reporter reporter) { reporter.setStatus(getCountString()); } /** * Return true if dst should be replaced by src and the update flag is set. * Right now, this merely checks that the src and dst len are not equal. * This should be improved on once modification times, CRCs, etc. can * be meaningful in this context. * @throws IOException */ private boolean needsUpdate(FileStatus srcstatus, FileSystem dstfs, Path dstpath) throws IOException { return update && !sameFile(srcstatus.getPath().getFileSystem(job), - srcstatus, dstfs, dstpath); + srcstatus, dstfs, dstpath, skipCRCCheck); } private FSDataOutputStream create(Path f, Reporter reporter, FileStatus srcstat) throws IOException { if (destFileSys.exists(f)) { destFileSys.delete(f, false); } if (!preserve_status) { return destFileSys.create(f, true, sizeBuf, reporter); } FsPermission permission = preseved.contains(FileAttribute.PERMISSION)? srcstat.getPermission(): null; short replication = preseved.contains(FileAttribute.REPLICATION)? 
srcstat.getReplication(): destFileSys.getDefaultReplication(); long blockSize = preseved.contains(FileAttribute.BLOCK_SIZE)? srcstat.getBlockSize(): destFileSys.getDefaultBlockSize(); return destFileSys.create(f, permission, true, sizeBuf, replication, blockSize, reporter); } /** * Copy a file to a destination. * @param srcstat src path and metadata * @param dstpath dst path * @param reporter */ private void copy(FileStatus srcstat, Path relativedst, OutputCollector<WritableComparable<?>, Text> outc, Reporter reporter) throws IOException { Path absdst = new Path(destPath, relativedst); int totfiles = job.getInt(SRC_COUNT_LABEL, -1); assert totfiles >= 0 : "Invalid file count " + totfiles; // if a directory, ensure created even if empty if (srcstat.isDir()) { if (destFileSys.exists(absdst)) { if (!destFileSys.getFileStatus(absdst).isDir()) { throw new IOException("Failed to mkdirs: " + absdst+" is a file."); } } else if (!destFileSys.mkdirs(absdst)) { throw new IOException("Failed to mkdirs " + absdst); } // TODO: when modification times can be set, directories should be // emitted to reducers so they might be preserved. 
Also, mkdirs does // not currently return an error when the directory already exists; // if this changes, all directory work might as well be done in reduce return; } if (destFileSys.exists(absdst) && !overwrite && !needsUpdate(srcstat, destFileSys, absdst)) { outc.collect(null, new Text("SKIP: " + srcstat.getPath())); ++skipcount; reporter.incrCounter(Counter.SKIP, 1); updateStatus(reporter); return; } Path tmpfile = new Path(job.get(TMP_DIR_LABEL), relativedst); long cbcopied = 0L; FSDataInputStream in = null; FSDataOutputStream out = null; try { // open src file in = srcstat.getPath().getFileSystem(job).open(srcstat.getPath()); reporter.incrCounter(Counter.BYTESEXPECTED, srcstat.getLen()); // open tmp file out = create(tmpfile, reporter, srcstat); // copy file for(int cbread; (cbread = in.read(buffer)) >= 0; ) { out.write(buffer, 0, cbread); cbcopied += cbread; reporter.setStatus( String.format("%.2f ", cbcopied*100.0/srcstat.getLen()) + absdst + " [ " + StringUtils.humanReadableInt(cbcopied) + " / " + StringUtils.humanReadableInt(srcstat.getLen()) + " ]"); } } finally { checkAndClose(in); checkAndClose(out); } if (cbcopied != srcstat.getLen()) { throw new IOException("File size not matched: copied " + bytesString(cbcopied) + " to tmpfile (=" + tmpfile + ") but expected " + bytesString(srcstat.getLen()) + " from " + srcstat.getPath()); } else { if (totfiles == 1) { // Copying a single file; use dst path provided by user as destination // rather than destination directory, if a file Path dstparent = absdst.getParent(); if (!(destFileSys.exists(dstparent) && destFileSys.getFileStatus(dstparent).isDir())) { absdst = dstparent; } } if (destFileSys.exists(absdst) && destFileSys.getFileStatus(absdst).isDir()) { throw new IOException(absdst + " is a directory"); } if (!destFileSys.mkdirs(absdst.getParent())) { throw new IOException("Failed to craete parent dir: " + absdst.getParent()); } rename(tmpfile, absdst); FileStatus dststat = destFileSys.getFileStatus(absdst); 
if (dststat.getLen() != srcstat.getLen()) { destFileSys.delete(absdst, false); throw new IOException("File size not matched: copied " + bytesString(dststat.getLen()) + " to dst (=" + absdst + ") but expected " + bytesString(srcstat.getLen()) + " from " + srcstat.getPath()); } updatePermissions(srcstat, dststat); } // report at least once for each file ++copycount; reporter.incrCounter(Counter.BYTESCOPIED, cbcopied); reporter.incrCounter(Counter.COPY, 1); updateStatus(reporter); } /** rename tmp to dst, delete dst if already exists */ private void rename(Path tmp, Path dst) throws IOException { try { if (destFileSys.exists(dst)) { destFileSys.delete(dst, true); } if (!destFileSys.rename(tmp, dst)) { throw new IOException(); } } catch(IOException cause) { throw (IOException)new IOException("Fail to rename tmp file (=" + tmp + ") to destination file (=" + dst + ")").initCause(cause); } } private void updatePermissions(FileStatus src, FileStatus dst ) throws IOException { if (preserve_status) { DistCp.updatePermissions(src, dst, preseved, destFileSys); } } static String bytesString(long b) { return b + " bytes (" + StringUtils.humanReadableInt(b) + ")"; } /** Mapper configuration. * Extracts source and destination file system, as well as * top-level paths on source and destination directories. * Gets the named file systems, to be used later in map. 
*/ public void configure(JobConf job) { destPath = new Path(job.get(DST_DIR_LABEL, "/")); try { destFileSys = destPath.getFileSystem(job); } catch (IOException ex) { throw new RuntimeException("Unable to get the named file system.", ex); } sizeBuf = job.getInt("copy.buf.size", 128 * 1024); buffer = new byte[sizeBuf]; ignoreReadFailures = job.getBoolean(Options.IGNORE_READ_FAILURES.propertyname, false); preserve_status = job.getBoolean(Options.PRESERVE_STATUS.propertyname, false); if (preserve_status) { preseved = FileAttribute.parse(job.get(PRESERVE_STATUS_LABEL)); } update = job.getBoolean(Options.UPDATE.propertyname, false); overwrite = !update && job.getBoolean(Options.OVERWRITE.propertyname, false); + skipCRCCheck = job.getBoolean(Options.SKIPCRC.propertyname, false); this.job = job; } /** Map method. Copies one file from source file system to destination. * @param key src len * @param value FilePair (FileStatus src, Path dst) * @param out Log of failed copies * @param reporter */ public void map(LongWritable key, FilePair value, OutputCollector<WritableComparable<?>, Text> out, Reporter reporter) throws IOException { final FileStatus srcstat = value.input; final Path relativedst = new Path(value.output); try { copy(srcstat, relativedst, out, reporter); } catch (IOException e) { ++failcount; reporter.incrCounter(Counter.FAIL, 1); updateStatus(reporter); final String sfailure = "FAIL " + relativedst + " : " + StringUtils.stringifyException(e); out.collect(null, new Text(sfailure)); LOG.info(sfailure); try { for (int i = 0; i < 3; ++i) { try { final Path tmp = new Path(job.get(TMP_DIR_LABEL), relativedst); if (destFileSys.delete(tmp, true)) break; } catch (Throwable ex) { // ignore, we are just cleaning up LOG.debug("Ignoring cleanup exception", ex); } // update status, so we don't get timed out updateStatus(reporter); Thread.sleep(3 * 1000); } } catch (InterruptedException inte) { throw (IOException)new IOException().initCause(inte); } } finally { 
updateStatus(reporter); } } public void close() throws IOException { if (0 == failcount || ignoreReadFailures) { return; } throw new IOException(getCountString()); } } private static List<Path> fetchFileList(Configuration conf, Path srcList) throws IOException { List<Path> result = new ArrayList<Path>(); FileSystem fs = srcList.getFileSystem(conf); BufferedReader input = null; try { input = new BufferedReader(new InputStreamReader(fs.open(srcList))); String line = input.readLine(); while (line != null) { result.add(new Path(line)); line = input.readLine(); } } finally { checkAndClose(input); } return result; } @Deprecated public static void copy(Configuration conf, String srcPath, String destPath, Path logPath, boolean srcAsList, boolean ignoreReadFailures) throws IOException { final Path src = new Path(srcPath); List<Path> tmp = new ArrayList<Path>(); if (srcAsList) { tmp.addAll(fetchFileList(conf, src)); } else { tmp.add(src); } EnumSet<Options> flags = ignoreReadFailures ? EnumSet.of(Options.IGNORE_READ_FAILURES) : EnumSet.noneOf(Options.class); final Path dst = new Path(destPath); copy(conf, new Arguments(tmp, dst, logPath, flags, null, Long.MAX_VALUE, Long.MAX_VALUE, null)); } /** Sanity check for srcPath */ private static void checkSrcPath(Configuration conf, List<Path> srcPaths ) throws IOException { List<IOException> rslt = new ArrayList<IOException>(); for (Path p : srcPaths) { FileSystem fs = p.getFileSystem(conf); if (!fs.exists(p)) { rslt.add(new IOException("Input source " + p + " does not exist.")); } } if (!rslt.isEmpty()) { throw new InvalidInputException(rslt); } } /** * Driver to copy srcPath to destPath depending on required protocol. 
* @param args arguments */ static void copy(final Configuration conf, final Arguments args ) throws IOException { LOG.info("srcPaths=" + args.srcs); LOG.info("destPath=" + args.dst); checkSrcPath(conf, args.srcs); JobConf job = createJobConf(conf); if (args.preservedAttributes != null) { job.set(PRESERVE_STATUS_LABEL, args.preservedAttributes); } if (args.mapredSslConf != null) { job.set("dfs.https.client.keystore.resource", args.mapredSslConf); } //Initialize the mapper try { if (setup(conf, job, args)) { JobClient.runJob(job); } finalize(conf, job, args.dst, args.preservedAttributes); } finally { //delete tmp fullyDelete(job.get(TMP_DIR_LABEL), job); //delete jobDirectory fullyDelete(job.get(JOB_DIR_LABEL), job); } } private static void updatePermissions(FileStatus src, FileStatus dst, EnumSet<FileAttribute> preseved, FileSystem destFileSys ) throws IOException { String owner = null; String group = null; if (preseved.contains(FileAttribute.USER) && !src.getOwner().equals(dst.getOwner())) { owner = src.getOwner(); } if (preseved.contains(FileAttribute.GROUP) && !src.getGroup().equals(dst.getGroup())) { group = src.getGroup(); } if (owner != null || group != null) { destFileSys.setOwner(dst.getPath(), owner, group); } if (preseved.contains(FileAttribute.PERMISSION) && !src.getPermission().equals(dst.getPermission())) { destFileSys.setPermission(dst.getPath(), src.getPermission()); } } static private void finalize(Configuration conf, JobConf jobconf, final Path destPath, String presevedAttributes) throws IOException { if (presevedAttributes == null) { return; } EnumSet<FileAttribute> preseved = FileAttribute.parse(presevedAttributes); if (!preseved.contains(FileAttribute.USER) && !preseved.contains(FileAttribute.GROUP) && !preseved.contains(FileAttribute.PERMISSION)) { return; } FileSystem dstfs = destPath.getFileSystem(conf); Path dstdirlist = new Path(jobconf.get(DST_DIR_LIST_LABEL)); SequenceFile.Reader in = null; try { in = new 
SequenceFile.Reader(dstdirlist.getFileSystem(jobconf), dstdirlist, jobconf); Text dsttext = new Text(); FilePair pair = new FilePair(); for(; in.next(dsttext, pair); ) { Path absdst = new Path(destPath, pair.output); updatePermissions(pair.input, dstfs.getFileStatus(absdst), preseved, dstfs); } } finally { checkAndClose(in); } } static private class Arguments { final List<Path> srcs; final Path dst; final Path log; final EnumSet<Options> flags; final String preservedAttributes; final long filelimit; final long sizelimit; final String mapredSslConf; /** * Arguments for distcp * @param srcs List of source paths * @param dst Destination path * @param log Log output directory * @param flags Command-line flags * @param preservedAttributes Preserved attributes * @param filelimit File limit * @param sizelimit Size limit */ Arguments(List<Path> srcs, Path dst, Path log, EnumSet<Options> flags, String preservedAttributes, long filelimit, long sizelimit, String mapredSslConf) { this.srcs = srcs; this.dst = dst; this.log = log; this.flags = flags; this.preservedAttributes = preservedAttributes; this.filelimit = filelimit; this.sizelimit = sizelimit; this.mapredSslConf = mapredSslConf; if (LOG.isTraceEnabled()) { LOG.trace("this = " + this); } } static Arguments valueOf(String[] args, Configuration conf ) throws IOException { List<Path> srcs = new ArrayList<Path>(); Path dst = null; Path log = null; EnumSet<Options> flags = EnumSet.noneOf(Options.class); String presevedAttributes = null; String mapredSslConf = null; long filelimit = Long.MAX_VALUE; long sizelimit = Long.MAX_VALUE; for (int idx = 0; idx < args.length; idx++) { Options[] opt = Options.values(); int i = 0; for(; i < opt.length && !args[idx].startsWith(opt[i].cmd); i++); if (i < opt.length) { flags.add(opt[i]); if (opt[i] == Options.PRESERVE_STATUS) { presevedAttributes = args[idx].substring(2); FileAttribute.parse(presevedAttributes); //validation } else if (opt[i] == Options.FILE_LIMIT) { filelimit = 
Options.FILE_LIMIT.parseLong(args, ++idx); } else if (opt[i] == Options.SIZE_LIMIT) { sizelimit = Options.SIZE_LIMIT.parseLong(args, ++idx); } } else if ("-f".equals(args[idx])) { if (++idx == args.length) { throw new IllegalArgumentException("urilist_uri not specified in -f"); } srcs.addAll(fetchFileList(conf, new Path(args[idx]))); } else if ("-log".equals(args[idx])) { if (++idx == args.length) { throw new IllegalArgumentException("logdir not specified in -log"); } log = new Path(args[idx]); } else if ("-mapredSslConf".equals(args[idx])) { if (++idx == args.length) { throw new IllegalArgumentException("ssl conf file not specified in -mapredSslConf"); } mapredSslConf = args[idx]; } else if ("-m".equals(args[idx])) { if (++idx == args.length) { throw new IllegalArgumentException("num_maps not specified in -m"); } try { conf.setInt(MAX_MAPS_LABEL, Integer.valueOf(args[idx])); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid argument to -m: " + args[idx]); } } else if ('-' == args[idx].codePointAt(0)) { throw new IllegalArgumentException("Invalid switch " + args[idx]); } else if (idx == args.length -1) { dst = new Path(args[idx]); } else { srcs.add(new Path(args[idx])); } } // mandatory command-line parameters if (srcs.isEmpty() || dst == null) { throw new IllegalArgumentException("Missing " + (dst == null ? 
"dst path" : "src")); } // incompatible command-line flags final boolean isOverwrite = flags.contains(Options.OVERWRITE); final boolean isUpdate = flags.contains(Options.UPDATE); final boolean isDelete = flags.contains(Options.DELETE); + final boolean skipCRC = flags.contains(Options.SKIPCRC); + if (isOverwrite && isUpdate) { throw new IllegalArgumentException("Conflicting overwrite policies"); } if (isDelete && !isOverwrite && !isUpdate) { throw new IllegalArgumentException(Options.DELETE.cmd + " must be specified with " + Options.OVERWRITE + " or " + Options.UPDATE + "."); } + if (!isUpdate && skipCRC) { + throw new IllegalArgumentException( + Options.SKIPCRC.cmd + " is relevant only with the " + + Options.UPDATE.cmd + " option"); + } return new Arguments(srcs, dst, log, flags, presevedAttributes, filelimit, sizelimit, mapredSslConf); } /** {@inheritDoc} */ public String toString() { return getClass().getName() + "{" + "\n srcs = " + srcs + "\n dst = " + dst + "\n log = " + log + "\n flags = " + flags + "\n preservedAttributes = " + preservedAttributes + "\n filelimit = " + filelimit + "\n sizelimit = " + sizelimit + "\n mapredSslConf = " + mapredSslConf + "\n}"; } } /** * This is the main driver for recursively copying directories * across file systems. It takes at least two cmdline parameters. A source * URL and a destination URL. It then essentially does an "ls -lR" on the * source URL, and writes the output in a round-robin manner to all the map * input files. The mapper actually copies the files allotted to it. The * reduce is empty. 
*/ public int run(String[] args) { try { copy(conf, Arguments.valueOf(args, conf)); return 0; } catch (IllegalArgumentException e) { System.err.println(StringUtils.stringifyException(e) + "\n" + usage); ToolRunner.printGenericCommandUsage(System.err); return -1; } catch (DuplicationException e) { System.err.println(StringUtils.stringifyException(e)); return DuplicationException.ERROR_CODE; } catch (RemoteException e) { final IOException unwrapped = e.unwrapRemoteException( FileNotFoundException.class, AccessControlException.class, QuotaExceededException.class); System.err.println(StringUtils.stringifyException(unwrapped)); return -3; } catch (Exception e) { System.err.println("With failures, global counters are inaccurate; " + "consider running with -i"); System.err.println("Copy failed: " + StringUtils.stringifyException(e)); return -999; } } public static void main(String[] args) throws Exception { JobConf job = new JobConf(DistCp.class); DistCp distcp = new DistCp(job); int res = ToolRunner.run(distcp, args); System.exit(res); } /** * Make a path relative with respect to a root path. * absPath is always assumed to descend from root. * Otherwise returned path is null. */ static String makeRelative(Path root, Path absPath) { if (!absPath.isAbsolute()) { throw new IllegalArgumentException("!absPath.isAbsolute(), absPath=" + absPath); } String p = absPath.toUri().getPath(); StringTokenizer pathTokens = new StringTokenizer(p, "/"); for(StringTokenizer rootTokens = new StringTokenizer( root.toUri().getPath(), "/"); rootTokens.hasMoreTokens(); ) { if (!rootTokens.nextToken().equals(pathTokens.nextToken())) { return null; } } StringBuilder sb = new StringBuilder(); for(; pathTokens.hasMoreTokens(); ) { sb.append(pathTokens.nextToken()); if (pathTokens.hasMoreTokens()) { sb.append(Path.SEPARATOR); } } return sb.length() == 0? ".": sb.toString(); } /** * Calculate how many maps to run. 
* Number of maps is bounded by a minimum of the cumulative size of the * copy / (distcp.bytes.per.map, default BYTES_PER_MAP or -m on the * command line) and at most (distcp.max.map.tasks, default * MAX_MAPS_PER_NODE * nodes in the cluster). * @param totalBytes Count of total bytes for job * @param job The job to configure * @return Count of maps to run. */ private static void setMapCount(long totalBytes, JobConf job) throws IOException { int numMaps = (int)(totalBytes / job.getLong(BYTES_PER_MAP_LABEL, BYTES_PER_MAP)); numMaps = Math.min(numMaps, job.getInt(MAX_MAPS_LABEL, MAX_MAPS_PER_NODE * new JobClient(job).getClusterStatus().getTaskTrackers())); job.setNumMapTasks(Math.max(numMaps, 1)); } /** Fully delete dir */ static void fullyDelete(String dir, Configuration conf) throws IOException { if (dir != null) { Path tmp = new Path(dir); tmp.getFileSystem(conf).delete(tmp, true); } } //Job configuration private static JobConf createJobConf(Configuration conf) { JobConf jobconf = new JobConf(conf, DistCp.class); jobconf.setJobName(NAME); // turn off speculative execution, because DFS doesn't handle // multiple writers to the same file. jobconf.setMapSpeculativeExecution(false); jobconf.setInputFormat(CopyInputFormat.class); jobconf.setOutputKeyClass(Text.class); jobconf.setOutputValueClass(Text.class); jobconf.setMapperClass(CopyFilesMapper.class); jobconf.setNumReduceTasks(0); return jobconf; } private static final Random RANDOM = new Random(); public static String getRandomId() { return Integer.toString(RANDOM.nextInt(Integer.MAX_VALUE), 36); } /** * Initialize DFSCopyFileMapper specific job-configuration. * @param conf : The dfs/mapred configuration. * @param jobConf : The handle to the jobConf object to be initialized. * @param args Arguments * @return true if it is necessary to launch a job. 
*/ private static boolean setup(Configuration conf, JobConf jobConf, final Arguments args) throws IOException { jobConf.set(DST_DIR_LABEL, args.dst.toUri().toString()); //set boolean values final boolean update = args.flags.contains(Options.UPDATE); + final boolean skipCRCCheck = args.flags.contains(Options.SKIPCRC); final boolean overwrite = !update && args.flags.contains(Options.OVERWRITE); jobConf.setBoolean(Options.UPDATE.propertyname, update); + jobConf.setBoolean(Options.SKIPCRC.propertyname, skipCRCCheck); jobConf.setBoolean(Options.OVERWRITE.propertyname, overwrite); jobConf.setBoolean(Options.IGNORE_READ_FAILURES.propertyname, args.flags.contains(Options.IGNORE_READ_FAILURES)); jobConf.setBoolean(Options.PRESERVE_STATUS.propertyname, args.flags.contains(Options.PRESERVE_STATUS)); final String randomId = getRandomId(); JobClient jClient = new JobClient(jobConf); Path jobDirectory = new Path(jClient.getSystemDir(), NAME + "_" + randomId); jobConf.set(JOB_DIR_LABEL, jobDirectory.toString()); FileSystem dstfs = args.dst.getFileSystem(conf); boolean dstExists = dstfs.exists(args.dst); boolean dstIsDir = false; if (dstExists) { dstIsDir = dstfs.getFileStatus(args.dst).isDir(); } // default logPath Path logPath = args.log; if (logPath == null) { String filename = "_distcp_logs_" + randomId; if (!dstExists || !dstIsDir) { Path parent = args.dst.getParent(); if (!dstfs.exists(parent)) { dstfs.mkdirs(parent); } logPath = new Path(parent, filename); } else { logPath = new Path(args.dst, filename); } } FileOutputFormat.setOutputPath(jobConf, logPath); // create src list, dst list FileSystem jobfs = jobDirectory.getFileSystem(jobConf); Path srcfilelist = new Path(jobDirectory, "_distcp_src_files"); jobConf.set(SRC_LIST_LABEL, srcfilelist.toString()); SequenceFile.Writer src_writer = SequenceFile.createWriter(jobfs, jobConf, srcfilelist, LongWritable.class, FilePair.class, SequenceFile.CompressionType.NONE); Path dstfilelist = new Path(jobDirectory, 
"_distcp_dst_files"); SequenceFile.Writer dst_writer = SequenceFile.createWriter(jobfs, jobConf, dstfilelist, Text.class, Text.class, SequenceFile.CompressionType.NONE); Path dstdirlist = new Path(jobDirectory, "_distcp_dst_dirs"); jobConf.set(DST_DIR_LIST_LABEL, dstdirlist.toString()); SequenceFile.Writer dir_writer = SequenceFile.createWriter(jobfs, jobConf, dstdirlist, Text.class, FilePair.class, SequenceFile.CompressionType.NONE); // handle the case where the destination directory doesn't exist // and we've only a single src directory OR we're updating/overwriting // the contents of the destination directory. final boolean special = (args.srcs.size() == 1 && !dstExists) || update || overwrite; int srcCount = 0, cnsyncf = 0, dirsyn = 0; long fileCount = 0L, byteCount = 0L, cbsyncs = 0L; try { for(Iterator<Path> srcItr = args.srcs.iterator(); srcItr.hasNext(); ) { final Path src = srcItr.next(); FileSystem srcfs = src.getFileSystem(conf); FileStatus srcfilestat = srcfs.getFileStatus(src); Path root = special && srcfilestat.isDir()? src: src.getParent(); if (srcfilestat.isDir()) { ++srcCount; } Stack<FileStatus> pathstack = new Stack<FileStatus>(); for(pathstack.push(srcfilestat); !pathstack.empty(); ) { FileStatus cur = pathstack.pop(); FileStatus[] children = srcfs.listStatus(cur.getPath()); for(int i = 0; i < children.length; i++) { boolean skipfile = false; final FileStatus child = children[i]; final String dst = makeRelative(root, child.getPath()); ++srcCount; if (child.isDir()) { pathstack.push(child); } else { //skip file if the src and the dst files are the same. 
- skipfile = update && sameFile(srcfs, child, dstfs, new Path(args.dst, dst)); + skipfile = update && + sameFile(srcfs, child, dstfs, + new Path(args.dst, dst), skipCRCCheck); //skip file if it exceed file limit or size limit skipfile |= fileCount == args.filelimit || byteCount + child.getLen() > args.sizelimit; if (!skipfile) { ++fileCount; byteCount += child.getLen(); if (LOG.isTraceEnabled()) { LOG.trace("adding file " + child.getPath()); } ++cnsyncf; cbsyncs += child.getLen(); if (cnsyncf > SYNC_FILE_MAX || cbsyncs > BYTES_PER_MAP) { src_writer.sync(); dst_writer.sync(); cnsyncf = 0; cbsyncs = 0L; } } } if (!skipfile) { src_writer.append(new LongWritable(child.isDir()? 0: child.getLen()), new FilePair(child, dst)); } dst_writer.append(new Text(dst), new Text(child.getPath().toString())); } if (cur.isDir()) { String dst = makeRelative(root, cur.getPath()); dir_writer.append(new Text(dst), new FilePair(cur, dst)); if (++dirsyn > SYNC_FILE_MAX) { dirsyn = 0; dir_writer.sync(); } } } } } finally { checkAndClose(src_writer); checkAndClose(dst_writer); checkAndClose(dir_writer); } FileStatus dststatus = null; try { dststatus = dstfs.getFileStatus(args.dst); } catch(FileNotFoundException fnfe) { LOG.info(args.dst + " does not exist."); } // create dest path dir if copying > 1 file if (dststatus == null) { if (srcCount > 1 && !dstfs.mkdirs(args.dst)) { throw new IOException("Failed to create" + args.dst); } } final Path sorted = new Path(jobDirectory, "_distcp_sorted"); checkDuplication(jobfs, dstfilelist, sorted, conf); if (dststatus != null && args.flags.contains(Options.DELETE)) { deleteNonexisting(dstfs, dststatus, sorted, jobfs, jobDirectory, jobConf, conf); } Path tmpDir = new Path( (dstExists && !dstIsDir) || (!dstExists && srcCount == 1)? 
args.dst.getParent(): args.dst, "_distcp_tmp_" + randomId); jobConf.set(TMP_DIR_LABEL, tmpDir.toUri().toString()); LOG.info("sourcePathsCount=" + srcCount); LOG.info("filesToCopyCount=" + fileCount); LOG.info("bytesToCopyCount=" + StringUtils.humanReadableInt(byteCount)); jobConf.setInt(SRC_COUNT_LABEL, srcCount); jobConf.setLong(TOTAL_SIZE_LABEL, byteCount); setMapCount(byteCount, jobConf); return fileCount > 0; } /** * Check whether the contents of src and dst are the same. * * Return false if dstpath does not exist * * If the files have different sizes, return false. * * If the files have the same sizes, the file checksums will be compared. * * When file checksum is not supported in any of file systems, * two files are considered as the same if they have the same size. */ static private boolean sameFile(FileSystem srcfs, FileStatus srcstatus, - FileSystem dstfs, Path dstpath) throws IOException { + FileSystem dstfs, Path dstpath, boolean skipCRCCheck) throws IOException { FileStatus dststatus; try { dststatus = dstfs.getFileStatus(dstpath); } catch(FileNotFoundException fnfe) { return false; } //same length? if (srcstatus.getLen() != dststatus.getLen()) { return false; } + if (skipCRCCheck) { + LOG.debug("Skipping CRC Check"); + return true; + } + //get src checksum final FileChecksum srccs; try { srccs = srcfs.getFileChecksum(srcstatus.getPath()); } catch(FileNotFoundException fnfe) { /* * Two possible cases: * (1) src existed once but was deleted between the time period that * srcstatus was obtained and the try block above. * (2) srcfs does not support file checksum and (incorrectly) throws * FNFE, e.g. some previous versions of HftpFileSystem. * For case (1), it is okay to return true since src was already deleted. * For case (2), true should be returned. */ return true; } //compare checksums try { final FileChecksum dstcs = dstfs.getFileChecksum(dststatus.getPath()); //return true if checksum is not supported //(i.e. 
some of the checksums is null) return srccs == null || dstcs == null || srccs.equals(dstcs); } catch(FileNotFoundException fnfe) { return false; } } /** Delete the dst files/dirs which do not exist in src */ static private void deleteNonexisting( FileSystem dstfs, FileStatus dstroot, Path dstsorted, FileSystem jobfs, Path jobdir, JobConf jobconf, Configuration conf ) throws IOException { if (!dstroot.isDir()) { throw new IOException("dst must be a directory when option " + Options.DELETE.cmd + " is set, but dst (= " + dstroot.getPath() + ") is not a directory."); } //write dst lsr results final Path dstlsr = new Path(jobdir, "_distcp_dst_lsr"); final SequenceFile.Writer writer = SequenceFile.createWriter(jobfs, jobconf, dstlsr, Text.class, FileStatus.class, SequenceFile.CompressionType.NONE); try { //do lsr to get all file statuses in dstroot final Stack<FileStatus> lsrstack = new Stack<FileStatus>(); for(lsrstack.push(dstroot); !lsrstack.isEmpty(); ) { final FileStatus status = lsrstack.pop(); if (status.isDir()) { for(FileStatus child : dstfs.listStatus(status.getPath())) { String relative = makeRelative(dstroot.getPath(), child.getPath()); writer.append(new Text(relative), child); lsrstack.push(child); } } } } finally { checkAndClose(writer); } //sort lsr results final Path sortedlsr = new Path(jobdir, "_distcp_dst_lsr_sorted"); SequenceFile.Sorter sorter = new SequenceFile.Sorter(jobfs, new Text.Comparator(), Text.class, FileStatus.class, jobconf); sorter.sort(dstlsr, sortedlsr); //compare lsr list and dst list SequenceFile.Reader lsrin = null; SequenceFile.Reader dstin = null; try { lsrin = new SequenceFile.Reader(jobfs, sortedlsr, jobconf); dstin = new SequenceFile.Reader(jobfs, dstsorted, jobconf); //compare sorted lsr list and sorted dst list final Text lsrpath = new Text(); final FileStatus lsrstatus = new FileStatus(); final Text dstpath = new Text(); final Text dstfrom = new Text(); final FsShell shell = new FsShell(conf); final String[] shellargs = 
{"-rmr", null}; boolean hasnext = dstin.next(dstpath, dstfrom); for(; lsrin.next(lsrpath, lsrstatus); ) { int dst_cmp_lsr = dstpath.compareTo(lsrpath); for(; hasnext && dst_cmp_lsr < 0; ) { hasnext = dstin.next(dstpath, dstfrom); dst_cmp_lsr = dstpath.compareTo(lsrpath); } if (dst_cmp_lsr == 0) { //lsrpath exists in dst, skip it hasnext = dstin.next(dstpath, dstfrom); } else { //lsrpath does not exist, delete it String s = new Path(dstroot.getPath(), lsrpath.toString()).toString(); if (shellargs[1] == null || !isAncestorPath(shellargs[1], s)) { shellargs[1] = s; int r = 0; try { r = shell.run(shellargs); } catch(Exception e) { throw new IOException("Exception from shell.", e); } if (r != 0) { throw new IOException("\"" + shellargs[0] + " " + shellargs[1] + "\" returns non-zero value " + r); } } } } } finally { checkAndClose(lsrin); checkAndClose(dstin); } } //is x an ancestor path of y? static private boolean isAncestorPath(String x, String y) { if (!y.startsWith(x)) { return false; } final int len = x.length(); return y.length() == len || y.charAt(len) == Path.SEPARATOR_CHAR; } /** Check whether the file list have duplication. 
*/ static private void checkDuplication(FileSystem fs, Path file, Path sorted, Configuration conf) throws IOException { SequenceFile.Reader in = null; try { SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs, new Text.Comparator(), Text.class, Text.class, conf); sorter.sort(file, sorted); in = new SequenceFile.Reader(fs, sorted, conf); Text prevdst = null, curdst = new Text(); Text prevsrc = null, cursrc = new Text(); for(; in.next(curdst, cursrc); ) { if (prevdst != null && curdst.equals(prevdst)) { throw new DuplicationException( "Invalid input, there are duplicated files in the sources: " + prevsrc + ", " + cursrc); } prevdst = curdst; curdst = new Text(); prevsrc = cursrc; cursrc = new Text(); } } finally { checkAndClose(in); } } static boolean checkAndClose(java.io.Closeable io) { if (io != null) { try { io.close(); } catch(IOException ioe) { LOG.warn(StringUtils.stringifyException(ioe)); return false; } } return true; } /** An exception class for duplicated source files. */ public static class DuplicationException extends IOException { private static final long serialVersionUID = 1L; /** Error code for this exception */ public static final int ERROR_CODE = -2; DuplicationException(String message) {super(message);} } }
jaxlaw/hadoop-common
edefba816f1895d1cea842f43f5ab55b5c7e5c9d
MAPREDUCE-1186. Fix DistributedCache to do a recursive chmod on just the per-cache directory, not all of mapred.local.dir. Contributed by Amareshwari Sriramadasu.
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index b11813e..995b19e 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,413 +1,417 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the configured threshold. (cdouglas) yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) + MAPREDUCE-1186. Fixed DistributedCache to do a recursive chmod on just the + per-cache directory, not all of mapred.local.dir. + (Amareshwari Sriramadasu via acmurthy) + yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. 
rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. 
Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. 
Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. 
Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. 
http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. 
http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. 
http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. 
Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/core/org/apache/hadoop/filecache/DistributedCache.java b/src/core/org/apache/hadoop/filecache/DistributedCache.java index 6f96c5c..f9e9c88 100644 --- a/src/core/org/apache/hadoop/filecache/DistributedCache.java +++ b/src/core/org/apache/hadoop/filecache/DistributedCache.java @@ -1,916 +1,926 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.filecache; import org.apache.commons.logging.*; import java.io.*; import java.util.*; import org.apache.hadoop.conf.*; import org.apache.hadoop.util.*; import org.apache.hadoop.fs.*; import java.net.URI; /** * Distribute application-specific large, read-only files efficiently. * * <p><code>DistributedCache</code> is a facility provided by the Map-Reduce * framework to cache files (text, archives, jars etc.) needed by applications. * </p> * * <p>Applications specify the files, via urls (hdfs:// or http://) to be cached * via the {@link org.apache.hadoop.mapred.JobConf}. 
* The <code>DistributedCache</code> assumes that the * files specified via hdfs:// urls are already present on the * {@link FileSystem} at the path specified by the url.</p> * * <p>The framework will copy the necessary files on to the slave node before * any tasks for the job are executed on that node. Its efficiency stems from * the fact that the files are only copied once per job and the ability to * cache archives which are un-archived on the slaves.</p> * * <p><code>DistributedCache</code> can be used to distribute simple, read-only * data/text files and/or more complex types such as archives, jars etc. * Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes. * Jars may be optionally added to the classpath of the tasks, a rudimentary * software distribution mechanism. Files have execution permissions. * Optionally users can also direct it to symlink the distributed cache file(s) * into the working directory of the task.</p> * * <p><code>DistributedCache</code> tracks modification timestamps of the cache * files. Clearly the cache files should not be modified by the application * or externally while the job is executing.</p> * * <p>Here is an illustrative example on how to use the * <code>DistributedCache</code>:</p> * <p><blockquote><pre> * // Setting up the cache for the application * * 1. Copy the requisite files to the <code>FileSystem</code>: * * $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat * $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip * $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar * $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar * $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz * $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz * * 2. 
Setup the application's <code>JobConf</code>: * * JobConf job = new JobConf(); * DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"), * job); * DistributedCache.addCacheArchive(new URI("/myapp/map.zip", job); * DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job); * DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar", job); * DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz", job); * DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz", job); * * 3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper} * or {@link org.apache.hadoop.mapred.Reducer}: * * public static class MapClass extends MapReduceBase * implements Mapper&lt;K, V, K, V&gt; { * * private Path[] localArchives; * private Path[] localFiles; * * public void configure(JobConf job) { * // Get the cached archives/files * localArchives = DistributedCache.getLocalCacheArchives(job); * localFiles = DistributedCache.getLocalCacheFiles(job); * } * * public void map(K key, V value, * OutputCollector&lt;K, V&gt; output, Reporter reporter) * throws IOException { * // Use data from the cached archives/files here * // ... * // ... * output.collect(k, v); * } * } * * </pre></blockquote></p> * * @see org.apache.hadoop.mapred.JobConf * @see org.apache.hadoop.mapred.JobClient */ public class DistributedCache { // cacheID to cacheStatus mapping private static TreeMap<String, CacheStatus> cachedArchives = new TreeMap<String, CacheStatus>(); private static TreeMap<Path, Long> baseDirSize = new TreeMap<Path, Long>(); // default total cache size private static final long DEFAULT_CACHE_SIZE = 10737418240L; private static final Log LOG = LogFactory.getLog(DistributedCache.class); private static Random random = new Random(); /** * Get the locally cached file or archive; it could either be * previously cached (and valid) or copy it from the {@link FileSystem} now. 
* * @param cache the cache to be localized, this should be specified as * new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema * or hostname:port is provided the file is assumed to be in the filesystem * being used in the Configuration * @param conf The Confguration file which contains the filesystem * @param baseDir The base cache Dir where you wnat to localize the files/archives * @param fileStatus The file status on the dfs. * @param isArchive if the cache is an archive or a file. In case it is an * archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will * be unzipped/unjarred/untarred automatically * and the directory where the archive is unzipped/unjarred/untarred is * returned as the Path. * In case of a file, the path to the file is returned * @param confFileStamp this is the hdfs file modification timestamp to verify that the * file to be cached hasn't changed since the job started * @param currentWorkDir this is the directory where you would want to create symlinks * for the locally cached files/archives * @return the path to directory where the archives are unjarred in case of archives, * the path to the file where the file is copied locally * @throws IOException */ public static Path getLocalCache(URI cache, Configuration conf, Path baseDir, FileStatus fileStatus, boolean isArchive, long confFileStamp, Path currentWorkDir) throws IOException { return getLocalCache(cache, conf, baseDir, fileStatus, isArchive, confFileStamp, currentWorkDir, true, new LocalDirAllocator("mapred.local.dir")); } /** * Get the locally cached file or archive; it could either be * previously cached (and valid) or copy it from the {@link FileSystem} now. * * @param cache the cache to be localized, this should be specified as * new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). 
If no schema * or hostname:port is provided the file is assumed to be in the filesystem * being used in the Configuration * @param conf The Confguration file which contains the filesystem * @param subDir The sub cache Dir where you want to localize the files/archives * @param fileStatus The file status on the dfs. * @param isArchive if the cache is an archive or a file. In case it is an * archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will * be unzipped/unjarred/untarred automatically * and the directory where the archive is unzipped/unjarred/untarred is * returned as the Path. * In case of a file, the path to the file is returned * @param confFileStamp this is the hdfs file modification timestamp to verify that the * file to be cached hasn't changed since the job started * @param currentWorkDir this is the directory where you would want to create symlinks * for the locally cached files/archives * @param honorSymLinkConf if this is false, then the symlinks are not * created even if conf says so (this is required for an optimization in task * launches * @param lDirAllocator LocalDirAllocator of the tracker * @return the path to directory where the archives are unjarred in case of archives, * the path to the file where the file is copied locally * @throws IOException */ public static Path getLocalCache(URI cache, Configuration conf, Path subDir, FileStatus fileStatus, boolean isArchive, long confFileStamp, Path currentWorkDir, boolean honorSymLinkConf, LocalDirAllocator lDirAllocator) throws IOException { String key = getKey(cache, conf, confFileStamp); CacheStatus lcacheStatus; Path localizedPath; synchronized (cachedArchives) { lcacheStatus = cachedArchives.get(key); if (lcacheStatus == null) { // was never localized - String cachePath = new Path (subDir, - new Path(String.valueOf(random.nextLong()), - makeRelative(cache, conf))).toString(); + Path uniqueParentDir = + new Path(subDir, String.valueOf(random.nextLong())); + String cachePath = new 
Path(uniqueParentDir, + makeRelative(cache, conf)).toString(); Path localPath = lDirAllocator.getLocalPathForWrite(cachePath, fileStatus.getLen(), conf); - lcacheStatus = new CacheStatus( - new Path(localPath.toString().replace(cachePath, "")), localPath); + lcacheStatus = + new CacheStatus(new Path(localPath.toString().replace(cachePath, "")), + localPath, uniqueParentDir); cachedArchives.put(key, lcacheStatus); } lcacheStatus.refcount++; } synchronized (lcacheStatus) { if (!lcacheStatus.isInited()) { localizedPath = localizeCache(conf, cache, confFileStamp, lcacheStatus, fileStatus, isArchive); lcacheStatus.initComplete(); } else { localizedPath = checkCacheStatusValidity(conf, cache, confFileStamp, lcacheStatus, fileStatus, isArchive); } createSymlink(conf, cache, lcacheStatus, isArchive, currentWorkDir, honorSymLinkConf); } // try deleting stuff if you can long size = 0; synchronized (lcacheStatus) { synchronized (baseDirSize) { Long get = baseDirSize.get(lcacheStatus.getBaseDir()); if ( get != null ) { size = get.longValue(); } else { LOG.warn("Cannot find size of baseDir: " + lcacheStatus.getBaseDir()); } } } // setting the cache size to a default of 10GB long allowedSize = conf.getLong("local.cache.size", DEFAULT_CACHE_SIZE); if (allowedSize < size) { // try some cache deletions deleteCache(conf); } return localizedPath; } /** * Get the locally cached file or archive; it could either be * previously cached (and valid) or copy it from the {@link FileSystem} now. * * @param cache the cache to be localized, this should be specified as * new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema * or hostname:port is provided the file is assumed to be in the filesystem * being used in the Configuration * @param conf The Confguration file which contains the filesystem * @param baseDir The base cache Dir where you wnat to localize the files/archives * @param isArchive if the cache is an archive or a file. 
In case it is an * archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will * be unzipped/unjarred/untarred automatically * and the directory where the archive is unzipped/unjarred/untarred * is returned as the Path. * In case of a file, the path to the file is returned * @param confFileStamp this is the hdfs file modification timestamp to verify that the * file to be cached hasn't changed since the job started * @param currentWorkDir this is the directory where you would want to create symlinks * for the locally cached files/archives * @return the path to directory where the archives are unjarred in case of archives, * the path to the file where the file is copied locally * @throws IOException */ public static Path getLocalCache(URI cache, Configuration conf, Path baseDir, boolean isArchive, long confFileStamp, Path currentWorkDir) throws IOException { return getLocalCache(cache, conf, baseDir, null, isArchive, confFileStamp, currentWorkDir); } /** * This is the opposite of getlocalcache. When you are done with * using the cache, you need to release the cache * @param cache The cache URI to be released * @param conf configuration which contains the filesystem the cache * is contained in. 
* @throws IOException */ public static void releaseCache(URI cache, Configuration conf, long timeStamp) throws IOException { String cacheId = getKey(cache, conf, timeStamp); synchronized (cachedArchives) { CacheStatus lcacheStatus = cachedArchives.get(cacheId); if (lcacheStatus == null) { LOG.warn("Cannot find localized cache: " + cache + " (key: " + cacheId + ") in releaseCache!"); return; } lcacheStatus.refcount--; } } // To delete the caches which have a refcount of zero private static void deleteCache(Configuration conf) throws IOException { Set<CacheStatus> deleteSet = new HashSet<CacheStatus>(); // try deleting cache Status with refcount of zero synchronized (cachedArchives) { for (Iterator it = cachedArchives.keySet().iterator(); it.hasNext();) { String cacheId = (String) it.next(); CacheStatus lcacheStatus = cachedArchives.get(cacheId); if (lcacheStatus.refcount == 0) { // delete this cache entry from the global list // and mark the localized file for deletion deleteSet.add(lcacheStatus); it.remove(); } } } // do the deletion, after releasing the global lock for (CacheStatus lcacheStatus : deleteSet) { synchronized (lcacheStatus) { - FileSystem.getLocal(conf).delete(lcacheStatus.localLoadPath, true); - LOG.info("Deleted path " + lcacheStatus.localLoadPath); + FileSystem.getLocal(conf).delete(lcacheStatus.localizedLoadPath, true); + LOG.info("Deleted path " + lcacheStatus.localizedLoadPath); // decrement the size of the cache from baseDirSize synchronized (baseDirSize) { - Long dirSize = baseDirSize.get(lcacheStatus.baseDir); + Long dirSize = baseDirSize.get(lcacheStatus.localizedBaseDir); if ( dirSize != null ) { dirSize -= lcacheStatus.size; - baseDirSize.put(lcacheStatus.baseDir, dirSize); + baseDirSize.put(lcacheStatus.localizedBaseDir, dirSize); } else { LOG.warn("Cannot find record of the baseDir: " + - lcacheStatus.baseDir + " during delete!"); + lcacheStatus.localizedBaseDir + " during delete!"); } } } } } /* * Returns the relative path of the dir 
this cache will be localized in * relative path that this cache will be localized in. For * hdfs://hostname:port/absolute_path -- the relative path is * hostname/absolute path -- if it is just /absolute_path -- then the * relative path is hostname of DFS this mapred cluster is running * on/absolute_path */ public static String makeRelative(URI cache, Configuration conf) throws IOException { String host = cache.getHost(); if (host == null) { host = cache.getScheme(); } if (host == null) { URI defaultUri = FileSystem.get(conf).getUri(); host = defaultUri.getHost(); if (host == null) { host = defaultUri.getScheme(); } } String path = host + cache.getPath(); path = path.replace(":/","/"); // remove windows device colon return path; } static String getKey(URI cache, Configuration conf, long timeStamp) throws IOException { return makeRelative(cache, conf) + String.valueOf(timeStamp); } private static Path checkCacheStatusValidity(Configuration conf, URI cache, long confFileStamp, CacheStatus cacheStatus, FileStatus fileStatus, boolean isArchive ) throws IOException { FileSystem fs = FileSystem.get(cache, conf); // Has to be if (!ifExistsAndFresh(conf, fs, cache, confFileStamp, cacheStatus, fileStatus)) { - throw new IOException("Stale cache file: " + cacheStatus.localLoadPath + + throw new IOException("Stale cache file: " + + cacheStatus.localizedLoadPath + " for cache-file: " + cache); } LOG.info(String.format("Using existing cache of %s->%s", - cache.toString(), cacheStatus.localLoadPath)); - return cacheStatus.localLoadPath; + cache.toString(), cacheStatus.localizedLoadPath)); + return cacheStatus.localizedLoadPath; } private static void createSymlink(Configuration conf, URI cache, CacheStatus cacheStatus, boolean isArchive, Path currentWorkDir, boolean honorSymLinkConf) throws IOException { boolean doSymlink = honorSymLinkConf && DistributedCache.getSymlink(conf); if(cache.getFragment() == null) { doSymlink = false; } String link = currentWorkDir.toString() + 
Path.SEPARATOR + cache.getFragment(); File flink = new File(link); if (doSymlink){ if (!flink.exists()) { - FileUtil.symLink(cacheStatus.localLoadPath.toString(), link); + FileUtil.symLink(cacheStatus.localizedLoadPath.toString(), link); } } } // the method which actually copies the caches locally and unjars/unzips them // and does chmod for the files private static Path localizeCache(Configuration conf, URI cache, long confFileStamp, CacheStatus cacheStatus, FileStatus fileStatus, boolean isArchive) throws IOException { FileSystem fs = getFileSystem(cache, conf); FileSystem localFs = FileSystem.getLocal(conf); Path parchive = null; if (isArchive) { - parchive = new Path(cacheStatus.localLoadPath, - new Path(cacheStatus.localLoadPath.getName())); + parchive = new Path(cacheStatus.localizedLoadPath, + new Path(cacheStatus.localizedLoadPath.getName())); } else { - parchive = cacheStatus.localLoadPath; + parchive = cacheStatus.localizedLoadPath; } if (!localFs.mkdirs(parchive.getParent())) { throw new IOException("Mkdirs failed to create directory " + - cacheStatus.localLoadPath.toString()); + cacheStatus.localizedLoadPath.toString()); } String cacheId = cache.getPath(); fs.copyToLocalFile(new Path(cacheId), parchive); if (isArchive) { String tmpArchive = parchive.toString().toLowerCase(); File srcFile = new File(parchive.toString()); File destDir = new File(parchive.getParent().toString()); if (tmpArchive.endsWith(".jar")) { RunJar.unJar(srcFile, destDir); } else if (tmpArchive.endsWith(".zip")) { FileUtil.unZip(srcFile, destDir); } else if (isTarFile(tmpArchive)) { FileUtil.unTar(srcFile, destDir); } // else will not do anyhting // and copy the file into the dir as it is } long cacheSize = FileUtil.getDU(new File(parchive.getParent().toString())); cacheStatus.size = cacheSize; synchronized (baseDirSize) { - Long dirSize = baseDirSize.get(cacheStatus.baseDir); + Long dirSize = baseDirSize.get(cacheStatus.localizedBaseDir); if (dirSize == null) { dirSize = 
Long.valueOf(cacheSize); } else { dirSize += cacheSize; } - baseDirSize.put(cacheStatus.baseDir, dirSize); + baseDirSize.put(cacheStatus.localizedBaseDir, dirSize); } // do chmod here try { //Setting recursive permission to grant everyone read and execute - FileUtil.chmod(cacheStatus.baseDir.toString(), "ugo+rx",true); + Path localDir = new Path(cacheStatus.localizedBaseDir, + cacheStatus.uniqueParentDir); + LOG.info("Doing chmod on localdir :" + localDir); + FileUtil.chmod(localDir.toString(), "ugo+rx", true); } catch(InterruptedException e) { LOG.warn("Exception in chmod" + e.toString()); } // update cacheStatus to reflect the newly cached file cacheStatus.mtime = getTimestamp(conf, cache); - return cacheStatus.localLoadPath; + return cacheStatus.localizedLoadPath; } private static boolean isTarFile(String filename) { return (filename.endsWith(".tgz") || filename.endsWith(".tar.gz") || filename.endsWith(".tar")); } // Checks if the cache has already been localized and is fresh private static boolean ifExistsAndFresh(Configuration conf, FileSystem fs, URI cache, long confFileStamp, CacheStatus lcacheStatus, FileStatus fileStatus) throws IOException { // check for existence of the cache long dfsFileStamp; if (fileStatus != null) { dfsFileStamp = fileStatus.getModificationTime(); } else { dfsFileStamp = getTimestamp(conf, cache); } // ensure that the file on hdfs hasn't been modified since the job started if (dfsFileStamp != confFileStamp) { LOG.fatal("File: " + cache + " has changed on HDFS since job started"); throw new IOException("File: " + cache + " has changed on HDFS since job started"); } if (dfsFileStamp != lcacheStatus.mtime) { return false; } return true; } /** * Returns mtime of a given cache file on hdfs. 
* @param conf configuration * @param cache cache file * @return mtime of a given cache file on hdfs * @throws IOException */ public static long getTimestamp(Configuration conf, URI cache) throws IOException { FileSystem fileSystem = FileSystem.get(cache, conf); Path filePath = new Path(cache.getPath()); return fileSystem.getFileStatus(filePath).getModificationTime(); } /** * This method create symlinks for all files in a given dir in another directory * @param conf the configuration * @param jobCacheDir the target directory for creating symlinks * @param workDir the directory in which the symlinks are created * @throws IOException */ public static void createAllSymlink(Configuration conf, File jobCacheDir, File workDir) throws IOException{ if ((jobCacheDir == null || !jobCacheDir.isDirectory()) || workDir == null || (!workDir.isDirectory())) { return; } boolean createSymlink = getSymlink(conf); if (createSymlink){ File[] list = jobCacheDir.listFiles(); for (int i=0; i < list.length; i++){ FileUtil.symLink(list[i].getAbsolutePath(), new File(workDir, list[i].getName()).toString()); } } } private static String getFileSysName(URI url) { String fsname = url.getScheme(); if ("hdfs".equals(fsname)) { String host = url.getHost(); int port = url.getPort(); return (port == (-1)) ? 
host : (host + ":" + port); } else { return null; } } private static FileSystem getFileSystem(URI cache, Configuration conf) throws IOException { String fileSysName = getFileSysName(cache); if (fileSysName != null) return FileSystem.getNamed(fileSysName, conf); else return FileSystem.get(conf); } /** * Set the configuration with the given set of archives * @param archives The list of archives that need to be localized * @param conf Configuration which will be changed */ public static void setCacheArchives(URI[] archives, Configuration conf) { String sarchives = StringUtils.uriToString(archives); conf.set("mapred.cache.archives", sarchives); } /** * Set the configuration with the given set of files * @param files The list of files that need to be localized * @param conf Configuration which will be changed */ public static void setCacheFiles(URI[] files, Configuration conf) { String sfiles = StringUtils.uriToString(files); conf.set("mapred.cache.files", sfiles); } /** * Get cache archives set in the Configuration * @param conf The configuration which contains the archives * @return A URI array of the caches set in the Configuration * @throws IOException */ public static URI[] getCacheArchives(Configuration conf) throws IOException { return StringUtils.stringToURI(conf.getStrings("mapred.cache.archives")); } /** * Get cache files set in the Configuration * @param conf The configuration which contains the files * @return A URI array of the files set in the Configuration * @throws IOException */ public static URI[] getCacheFiles(Configuration conf) throws IOException { return StringUtils.stringToURI(conf.getStrings("mapred.cache.files")); } /** * Return the path array of the localized caches * @param conf Configuration that contains the localized archives * @return A path array of localized caches * @throws IOException */ public static Path[] getLocalCacheArchives(Configuration conf) throws IOException { return StringUtils.stringToPath(conf 
.getStrings("mapred.cache.localArchives")); } /** * Return the path array of the localized files * @param conf Configuration that contains the localized files * @return A path array of localized files * @throws IOException */ public static Path[] getLocalCacheFiles(Configuration conf) throws IOException { return StringUtils.stringToPath(conf.getStrings("mapred.cache.localFiles")); } /** * Get the timestamps of the archives * @param conf The configuration which stored the timestamps * @return a string array of timestamps * @throws IOException */ public static String[] getArchiveTimestamps(Configuration conf) { return conf.getStrings("mapred.cache.archives.timestamps"); } /** * Get the timestamps of the files * @param conf The configuration which stored the timestamps * @return a string array of timestamps * @throws IOException */ public static String[] getFileTimestamps(Configuration conf) { return conf.getStrings("mapred.cache.files.timestamps"); } /** * This is to check the timestamp of the archives to be localized * @param conf Configuration which stores the timestamp's * @param timestamps comma separated list of timestamps of archives. * The order should be the same as the order in which the archives are added. */ public static void setArchiveTimestamps(Configuration conf, String timestamps) { conf.set("mapred.cache.archives.timestamps", timestamps); } /** * This is to check the timestamp of the files to be localized * @param conf Configuration which stores the timestamp's * @param timestamps comma separated list of timestamps of files. * The order should be the same as the order in which the files are added. 
*/ public static void setFileTimestamps(Configuration conf, String timestamps) { conf.set("mapred.cache.files.timestamps", timestamps); } /** * Set the conf to contain the location for localized archives * @param conf The conf to modify to contain the localized caches * @param str a comma separated list of local archives */ public static void setLocalArchives(Configuration conf, String str) { conf.set("mapred.cache.localArchives", str); } /** * Set the conf to contain the location for localized files * @param conf The conf to modify to contain the localized caches * @param str a comma separated list of local files */ public static void setLocalFiles(Configuration conf, String str) { conf.set("mapred.cache.localFiles", str); } /** * Add a archives to be localized to the conf * @param uri The uri of the cache to be localized * @param conf Configuration to add the cache to */ public static void addCacheArchive(URI uri, Configuration conf) { String archives = conf.get("mapred.cache.archives"); conf.set("mapred.cache.archives", archives == null ? uri.toString() : archives + "," + uri.toString()); } /** * Add a file to be localized to the conf * @param uri The uri of the cache to be localized * @param conf Configuration to add the cache to */ public static void addCacheFile(URI uri, Configuration conf) { String files = conf.get("mapred.cache.files"); conf.set("mapred.cache.files", files == null ? uri.toString() : files + "," + uri.toString()); } /** * Add an file path to the current set of classpath entries It adds the file * to cache as well. * * @param file Path of the file to be added * @param conf Configuration that contains the classpath setting */ public static void addFileToClassPath(Path file, Configuration conf) throws IOException { String classpath = conf.get("mapred.job.classpath.files"); conf.set("mapred.job.classpath.files", classpath == null ? 
file.toString() : classpath + System.getProperty("path.separator") + file.toString()); FileSystem fs = FileSystem.get(conf); URI uri = fs.makeQualified(file).toUri(); addCacheFile(uri, conf); } /** * Get the file entries in classpath as an array of Path * * @param conf Configuration that contains the classpath setting */ public static Path[] getFileClassPaths(Configuration conf) { String classpath = conf.get("mapred.job.classpath.files"); if (classpath == null) return null; ArrayList list = Collections.list(new StringTokenizer(classpath, System .getProperty("path.separator"))); Path[] paths = new Path[list.size()]; for (int i = 0; i < list.size(); i++) { paths[i] = new Path((String) list.get(i)); } return paths; } /** * Add an archive path to the current set of classpath entries. It adds the * archive to cache as well. * * @param archive Path of the archive to be added * @param conf Configuration that contains the classpath setting */ public static void addArchiveToClassPath(Path archive, Configuration conf) throws IOException { String classpath = conf.get("mapred.job.classpath.archives"); conf.set("mapred.job.classpath.archives", classpath == null ? 
archive .toString() : classpath + System.getProperty("path.separator") + archive.toString()); FileSystem fs = FileSystem.get(conf); URI uri = fs.makeQualified(archive).toUri(); addCacheArchive(uri, conf); } /** * Get the archive entries in classpath as an array of Path * * @param conf Configuration that contains the classpath setting */ public static Path[] getArchiveClassPaths(Configuration conf) { String classpath = conf.get("mapred.job.classpath.archives"); if (classpath == null) return null; ArrayList list = Collections.list(new StringTokenizer(classpath, System .getProperty("path.separator"))); Path[] paths = new Path[list.size()]; for (int i = 0; i < list.size(); i++) { paths[i] = new Path((String) list.get(i)); } return paths; } /** * This method allows you to create symlinks in the current working directory * of the task to all the cache files/archives * @param conf the jobconf */ public static void createSymlink(Configuration conf){ conf.set("mapred.create.symlink", "yes"); } /** * This method checks to see if symlinks are to be create for the * localized cache files in the current working directory * @param conf the jobconf * @return true if symlinks are to be created- else return false */ public static boolean getSymlink(Configuration conf){ String result = conf.get("mapred.create.symlink"); if ("yes".equals(result)){ return true; } return false; } /** * This method checks if there is a conflict in the fragment names * of the uris. Also makes sure that each uri has a fragment. It * is only to be called if you want to create symlinks for * the various archives and files. 
* @param uriFiles The uri array of urifiles * @param uriArchives the uri array of uri archives */ public static boolean checkURIs(URI[] uriFiles, URI[] uriArchives){ if ((uriFiles == null) && (uriArchives == null)){ return true; } if (uriFiles != null){ for (int i = 0; i < uriFiles.length; i++){ String frag1 = uriFiles[i].getFragment(); if (frag1 == null) return false; for (int j=i+1; j < uriFiles.length; j++){ String frag2 = uriFiles[j].getFragment(); if (frag2 == null) return false; if (frag1.equalsIgnoreCase(frag2)) return false; } if (uriArchives != null){ for (int j = 0; j < uriArchives.length; j++){ String frag2 = uriArchives[j].getFragment(); if (frag2 == null){ return false; } if (frag1.equalsIgnoreCase(frag2)) return false; for (int k=j+1; k < uriArchives.length; k++){ String frag3 = uriArchives[k].getFragment(); if (frag3 == null) return false; if (frag2.equalsIgnoreCase(frag3)) return false; } } } } } return true; } private static class CacheStatus { // the local load path of this cache - Path localLoadPath; + Path localizedLoadPath; //the base dir where the cache lies - Path baseDir; + Path localizedBaseDir; + + // the unique directory in localizedBaseDir, where the cache lies + Path uniqueParentDir; //the size of this cache long size; // number of instances using this cache int refcount; // the cache-file modification time long mtime; // is it initialized? boolean inited = false; - public CacheStatus(Path baseDir, Path localLoadPath) { + public CacheStatus(Path baseDir, Path localLoadPath, Path uniqueParentDir) { super(); - this.localLoadPath = localLoadPath; + this.localizedLoadPath = localLoadPath; this.refcount = 0; this.mtime = -1; - this.baseDir = baseDir; + this.localizedBaseDir = baseDir; this.size = 0; + this.uniqueParentDir = uniqueParentDir; } // get the base dir for the cache Path getBaseDir() { - return baseDir; + return localizedBaseDir; } // Is it initialized? 
boolean isInited() { return inited; } // mark it as initalized void initComplete() { inited = true; } } /** * Clear the entire contents of the cache and delete the backing files. This * should only be used when the server is reinitializing, because the users * are going to lose their files. */ public static void purgeCache(Configuration conf) throws IOException { synchronized (cachedArchives) { FileSystem localFs = FileSystem.getLocal(conf); for (Map.Entry<String,CacheStatus> f: cachedArchives.entrySet()) { try { - localFs.delete(f.getValue().localLoadPath, true); + localFs.delete(f.getValue().localizedLoadPath, true); } catch (IOException ie) { LOG.debug("Error cleaning up cache", ie); } } cachedArchives.clear(); } } }
jaxlaw/hadoop-common
bd41cceb3b61fa84645377d0036002a97dd65718
MAPREDUCE:1182 from https://issues.apache.org/jira/secure/attachment/12424116/M1182-1v20.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index ac31cbe..b11813e 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,410 +1,413 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via cdouglas) + MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the + configured threshold. (cdouglas) + yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. 
Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. 
NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. 
Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. 
yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. 
Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. 
http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). 
The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. 
Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/mapred/org/apache/hadoop/mapred/ReduceTask.java b/src/mapred/org/apache/hadoop/mapred/ReduceTask.java index f299dbd..7d12fe2 100644 --- a/src/mapred/org/apache/hadoop/mapred/ReduceTask.java +++ b/src/mapred/org/apache/hadoop/mapred/ReduceTask.java @@ -479,1162 +479,1158 @@ class ReduceTask extends Task { reducer.close(); } catch (IOException ignored) {} try { out.close(reporter); } catch (IOException ignored) {} throw ioe; } } static class NewTrackingRecordWriter<K,V> extends org.apache.hadoop.mapreduce.RecordWriter<K,V> { private final org.apache.hadoop.mapreduce.RecordWriter<K,V> real; private final org.apache.hadoop.mapreduce.Counter outputRecordCounter; NewTrackingRecordWriter(org.apache.hadoop.mapreduce.RecordWriter<K,V> real, org.apache.hadoop.mapreduce.Counter recordCounter) { this.real = real; this.outputRecordCounter = recordCounter; } @Override public void close(TaskAttemptContext context) throws IOException, InterruptedException { real.close(context); } @Override public void write(K key, V value) throws IOException, InterruptedException { real.write(key,value); outputRecordCounter.increment(1); } } @SuppressWarnings("unchecked") private <INKEY,INVALUE,OUTKEY,OUTVALUE> void runNewReducer(JobConf job, final TaskUmbilicalProtocol umbilical, final TaskReporter reporter, RawKeyValueIterator rIter, RawComparator<INKEY> comparator, Class<INKEY> keyClass, Class<INVALUE> valueClass ) throws IOException,InterruptedException, ClassNotFoundException { // wrap value iterator to report progress. 
final RawKeyValueIterator rawIter = rIter; rIter = new RawKeyValueIterator() { public void close() throws IOException { rawIter.close(); } public DataInputBuffer getKey() throws IOException { return rawIter.getKey(); } public Progress getProgress() { return rawIter.getProgress(); } public DataInputBuffer getValue() throws IOException { return rawIter.getValue(); } public boolean next() throws IOException { boolean ret = rawIter.next(); reducePhase.set(rawIter.getProgress().get()); reporter.progress(); return ret; } }; // make a task context so we can get the classes org.apache.hadoop.mapreduce.TaskAttemptContext taskContext = new org.apache.hadoop.mapreduce.TaskAttemptContext(job, getTaskID()); // make a reducer org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE> reducer = (org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>) ReflectionUtils.newInstance(taskContext.getReducerClass(), job); org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> output = (org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE>) outputFormat.getRecordWriter(taskContext); org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> trackedRW = new NewTrackingRecordWriter<OUTKEY, OUTVALUE>(output, reduceOutputCounter); job.setBoolean("mapred.skip.on", isSkipping()); org.apache.hadoop.mapreduce.Reducer.Context reducerContext = createReduceContext(reducer, job, getTaskID(), rIter, reduceInputKeyCounter, reduceInputValueCounter, trackedRW, committer, reporter, comparator, keyClass, valueClass); reducer.run(reducerContext); output.close(reducerContext); } private static enum CopyOutputErrorType { NO_ERROR, READ_ERROR, OTHER_ERROR }; class ReduceCopier<K, V> implements MRConstants { /** Reference to the umbilical object */ private TaskUmbilicalProtocol umbilical; private final TaskReporter reporter; /** Reference to the task object */ /** Number of ms before timing out a copy */ private static final int STALLED_COPY_TIMEOUT = 3 * 60 * 1000; /** Max events to 
fetch in one go from the tasktracker */ private static final int MAX_EVENTS_TO_FETCH = 10000; /** * our reduce task instance */ private ReduceTask reduceTask; /** * the list of map outputs currently being copied */ private List<MapOutputLocation> scheduledCopies; /** * the results of dispatched copy attempts */ private List<CopyResult> copyResults; /** * the number of outputs to copy in parallel */ private int numCopiers; /** * a number that is set to the max #fetches we'd schedule and then * pause the schduling */ private int maxInFlight; /** * the amount of time spent on fetching one map output before considering * it as failed and notifying the jobtracker about it. */ private int maxBackoff; /** * busy hosts from which copies are being backed off * Map of host -> next contact time */ private Map<String, Long> penaltyBox; /** * the set of unique hosts from which we are copying */ private Set<String> uniqueHosts; /** * A reference to the RamManager for writing the map outputs to. */ private ShuffleRamManager ramManager; /** * A reference to the local file system for writing the map outputs to. */ private FileSystem localFileSys; private FileSystem rfs; /** * Number of files to merge at a time */ private int ioSortFactor; /** * A reference to the throwable object (if merge throws an exception) */ private volatile Throwable mergeThrowable; /** * A flag to indicate when to exit localFS merge */ private volatile boolean exitLocalFSMerge = false; /** * A flag to indicate when to exit getMapEvents thread */ private volatile boolean exitGetMapEvents = false; /** * When we accumulate maxInMemOutputs number of files in ram, we merge/spill */ private final int maxInMemOutputs; /** * Usage threshold for in-memory output accumulation. */ private final float maxInMemCopyPer; /** * Maximum memory usage of map outputs to merge from memory into * the reduce, in bytes. */ private final long maxInMemReduce; /** * The threads for fetching the files. 
*/ private List<MapOutputCopier> copiers = null; /** * The object for metrics reporting. */ private ShuffleClientMetrics shuffleClientMetrics = null; /** * the minimum interval between tasktracker polls */ private static final long MIN_POLL_INTERVAL = 1000; /** * a list of map output locations for fetch retrials */ private List<MapOutputLocation> retryFetches = new ArrayList<MapOutputLocation>(); /** * The set of required map outputs */ private Set <TaskID> copiedMapOutputs = Collections.synchronizedSet(new TreeSet<TaskID>()); /** * The set of obsolete map taskids. */ private Set <TaskAttemptID> obsoleteMapIds = Collections.synchronizedSet(new TreeSet<TaskAttemptID>()); private Random random = null; /** * the max of all the map completion times */ private int maxMapRuntime; /** * Maximum number of fetch-retries per-map. */ private volatile int maxFetchRetriesPerMap; /** * Combiner runner, if a combiner is needed */ private CombinerRunner combinerRunner; /** * Resettable collector used for combine. */ private CombineOutputCollector combineCollector = null; /** * Maximum percent of failed fetch attempt before killing the reduce task. */ private static final float MAX_ALLOWED_FAILED_FETCH_ATTEMPT_PERCENT = 0.5f; /** * Minimum percent of progress required to keep the reduce alive. */ private static final float MIN_REQUIRED_PROGRESS_PERCENT = 0.5f; /** * Maximum percent of shuffle execution time required to keep the reducer alive. */ private static final float MAX_ALLOWED_STALL_TIME_PERCENT = 0.5f; /** * Minimum number of map fetch retries. */ private static final int MIN_FETCH_RETRIES_PER_MAP = 2; /** * The minimum percentage of maps yet to be copied, * which indicates end of shuffle */ private static final float MIN_PENDING_MAPS_PERCENT = 0.25f; /** * Maximum no. of unique maps from which we failed to fetch map-outputs * even after {@link #maxFetchRetriesPerMap} retries; after this the * reduce task is failed. 
*/ private int maxFailedUniqueFetches = 5; /** * The maps from which we fail to fetch map-outputs * even after {@link #maxFetchRetriesPerMap} retries. */ Set<TaskID> fetchFailedMaps = new TreeSet<TaskID>(); /** * A map of taskId -> no. of failed fetches */ Map<TaskAttemptID, Integer> mapTaskToFailedFetchesMap = new HashMap<TaskAttemptID, Integer>(); /** * Initial backoff interval (milliseconds) */ private static final int BACKOFF_INIT = 4000; /** * The interval for logging in the shuffle */ private static final int MIN_LOG_TIME = 60000; /** * List of in-memory map-outputs. */ private final List<MapOutput> mapOutputsFilesInMemory = Collections.synchronizedList(new LinkedList<MapOutput>()); /** * The map for (Hosts, List of MapIds from this Host) maintaining * map output locations */ private final Map<String, List<MapOutputLocation>> mapLocations = new ConcurrentHashMap<String, List<MapOutputLocation>>(); /** * This class contains the methods that should be used for metrics-reporting * the specific metrics for shuffle. This class actually reports the * metrics for the shuffle client (the ReduceTask), and hence the name * ShuffleClientMetrics. 
*/ class ShuffleClientMetrics implements Updater { private MetricsRecord shuffleMetrics = null; private int numFailedFetches = 0; private int numSuccessFetches = 0; private long numBytes = 0; private int numThreadsBusy = 0; ShuffleClientMetrics(JobConf conf) { MetricsContext metricsContext = MetricsUtil.getContext("mapred"); this.shuffleMetrics = MetricsUtil.createRecord(metricsContext, "shuffleInput"); this.shuffleMetrics.setTag("user", conf.getUser()); this.shuffleMetrics.setTag("jobName", conf.getJobName()); this.shuffleMetrics.setTag("jobId", ReduceTask.this.getJobID().toString()); this.shuffleMetrics.setTag("taskId", getTaskID().toString()); this.shuffleMetrics.setTag("sessionId", conf.getSessionId()); metricsContext.registerUpdater(this); } public synchronized void inputBytes(long numBytes) { this.numBytes += numBytes; } public synchronized void failedFetch() { ++numFailedFetches; } public synchronized void successFetch() { ++numSuccessFetches; } public synchronized void threadBusy() { ++numThreadsBusy; } public synchronized void threadFree() { --numThreadsBusy; } public void doUpdates(MetricsContext unused) { synchronized (this) { shuffleMetrics.incrMetric("shuffle_input_bytes", numBytes); shuffleMetrics.incrMetric("shuffle_failed_fetches", numFailedFetches); shuffleMetrics.incrMetric("shuffle_success_fetches", numSuccessFetches); if (numCopiers != 0) { shuffleMetrics.setMetric("shuffle_fetchers_busy_percent", 100*((float)numThreadsBusy/numCopiers)); } else { shuffleMetrics.setMetric("shuffle_fetchers_busy_percent", 0); } numBytes = 0; numSuccessFetches = 0; numFailedFetches = 0; } shuffleMetrics.update(); } } /** Represents the result of an attempt to copy a map output */ private class CopyResult { // the map output location against which a copy attempt was made private final MapOutputLocation loc; // the size of the file copied, -1 if the transfer failed private final long size; //a flag signifying whether a copy result is obsolete private static final int 
OBSOLETE = -2; private CopyOutputErrorType error = CopyOutputErrorType.NO_ERROR; CopyResult(MapOutputLocation loc, long size) { this.loc = loc; this.size = size; } CopyResult(MapOutputLocation loc, long size, CopyOutputErrorType error) { this.loc = loc; this.size = size; this.error = error; } public boolean getSuccess() { return size >= 0; } public boolean isObsolete() { return size == OBSOLETE; } public long getSize() { return size; } public String getHost() { return loc.getHost(); } public MapOutputLocation getLocation() { return loc; } public CopyOutputErrorType getError() { return error; } } private int nextMapOutputCopierId = 0; /** * Abstraction to track a map-output. */ private class MapOutputLocation { TaskAttemptID taskAttemptId; TaskID taskId; String ttHost; URL taskOutput; public MapOutputLocation(TaskAttemptID taskAttemptId, String ttHost, URL taskOutput) { this.taskAttemptId = taskAttemptId; this.taskId = this.taskAttemptId.getTaskID(); this.ttHost = ttHost; this.taskOutput = taskOutput; } public TaskAttemptID getTaskAttemptId() { return taskAttemptId; } public TaskID getTaskId() { return taskId; } public String getHost() { return ttHost; } public URL getOutputLocation() { return taskOutput; } } /** Describes the output of a map; could either be on disk or in-memory. 
*/ private class MapOutput { final TaskID mapId; final TaskAttemptID mapAttemptId; final Path file; final Configuration conf; byte[] data; final boolean inMemory; long compressedSize; public MapOutput(TaskID mapId, TaskAttemptID mapAttemptId, Configuration conf, Path file, long size) { this.mapId = mapId; this.mapAttemptId = mapAttemptId; this.conf = conf; this.file = file; this.compressedSize = size; this.data = null; this.inMemory = false; } public MapOutput(TaskID mapId, TaskAttemptID mapAttemptId, byte[] data, int compressedLength) { this.mapId = mapId; this.mapAttemptId = mapAttemptId; this.file = null; this.conf = null; this.data = data; this.compressedSize = compressedLength; this.inMemory = true; } public void discard() throws IOException { if (inMemory) { data = null; } else { FileSystem fs = file.getFileSystem(conf); fs.delete(file, true); } } } class ShuffleRamManager implements RamManager { /* Maximum percentage of the in-memory limit that a single shuffle can * consume*/ private static final float MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION = 0.25f; /* Maximum percentage of shuffle-threads which can be stalled * simultaneously after which a merge is triggered. 
*/ private static final float MAX_STALLED_SHUFFLE_THREADS_FRACTION = 0.75f; - private final int maxSize; - private final int maxSingleShuffleLimit; + private final long maxSize; + private final long maxSingleShuffleLimit; - private int size = 0; + private long size = 0; private Object dataAvailable = new Object(); - private int fullSize = 0; + private long fullSize = 0; private int numPendingRequests = 0; private int numRequiredMapOutputs = 0; private int numClosed = 0; private boolean closed = false; public ShuffleRamManager(Configuration conf) throws IOException { final float maxInMemCopyUse = conf.getFloat("mapred.job.shuffle.input.buffer.percent", 0.70f); if (maxInMemCopyUse > 1.0 || maxInMemCopyUse < 0.0) { throw new IOException("mapred.job.shuffle.input.buffer.percent" + maxInMemCopyUse); } - maxSize = (int)Math.min( + maxSize = (long)Math.min( Runtime.getRuntime().maxMemory() * maxInMemCopyUse, Integer.MAX_VALUE); - maxSingleShuffleLimit = (int)(maxSize * MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION); + maxSingleShuffleLimit = (long)(maxSize * MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION); LOG.info("ShuffleRamManager: MemoryLimit=" + maxSize + ", MaxSingleShuffleLimit=" + maxSingleShuffleLimit); } public synchronized boolean reserve(int requestedSize, InputStream in) throws InterruptedException { // Wait till the request can be fulfilled... while ((size + requestedSize) > maxSize) { // Close the input... 
if (in != null) { try { in.close(); } catch (IOException ie) { LOG.info("Failed to close connection with: " + ie); } finally { in = null; } } // Track pending requests synchronized (dataAvailable) { ++numPendingRequests; dataAvailable.notify(); } // Wait for memory to free up wait(); // Track pending requests synchronized (dataAvailable) { --numPendingRequests; } } size += requestedSize; return (in != null); } public synchronized void unreserve(int requestedSize) { size -= requestedSize; synchronized (dataAvailable) { fullSize -= requestedSize; --numClosed; } // Notify the threads blocked on RamManager.reserve notifyAll(); } public boolean waitForDataToMerge() throws InterruptedException { boolean done = false; synchronized (dataAvailable) { // Start in-memory merge if manager has been closed or... while (!closed && // In-memory threshold exceeded and at least two segments // have been fetched (getPercentUsed() < maxInMemCopyPer || numClosed < 2) && // More than "mapred.inmem.merge.threshold" map outputs // have been fetched into memory (maxInMemOutputs <= 0 || numClosed < maxInMemOutputs) && // More than MAX... threads are blocked on the RamManager // or the blocked threads are the last map outputs to be // fetched. If numRequiredMapOutputs is zero, either // setNumCopiedMapOutputs has not been called (no map ouputs // have been fetched, so there is nothing to merge) or the // last map outputs being transferred without // contention, so a merge would be premature. 
(numPendingRequests < numCopiers*MAX_STALLED_SHUFFLE_THREADS_FRACTION && (0 == numRequiredMapOutputs || numPendingRequests < numRequiredMapOutputs))) { dataAvailable.wait(); } done = closed; } return done; } public void closeInMemoryFile(int requestedSize) { synchronized (dataAvailable) { fullSize += requestedSize; ++numClosed; dataAvailable.notify(); } } public void setNumCopiedMapOutputs(int numRequiredMapOutputs) { synchronized (dataAvailable) { this.numRequiredMapOutputs = numRequiredMapOutputs; dataAvailable.notify(); } } public void close() { synchronized (dataAvailable) { closed = true; LOG.info("Closed ram manager"); dataAvailable.notify(); } } private float getPercentUsed() { return (float)fullSize/maxSize; } - int getMemoryLimit() { - return maxSize; - } - boolean canFitInMemory(long requestedSize) { return (requestedSize < Integer.MAX_VALUE && requestedSize < maxSingleShuffleLimit); } } /** Copies map outputs as they become available */ private class MapOutputCopier extends Thread { // basic/unit connection timeout (in milliseconds) private final static int UNIT_CONNECT_TIMEOUT = 30 * 1000; // default read timeout (in milliseconds) private final static int DEFAULT_READ_TIMEOUT = 3 * 60 * 1000; private MapOutputLocation currentLocation = null; private int id = nextMapOutputCopierId++; private Reporter reporter; private boolean readError = false; // Decompression of map-outputs private CompressionCodec codec = null; private Decompressor decompressor = null; public MapOutputCopier(JobConf job, Reporter reporter) { setName("MapOutputCopier " + reduceTask.getTaskID() + "." + id); LOG.debug(getName() + " created"); this.reporter = reporter; if (job.getCompressMapOutput()) { Class<? 
extends CompressionCodec> codecClass = job.getMapOutputCompressorClass(DefaultCodec.class); codec = ReflectionUtils.newInstance(codecClass, job); decompressor = CodecPool.getDecompressor(codec); } } /** * Fail the current file that we are fetching * @return were we currently fetching? */ public synchronized boolean fail() { if (currentLocation != null) { finish(-1, CopyOutputErrorType.OTHER_ERROR); return true; } else { return false; } } /** * Get the current map output location. */ public synchronized MapOutputLocation getLocation() { return currentLocation; } private synchronized void start(MapOutputLocation loc) { currentLocation = loc; } private synchronized void finish(long size, CopyOutputErrorType error) { if (currentLocation != null) { LOG.debug(getName() + " finishing " + currentLocation + " =" + size); synchronized (copyResults) { copyResults.add(new CopyResult(currentLocation, size, error)); copyResults.notify(); } currentLocation = null; } } /** Loop forever and fetch map outputs as they become available. 
* The thread exits when it is interrupted by {@link ReduceTaskRunner} */ @Override public void run() { while (true) { try { MapOutputLocation loc = null; long size = -1; synchronized (scheduledCopies) { while (scheduledCopies.isEmpty()) { scheduledCopies.wait(); } loc = scheduledCopies.remove(0); } CopyOutputErrorType error = CopyOutputErrorType.OTHER_ERROR; readError = false; try { shuffleClientMetrics.threadBusy(); start(loc); size = copyOutput(loc); shuffleClientMetrics.successFetch(); error = CopyOutputErrorType.NO_ERROR; } catch (IOException e) { LOG.warn(reduceTask.getTaskID() + " copy failed: " + loc.getTaskAttemptId() + " from " + loc.getHost()); LOG.warn(StringUtils.stringifyException(e)); shuffleClientMetrics.failedFetch(); if (readError) { error = CopyOutputErrorType.READ_ERROR; } // Reset size = -1; } finally { shuffleClientMetrics.threadFree(); finish(size, error); } } catch (InterruptedException e) { break; // ALL DONE } catch (FSError e) { LOG.error("Task: " + reduceTask.getTaskID() + " - FSError: " + StringUtils.stringifyException(e)); try { umbilical.fsError(reduceTask.getTaskID(), e.getMessage()); } catch (IOException io) { LOG.error("Could not notify TT of FSError: " + StringUtils.stringifyException(io)); } } catch (Throwable th) { String msg = getTaskID() + " : Map output copy failure : " + StringUtils.stringifyException(th); reportFatalError(getTaskID(), th, msg); } } if (decompressor != null) { CodecPool.returnDecompressor(decompressor); } } /** Copies a a map output from a remote host, via HTTP. 
* @param currentLocation the map output location to be copied * @return the path (fully qualified) of the copied file * @throws IOException if there is an error copying the file * @throws InterruptedException if the copier should give up */ private long copyOutput(MapOutputLocation loc ) throws IOException, InterruptedException { // check if we still need to copy the output from this location if (copiedMapOutputs.contains(loc.getTaskId()) || obsoleteMapIds.contains(loc.getTaskAttemptId())) { return CopyResult.OBSOLETE; } // a temp filename. If this file gets created in ramfs, we're fine, // else, we will check the localFS to find a suitable final location // for this path TaskAttemptID reduceId = reduceTask.getTaskID(); Path filename = new Path("/" + TaskTracker.getIntermediateOutputDir( reduceId.getJobID().toString(), reduceId.toString()) + "/map_" + loc.getTaskId().getId() + ".out"); // Copy the map output to a temp file whose name is unique to this attempt Path tmpMapOutput = new Path(filename+"-"+id); // Copy the map output MapOutput mapOutput = getMapOutput(loc, tmpMapOutput, reduceId.getTaskID().getId()); if (mapOutput == null) { throw new IOException("Failed to fetch map-output for " + loc.getTaskAttemptId() + " from " + loc.getHost()); } // The size of the map-output long bytes = mapOutput.compressedSize; // lock the ReduceTask while we do the rename synchronized (ReduceTask.this) { if (copiedMapOutputs.contains(loc.getTaskId())) { mapOutput.discard(); return CopyResult.OBSOLETE; } // Special case: discard empty map-outputs if (bytes == 0) { try { mapOutput.discard(); } catch (IOException ioe) { LOG.info("Couldn't discard output of " + loc.getTaskId()); } // Note that we successfully copied the map-output noteCopiedMapOutput(loc.getTaskId()); return bytes; } // Process map-output if (mapOutput.inMemory) { // Save it in the synchronized list of map-outputs mapOutputsFilesInMemory.add(mapOutput); } else { // Rename the temporary file to the final file; // 
ensure it is on the same partition tmpMapOutput = mapOutput.file; filename = new Path(tmpMapOutput.getParent(), filename.getName()); if (!localFileSys.rename(tmpMapOutput, filename)) { localFileSys.delete(tmpMapOutput, true); bytes = -1; throw new IOException("Failed to rename map output " + tmpMapOutput + " to " + filename); } synchronized (mapOutputFilesOnDisk) { addToMapOutputFilesOnDisk(localFileSys.getFileStatus(filename)); } } // Note that we successfully copied the map-output noteCopiedMapOutput(loc.getTaskId()); } return bytes; } /** * Save the map taskid whose output we just copied. * This function assumes that it has been synchronized on ReduceTask.this. * * @param taskId map taskid */ private void noteCopiedMapOutput(TaskID taskId) { copiedMapOutputs.add(taskId); ramManager.setNumCopiedMapOutputs(numMaps - copiedMapOutputs.size()); } /** * Get the map output into a local file (either in the inmemory fs or on the * local fs) from the remote server. * We use the file system so that we generate checksum files on the data. 
* @param mapOutputLoc map-output to be fetched * @param filename the filename to write the data into * @param connectionTimeout number of milliseconds for connection timeout * @param readTimeout number of milliseconds for read timeout * @return the path of the file that got created * @throws IOException when something goes wrong */ private MapOutput getMapOutput(MapOutputLocation mapOutputLoc, Path filename, int reduce) throws IOException, InterruptedException { // Connect URLConnection connection = mapOutputLoc.getOutputLocation().openConnection(); InputStream input = getInputStream(connection, STALLED_COPY_TIMEOUT, DEFAULT_READ_TIMEOUT); // Validate header from map output TaskAttemptID mapId = null; try { mapId = TaskAttemptID.forName(connection.getHeaderField(FROM_MAP_TASK)); } catch (IllegalArgumentException ia) { LOG.warn("Invalid map id ", ia); return null; } TaskAttemptID expectedMapId = mapOutputLoc.getTaskAttemptId(); if (!mapId.equals(expectedMapId)) { LOG.warn("data from wrong map:" + mapId + " arrived to reduce task " + reduce + ", where as expected map output should be from " + expectedMapId); return null; } long decompressedLength = Long.parseLong(connection.getHeaderField(RAW_MAP_OUTPUT_LENGTH)); long compressedLength = Long.parseLong(connection.getHeaderField(MAP_OUTPUT_LENGTH)); if (compressedLength < 0 || decompressedLength < 0) { LOG.warn(getName() + " invalid lengths in map output header: id: " + mapId + " compressed len: " + compressedLength + ", decompressed len: " + decompressedLength); return null; } int forReduce = (int)Integer.parseInt(connection.getHeaderField(FOR_REDUCE_TASK)); if (forReduce != reduce) { LOG.warn("data for the wrong reduce: " + forReduce + " with compressed len: " + compressedLength + ", decompressed len: " + decompressedLength + " arrived to reduce task " + reduce); return null; } LOG.info("header: " + mapId + ", compressed len: " + compressedLength + ", decompressed len: " + decompressedLength); //We will put a file in 
memory if it meets certain criteria: //1. The size of the (decompressed) file should be less than 25% of // the total inmem fs //2. There is space available in the inmem fs // Check if this map-output can be saved in-memory boolean shuffleInMemory = ramManager.canFitInMemory(decompressedLength); // Shuffle MapOutput mapOutput = null; if (shuffleInMemory) { LOG.info("Shuffling " + decompressedLength + " bytes (" + compressedLength + " raw bytes) " + "into RAM from " + mapOutputLoc.getTaskAttemptId()); mapOutput = shuffleInMemory(mapOutputLoc, connection, input, (int)decompressedLength, (int)compressedLength); } else { LOG.info("Shuffling " + decompressedLength + " bytes (" + compressedLength + " raw bytes) " + "into Local-FS from " + mapOutputLoc.getTaskAttemptId()); mapOutput = shuffleToDisk(mapOutputLoc, input, filename, compressedLength); } return mapOutput; } /** * The connection establishment is attempted multiple times and is given up * only on the last failure. Instead of connecting with a timeout of * X, we try connecting with a timeout of x < X but multiple times. */ private InputStream getInputStream(URLConnection connection, int connectionTimeout, int readTimeout) throws IOException { int unit = 0; if (connectionTimeout < 0) { throw new IOException("Invalid timeout " + "[timeout = " + connectionTimeout + " ms]"); } else if (connectionTimeout > 0) { unit = (UNIT_CONNECT_TIMEOUT > connectionTimeout) ? 
connectionTimeout : UNIT_CONNECT_TIMEOUT; } // set the read timeout to the total timeout connection.setReadTimeout(readTimeout); // set the connect timeout to the unit-connect-timeout connection.setConnectTimeout(unit); while (true) { try { connection.connect(); break; } catch (IOException ioe) { // update the total remaining connect-timeout connectionTimeout -= unit; // throw an exception if we have waited for timeout amount of time // note that the updated value if timeout is used here if (connectionTimeout == 0) { throw ioe; } // reset the connect timeout for the last try if (connectionTimeout < unit) { unit = connectionTimeout; // reset the connect time out for the final connect connection.setConnectTimeout(unit); } } } try { return connection.getInputStream(); } catch (IOException ioe) { readError = true; throw ioe; } } private MapOutput shuffleInMemory(MapOutputLocation mapOutputLoc, URLConnection connection, InputStream input, int mapOutputLength, int compressedLength) throws IOException, InterruptedException { // Reserve ram for the map-output boolean createdNow = ramManager.reserve(mapOutputLength, input); // Reconnect if we need to if (!createdNow) { // Reconnect try { connection = mapOutputLoc.getOutputLocation().openConnection(); input = getInputStream(connection, STALLED_COPY_TIMEOUT, DEFAULT_READ_TIMEOUT); } catch (IOException ioe) { LOG.info("Failed reopen connection to fetch map-output from " + mapOutputLoc.getHost()); // Inform the ram-manager ramManager.closeInMemoryFile(mapOutputLength); ramManager.unreserve(mapOutputLength); throw ioe; } } IFileInputStream checksumIn = new IFileInputStream(input,compressedLength); input = checksumIn; // Are map-outputs compressed? 
if (codec != null) { decompressor.reset(); input = codec.createInputStream(input, decompressor); } // Copy map-output into an in-memory buffer byte[] shuffleData = new byte[mapOutputLength]; MapOutput mapOutput = new MapOutput(mapOutputLoc.getTaskId(), mapOutputLoc.getTaskAttemptId(), shuffleData, compressedLength); int bytesRead = 0; try { int n = input.read(shuffleData, 0, shuffleData.length); while (n > 0) { bytesRead += n; shuffleClientMetrics.inputBytes(n); // indicate we're making progress reporter.progress(); n = input.read(shuffleData, bytesRead, (shuffleData.length-bytesRead)); } LOG.info("Read " + bytesRead + " bytes from map-output for " + mapOutputLoc.getTaskAttemptId()); input.close(); } catch (IOException ioe) { LOG.info("Failed to shuffle from " + mapOutputLoc.getTaskAttemptId(), ioe); // Inform the ram-manager ramManager.closeInMemoryFile(mapOutputLength); ramManager.unreserve(mapOutputLength); // Discard the map-output try { mapOutput.discard(); } catch (IOException ignored) { LOG.info("Failed to discard map-output from " + mapOutputLoc.getTaskAttemptId(), ignored); } mapOutput = null; // Close the streams IOUtils.cleanup(LOG, input); // Re-throw readError = true; throw ioe; } // Close the in-memory file ramManager.closeInMemoryFile(mapOutputLength); // Sanity check if (bytesRead != mapOutputLength) { // Inform the ram-manager ramManager.unreserve(mapOutputLength); // Discard the map-output try { mapOutput.discard(); } catch (IOException ignored) { // IGNORED because we are cleaning up LOG.info("Failed to discard map-output from " + mapOutputLoc.getTaskAttemptId(), ignored); } mapOutput = null; throw new IOException("Incomplete map output received for " + mapOutputLoc.getTaskAttemptId() + " from " + mapOutputLoc.getOutputLocation() + " (" + bytesRead + " instead of " + mapOutputLength + ")" ); } // TODO: Remove this after a 'fix' for HADOOP-3647 if (mapOutputLength > 0) { DataInputBuffer dib = new DataInputBuffer(); dib.reset(shuffleData, 0, 
shuffleData.length); LOG.info("Rec #1 from " + mapOutputLoc.getTaskAttemptId() + " -> (" + WritableUtils.readVInt(dib) + ", " + WritableUtils.readVInt(dib) + ") from " + mapOutputLoc.getHost()); } return mapOutput; } private MapOutput shuffleToDisk(MapOutputLocation mapOutputLoc, InputStream input, Path filename, long mapOutputLength) throws IOException { // Find out a suitable location for the output on local-filesystem Path localFilename = lDirAlloc.getLocalPathForWrite(filename.toUri().getPath(), mapOutputLength, conf); MapOutput mapOutput = new MapOutput(mapOutputLoc.getTaskId(), mapOutputLoc.getTaskAttemptId(),
jaxlaw/hadoop-common
ed82f69b2e8c7c12031ddbfdbf01edb159b483c0
MAPREDUCE:1147 from https://issues.apache.org/jira/secure/attachment/12424714/mapred-1147-v1.4-y20.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 64aaf38..ac31cbe 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,407 +1,410 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) + MAPREDUCE-1147. Add map output counters to new API. (Amar Kamat via + cdouglas) + yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. 
(Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. 
Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. 
MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. 
Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. 
http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. 
http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). 
http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/mapred/org/apache/hadoop/mapred/MapTask.java b/src/mapred/org/apache/hadoop/mapred/MapTask.java index bf6882f..1bdca71 100644 --- a/src/mapred/org/apache/hadoop/mapred/MapTask.java +++ b/src/mapred/org/apache/hadoop/mapred/MapTask.java @@ -1,1085 +1,1123 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapred; import static org.apache.hadoop.mapred.Task.Counter.COMBINE_INPUT_RECORDS; import static org.apache.hadoop.mapred.Task.Counter.COMBINE_OUTPUT_RECORDS; import static org.apache.hadoop.mapred.Task.Counter.MAP_INPUT_BYTES; import static org.apache.hadoop.mapred.Task.Counter.MAP_INPUT_RECORDS; import static org.apache.hadoop.mapred.Task.Counter.MAP_OUTPUT_BYTES; import static org.apache.hadoop.mapred.Task.Counter.MAP_OUTPUT_RECORDS; import java.io.DataInput; import java.io.DataOutput; import java.io.DataOutputStream; import java.io.IOException; import java.io.OutputStream; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.RawComparator; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.SequenceFile.CompressionType; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.io.compress.DefaultCodec; import org.apache.hadoop.io.serializer.Deserializer; import 
org.apache.hadoop.io.serializer.SerializationFactory; import org.apache.hadoop.io.serializer.Serializer; import org.apache.hadoop.mapred.IFile.Writer; import org.apache.hadoop.mapred.Merger.Segment; import org.apache.hadoop.mapred.SortedRanges.SkipRangeIterator; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.util.IndexedSortable; import org.apache.hadoop.util.IndexedSorter; import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.QuickSort; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; /** A Map task. */ class MapTask extends Task { /** * The size of each record in the index file for the map-outputs. */ public static final int MAP_OUTPUT_INDEX_RECORD_LENGTH = 24; private BytesWritable split = new BytesWritable(); private String splitClass; private final static int APPROX_HEADER_LENGTH = 150; private static final Log LOG = LogFactory.getLog(MapTask.class.getName()); { // set phase for this task setPhase(TaskStatus.Phase.MAP); } public MapTask() { super(); } public MapTask(String jobFile, TaskAttemptID taskId, int partition, String splitClass, BytesWritable split, int numSlotsRequired, String username) { super(jobFile, taskId, partition, numSlotsRequired, username); this.splitClass = splitClass; this.split = split; } @Override public boolean isMapTask() { return true; } @Override public void localizeConfiguration(JobConf conf) throws IOException { super.localizeConfiguration(conf); if (isMapOrReduce()) { Path localSplit = new Path(new Path(getJobFile()).getParent(), "split.dta"); LOG.debug("Writing local split to " + localSplit); DataOutputStream out = FileSystem.getLocal(conf).create(localSplit); Text.writeString(out, splitClass); split.write(out); out.close(); } } @Override public TaskRunner createRunner(TaskTracker tracker, TaskTracker.TaskInProgress tip) { return new MapTaskRunner(tip, tracker, this.conf); } @Override public void write(DataOutput out) throws IOException { 
super.write(out); if (isMapOrReduce()) { Text.writeString(out, splitClass); split.write(out); split = null; } } @Override public void readFields(DataInput in) throws IOException { super.readFields(in); if (isMapOrReduce()) { splitClass = Text.readString(in); split.readFields(in); } } /** * This class wraps the user's record reader to update the counters and progress * as records are read. * @param <K> * @param <V> */ class TrackedRecordReader<K, V> implements RecordReader<K,V> { private RecordReader<K,V> rawIn; private Counters.Counter inputByteCounter; private Counters.Counter inputRecordCounter; private TaskReporter reporter; private long beforePos = -1; private long afterPos = -1; TrackedRecordReader(RecordReader<K,V> raw, TaskReporter reporter) throws IOException{ rawIn = raw; inputRecordCounter = reporter.getCounter(MAP_INPUT_RECORDS); inputByteCounter = reporter.getCounter(MAP_INPUT_BYTES); this.reporter = reporter; } public K createKey() { return rawIn.createKey(); } public V createValue() { return rawIn.createValue(); } public synchronized boolean next(K key, V value) throws IOException { boolean ret = moveToNext(key, value); if (ret) { incrCounters(); } return ret; } protected void incrCounters() { inputRecordCounter.increment(1); inputByteCounter.increment(afterPos - beforePos); } protected synchronized boolean moveToNext(K key, V value) throws IOException { reporter.setProgress(getProgress()); beforePos = getPos(); boolean ret = rawIn.next(key, value); afterPos = getPos(); return ret; } public long getPos() throws IOException { return rawIn.getPos(); } public void close() throws IOException { rawIn.close(); } public float getProgress() throws IOException { return rawIn.getProgress(); } TaskReporter getTaskReporter() { return reporter; } } /** * This class skips the records based on the failed ranges from previous * attempts. 
*/ class SkippingRecordReader<K, V> extends TrackedRecordReader<K,V> { private SkipRangeIterator skipIt; private SequenceFile.Writer skipWriter; private boolean toWriteSkipRecs; private TaskUmbilicalProtocol umbilical; private Counters.Counter skipRecCounter; private long recIndex = -1; SkippingRecordReader(RecordReader<K,V> raw, TaskUmbilicalProtocol umbilical, TaskReporter reporter) throws IOException{ super(raw, reporter); this.umbilical = umbilical; this.skipRecCounter = reporter.getCounter(Counter.MAP_SKIPPED_RECORDS); this.toWriteSkipRecs = toWriteSkipRecs() && SkipBadRecords.getSkipOutputPath(conf)!=null; skipIt = getSkipRanges().skipRangeIterator(); } public synchronized boolean next(K key, V value) throws IOException { if(!skipIt.hasNext()) { LOG.warn("Further records got skipped."); return false; } boolean ret = moveToNext(key, value); long nextRecIndex = skipIt.next(); long skip = 0; while(recIndex<nextRecIndex && ret) { if(toWriteSkipRecs) { writeSkippedRec(key, value); } ret = moveToNext(key, value); skip++; } //close the skip writer once all the ranges are skipped if(skip>0 && skipIt.skippedAllRanges() && skipWriter!=null) { skipWriter.close(); } skipRecCounter.increment(skip); reportNextRecordRange(umbilical, recIndex); if (ret) { incrCounters(); } return ret; } protected synchronized boolean moveToNext(K key, V value) throws IOException { recIndex++; return super.moveToNext(key, value); } @SuppressWarnings("unchecked") private void writeSkippedRec(K key, V value) throws IOException{ if(skipWriter==null) { Path skipDir = SkipBadRecords.getSkipOutputPath(conf); Path skipFile = new Path(skipDir, getTaskID().toString()); skipWriter = SequenceFile.createWriter( skipFile.getFileSystem(conf), conf, skipFile, (Class<K>) createKey().getClass(), (Class<V>) createValue().getClass(), CompressionType.BLOCK, getTaskReporter()); } skipWriter.append(key, value); } } @Override public void run(final JobConf job, final TaskUmbilicalProtocol umbilical) throws 
IOException, ClassNotFoundException, InterruptedException { this.umbilical = umbilical; // start thread that will handle communication with parent TaskReporter reporter = new TaskReporter(getProgress(), umbilical); reporter.startCommunicationThread(); boolean useNewApi = job.getUseNewMapper(); initialize(job, getJobID(), reporter, useNewApi); // check if it is a cleanupJobTask if (jobCleanup) { runJobCleanupTask(umbilical, reporter); return; } if (jobSetup) { runJobSetupTask(umbilical, reporter); return; } if (taskCleanup) { runTaskCleanupTask(umbilical, reporter); return; } if (useNewApi) { runNewMapper(job, split, umbilical, reporter); } else { runOldMapper(job, split, umbilical, reporter); } done(umbilical, reporter); } @SuppressWarnings("unchecked") private <INKEY,INVALUE,OUTKEY,OUTVALUE> void runOldMapper(final JobConf job, final BytesWritable rawSplit, final TaskUmbilicalProtocol umbilical, TaskReporter reporter ) throws IOException, InterruptedException, ClassNotFoundException { InputSplit inputSplit = null; // reinstantiate the split try { inputSplit = (InputSplit) ReflectionUtils.newInstance(job.getClassByName(splitClass), job); } catch (ClassNotFoundException exp) { IOException wrap = new IOException("Split class " + splitClass + " not found"); wrap.initCause(exp); throw wrap; } DataInputBuffer splitBuffer = new DataInputBuffer(); splitBuffer.reset(split.getBytes(), 0, split.getLength()); inputSplit.readFields(splitBuffer); updateJobWithSplit(job, inputSplit); reporter.setInputSplit(inputSplit); RecordReader<INKEY,INVALUE> rawIn = // open input job.getInputFormat().getRecordReader(inputSplit, job, reporter); RecordReader<INKEY,INVALUE> in = isSkipping() ? 
new SkippingRecordReader<INKEY,INVALUE>(rawIn, umbilical, reporter) : new TrackedRecordReader<INKEY,INVALUE>(rawIn, reporter); job.setBoolean("mapred.skip.on", isSkipping()); int numReduceTasks = conf.getNumReduceTasks(); LOG.info("numReduceTasks: " + numReduceTasks); MapOutputCollector collector = null; if (numReduceTasks > 0) { collector = new MapOutputBuffer(umbilical, job, reporter); } else { collector = new DirectMapOutputCollector(umbilical, job, reporter); } MapRunnable<INKEY,INVALUE,OUTKEY,OUTVALUE> runner = ReflectionUtils.newInstance(job.getMapRunnerClass(), job); try { runner.run(in, new OldOutputCollector(collector, conf), reporter); collector.flush(); } finally { //close in.close(); // close input collector.close(); } } /** * Update the job with details about the file split * @param job the job configuration to update * @param inputSplit the file split */ private void updateJobWithSplit(final JobConf job, InputSplit inputSplit) { if (inputSplit instanceof FileSplit) { FileSplit fileSplit = (FileSplit) inputSplit; job.set("map.input.file", fileSplit.getPath().toString()); job.setLong("map.input.start", fileSplit.getStart()); job.setLong("map.input.length", fileSplit.getLength()); } } static class NewTrackingRecordReader<K,V> extends org.apache.hadoop.mapreduce.RecordReader<K,V> { private final org.apache.hadoop.mapreduce.RecordReader<K,V> real; private final org.apache.hadoop.mapreduce.Counter inputRecordCounter; private final TaskReporter reporter; NewTrackingRecordReader(org.apache.hadoop.mapreduce.RecordReader<K,V> real, TaskReporter reporter) { this.real = real; this.reporter = reporter; this.inputRecordCounter = reporter.getCounter(MAP_INPUT_RECORDS); } @Override public void close() throws IOException { real.close(); } @Override public K getCurrentKey() throws IOException, InterruptedException { return real.getCurrentKey(); } @Override public V getCurrentValue() throws IOException, InterruptedException { return real.getCurrentValue(); } @Override 
public float getProgress() throws IOException, InterruptedException { return real.getProgress(); } @Override public void initialize(org.apache.hadoop.mapreduce.InputSplit split, org.apache.hadoop.mapreduce.TaskAttemptContext context ) throws IOException, InterruptedException { real.initialize(split, context); } @Override public boolean nextKeyValue() throws IOException, InterruptedException { boolean result = real.nextKeyValue(); if (result) { inputRecordCounter.increment(1); } reporter.setProgress(getProgress()); return result; } } /** * Since the mapred and mapreduce Partitioners don't share a common interface * (JobConfigurable is deprecated and a subtype of mapred.Partitioner), the * partitioner lives in Old/NewOutputCollector. Note that, for map-only jobs, * the configured partitioner should not be called. It's common for * partitioners to compute a result mod numReduces, which causes a div0 error */ private static class OldOutputCollector<K,V> implements OutputCollector<K,V> { private final Partitioner<K,V> partitioner; private final MapOutputCollector<K,V> collector; private final int numPartitions; @SuppressWarnings("unchecked") OldOutputCollector(MapOutputCollector<K,V> collector, JobConf conf) { numPartitions = conf.getNumReduceTasks(); if (numPartitions > 0) { partitioner = (Partitioner<K,V>) ReflectionUtils.newInstance(conf.getPartitionerClass(), conf); } else { partitioner = new Partitioner<K,V>() { @Override public void configure(JobConf job) { } @Override public int getPartition(K key, V value, int numPartitions) { return -1; } }; } this.collector = collector; } @Override public void collect(K key, V value) throws IOException { try { collector.collect(key, value, partitioner.getPartition(key, value, numPartitions)); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); throw new IOException("interrupt exception", ie); } } } + private class NewDirectOutputCollector<K,V> + extends org.apache.hadoop.mapreduce.RecordWriter<K,V> { + 
private final org.apache.hadoop.mapreduce.RecordWriter out; + + private final TaskReporter reporter; + + private final Counters.Counter mapOutputRecordCounter; + + @SuppressWarnings("unchecked") + NewDirectOutputCollector(org.apache.hadoop.mapreduce.JobContext jobContext, + JobConf job, TaskUmbilicalProtocol umbilical, TaskReporter reporter) + throws IOException, ClassNotFoundException, InterruptedException { + this.reporter = reporter; + out = outputFormat.getRecordWriter(taskContext); + mapOutputRecordCounter = + reporter.getCounter(MAP_OUTPUT_RECORDS); + } + + @Override + @SuppressWarnings("unchecked") + public void write(K key, V value) + throws IOException, InterruptedException { + reporter.progress(); + out.write(key, value); + mapOutputRecordCounter.increment(1); + } + + @Override + public void close(TaskAttemptContext context) + throws IOException,InterruptedException { + reporter.progress(); + if (out != null) { + out.close(context); + } + } + } + private class NewOutputCollector<K,V> extends org.apache.hadoop.mapreduce.RecordWriter<K,V> { private final MapOutputCollector<K,V> collector; private final org.apache.hadoop.mapreduce.Partitioner<K,V> partitioner; private final int partitions; @SuppressWarnings("unchecked") NewOutputCollector(org.apache.hadoop.mapreduce.JobContext jobContext, JobConf job, TaskUmbilicalProtocol umbilical, TaskReporter reporter ) throws IOException, ClassNotFoundException { collector = new MapOutputBuffer<K,V>(umbilical, job, reporter); partitions = jobContext.getNumReduceTasks(); if (partitions > 0) { partitioner = (org.apache.hadoop.mapreduce.Partitioner<K,V>) ReflectionUtils.newInstance(jobContext.getPartitionerClass(), job); } else { partitioner = new org.apache.hadoop.mapreduce.Partitioner<K,V>() { @Override public int getPartition(K key, V value, int numPartitions) { return -1; } }; } } @Override public void write(K key, V value) throws IOException, InterruptedException { collector.collect(key, value, 
partitioner.getPartition(key, value, partitions)); } @Override public void close(TaskAttemptContext context ) throws IOException,InterruptedException { try { collector.flush(); } catch (ClassNotFoundException cnf) { throw new IOException("can't find class ", cnf); } collector.close(); } } @SuppressWarnings("unchecked") private <INKEY,INVALUE,OUTKEY,OUTVALUE> void runNewMapper(final JobConf job, final BytesWritable rawSplit, final TaskUmbilicalProtocol umbilical, TaskReporter reporter ) throws IOException, ClassNotFoundException, InterruptedException { // make a task context so we can get the classes org.apache.hadoop.mapreduce.TaskAttemptContext taskContext = new org.apache.hadoop.mapreduce.TaskAttemptContext(job, getTaskID()); // make a mapper org.apache.hadoop.mapreduce.Mapper<INKEY,INVALUE,OUTKEY,OUTVALUE> mapper = (org.apache.hadoop.mapreduce.Mapper<INKEY,INVALUE,OUTKEY,OUTVALUE>) ReflectionUtils.newInstance(taskContext.getMapperClass(), job); // make the input format org.apache.hadoop.mapreduce.InputFormat<INKEY,INVALUE> inputFormat = (org.apache.hadoop.mapreduce.InputFormat<INKEY,INVALUE>) ReflectionUtils.newInstance(taskContext.getInputFormatClass(), job); // rebuild the input split org.apache.hadoop.mapreduce.InputSplit split = null; DataInputBuffer splitBuffer = new DataInputBuffer(); splitBuffer.reset(rawSplit.getBytes(), 0, rawSplit.getLength()); SerializationFactory factory = new SerializationFactory(job); Deserializer<? extends org.apache.hadoop.mapreduce.InputSplit> deserializer = (Deserializer<? 
extends org.apache.hadoop.mapreduce.InputSplit>) factory.getDeserializer(job.getClassByName(splitClass)); deserializer.open(splitBuffer); split = deserializer.deserialize(null); org.apache.hadoop.mapreduce.RecordReader<INKEY,INVALUE> input = new NewTrackingRecordReader<INKEY,INVALUE> (inputFormat.createRecordReader(split, taskContext), reporter); job.setBoolean("mapred.skip.on", isSkipping()); org.apache.hadoop.mapreduce.RecordWriter output = null; org.apache.hadoop.mapreduce.Mapper<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context mapperContext = null; try { Constructor<org.apache.hadoop.mapreduce.Mapper.Context> contextConstructor = org.apache.hadoop.mapreduce.Mapper.Context.class.getConstructor (new Class[]{org.apache.hadoop.mapreduce.Mapper.class, Configuration.class, org.apache.hadoop.mapreduce.TaskAttemptID.class, org.apache.hadoop.mapreduce.RecordReader.class, org.apache.hadoop.mapreduce.RecordWriter.class, org.apache.hadoop.mapreduce.OutputCommitter.class, org.apache.hadoop.mapreduce.StatusReporter.class, org.apache.hadoop.mapreduce.InputSplit.class}); // get an output object if (job.getNumReduceTasks() == 0) { - output = outputFormat.getRecordWriter(taskContext); + output = + new NewDirectOutputCollector(taskContext, job, umbilical, reporter); } else { output = new NewOutputCollector(taskContext, job, umbilical, reporter); } mapperContext = contextConstructor.newInstance(mapper, job, getTaskID(), input, output, committer, reporter, split); input.initialize(split, mapperContext); mapper.run(mapperContext); input.close(); output.close(mapperContext); } catch (NoSuchMethodException e) { throw new IOException("Can't find Context constructor", e); } catch (InstantiationException e) { throw new IOException("Can't create Context", e); } catch (InvocationTargetException e) { throw new IOException("Can't invoke Context constructor", e); } catch (IllegalAccessException e) { throw new IOException("Can't invoke Context constructor", e); } } interface MapOutputCollector<K, V> { 
public void collect(K key, V value, int partition ) throws IOException, InterruptedException; public void close() throws IOException, InterruptedException; public void flush() throws IOException, InterruptedException, ClassNotFoundException; } class DirectMapOutputCollector<K, V> implements MapOutputCollector<K, V> { private RecordWriter<K, V> out = null; private TaskReporter reporter = null; private final Counters.Counter mapOutputRecordCounter; @SuppressWarnings("unchecked") public DirectMapOutputCollector(TaskUmbilicalProtocol umbilical, JobConf job, TaskReporter reporter) throws IOException { this.reporter = reporter; String finalName = getOutputName(getPartition()); FileSystem fs = FileSystem.get(job); out = job.getOutputFormat().getRecordWriter(fs, job, finalName, reporter); mapOutputRecordCounter = reporter.getCounter(MAP_OUTPUT_RECORDS); } public void close() throws IOException { if (this.out != null) { out.close(this.reporter); } } public void flush() throws IOException, InterruptedException, ClassNotFoundException { } public void collect(K key, V value, int partition) throws IOException { reporter.progress(); out.write(key, value); mapOutputRecordCounter.increment(1); } } class MapOutputBuffer<K extends Object, V extends Object> implements MapOutputCollector<K, V>, IndexedSortable { private final int partitions; private final JobConf job; private final TaskReporter reporter; private final Class<K> keyClass; private final Class<V> valClass; private final RawComparator<K> comparator; private final SerializationFactory serializationFactory; private final Serializer<K> keySerializer; private final Serializer<V> valSerializer; private final CombinerRunner<K,V> combinerRunner; private final CombineOutputCollector<K, V> combineCollector; // Compression for map-outputs private CompressionCodec codec = null; // k/v accounting private volatile int kvstart = 0; // marks beginning of spill private volatile int kvend = 0; // marks beginning of collectable private int 
kvindex = 0; // marks end of collected private final int[] kvoffsets; // indices into kvindices private final int[] kvindices; // partition, k/v offsets into kvbuffer private volatile int bufstart = 0; // marks beginning of spill private volatile int bufend = 0; // marks beginning of collectable private volatile int bufvoid = 0; // marks the point where we should stop // reading at the end of the buffer private int bufindex = 0; // marks end of collected private int bufmark = 0; // marks end of record private byte[] kvbuffer; // main output buffer private static final int PARTITION = 0; // partition offset in acct private static final int KEYSTART = 1; // key offset in acct private static final int VALSTART = 2; // val offset in acct private static final int ACCTSIZE = 3; // total #fields in acct private static final int RECSIZE = (ACCTSIZE + 1) * 4; // acct bytes per record // spill accounting private volatile int numSpills = 0; private volatile Throwable sortSpillException = null; private final int softRecordLimit; private final int softBufferLimit; private final int minSpillsForCombine; private final IndexedSorter sorter; private final ReentrantLock spillLock = new ReentrantLock(); private final Condition spillDone = spillLock.newCondition(); private final Condition spillReady = spillLock.newCondition(); private final BlockingBuffer bb = new BlockingBuffer(); private volatile boolean spillThreadRunning = false; private final SpillThread spillThread = new SpillThread(); private final FileSystem localFs; private final FileSystem rfs; private final Counters.Counter mapOutputByteCounter; private final Counters.Counter mapOutputRecordCounter; private final Counters.Counter combineOutputCounter; private ArrayList<SpillRecord> indexCacheList; private int totalIndexCacheMemory; private static final int INDEX_CACHE_MEMORY_LIMIT = 1024 * 1024; @SuppressWarnings("unchecked") public MapOutputBuffer(TaskUmbilicalProtocol umbilical, JobConf job, TaskReporter reporter ) throws 
IOException, ClassNotFoundException { this.job = job; this.reporter = reporter; localFs = FileSystem.getLocal(job); partitions = job.getNumReduceTasks(); rfs = ((LocalFileSystem)localFs).getRaw(); indexCacheList = new ArrayList<SpillRecord>(); //sanity checks final float spillper = job.getFloat("io.sort.spill.percent",(float)0.8); final float recper = job.getFloat("io.sort.record.percent",(float)0.05); final int sortmb = job.getInt("io.sort.mb", 100); if (spillper > (float)1.0 || spillper < (float)0.0) { throw new IOException("Invalid \"io.sort.spill.percent\": " + spillper); } if (recper > (float)1.0 || recper < (float)0.01) { throw new IOException("Invalid \"io.sort.record.percent\": " + recper); } if ((sortmb & 0x7FF) != sortmb) { throw new IOException("Invalid \"io.sort.mb\": " + sortmb); } sorter = ReflectionUtils.newInstance( job.getClass("map.sort.class", QuickSort.class, IndexedSorter.class), job); LOG.info("io.sort.mb = " + sortmb); // buffers and accounting int maxMemUsage = sortmb << 20; int recordCapacity = (int)(maxMemUsage * recper); recordCapacity -= recordCapacity % RECSIZE; kvbuffer = new byte[maxMemUsage - recordCapacity]; bufvoid = kvbuffer.length; recordCapacity /= RECSIZE; kvoffsets = new int[recordCapacity]; kvindices = new int[recordCapacity * ACCTSIZE]; softBufferLimit = (int)(kvbuffer.length * spillper); softRecordLimit = (int)(kvoffsets.length * spillper); LOG.info("data buffer = " + softBufferLimit + "/" + kvbuffer.length); LOG.info("record buffer = " + softRecordLimit + "/" + kvoffsets.length); // k/v serialization comparator = job.getOutputKeyComparator(); keyClass = (Class<K>)job.getMapOutputKeyClass(); valClass = (Class<V>)job.getMapOutputValueClass(); serializationFactory = new SerializationFactory(job); keySerializer = serializationFactory.getSerializer(keyClass); keySerializer.open(bb); valSerializer = serializationFactory.getSerializer(valClass); valSerializer.open(bb); // counters mapOutputByteCounter = 
reporter.getCounter(MAP_OUTPUT_BYTES); mapOutputRecordCounter = reporter.getCounter(MAP_OUTPUT_RECORDS); Counters.Counter combineInputCounter = reporter.getCounter(COMBINE_INPUT_RECORDS); combineOutputCounter = reporter.getCounter(COMBINE_OUTPUT_RECORDS); // compression if (job.getCompressMapOutput()) { Class<? extends CompressionCodec> codecClass = job.getMapOutputCompressorClass(DefaultCodec.class); codec = ReflectionUtils.newInstance(codecClass, job); } // combiner combinerRunner = CombinerRunner.create(job, getTaskID(), combineInputCounter, reporter, null); if (combinerRunner != null) { combineCollector= new CombineOutputCollector<K,V>(combineOutputCounter); } else { combineCollector = null; } minSpillsForCombine = job.getInt("min.num.spills.for.combine", 3); spillThread.setDaemon(true); spillThread.setName("SpillThread"); spillLock.lock(); try { spillThread.start(); while (!spillThreadRunning) { spillDone.await(); } } catch (InterruptedException e) { throw (IOException)new IOException("Spill thread failed to initialize" ).initCause(sortSpillException); } finally { spillLock.unlock(); } if (sortSpillException != null) { throw (IOException)new IOException("Spill thread failed to initialize" ).initCause(sortSpillException); } } public synchronized void collect(K key, V value, int partition ) throws IOException { reporter.progress(); if (key.getClass() != keyClass) { throw new IOException("Type mismatch in key from map: expected " + keyClass.getName() + ", recieved " + key.getClass().getName()); } if (value.getClass() != valClass) { throw new IOException("Type mismatch in value from map: expected " + valClass.getName() + ", recieved " + value.getClass().getName()); } final int kvnext = (kvindex + 1) % kvoffsets.length; spillLock.lock(); try { boolean kvfull; do { if (sortSpillException != null) { throw (IOException)new IOException("Spill failed" ).initCause(sortSpillException); } // sufficient acct space kvfull = kvnext == kvstart; final boolean kvsoftlimit = 
((kvnext > kvend) ? kvnext - kvend > softRecordLimit : kvend - kvnext <= kvoffsets.length - softRecordLimit); if (kvstart == kvend && kvsoftlimit) { LOG.info("Spilling map output: record full = " + kvsoftlimit); startSpill(); } if (kvfull) { try { while (kvstart != kvend) { reporter.progress(); spillDone.await(); } } catch (InterruptedException e) { throw (IOException)new IOException( "Collector interrupted while waiting for the writer" ).initCause(e); } } } while (kvfull); } finally { spillLock.unlock(); } try { // serialize key bytes into buffer int keystart = bufindex; keySerializer.serialize(key); if (bufindex < keystart) { // wrapped the key; reset required bb.reset(); keystart = 0; } // serialize value bytes into buffer final int valstart = bufindex; valSerializer.serialize(value); int valend = bb.markRecord(); if (partition < 0 || partition >= partitions) { throw new IOException("Illegal partition for " + key + " (" + partition + ")"); } mapOutputRecordCounter.increment(1); mapOutputByteCounter.increment(valend >= keystart ? valend - keystart : (bufvoid - keystart) + valend); // update accounting info int ind = kvindex * ACCTSIZE; kvoffsets[kvindex] = ind; kvindices[ind + PARTITION] = partition; kvindices[ind + KEYSTART] = keystart; kvindices[ind + VALSTART] = valstart; kvindex = kvnext; } catch (MapBufferTooSmallException e) { LOG.info("Record too large for in-memory buffer: " + e.getMessage()); spillSingleRecord(key, value, partition); mapOutputRecordCounter.increment(1); return; } } /** * Compare logical range, st i, j MOD offset capacity. * Compare by partition, then by key. 
* @see IndexedSortable#compare */ public int compare(int i, int j) { final int ii = kvoffsets[i % kvoffsets.length]; final int ij = kvoffsets[j % kvoffsets.length]; // sort by partition if (kvindices[ii + PARTITION] != kvindices[ij + PARTITION]) { return kvindices[ii + PARTITION] - kvindices[ij + PARTITION]; } // sort by key return comparator.compare(kvbuffer, kvindices[ii + KEYSTART], kvindices[ii + VALSTART] - kvindices[ii + KEYSTART], kvbuffer, kvindices[ij + KEYSTART], kvindices[ij + VALSTART] - kvindices[ij + KEYSTART]); } /** * Swap logical indices st i, j MOD offset capacity. * @see IndexedSortable#swap */ public void swap(int i, int j) { i %= kvoffsets.length; j %= kvoffsets.length; int tmp = kvoffsets[i]; kvoffsets[i] = kvoffsets[j]; kvoffsets[j] = tmp; } /** * Inner class managing the spill of serialized records to disk. */ protected class BlockingBuffer extends DataOutputStream { public BlockingBuffer() { this(new Buffer()); } private BlockingBuffer(OutputStream out) { super(out); } /** * Mark end of record. Note that this is required if the buffer is to * cut the spill in the proper place. */ public int markRecord() { bufmark = bufindex; return bufindex; } /** * Set position from last mark to end of writable buffer, then rewrite * the data between last mark and kvindex. * This handles a special case where the key wraps around the buffer. * If the key is to be passed to a RawComparator, then it must be * contiguous in the buffer. This recopies the data in the buffer back * into itself, but starting at the beginning of the buffer. Note that * reset() should <b>only</b> be called immediately after detecting * this condition. To call it at any other time is undefined and would * likely result in data loss or corruption. 
* @see #markRecord() */ protected synchronized void reset() throws IOException { // spillLock unnecessary; If spill wraps, then // bufindex < bufstart < bufend so contention is impossible // a stale value for bufstart does not affect correctness, since // we can only get false negatives that force the more // conservative path int headbytelen = bufvoid - bufmark; bufvoid = bufmark; if (bufindex + headbytelen < bufstart) { System.arraycopy(kvbuffer, 0, kvbuffer, headbytelen, bufindex); System.arraycopy(kvbuffer, bufvoid, kvbuffer, 0, headbytelen); bufindex += headbytelen; } else { byte[] keytmp = new byte[bufindex]; System.arraycopy(kvbuffer, 0, keytmp, 0, bufindex); bufindex = 0; out.write(kvbuffer, bufmark, headbytelen); out.write(keytmp); } } } public class Buffer extends OutputStream { private final byte[] scratch = new byte[1]; @Override public synchronized void write(int v) throws IOException { scratch[0] = (byte)v; write(scratch, 0, 1); } /** * Attempt to write a sequence of bytes to the collection buffer. * This method will block if the spill thread is running and it * cannot write. * @throws MapBufferTooSmallException if record is too large to * deserialize into the collection buffer. */ @Override public synchronized void write(byte b[], int off, int len) throws IOException { boolean buffull = false; boolean wrap = false; spillLock.lock(); try { do { if (sortSpillException != null) { throw (IOException)new IOException("Spill failed" ).initCause(sortSpillException); } // sufficient buffer space? if (bufstart <= bufend && bufend <= bufindex) { buffull = bufindex + len > bufvoid; wrap = (bufvoid - bufindex) + bufstart > len; } else { // bufindex <= bufstart <= bufend // bufend <= bufindex <= bufstart wrap = false; buffull = bufindex + len > bufstart; } if (kvstart == kvend) { // spill thread not running if (kvend != kvindex) { // we have records we can spill final boolean bufsoftlimit = (bufindex > bufend) ? 
bufindex - bufend > softBufferLimit : bufend - bufindex < bufvoid - softBufferLimit; if (bufsoftlimit || (buffull && !wrap)) { LOG.info("Spilling map output: buffer full= " + bufsoftlimit); startSpill(); } } else if (buffull && !wrap) { // We have no buffered records, and this record is too large // to write into kvbuffer. We must spill it directly from // collect final int size = ((bufend <= bufindex) ? bufindex - bufend : (bufvoid - bufend) + bufindex) + len; bufstart = bufend = bufindex = bufmark = 0; kvstart = kvend = kvindex = 0; bufvoid = kvbuffer.length; throw new MapBufferTooSmallException(size + " bytes"); } } if (buffull && !wrap) { try { while (kvstart != kvend) { reporter.progress(); spillDone.await(); } } catch (InterruptedException e) { throw (IOException)new IOException( "Buffer interrupted while waiting for the writer" ).initCause(e); } } } while (buffull && !wrap); } finally { spillLock.unlock(); } // here, we know that we have sufficient space to write if (buffull) { final int gaplen = bufvoid - bufindex; System.arraycopy(b, off, kvbuffer, bufindex, gaplen); len -= gaplen; off += gaplen; bufindex = 0; } System.arraycopy(b, off, kvbuffer, bufindex, len); bufindex += len; } } public synchronized void flush() throws IOException, ClassNotFoundException, InterruptedException { LOG.info("Starting flush of map output"); spillLock.lock(); try { while (kvstart != kvend) { reporter.progress(); spillDone.await(); } if (sortSpillException != null) { throw (IOException)new IOException("Spill failed" diff --git a/src/test/org/apache/hadoop/mapred/TestJobCounters.java b/src/test/org/apache/hadoop/mapred/TestJobCounters.java new file mode 100644 index 0000000..53e07ba --- /dev/null +++ b/src/test/org/apache/hadoop/mapred/TestJobCounters.java @@ -0,0 +1,376 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.File; +import java.io.FileWriter; +import java.io.Writer; +import java.io.BufferedWriter; +import java.io.IOException; +import java.util.StringTokenizer; + +import junit.framework.TestCase; +import junit.extensions.TestSetup; +import junit.framework.Test; +import junit.framework.TestSuite; + +import static org.apache.hadoop.mapred.Task.Counter.SPILLED_RECORDS; +import static org.apache.hadoop.mapred.Task.Counter.MAP_INPUT_RECORDS; +import static org.apache.hadoop.mapred.Task.Counter.MAP_OUTPUT_RECORDS; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.mapreduce.Reducer; + +/** + * This is an wordcount application that tests job counters. + * It generates simple text input files. Then + * runs the wordcount map/reduce application on (1) 3 i/p files(with 3 maps + * and 1 reduce) and verifies the counters and (2) 4 i/p files(with 4 maps + * and 1 reduce) and verifies counters. Wordcount application reads the + * text input files, breaks each line into words and counts them. 
The output + * is a locally sorted list of words and the count of how often they occurred. + * + */ +public class TestJobCounters extends TestCase { + + String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", + File.separator + "tmp")).toString().replace(' ', '+'); + + private void validateMapredCounters(Counters counter, long spillRecCnt, + long mapInputRecords, long mapOutputRecords) { + // Check if the numer of Spilled Records is same as expected + assertEquals(spillRecCnt, + counter.findCounter(SPILLED_RECORDS).getCounter()); + assertEquals(mapInputRecords, + counter.findCounter(MAP_INPUT_RECORDS).getCounter()); + assertEquals(mapOutputRecords, + counter.findCounter(MAP_OUTPUT_RECORDS).getCounter()); + } + + private void validateCounters(org.apache.hadoop.mapreduce.Counters counter, + long spillRecCnt, + long mapInputRecords, long mapOutputRecords) { + // Check if the numer of Spilled Records is same as expected + assertEquals(spillRecCnt, + counter.findCounter(SPILLED_RECORDS).getValue()); + assertEquals(mapInputRecords, + counter.findCounter(MAP_INPUT_RECORDS).getValue()); + assertEquals(mapOutputRecords, + counter.findCounter(MAP_OUTPUT_RECORDS).getValue()); + } + + private void createWordsFile(File inpFile) throws Exception { + Writer out = new BufferedWriter(new FileWriter(inpFile)); + try { + // 500*4 unique words --- repeated 5 times => 5*2K words + int REPLICAS=5, NUMLINES=500, NUMWORDSPERLINE=4; + + for (int i = 0; i < REPLICAS; i++) { + for (int j = 1; j <= NUMLINES*NUMWORDSPERLINE; j+=NUMWORDSPERLINE) { + out.write("word" + j + " word" + (j+1) + " word" + (j+2) + + " word" + (j+3) + '\n'); + } + } + } finally { + out.close(); + } + } + + + /** + * The main driver for word count map/reduce program. + * Invoke this method to submit the map/reduce job. + * @throws IOException When there is communication problems with the + * job tracker. 
+ */ + public void testOldJobWithMapAndReducers() throws Exception { + JobConf conf = new JobConf(TestJobCounters.class); + conf.setJobName("wordcount-map-reducers"); + + // the keys are words (strings) + conf.setOutputKeyClass(Text.class); + // the values are counts (ints) + conf.setOutputValueClass(IntWritable.class); + + conf.setMapperClass(WordCount.MapClass.class); + conf.setCombinerClass(WordCount.Reduce.class); + conf.setReducerClass(WordCount.Reduce.class); + + conf.setNumMapTasks(3); + conf.setNumReduceTasks(1); + conf.setInt("io.sort.mb", 1); + conf.setInt("io.sort.factor", 2); + conf.set("io.sort.record.percent", "0.05"); + conf.set("io.sort.spill.percent", "0.80"); + + FileSystem fs = FileSystem.get(conf); + Path testDir = new Path(TEST_ROOT_DIR, "countertest"); + conf.set("test.build.data", testDir.toString()); + try { + if (fs.exists(testDir)) { + fs.delete(testDir, true); + } + if (!fs.mkdirs(testDir)) { + throw new IOException("Mkdirs failed to create " + testDir.toString()); + } + + String inDir = testDir + File.separator + "genins" + File.separator; + String outDir = testDir + File.separator; + Path wordsIns = new Path(inDir); + if (!fs.mkdirs(wordsIns)) { + throw new IOException("Mkdirs failed to create " + wordsIns.toString()); + } + + //create 3 input files each with 5*2k words + File inpFile = new File(inDir + "input5_2k_1"); + createWordsFile(inpFile); + inpFile = new File(inDir + "input5_2k_2"); + createWordsFile(inpFile); + inpFile = new File(inDir + "input5_2k_3"); + createWordsFile(inpFile); + + FileInputFormat.setInputPaths(conf, inDir); + Path outputPath1 = new Path(outDir, "output5_2k_3"); + FileOutputFormat.setOutputPath(conf, outputPath1); + + RunningJob myJob = JobClient.runJob(conf); + Counters c1 = myJob.getCounters(); + // 3maps & in each map, 4 first level spills --- So total 12. 
+ // spilled records count: + // Each Map: 1st level:2k+2k+2k+2k=8k;2ndlevel=4k+4k=8k; + // 3rd level=2k(4k from 1st level & 4k from 2nd level & combineAndSpill) + // So total 8k+8k+2k=18k + // For 3 Maps, total = 3*18=54k + // Reduce: each of the 3 map o/p's(2k each) will be spilled in shuffleToDisk() + // So 3*2k=6k in 1st level; 2nd level:4k(2k+2k); + // 3rd level directly given to reduce(4k+2k --- combineAndSpill => 2k. + // So 0 records spilled to disk in 3rd level) + // So total of 6k+4k=10k + // Total job counter will be 54k+10k = 64k + + //3 maps and 2.5k lines --- So total 7.5k map input records + //3 maps and 10k words in each --- So total of 30k map output recs + validateMapredCounters(c1, 64000, 7500, 30000); + + //create 4th input file each with 5*2k words and test with 4 maps + inpFile = new File(inDir + "input5_2k_4"); + createWordsFile(inpFile); + conf.setNumMapTasks(4); + Path outputPath2 = new Path(outDir, "output5_2k_4"); + FileOutputFormat.setOutputPath(conf, outputPath2); + + myJob = JobClient.runJob(conf); + c1 = myJob.getCounters(); + // 4maps & in each map 4 first level spills --- So total 16. + // spilled records count: + // Each Map: 1st level:2k+2k+2k+2k=8k;2ndlevel=4k+4k=8k; + // 3rd level=2k(4k from 1st level & 4k from 2nd level & combineAndSpill) + // So total 8k+8k+2k=18k + // For 3 Maps, total = 4*18=72k + // Reduce: each of the 4 map o/p's(2k each) will be spilled in shuffleToDisk() + // So 4*2k=8k in 1st level; 2nd level:4k+4k=8k; + // 3rd level directly given to reduce(4k+4k --- combineAndSpill => 2k. 
+ // So 0 records spilled to disk in 3rd level) + // So total of 8k+8k=16k + // Total job counter will be 72k+16k = 88k + + // 4 maps and 2.5k words in each --- So 10k map input records + // 4 maps and 10k unique words --- So 40k map output records + validateMapredCounters(c1, 88000, 10000, 40000); + + // check for a map only job + conf.setNumReduceTasks(0); + Path outputPath3 = new Path(outDir, "output5_2k_5"); + FileOutputFormat.setOutputPath(conf, outputPath3); + + myJob = JobClient.runJob(conf); + c1 = myJob.getCounters(); + // 4 maps and 2.5k words in each --- So 10k map input records + // 4 maps and 10k unique words --- So 40k map output records + validateMapredCounters(c1, 0, 10000, 40000); + } finally { + //clean up the input and output files + if (fs.exists(testDir)) { + fs.delete(testDir, true); + } + } + } + + public static class NewMapTokenizer + extends Mapper<Object, Text, Text, IntWritable> { + private final static IntWritable one = new IntWritable(1); + private Text word = new Text(); + + public void map(Object key, Text value, Context context) + throws IOException, InterruptedException { + StringTokenizer itr = new StringTokenizer(value.toString()); + while (itr.hasMoreTokens()) { + word.set(itr.nextToken()); + context.write(word, one); + } + } + } + + public static class NewIdentityReducer + extends Reducer<Text, IntWritable, Text, IntWritable> { + private IntWritable result = new IntWritable(); + + public void reduce(Text key, Iterable<IntWritable> values, + Context context) throws IOException, InterruptedException { + int sum = 0; + for (IntWritable val : values) { + sum += val.get(); + } + result.set(sum); + context.write(key, result); + } + } + + /** + * The main driver for word count map/reduce program. + * Invoke this method to submit the map/reduce job. + * @throws IOException When there is communication problems with the + * job tracker. 
+ */ + public void testNewJobWithMapAndReducers() throws Exception { + JobConf conf = new JobConf(TestJobCounters.class); + conf.setInt("io.sort.mb", 1); + conf.setInt("io.sort.factor", 2); + conf.set("io.sort.record.percent", "0.05"); + conf.set("io.sort.spill.percent", "0.80"); + + FileSystem fs = FileSystem.get(conf); + Path testDir = new Path(TEST_ROOT_DIR, "countertest2"); + conf.set("test.build.data", testDir.toString()); + try { + if (fs.exists(testDir)) { + fs.delete(testDir, true); + } + if (!fs.mkdirs(testDir)) { + throw new IOException("Mkdirs failed to create " + testDir.toString()); + } + + String inDir = testDir + File.separator + "genins" + File.separator; + Path wordsIns = new Path(inDir); + if (!fs.mkdirs(wordsIns)) { + throw new IOException("Mkdirs failed to create " + wordsIns.toString()); + } + String outDir = testDir + File.separator; + + //create 3 input files each with 5*2k words + File inpFile = new File(inDir + "input5_2k_1"); + createWordsFile(inpFile); + inpFile = new File(inDir + "input5_2k_2"); + createWordsFile(inpFile); + inpFile = new File(inDir + "input5_2k_3"); + createWordsFile(inpFile); + + FileInputFormat.setInputPaths(conf, inDir); + Path outputPath1 = new Path(outDir, "output5_2k_3"); + FileOutputFormat.setOutputPath(conf, outputPath1); + + Job job = new Job(conf); + job.setJobName("wordcount-map-reducers"); + + // the keys are words (strings) + job.setOutputKeyClass(Text.class); + // the values are counts (ints) + job.setOutputValueClass(IntWritable.class); + + job.setMapperClass(NewMapTokenizer.class); + job.setCombinerClass(NewIdentityReducer.class); + job.setReducerClass(NewIdentityReducer.class); + + job.setNumReduceTasks(1); + + job.waitForCompletion(false); + + org.apache.hadoop.mapreduce.Counters c1 = job.getCounters(); + // 3maps & in each map, 4 first level spills --- So total 12. 
+ // spilled records count: + // Each Map: 1st level:2k+2k+2k+2k=8k;2ndlevel=4k+4k=8k; + // 3rd level=2k(4k from 1st level & 4k from 2nd level & combineAndSpill) + // So total 8k+8k+2k=18k + // For 3 Maps, total = 3*18=54k + // Reduce: each of the 3 map o/p's(2k each) will be spilled in shuffleToDisk() + // So 3*2k=6k in 1st level; 2nd level:4k(2k+2k); + // 3rd level directly given to reduce(4k+2k --- combineAndSpill => 2k. + // So 0 records spilled to disk in 3rd level) + // So total of 6k+4k=10k + // Total job counter will be 54k+10k = 64k + + //3 maps and 2.5k lines --- So total 7.5k map input records + //3 maps and 10k words in each --- So total of 30k map output recs + validateCounters(c1, 64000, 7500, 30000); + + //create 4th input file each with 5*2k words and test with 4 maps + inpFile = new File(inDir + "input5_2k_4"); + createWordsFile(inpFile); + JobConf newJobConf = new JobConf(job.getConfiguration()); + + Path outputPath2 = new Path(outDir, "output5_2k_4"); + + FileOutputFormat.setOutputPath(newJobConf, outputPath2); + + Job newJob = new Job(newJobConf); + newJob.waitForCompletion(false); + c1 = newJob.getCounters(); + // 4maps & in each map 4 first level spills --- So total 16. + // spilled records count: + // Each Map: 1st level:2k+2k+2k+2k=8k;2ndlevel=4k+4k=8k; + // 3rd level=2k(4k from 1st level & 4k from 2nd level & combineAndSpill) + // So total 8k+8k+2k=18k + // For 3 Maps, total = 4*18=72k + // Reduce: each of the 4 map o/p's(2k each) will be spilled in shuffleToDisk() + // So 4*2k=8k in 1st level; 2nd level:4k+4k=8k; + // 3rd level directly given to reduce(4k+4k --- combineAndSpill => 2k. 
+ // So 0 records spilled to disk in 3rd level) + // So total of 8k+8k=16k + // Total job counter will be 72k+16k = 88k + + // 4 maps and 2.5k words in each --- So 10k map input records + // 4 maps and 10k unique words --- So 40k map output records + validateCounters(c1, 88000, 10000, 40000); + + JobConf newJobConf2 = new JobConf(newJob.getConfiguration()); + + Path outputPath3 = new Path(outDir, "output5_2k_5"); + + FileOutputFormat.setOutputPath(newJobConf2, outputPath3); + + Job newJob2 = new Job(newJobConf2); + newJob2.setNumReduceTasks(0); + newJob2.waitForCompletion(false); + c1 = newJob2.getCounters(); + // 4 maps and 2.5k words in each --- So 10k map input records + // 4 maps and 10k unique words --- So 40k map output records + validateCounters(c1, 0, 10000, 40000); + } finally { + //clean up the input and output files + if (fs.exists(testDir)) { + fs.delete(testDir, true); + } + } + } +} \ No newline at end of file diff --git a/src/test/org/apache/hadoop/mapred/TestSpilledRecordsCounter.java b/src/test/org/apache/hadoop/mapred/TestSpilledRecordsCounter.java deleted file mode 100644 index 32b4278..0000000 --- a/src/test/org/apache/hadoop/mapred/TestSpilledRecordsCounter.java +++ /dev/null @@ -1,179 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.FileWriter; -import java.io.Writer; -import java.io.BufferedWriter; -import java.io.IOException; - -import junit.framework.TestCase; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.Text; - -/** - * This is an wordcount application that tests the count of records - * got spilled to disk. It generates simple text input files. Then - * runs the wordcount map/reduce application on (1) 3 i/p files(with 3 maps - * and 1 reduce) and verifies the counters and (2) 4 i/p files(with 4 maps - * and 1 reduce) and verifies counters. Wordcount application reads the - * text input files, breaks each line into words and counts them. The output - * is a locally sorted list of words and the count of how often they occurred. - * - */ -public class TestSpilledRecordsCounter extends TestCase { - - private void validateCounters(Counters counter, long spillRecCnt) { - // Check if the numer of Spilled Records is same as expected - assertEquals(counter.findCounter(Task.Counter.SPILLED_RECORDS). - getCounter(), spillRecCnt); - } - - private void createWordsFile(File inpFile) throws Exception { - Writer out = new BufferedWriter(new FileWriter(inpFile)); - try { - // 500*4 unique words --- repeated 5 times => 5*2K words - int REPLICAS=5, NUMLINES=500, NUMWORDSPERLINE=4; - - for (int i = 0; i < REPLICAS; i++) { - for (int j = 1; j <= NUMLINES*NUMWORDSPERLINE; j+=NUMWORDSPERLINE) { - out.write("word" + j + " word" + (j+1) + " word" + (j+2) + " word" + (j+3) + '\n'); - } - } - } finally { - out.close(); - } - } - - - /** - * The main driver for word count map/reduce program. - * Invoke this method to submit the map/reduce job. 
- * @throws IOException When there is communication problems with the - * job tracker. - */ - public void testSpillCounter() throws Exception { - JobConf conf = new JobConf(TestSpilledRecordsCounter.class); - conf.setJobName("wordcountSpilledRecordsCounter"); - - // the keys are words (strings) - conf.setOutputKeyClass(Text.class); - // the values are counts (ints) - conf.setOutputValueClass(IntWritable.class); - - conf.setMapperClass(WordCount.MapClass.class); - conf.setCombinerClass(WordCount.Reduce.class); - conf.setReducerClass(WordCount.Reduce.class); - - conf.setNumMapTasks(3); - conf.setNumReduceTasks(1); - conf.setInt("io.sort.mb", 1); - conf.setInt("io.sort.factor", 2); - conf.set("io.sort.record.percent", "0.05"); - conf.set("io.sort.spill.percent", "0.80"); - - - String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", - File.separator + "tmp")) - .toString().replace(' ', '+'); - conf.set("test.build.data", TEST_ROOT_DIR); - String IN_DIR = TEST_ROOT_DIR + File.separator + - "spilledRecords.countertest" + File.separator + - "genins" + File.separator; - String OUT_DIR = TEST_ROOT_DIR + File.separator + - "spilledRecords.countertest" + File.separator; - - FileSystem fs = FileSystem.get(conf); - Path testdir = new Path(TEST_ROOT_DIR, "spilledRecords.countertest"); - try { - if (fs.exists(testdir)) { - fs.delete(testdir, true); - } - if (!fs.mkdirs(testdir)) { - throw new IOException("Mkdirs failed to create " + testdir.toString()); - } - - Path wordsIns = new Path(testdir, "genins"); - if (!fs.mkdirs(wordsIns)) { - throw new IOException("Mkdirs failed to create " + wordsIns.toString()); - } - - //create 3 input files each with 5*2k words - File inpFile = new File(IN_DIR + "input5_2k_1"); - createWordsFile(inpFile); - inpFile = new File(IN_DIR + "input5_2k_2"); - createWordsFile(inpFile); - inpFile = new File(IN_DIR + "input5_2k_3"); - createWordsFile(inpFile); - - FileInputFormat.setInputPaths(conf, IN_DIR); - Path outputPath1=new Path(OUT_DIR, 
"output5_2k_3"); - FileOutputFormat.setOutputPath(conf, outputPath1); - - RunningJob myJob = JobClient.runJob(conf); - Counters c1 = myJob.getCounters(); - // 3maps & in each map, 4 first level spills --- So total 12. - // spilled records count: - // Each Map: 1st level:2k+2k+2k+2k=8k;2ndlevel=4k+4k=8k; - // 3rd level=2k(4k from 1st level & 4k from 2nd level & combineAndSpill) - // So total 8k+8k+2k=18k - // For 3 Maps, total = 3*18=54k - // Reduce: each of the 3 map o/p's(2k each) will be spilled in shuffleToDisk() - // So 3*2k=6k in 1st level; 2nd level:4k(2k+2k); - // 3rd level directly given to reduce(4k+2k --- combineAndSpill => 2k. - // So 0 records spilled to disk in 3rd level) - // So total of 6k+4k=10k - // Total job counter will be 54k+10k = 64k - validateCounters(c1, 64000); - - //create 4th input file each with 5*2k words and test with 4 maps - inpFile = new File(IN_DIR + "input5_2k_4"); - createWordsFile(inpFile); - conf.setNumMapTasks(4); - Path outputPath2=new Path(OUT_DIR, "output5_2k_4"); - FileOutputFormat.setOutputPath(conf, outputPath2); - - myJob = JobClient.runJob(conf); - c1 = myJob.getCounters(); - // 4maps & in each map 4 first level spills --- So total 16. - // spilled records count: - // Each Map: 1st level:2k+2k+2k+2k=8k;2ndlevel=4k+4k=8k; - // 3rd level=2k(4k from 1st level & 4k from 2nd level & combineAndSpill) - // So total 8k+8k+2k=18k - // For 3 Maps, total = 4*18=72k - // Reduce: each of the 4 map o/p's(2k each) will be spilled in shuffleToDisk() - // So 4*2k=8k in 1st level; 2nd level:4k+4k=8k; - // 3rd level directly given to reduce(4k+4k --- combineAndSpill => 2k. - // So 0 records spilled to disk in 3rd level) - // So total of 8k+8k=16k - // Total job counter will be 72k+16k = 88k - validateCounters(c1, 88000); - } finally { - //clean up the input and output files - if (fs.exists(testdir)) { - fs.delete(testdir, true); - } - } - } -}
jaxlaw/hadoop-common
44c259d7837cf6ed134f59700f32f84d2b1435ae
Adjust YAHOO-CHANGES to place HADOOP-6234 into the correct region to list the build
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index c10086e..64aaf38 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,407 +1,407 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. HDFS-758. Changes to report status of decommissioining on the namenode web UI. (jitendra) -yahoo-hadoop-0.20.1-3195383000 HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) +yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. 
Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. 
(suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. 
Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. 
HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. 
http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. 
http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). 
http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch
jaxlaw/hadoop-common
5e2beb70e8e2f5fa213cc0fef9c8a254b05cda23
HDFS-758. Changes to report decommissioning status on namenode web UI. Patch URL: https://issues.apache.org/jira/secure/attachment/12426000/HDFS-758.5.0-20.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 64fa509..c10086e 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,404 +1,407 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. + HDFS-758. Changes to report status of decommissioining on the namenode web + UI. (jitendra) + yahoo-hadoop-0.20.1-3195383000 HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration to use octal or symbolic instead of decimal. (Jakob Homan via suresh) HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. 
Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. 
(suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. 
Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. 
HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. 
http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. 
http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). 
http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java index 454524d..d1febd3 100644 --- a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java @@ -1,465 +1,519 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.DataInput; import java.io.IOException; import java.util.*; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.UTF8; import org.apache.hadoop.io.WritableUtils; /************************************************** * DatanodeDescriptor tracks stats on a given DataNode, * such as available storage capacity, last update time, etc., * and maintains a set of blocks stored on the datanode. * * This data structure is a data structure that is internal * to the namenode. It is *not* sent over-the-wire to the Client * or the Datnodes. Neither is it stored persistently in the * fsImage. **************************************************/ public class DatanodeDescriptor extends DatanodeInfo { + + // Stores status of decommissioning. + // If node is not decommissioning, do not use this object for anything. 
+ DecommissioningStatus decommissioningStatus = new DecommissioningStatus(); + /** Block and targets pair */ public static class BlockTargetPair { public final Block block; public final DatanodeDescriptor[] targets; BlockTargetPair(Block block, DatanodeDescriptor[] targets) { this.block = block; this.targets = targets; } } /** A BlockTargetPair queue. */ private static class BlockQueue { private final Queue<BlockTargetPair> blockq = new LinkedList<BlockTargetPair>(); /** Size of the queue */ synchronized int size() {return blockq.size();} /** Enqueue */ synchronized boolean offer(Block block, DatanodeDescriptor[] targets) { return blockq.offer(new BlockTargetPair(block, targets)); } /** Dequeue */ synchronized List<BlockTargetPair> poll(int numBlocks) { if (numBlocks <= 0 || blockq.isEmpty()) { return null; } List<BlockTargetPair> results = new ArrayList<BlockTargetPair>(); for(; !blockq.isEmpty() && numBlocks > 0; numBlocks--) { results.add(blockq.poll()); } return results; } } private volatile BlockInfo blockList = null; // isAlive == heartbeats.contains(this) // This is an optimization, because contains takes O(n) time on Arraylist protected boolean isAlive = false; /** A queue of blocks to be replicated by this datanode */ private BlockQueue replicateBlocks = new BlockQueue(); /** A queue of blocks to be recovered by this datanode */ private BlockQueue recoverBlocks = new BlockQueue(); /** A set of blocks to be invalidated by this datanode */ private Set<Block> invalidateBlocks = new TreeSet<Block>(); /* Variables for maintaning number of blocks scheduled to be written to * this datanode. This count is approximate and might be slightly higger * in case of errors (e.g. datanode does not report if an error occurs * while writing the block). 
*/ private int currApproxBlocksScheduled = 0; private int prevApproxBlocksScheduled = 0; private long lastBlocksScheduledRollTime = 0; private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min /** Default constructor */ public DatanodeDescriptor() {} /** DatanodeDescriptor constructor * @param nodeID id of the data node */ public DatanodeDescriptor(DatanodeID nodeID) { this(nodeID, 0L, 0L, 0L, 0); } /** DatanodeDescriptor constructor * * @param nodeID id of the data node * @param networkLocation location of the data node in network */ public DatanodeDescriptor(DatanodeID nodeID, String networkLocation) { this(nodeID, networkLocation, null); } /** DatanodeDescriptor constructor * * @param nodeID id of the data node * @param networkLocation location of the data node in network * @param hostName it could be different from host specified for DatanodeID */ public DatanodeDescriptor(DatanodeID nodeID, String networkLocation, String hostName) { this(nodeID, networkLocation, hostName, 0L, 0L, 0L, 0); } /** DatanodeDescriptor constructor * * @param nodeID id of the data node * @param capacity capacity of the data node * @param dfsUsed space used by the data node * @param remaining remaing capacity of the data node * @param xceiverCount # of data transfers at the data node */ public DatanodeDescriptor(DatanodeID nodeID, long capacity, long dfsUsed, long remaining, int xceiverCount) { super(nodeID); updateHeartbeat(capacity, dfsUsed, remaining, xceiverCount); } /** DatanodeDescriptor constructor * * @param nodeID id of the data node * @param networkLocation location of the data node in network * @param capacity capacity of the data node, including space used by non-dfs * @param dfsUsed the used space by dfs datanode * @param remaining remaing capacity of the data node * @param xceiverCount # of data transfers at the data node */ public DatanodeDescriptor(DatanodeID nodeID, String networkLocation, String hostName, long capacity, long dfsUsed, long remaining, 
int xceiverCount) { super(nodeID, networkLocation, hostName); updateHeartbeat(capacity, dfsUsed, remaining, xceiverCount); } /** * Add data-node to the block. * Add block to the head of the list of blocks belonging to the data-node. */ boolean addBlock(BlockInfo b) { if(!b.addNode(this)) return false; // add to the head of the data-node list blockList = b.listInsert(blockList, this); return true; } /** * Remove block from the list of blocks belonging to the data-node. * Remove data-node from the block. */ boolean removeBlock(BlockInfo b) { blockList = b.listRemove(blockList, this); return b.removeNode(this); } /** * Move block to the head of the list of blocks belonging to the data-node. */ void moveBlockToHead(BlockInfo b) { blockList = b.listRemove(blockList, this); blockList = b.listInsert(blockList, this); } void resetBlocks() { this.capacity = 0; this.remaining = 0; this.dfsUsed = 0; this.xceiverCount = 0; this.blockList = null; this.invalidateBlocks.clear(); } public int numBlocks() { return blockList == null ? 0 : blockList.listCount(this); } /** */ void updateHeartbeat(long capacity, long dfsUsed, long remaining, int xceiverCount) { this.capacity = capacity; this.dfsUsed = dfsUsed; this.remaining = remaining; this.lastUpdate = System.currentTimeMillis(); this.xceiverCount = xceiverCount; rollBlocksScheduled(lastUpdate); } /** * Iterates over the list of blocks belonging to the data-node. */ static private class BlockIterator implements Iterator<Block> { private BlockInfo current; private DatanodeDescriptor node; BlockIterator(BlockInfo head, DatanodeDescriptor dn) { this.current = head; this.node = dn; } public boolean hasNext() { return current != null; } public BlockInfo next() { BlockInfo res = current; current = current.getNext(current.findDatanode(node)); return res; } public void remove() { throw new UnsupportedOperationException("Sorry. 
can't remove."); } } Iterator<Block> getBlockIterator() { return new BlockIterator(this.blockList, this); } /** * Store block replication work. */ void addBlockToBeReplicated(Block block, DatanodeDescriptor[] targets) { assert(block != null && targets != null && targets.length > 0); replicateBlocks.offer(block, targets); } /** * Store block recovery work. */ void addBlockToBeRecovered(Block block, DatanodeDescriptor[] targets) { assert(block != null && targets != null && targets.length > 0); recoverBlocks.offer(block, targets); } /** * Store block invalidation work. */ void addBlocksToBeInvalidated(List<Block> blocklist) { assert(blocklist != null && blocklist.size() > 0); synchronized (invalidateBlocks) { for(Block blk : blocklist) { invalidateBlocks.add(blk); } } } /** * The number of work items that are pending to be replicated */ int getNumberOfBlocksToBeReplicated() { return replicateBlocks.size(); } /** * The number of block invalidation items that are pending to * be sent to the datanode */ int getNumberOfBlocksToBeInvalidated() { synchronized (invalidateBlocks) { return invalidateBlocks.size(); } } BlockCommand getReplicationCommand(int maxTransfers) { List<BlockTargetPair> blocktargetlist = replicateBlocks.poll(maxTransfers); return blocktargetlist == null? null: new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blocktargetlist); } BlockCommand getLeaseRecoveryCommand(int maxTransfers) { List<BlockTargetPair> blocktargetlist = recoverBlocks.poll(maxTransfers); return blocktargetlist == null? null: new BlockCommand(DatanodeProtocol.DNA_RECOVERBLOCK, blocktargetlist); } /** * Remove the specified number of blocks to be invalidated */ BlockCommand getInvalidateBlocks(int maxblocks) { Block[] deleteList = getBlockArray(invalidateBlocks, maxblocks); return deleteList == null? 
null: new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, deleteList); } static private Block[] getBlockArray(Collection<Block> blocks, int max) { Block[] blockarray = null; synchronized(blocks) { int available = blocks.size(); int n = available; if (max > 0 && n > 0) { if (max < n) { n = max; } // allocate the properly sized block array ... blockarray = new Block[n]; // iterate tree collecting n blocks... Iterator<Block> e = blocks.iterator(); int blockCount = 0; while (blockCount < n && e.hasNext()) { // insert into array ... blockarray[blockCount++] = e.next(); // remove from tree via iterator, if we are removing // less than total available blocks if (n < available){ e.remove(); } } assert(blockarray.length == n); // now if the number of blocks removed equals available blocks, // them remove all blocks in one fell swoop via clear if (n == available) { blocks.clear(); } } } return blockarray; } void reportDiff(BlocksMap blocksMap, BlockListAsLongs newReport, Collection<Block> toAdd, Collection<Block> toRemove, Collection<Block> toInvalidate) { // place a deilimiter in the list which separates blocks // that have been reported from those that have not BlockInfo delimiter = new BlockInfo(new Block(), 1); boolean added = this.addBlock(delimiter); assert added : "Delimiting block cannot be present in the node"; if(newReport == null) newReport = new BlockListAsLongs( new long[0]); // scan the report and collect newly reported blocks // Note we are taking special precaution to limit tmp blocks allocated // as part this block report - which why block list is stored as longs Block iblk = new Block(); // a fixed new'ed block to be reused with index i for (int i = 0; i < newReport.getNumberOfBlocks(); ++i) { iblk.set(newReport.getBlockId(i), newReport.getBlockLen(i), newReport.getBlockGenStamp(i)); BlockInfo storedBlock = blocksMap.getStoredBlock(iblk); if(storedBlock == null) { // If block is not in blocksMap it does not belong to any file toInvalidate.add(new 
Block(iblk)); continue; } if(storedBlock.findDatanode(this) < 0) {// Known block, but not on the DN // if the size differs from what is in the blockmap, then return // the new block. addStoredBlock will then pick up the right size of this // block and will update the block object in the BlocksMap if (storedBlock.getNumBytes() != iblk.getNumBytes()) { toAdd.add(new Block(iblk)); } else { toAdd.add(storedBlock); } continue; } // move block to the head of the list this.moveBlockToHead(storedBlock); } // collect blocks that have not been reported // all of them are next to the delimiter Iterator<Block> it = new BlockIterator(delimiter.getNext(0), this); while(it.hasNext()) toRemove.add(it.next()); this.removeBlock(delimiter); } /** Serialization for FSEditLog */ void readFieldsFromFSEditLog(DataInput in) throws IOException { this.name = UTF8.readString(in); this.storageID = UTF8.readString(in); this.infoPort = in.readShort() & 0x0000ffff; this.capacity = in.readLong(); this.dfsUsed = in.readLong(); this.remaining = in.readLong(); this.lastUpdate = in.readLong(); this.xceiverCount = in.readInt(); this.location = Text.readString(in); this.hostName = Text.readString(in); setAdminState(WritableUtils.readEnum(in, AdminStates.class)); } /** * @return Approximate number of blocks currently scheduled to be written * to this datanode. */ public int getBlocksScheduled() { return currApproxBlocksScheduled + prevApproxBlocksScheduled; } /** * Increments counter for number of blocks scheduled. */ void incBlocksScheduled() { currApproxBlocksScheduled++; } /** * Decrements counter for number of blocks scheduled. */ void decBlocksScheduled() { if (prevApproxBlocksScheduled > 0) { prevApproxBlocksScheduled--; } else if (currApproxBlocksScheduled > 0) { currApproxBlocksScheduled--; } // its ok if both counters are zero. } /** * Adjusts curr and prev number of blocks scheduled every few minutes. 
*/ private void rollBlocksScheduled(long now) { if ((now - lastBlocksScheduledRollTime) > BLOCKS_SCHEDULED_ROLL_INTERVAL) { prevApproxBlocksScheduled = currApproxBlocksScheduled; currApproxBlocksScheduled = 0; lastBlocksScheduledRollTime = now; } } + + class DecommissioningStatus { + int underReplicatedBlocks; + int decommissionOnlyReplicas; + int underReplicatedInOpenFiles; + long startTime; + + synchronized void set(int underRep, int onlyRep, int underConstruction) { + if (isDecommissionInProgress() == false) { + return; + } + underReplicatedBlocks = underRep; + decommissionOnlyReplicas = onlyRep; + underReplicatedInOpenFiles = underConstruction; + } + + synchronized int getUnderReplicatedBlocks() { + if (isDecommissionInProgress() == false) { + return 0; + } + return underReplicatedBlocks; + } + + synchronized int getDecommissionOnlyReplicas() { + if (isDecommissionInProgress() == false) { + return 0; + } + return decommissionOnlyReplicas; + } + + synchronized int getUnderReplicatedInOpenFiles() { + if (isDecommissionInProgress() == false) { + return 0; + } + return underReplicatedInOpenFiles; + } + + synchronized void setStartTime(long time) { + startTime = time; + } + + synchronized long getStartTime() { + if (isDecommissionInProgress() == false) { + return 0; + } + return startTime; + } + } // End of class DecommissioningStatus + } diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 6f0a2fa..a37e313 100644 --- a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -3117,1633 +3117,1685 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean { for (Iterator<DatanodeDescriptor> it = nodes.iterator(); it.hasNext(); ) { DatanodeDescriptor node = it.next(); try { invalidateBlock(blk, node); } catch (IOException e) { 
NameNode.stateChangeLog.info("NameNode.invalidateCorruptReplicas " + "error in deleting bad block " + blk + " on " + node + e); gotException = true; } } // Remove the block from corruptReplicasMap if (!gotException) corruptReplicas.removeFromCorruptReplicasMap(blk); } /** * For each block in the name-node verify whether it belongs to any file, * over or under replicated. Place it into the respective queue. */ private synchronized void processMisReplicatedBlocks() { long nrInvalid = 0, nrOverReplicated = 0, nrUnderReplicated = 0; neededReplications.clear(); for(BlocksMap.BlockInfo block : blocksMap.getBlocks()) { INodeFile fileINode = block.getINode(); if(fileINode == null) { // block does not belong to any file nrInvalid++; addToInvalidates(block); continue; } // calculate current replication short expectedReplication = fileINode.getReplication(); NumberReplicas num = countNodes(block); int numCurrentReplica = num.liveReplicas(); // add to under-replicated queue if need to be if (neededReplications.add(block, numCurrentReplica, num.decommissionedReplicas(), expectedReplication)) { nrUnderReplicated++; } if (numCurrentReplica > expectedReplication) { // over-replicated block nrOverReplicated++; processOverReplicatedBlock(block, expectedReplication, null, null); } } LOG.info("Total number of blocks = " + blocksMap.size()); LOG.info("Number of invalid blocks = " + nrInvalid); LOG.info("Number of under-replicated blocks = " + nrUnderReplicated); LOG.info("Number of over-replicated blocks = " + nrOverReplicated); } /** * Find how many of the containing nodes are "extra", if any. * If there are any extras, call chooseExcessReplicates() to * mark them in the excessReplicateMap. 
*/ private void processOverReplicatedBlock(Block block, short replication, DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) { if(addedNode == delNodeHint) { delNodeHint = null; } Collection<DatanodeDescriptor> nonExcess = new ArrayList<DatanodeDescriptor>(); Collection<DatanodeDescriptor> corruptNodes = corruptReplicas.getNodes(block); for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block); it.hasNext();) { DatanodeDescriptor cur = it.next(); Collection<Block> excessBlocks = excessReplicateMap.get(cur.getStorageID()); if (excessBlocks == null || !excessBlocks.contains(block)) { if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) { // exclude corrupt replicas if (corruptNodes == null || !corruptNodes.contains(cur)) { nonExcess.add(cur); } } } } chooseExcessReplicates(nonExcess, block, replication, addedNode, delNodeHint); } /** * We want "replication" replicates for the block, but we now have too many. * In this method, copy enough nodes from 'srcNodes' into 'dstNodes' such that: * * srcNodes.size() - dstNodes.size() == replication * * We pick node that make sure that replicas are spread across racks and * also try hard to pick one with least free space. * The algorithm is first to pick a node with least free space from nodes * that are on a rack holding more than one replicas of the block. * So removing such a replica won't remove a rack. 
* If no such a node is available, * then pick a node with least free space */ void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess, Block b, short replication, DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) { // first form a rack to datanodes map and HashMap<String, ArrayList<DatanodeDescriptor>> rackMap = new HashMap<String, ArrayList<DatanodeDescriptor>>(); for (Iterator<DatanodeDescriptor> iter = nonExcess.iterator(); iter.hasNext();) { DatanodeDescriptor node = iter.next(); String rackName = node.getNetworkLocation(); ArrayList<DatanodeDescriptor> datanodeList = rackMap.get(rackName); if(datanodeList==null) { datanodeList = new ArrayList<DatanodeDescriptor>(); } datanodeList.add(node); rackMap.put(rackName, datanodeList); } // split nodes into two sets // priSet contains nodes on rack with more than one replica // remains contains the remaining nodes ArrayList<DatanodeDescriptor> priSet = new ArrayList<DatanodeDescriptor>(); ArrayList<DatanodeDescriptor> remains = new ArrayList<DatanodeDescriptor>(); for( Iterator<Entry<String, ArrayList<DatanodeDescriptor>>> iter = rackMap.entrySet().iterator(); iter.hasNext(); ) { Entry<String, ArrayList<DatanodeDescriptor>> rackEntry = iter.next(); ArrayList<DatanodeDescriptor> datanodeList = rackEntry.getValue(); if( datanodeList.size() == 1 ) { remains.add(datanodeList.get(0)); } else { priSet.addAll(datanodeList); } } // pick one node to delete that favors the delete hint // otherwise pick one with least space from priSet if it is not empty // otherwise one node with least space from remains boolean firstOne = true; while (nonExcess.size() - replication > 0) { DatanodeInfo cur = null; long minSpace = Long.MAX_VALUE; // check if we can del delNodeHint if (firstOne && delNodeHint !=null && nonExcess.contains(delNodeHint) && (priSet.contains(delNodeHint) || (addedNode != null && !priSet.contains(addedNode))) ) { cur = delNodeHint; } else { // regular excessive replica removal 
Iterator<DatanodeDescriptor> iter = priSet.isEmpty() ? remains.iterator() : priSet.iterator(); while( iter.hasNext() ) { DatanodeDescriptor node = iter.next(); long free = node.getRemaining(); if (minSpace > free) { minSpace = free; cur = node; } } } firstOne = false; // adjust rackmap, priSet, and remains String rack = cur.getNetworkLocation(); ArrayList<DatanodeDescriptor> datanodes = rackMap.get(rack); datanodes.remove(cur); if(datanodes.isEmpty()) { rackMap.remove(rack); } if( priSet.remove(cur) ) { if (datanodes.size() == 1) { priSet.remove(datanodes.get(0)); remains.add(datanodes.get(0)); } } else { remains.remove(cur); } nonExcess.remove(cur); Collection<Block> excessBlocks = excessReplicateMap.get(cur.getStorageID()); if (excessBlocks == null) { excessBlocks = new TreeSet<Block>(); excessReplicateMap.put(cur.getStorageID(), excessBlocks); } if (excessBlocks.add(b)) { excessBlocksCount++; NameNode.stateChangeLog.debug("BLOCK* NameSystem.chooseExcessReplicates: " +"("+cur.getName()+", "+b +") is added to excessReplicateMap"); } // // The 'excessblocks' tracks blocks until we get confirmation // that the datanode has deleted them; the only way we remove them // is when we get a "removeBlock" message. // // The 'invalidate' list is used to inform the datanode the block // should be deleted. Items are removed from the invalidate list // upon giving instructions to the namenode. // addToInvalidatesNoLog(b, cur); NameNode.stateChangeLog.info("BLOCK* NameSystem.chooseExcessReplicates: " +"("+cur.getName()+", "+b+") is added to recentInvalidateSets"); } } /** * Modify (block-->datanode) map. Possibly generate * replication tasks, if the removed block is still valid. 
*/ synchronized void removeStoredBlock(Block block, DatanodeDescriptor node) { NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: " +block + " from "+node.getName()); if (!blocksMap.removeNode(block, node)) { NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: " +block+" has already been removed from node "+node); return; } // // It's possible that the block was removed because of a datanode // failure. If the block is still valid, check if replication is // necessary. In that case, put block on a possibly-will- // be-replicated list. // INode fileINode = blocksMap.getINode(block); if (fileINode != null) { decrementSafeBlockCount(block); updateNeededReplications(block, -1, 0); } // // We've removed a block from a node, so it's definitely no longer // in "excess" there. // Collection<Block> excessBlocks = excessReplicateMap.get(node.getStorageID()); if (excessBlocks != null) { if (excessBlocks.remove(block)) { excessBlocksCount--; NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: " + block + " is removed from excessBlocks"); if (excessBlocks.size() == 0) { excessReplicateMap.remove(node.getStorageID()); } } } // Remove the replica from corruptReplicas corruptReplicas.removeFromCorruptReplicasMap(block, node); } /** * The given node is reporting that it received a certain block. */ public synchronized void blockReceived(DatanodeID nodeID, Block block, String delHint ) throws IOException { DatanodeDescriptor node = getDatanode(nodeID); if (node == null) { NameNode.stateChangeLog.warn("BLOCK* NameSystem.blockReceived: " + block + " is received from an unrecorded node " + nodeID.getName()); throw new IllegalArgumentException( "Unexpected exception. 
Got blockReceived message from node " + block + ", but there is no info for it"); } if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("BLOCK* NameSystem.blockReceived: " +block+" is received from " + nodeID.getName()); } // Check if this datanode should actually be shutdown instead. if (shouldNodeShutdown(node)) { setDatanodeDead(node); throw new DisallowedDatanodeException(node); } // decrement number of blocks scheduled to this datanode. node.decBlocksScheduled(); // get the deletion hint node DatanodeDescriptor delHintNode = null; if(delHint!=null && delHint.length()!=0) { delHintNode = datanodeMap.get(delHint); if(delHintNode == null) { NameNode.stateChangeLog.warn("BLOCK* NameSystem.blockReceived: " + block + " is expected to be removed from an unrecorded node " + delHint); } } // // Modify the blocks->datanode map and node's map. // pendingReplications.remove(block); addStoredBlock(block, node, delHintNode ); } public long getMissingBlocksCount() { // not locking return Math.max(missingBlocksInPrevIter, missingBlocksInCurIter); } long[] getStats() throws IOException { checkSuperuserPrivilege(); synchronized(heartbeats) { return new long[] {this.capacityTotal, this.capacityUsed, this.capacityRemaining, this.underReplicatedBlocksCount, this.corruptReplicaBlocksCount, getMissingBlocksCount()}; } } /** * Total raw bytes including non-dfs used space. 
*/ public long getCapacityTotal() { synchronized (heartbeats) { return this.capacityTotal; } } /** * Total used space by data nodes */ public long getCapacityUsed() { synchronized(heartbeats){ return this.capacityUsed; } } /** * Total used space by data nodes as percentage of total capacity */ public float getCapacityUsedPercent() { synchronized(heartbeats){ if (capacityTotal <= 0) { return 100; } return ((float)capacityUsed * 100.0f)/(float)capacityTotal; } } /** * Total used space by data nodes for non DFS purposes such * as storing temporary files on the local file system */ public long getCapacityUsedNonDFS() { long nonDFSUsed = 0; synchronized(heartbeats){ nonDFSUsed = capacityTotal - capacityRemaining - capacityUsed; } return nonDFSUsed < 0 ? 0 : nonDFSUsed; } /** * Total non-used raw bytes. */ public long getCapacityRemaining() { synchronized (heartbeats) { return this.capacityRemaining; } } /** * Total remaining space by data nodes as percentage of total capacity */ public float getCapacityRemainingPercent() { synchronized(heartbeats){ if (capacityTotal <= 0) { return 0; } return ((float)capacityRemaining * 100.0f)/(float)capacityTotal; } } /** * Total number of connections. */ public int getTotalLoad() { synchronized (heartbeats) { return this.totalLoad; } } int getNumberOfDatanodes(DatanodeReportType type) { return getDatanodeListForReport(type).size(); } private synchronized ArrayList<DatanodeDescriptor> getDatanodeListForReport( DatanodeReportType type) { boolean listLiveNodes = type == DatanodeReportType.ALL || type == DatanodeReportType.LIVE; boolean listDeadNodes = type == DatanodeReportType.ALL || type == DatanodeReportType.DEAD; HashMap<String, String> mustList = new HashMap<String, String>(); if (listDeadNodes) { //first load all the nodes listed in include and exclude files. 
for (Iterator<String> it = hostsReader.getHosts().iterator(); it.hasNext();) { mustList.put(it.next(), ""); } for (Iterator<String> it = hostsReader.getExcludedHosts().iterator(); it.hasNext();) { mustList.put(it.next(), ""); } } ArrayList<DatanodeDescriptor> nodes = null; synchronized (datanodeMap) { nodes = new ArrayList<DatanodeDescriptor>(datanodeMap.size() + mustList.size()); for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext();) { DatanodeDescriptor dn = it.next(); boolean isDead = isDatanodeDead(dn); if ( (isDead && listDeadNodes) || (!isDead && listLiveNodes) ) { nodes.add(dn); } //Remove any form of the this datanode in include/exclude lists. mustList.remove(dn.getName()); mustList.remove(dn.getHost()); mustList.remove(dn.getHostName()); } } if (listDeadNodes) { for (Iterator<String> it = mustList.keySet().iterator(); it.hasNext();) { DatanodeDescriptor dn = new DatanodeDescriptor(new DatanodeID(it.next())); dn.setLastUpdate(0); nodes.add(dn); } } return nodes; } public synchronized DatanodeInfo[] datanodeReport( DatanodeReportType type ) throws AccessControlException { checkSuperuserPrivilege(); ArrayList<DatanodeDescriptor> results = getDatanodeListForReport(type); DatanodeInfo[] arr = new DatanodeInfo[results.size()]; for (int i=0; i<arr.length; i++) { arr[i] = new DatanodeInfo(results.get(i)); } return arr; } /** * Save namespace image. * This will save current namespace into fsimage file and empty edits file. * Requires superuser privilege and safe mode. * * @throws AccessControlException if superuser privilege is violated. 
* @throws IOException if */ synchronized void saveNamespace() throws AccessControlException, IOException { checkSuperuserPrivilege(); if(!isInSafeMode()) { throw new IOException("Safe mode should be turned ON " + "in order to create namespace image."); } getFSImage().saveFSImage(); LOG.info("New namespace image has been created."); } /** */ public synchronized void DFSNodesStatus(ArrayList<DatanodeDescriptor> live, ArrayList<DatanodeDescriptor> dead) { ArrayList<DatanodeDescriptor> results = getDatanodeListForReport(DatanodeReportType.ALL); for(Iterator<DatanodeDescriptor> it = results.iterator(); it.hasNext();) { DatanodeDescriptor node = it.next(); if (isDatanodeDead(node)) dead.add(node); else live.add(node); } } /** * Prints information about all datanodes. */ private synchronized void datanodeDump(PrintWriter out) { synchronized (datanodeMap) { out.println("Metasave: Number of datanodes: " + datanodeMap.size()); for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext();) { DatanodeDescriptor node = it.next(); out.println(node.dumpDatanode()); } } } /** * Start decommissioning the specified datanode. */ private void startDecommission (DatanodeDescriptor node) throws IOException { if (!node.isDecommissionInProgress() && !node.isDecommissioned()) { LOG.info("Start Decommissioning node " + node.getName()); node.startDecommission(); + node.decommissioningStatus.setStartTime(now()); // // all the blocks that reside on this node have to be // replicated. - Iterator<Block> decommissionBlocks = node.getBlockIterator(); - while(decommissionBlocks.hasNext()) { - Block block = decommissionBlocks.next(); - updateNeededReplications(block, -1, 0); - } + checkDecommissionStateInternal(node); } } /** * Stop decommissioning the specified datanodes. 
*/ public void stopDecommission (DatanodeDescriptor node) throws IOException { LOG.info("Stop Decommissioning node " + node.getName()); node.stopDecommission(); } /** */ public DatanodeInfo getDataNodeInfo(String name) { return datanodeMap.get(name); } /** * @deprecated use {@link NameNode#getNameNodeAddress()} instead. */ @Deprecated public InetSocketAddress getDFSNameNodeAddress() { return nameNodeAddress; } /** */ public Date getStartTime() { return new Date(systemStart); } short getMaxReplication() { return (short)maxReplication; } short getMinReplication() { return (short)minReplication; } short getDefaultReplication() { return (short)defaultReplication; } /** * A immutable object that stores the number of live replicas and * the number of decommissined Replicas. */ static class NumberReplicas { private int liveReplicas; private int decommissionedReplicas; private int corruptReplicas; private int excessReplicas; NumberReplicas() { initialize(0, 0, 0, 0); } NumberReplicas(int live, int decommissioned, int corrupt, int excess) { initialize(live, decommissioned, corrupt, excess); } void initialize(int live, int decommissioned, int corrupt, int excess) { liveReplicas = live; decommissionedReplicas = decommissioned; corruptReplicas = corrupt; excessReplicas = excess; } int liveReplicas() { return liveReplicas; } int decommissionedReplicas() { return decommissionedReplicas; } int corruptReplicas() { return corruptReplicas; } int excessReplicas() { return excessReplicas; } } /** * Counts the number of nodes in the given list into active and * decommissioned counters. 
*/ private NumberReplicas countNodes(Block b, Iterator<DatanodeDescriptor> nodeIter) { int count = 0; int live = 0; int corrupt = 0; int excess = 0; Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b); while ( nodeIter.hasNext() ) { DatanodeDescriptor node = nodeIter.next(); if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) { corrupt++; } else if (node.isDecommissionInProgress() || node.isDecommissioned()) { count++; } else { Collection<Block> blocksExcess = excessReplicateMap.get(node.getStorageID()); if (blocksExcess != null && blocksExcess.contains(b)) { excess++; } else { live++; } } } return new NumberReplicas(live, count, corrupt, excess); } /** * Return the number of nodes that are live and decommissioned. */ NumberReplicas countNodes(Block b) { return countNodes(b, blocksMap.nodeIterator(b)); } + private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode, + NumberReplicas num) { + int curReplicas = num.liveReplicas(); + int curExpectedReplicas = getReplication(block); + INode fileINode = blocksMap.getINode(block); + Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block); + StringBuffer nodeList = new StringBuffer(); + while (nodeIter.hasNext()) { + DatanodeDescriptor node = nodeIter.next(); + nodeList.append(node.name); + nodeList.append(" "); + } + FSNamesystem.LOG.info("Block: " + block + ", Expected Replicas: " + + curExpectedReplicas + ", live replicas: " + curReplicas + + ", corrupt replicas: " + num.corruptReplicas() + + ", decommissioned replicas: " + num.decommissionedReplicas() + + ", excess replicas: " + num.excessReplicas() + ", Is Open File: " + + fileINode.isUnderConstruction() + ", Datanodes having this block: " + + nodeList + ", Current Datanode: " + srcNode.name + + ", Is current datanode decommissioning: " + + srcNode.isDecommissionInProgress()); + } + + /** * Return true if there are any blocks on this node that have not * yet reached their replication factor. 
Otherwise returns false. */ private boolean isReplicationInProgress(DatanodeDescriptor srcNode) { boolean status = false; + int underReplicatedBlocks = 0; + int decommissionOnlyReplicas = 0; + int underReplicatedInOpenFiles = 0; + for(final Iterator<Block> i = srcNode.getBlockIterator(); i.hasNext(); ) { final Block block = i.next(); INode fileINode = blocksMap.getINode(block); if (fileINode != null) { NumberReplicas num = countNodes(block); int curReplicas = num.liveReplicas(); int curExpectedReplicas = getReplication(block); if (curExpectedReplicas > curReplicas) { - status = true; + // Log info about one block for this node which needs replication + if (!status) { + status = true; + logBlockReplicationInfo(block, srcNode, num); + } + underReplicatedBlocks++; + if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) { + decommissionOnlyReplicas++; + } + if (fileINode.isUnderConstruction()) { + underReplicatedInOpenFiles++; + } + if (!neededReplications.contains(block) && pendingReplications.getNumReplicas(block) == 0) { // // These blocks have been reported from the datanode // after the startDecommission method has been executed. These // blocks were in flight when the decommission was started. // neededReplications.add(block, curReplicas, num.decommissionedReplicas(), curExpectedReplicas); } } } } + srcNode.decommissioningStatus.set(underReplicatedBlocks, + decommissionOnlyReplicas, underReplicatedInOpenFiles); + return status; } /** * Change, if appropriate, the admin state of a datanode to * decommission completed. Return true if decommission is complete. */ boolean checkDecommissionStateInternal(DatanodeDescriptor node) { // // Check to see if all blocks in this decommissioned // node has reached their target replication factor. 
// if (node.isDecommissionInProgress()) { if (!isReplicationInProgress(node)) { node.setDecommissioned(); LOG.info("Decommission complete for node " + node.getName()); } } if (node.isDecommissioned()) { return true; } return false; } /** * Keeps track of which datanodes/ipaddress are allowed to connect to the namenode. */ private boolean inHostsList(DatanodeID node, String ipAddr) { Set<String> hostsList = hostsReader.getHosts(); return (hostsList.isEmpty() || (ipAddr != null && hostsList.contains(ipAddr)) || hostsList.contains(node.getHost()) || hostsList.contains(node.getName()) || ((node instanceof DatanodeInfo) && hostsList.contains(((DatanodeInfo)node).getHostName()))); } private boolean inExcludedHostsList(DatanodeID node, String ipAddr) { Set<String> excludeList = hostsReader.getExcludedHosts(); return ((ipAddr != null && excludeList.contains(ipAddr)) || excludeList.contains(node.getHost()) || excludeList.contains(node.getName()) || ((node instanceof DatanodeInfo) && excludeList.contains(((DatanodeInfo)node).getHostName()))); } /** * Rereads the config to get hosts and exclude list file names. * Rereads the files to update the hosts and exclude lists. It * checks if any of the hosts have changed states: * 1. Added to hosts --> no further work needed here. * 2. Removed from hosts --> mark AdminState as decommissioned. * 3. Added to exclude --> start decommission. * 4. Removed from exclude --> stop decommission. */ public void refreshNodes(Configuration conf) throws IOException { checkSuperuserPrivilege(); // Reread the config to get dfs.hosts and dfs.hosts.exclude filenames. 
// Update the file names and refresh internal includes and excludes list if (conf == null) conf = new Configuration(); hostsReader.updateFileNames(conf.get("dfs.hosts",""), conf.get("dfs.hosts.exclude", "")); hostsReader.refresh(); synchronized (this) { for (Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext();) { DatanodeDescriptor node = it.next(); // Check if not include. if (!inHostsList(node, null)) { node.setDecommissioned(); // case 2. } else { if (inExcludedHostsList(node, null)) { if (!node.isDecommissionInProgress() && !node.isDecommissioned()) { startDecommission(node); // case 3. } } else { if (node.isDecommissionInProgress() || node.isDecommissioned()) { stopDecommission(node); // case 4. } } } } } } void finalizeUpgrade() throws IOException { checkSuperuserPrivilege(); getFSImage().finalizeUpgrade(); } /** * Checks if the node is not on the hosts list. If it is not, then * it will be ignored. If the node is in the hosts list, but is also * on the exclude list, then it will be decommissioned. * Returns FALSE if node is rejected for registration. * Returns TRUE if node is registered (including when it is on the * exclude list and is being decommissioned). */ private synchronized boolean verifyNodeRegistration(DatanodeRegistration nodeReg, String ipAddr) throws IOException { if (!inHostsList(nodeReg, ipAddr)) { return false; } if (inExcludedHostsList(nodeReg, ipAddr)) { DatanodeDescriptor node = getDatanode(nodeReg); if (node == null) { throw new IOException("verifyNodeRegistration: unknown datanode " + nodeReg.getName()); } if (!checkDecommissionStateInternal(node)) { startDecommission(node); } } return true; } /** * Checks if the Admin state bit is DECOMMISSIONED. If so, then * we should shut it down. * * Returns true if the node should be shutdown. */ private boolean shouldNodeShutdown(DatanodeDescriptor node) { return (node.isDecommissioned()); } /** * Get data node by storage ID. 
* * @param nodeID * @return DatanodeDescriptor or null if the node is not found. * @throws IOException */ public DatanodeDescriptor getDatanode(DatanodeID nodeID) throws IOException { UnregisteredDatanodeException e = null; DatanodeDescriptor node = datanodeMap.get(nodeID.getStorageID()); if (node == null) return null; if (!node.getName().equals(nodeID.getName())) { e = new UnregisteredDatanodeException(nodeID, node); NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: " + e.getLocalizedMessage()); throw e; } return node; } /** Stop at and return the datanode at index (used for content browsing)*/ @Deprecated private DatanodeDescriptor getDatanodeByIndex(int index) { int i = 0; for (DatanodeDescriptor node : datanodeMap.values()) { if (i == index) { return node; } i++; } return null; } @Deprecated public String randomDataNode() { int size = datanodeMap.size(); int index = 0; if (size != 0) { index = r.nextInt(size); for(int i=0; i<size; i++) { DatanodeDescriptor d = getDatanodeByIndex(index); if (d != null && !d.isDecommissioned() && !isDatanodeDead(d) && !d.isDecommissionInProgress()) { return d.getHost() + ":" + d.getInfoPort(); } index = (index + 1) % size; } } return null; } public DatanodeDescriptor getRandomDatanode() { return replicator.chooseTarget(1, null, null, 0)[0]; } /** * SafeModeInfo contains information related to the safe mode. * <p> * An instance of {@link SafeModeInfo} is created when the name node * enters safe mode. * <p> * During name node startup {@link SafeModeInfo} counts the number of * <em>safe blocks</em>, those that have at least the minimal number of * replicas, and calculates the ratio of safe blocks to the total number * of blocks in the system, which is the size of * {@link FSNamesystem#blocksMap}. When the ratio reaches the * {@link #threshold} it starts the {@link SafeModeMonitor} daemon in order * to monitor whether the safe mode {@link #extension} is passed. * Then it leaves safe mode and destroys itself. 
* <p> * If safe mode is turned on manually then the number of safe blocks is * not tracked because the name node is not intended to leave safe mode * automatically in the case. * * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction) * @see SafeModeMonitor */ class SafeModeInfo { // configuration fields /** Safe mode threshold condition %.*/ private double threshold; /** Safe mode extension after the threshold. */ private int extension; /** Min replication required by safe mode. */ private int safeReplication; // internal fields /** Time when threshold was reached. * * <br>-1 safe mode is off * <br> 0 safe mode is on, but threshold is not reached yet */ private long reached = -1; /** Total number of blocks. */ int blockTotal; /** Number of safe blocks. */ private int blockSafe; /** time of the last status printout */ private long lastStatusReport = 0; /** * Creates SafeModeInfo when the name node enters * automatic safe mode at startup. * * @param conf configuration */ SafeModeInfo(Configuration conf) { this.threshold = conf.getFloat("dfs.safemode.threshold.pct", 0.95f); this.extension = conf.getInt("dfs.safemode.extension", 0); this.safeReplication = conf.getInt("dfs.replication.min", 1); this.blockTotal = 0; this.blockSafe = 0; } /** * Creates SafeModeInfo when safe mode is entered manually. * * The {@link #threshold} is set to 1.5 so that it could never be reached. * {@link #blockTotal} is set to -1 to indicate that safe mode is manual. * * @see SafeModeInfo */ private SafeModeInfo() { this.threshold = 1.5f; // this threshold can never be reached this.extension = Integer.MAX_VALUE; this.safeReplication = Short.MAX_VALUE + 1; // more than maxReplication this.blockTotal = -1; this.blockSafe = -1; this.reached = -1; enter(); reportStatus("STATE* Safe mode is ON.", true); } /** * Check if safe mode is on. 
* @return true if in safe mode */ synchronized boolean isOn() { try { assert isConsistent() : " SafeMode: Inconsistent filesystem state: " + "Total num of blocks, active blocks, or " + "total safe blocks don't match."; } catch(IOException e) { System.err.print(StringUtils.stringifyException(e)); } return this.reached >= 0; } /** * Enter safe mode. */ void enter() { this.reached = 0; } /** * Leave safe mode. * <p> * Switch to manual safe mode if distributed upgrade is required.<br> * Check for invalid, under- & over-replicated blocks in the end of startup. */ synchronized void leave(boolean checkForUpgrades) { if(checkForUpgrades) { // verify whether a distributed upgrade needs to be started boolean needUpgrade = false; try { needUpgrade = startDistributedUpgradeIfNeeded(); } catch(IOException e) { FSNamesystem.LOG.error(StringUtils.stringifyException(e)); } if(needUpgrade) { // switch to manual safe mode safeMode = new SafeModeInfo(); return; } } // verify blocks replications processMisReplicatedBlocks(); long timeInSafemode = now() - systemStart; NameNode.stateChangeLog.info("STATE* Leaving safe mode after " + timeInSafemode/1000 + " secs."); NameNode.getNameNodeMetrics().safeModeTime.set((int) timeInSafemode); if (reached >= 0) { NameNode.stateChangeLog.info("STATE* Safe mode is OFF."); } reached = -1; safeMode = null; NameNode.stateChangeLog.info("STATE* Network topology has " +clusterMap.getNumOfRacks()+" racks and " +clusterMap.getNumOfLeaves()+ " datanodes"); NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has " +neededReplications.size()+" blocks"); } /** * Safe mode can be turned off iff * the threshold is reached and * the extension time have passed. * @return true if can leave or false otherwise. 
*/ synchronized boolean canLeave() { if (reached == 0) return false; if (now() - reached < extension) { reportStatus("STATE* Safe mode ON.", false); return false; } return !needEnter(); } /** * There is no need to enter safe mode * if DFS is empty or {@link #threshold} == 0 */ boolean needEnter() { return getSafeBlockRatio() < threshold; } /** * Ratio of the number of safe blocks to the total number of blocks * to be compared with the threshold. */ private float getSafeBlockRatio() { return (blockTotal == 0 ? 1 : (float)blockSafe/blockTotal); } /** * Check and trigger safe mode if needed. */ private void checkMode() { if (needEnter()) { enter(); reportStatus("STATE* Safe mode ON.", false); return; } // the threshold is reached if (!isOn() || // safe mode is off extension <= 0 || threshold <= 0) { // don't need to wait this.leave(true); // leave safe mode return; } if (reached > 0) { // threshold has already been reached before reportStatus("STATE* Safe mode ON.", false); return; } // start monitor reached = now(); smmthread = new Daemon(new SafeModeMonitor()); smmthread.start(); reportStatus("STATE* Safe mode extension entered.", true); } /** * Set total number of blocks. */ synchronized void setBlockTotal(int total) { this.blockTotal = total; checkMode(); } /** * Increment number of safe blocks if current block has * reached minimal replication. * @param replication current replication */ synchronized void incrementSafeBlockCount(short replication) { if ((int)replication == safeReplication) this.blockSafe++; checkMode(); } /** * Decrement number of safe blocks if current block has * fallen below minimal replication. * @param replication current replication */ synchronized void decrementSafeBlockCount(short replication) { if (replication == safeReplication-1) this.blockSafe--; checkMode(); } /** * Check if safe mode was entered manually or at startup. */ boolean isManual() { return extension == Integer.MAX_VALUE; } /** * Set manual safe mode. 
*/ void setManual() { extension = Integer.MAX_VALUE; } /** * A tip on how safe mode is to be turned off: manually or automatically. */ String getTurnOffTip() { String leaveMsg = "Safe mode will be turned off automatically"; if(reached < 0) return "Safe mode is OFF."; if(isManual()) { if(getDistributedUpgradeState()) return leaveMsg + " upon completion of " + "the distributed upgrade: upgrade progress = " + getDistributedUpgradeStatus() + "%"; leaveMsg = "Use \"hadoop dfsadmin -safemode leave\" to turn safe mode off"; } if(blockTotal < 0) return leaveMsg + "."; String safeBlockRatioMsg = String.format("The ratio of reported blocks %.4f has " + (reached == 0 ? "not " : "") + "reached the threshold %.4f. ", getSafeBlockRatio(), threshold) + leaveMsg; if(reached == 0 || isManual()) // threshold is not reached or manual return safeBlockRatioMsg + "."; // extension period is in progress return safeBlockRatioMsg + " in " + Math.abs(reached + extension - now())/1000 + " seconds."; } /** * Print status every 20 seconds. */ private void reportStatus(String msg, boolean rightNow) { long curTime = now(); if(!rightNow && (curTime - lastStatusReport < 20 * 1000)) return; NameNode.stateChangeLog.info(msg + " \n" + getTurnOffTip()); lastStatusReport = curTime; } /** * Returns printable state of the class. */ public String toString() { String resText = "Current safe block ratio = " + getSafeBlockRatio() + ". Target threshold = " + threshold + ". Minimal replication = " + safeReplication + "."; if (reached > 0) resText += " Threshold was reached " + new Date(reached) + "."; return resText; } /** * Checks consistency of the class state. * This is costly and currently called only in assert. 
*/ boolean isConsistent() throws IOException { if (blockTotal == -1 && blockSafe == -1) { return true; // manual safe mode } int activeBlocks = blocksMap.size() - (int)pendingDeletionBlocksCount; return (blockTotal == activeBlocks) || (blockSafe >= 0 && blockSafe <= blockTotal); } } /** * Periodically check whether it is time to leave safe mode. * This thread starts when the threshold level is reached. * */ class SafeModeMonitor implements Runnable { /** interval in msec for checking safe mode: {@value} */ private static final long recheckInterval = 1000; /** */ public void run() { while (fsRunning && (safeMode != null && !safeMode.canLeave())) { try { Thread.sleep(recheckInterval); } catch (InterruptedException ie) { } } // leave safe mode and stop the monitor try { leaveSafeMode(true); } catch(SafeModeException es) { // should never happen String msg = "SafeModeMonitor may not run during distributed upgrade."; assert false : msg; throw new RuntimeException(msg, es); } smmthread = null; } } /** * Current system time. * @return current time in msec. */ static long now() { return System.currentTimeMillis(); } boolean setSafeMode(SafeModeAction action) throws IOException { if (action != SafeModeAction.SAFEMODE_GET) { checkSuperuserPrivilege(); switch(action) { case SAFEMODE_LEAVE: // leave safe mode leaveSafeMode(false); break; case SAFEMODE_ENTER: // enter safe mode enterSafeMode(); break; } } return isInSafeMode(); } /** * Check whether the name node is in safe mode. * @return true if safe mode is ON, false otherwise */ boolean isInSafeMode() { if (safeMode == null) return false; return safeMode.isOn(); } /** * Increment number of blocks that reached minimal replication. * @param replication current replication */ void incrementSafeBlockCount(int replication) { if (safeMode == null) return; safeMode.incrementSafeBlockCount((short)replication); } /** * Decrement number of blocks that reached minimal replication. 
*/ void decrementSafeBlockCount(Block b) { if (safeMode == null) // mostly true return; safeMode.decrementSafeBlockCount((short)countNodes(b).liveReplicas()); } /** * Set the total number of blocks in the system. */ void setBlockTotal() { if (safeMode == null) return; safeMode.setBlockTotal(blocksMap.size()); } /** * Get the total number of blocks in the system. */ public long getBlocksTotal() { return blocksMap.size(); } /** * Enter safe mode manually. * @throws IOException */ synchronized void enterSafeMode() throws IOException { if (!isInSafeMode()) { safeMode = new SafeModeInfo(); return; } safeMode.setManual(); NameNode.stateChangeLog.info("STATE* Safe mode is ON. " + safeMode.getTurnOffTip()); } /** * Leave safe mode. * @throws IOException */ synchronized void leaveSafeMode(boolean checkForUpgrades) throws SafeModeException { if (!isInSafeMode()) { NameNode.stateChangeLog.info("STATE* Safe mode is already OFF."); return; } if(getDistributedUpgradeState()) throw new SafeModeException("Distributed upgrade is in progress", safeMode); safeMode.leave(checkForUpgrades); } String getSafeModeTip() { if (!isInSafeMode()) return ""; return safeMode.getTurnOffTip(); } long getEditLogSize() throws IOException { return getEditLog().getEditLogSize(); } synchronized CheckpointSignature rollEditLog() throws IOException { if (isInSafeMode()) { throw new SafeModeException("Checkpoint not created", safeMode); } LOG.info("Roll Edit Log from " + Server.getRemoteAddress()); return getFSImage().rollEditLog(); } synchronized void rollFSImage() throws IOException { if (isInSafeMode()) { throw new SafeModeException("Checkpoint not created", safeMode); } LOG.info("Roll FSImage from " + Server.getRemoteAddress()); getFSImage().rollFSImage(); } /** * Returns whether the given block is one pointed-to by a file. 
*/ private boolean isValidBlock(Block b) { return (blocksMap.getINode(b) != null); } // Distributed upgrade manager UpgradeManagerNamenode upgradeManager = new UpgradeManagerNamenode(); UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action ) throws IOException { return upgradeManager.distributedUpgradeProgress(action); } UpgradeCommand processDistributedUpgradeCommand(UpgradeCommand comm) throws IOException { return upgradeManager.processUpgradeCommand(comm); } int getDistributedUpgradeVersion() { return upgradeManager.getUpgradeVersion(); } UpgradeCommand getDistributedUpgradeCommand() throws IOException { return upgradeManager.getBroadcastCommand(); } boolean getDistributedUpgradeState() { return upgradeManager.getUpgradeState(); } short getDistributedUpgradeStatus() { return upgradeManager.getUpgradeStatus(); } boolean startDistributedUpgradeIfNeeded() throws IOException { return upgradeManager.startUpgrade(); } PermissionStatus createFsOwnerPermissions(FsPermission permission) { return new PermissionStatus(fsOwner.getUserName(), supergroup, permission); } private FSPermissionChecker checkOwner(String path) throws AccessControlException { return checkPermission(path, true, null, null, null, null); } private FSPermissionChecker checkPathAccess(String path, FsAction access ) throws AccessControlException { return checkPermission(path, false, null, null, access, null); } private FSPermissionChecker checkParentAccess(String path, FsAction access ) throws AccessControlException { return checkPermission(path, false, null, access, null, null); } private FSPermissionChecker checkAncestorAccess(String path, FsAction access ) throws AccessControlException { return checkPermission(path, false, access, null, null, null); } private FSPermissionChecker checkTraverse(String path ) throws AccessControlException { return checkPermission(path, false, null, null, null, null); } private void checkSuperuserPrivilege() throws AccessControlException { if 
(isPermissionEnabled) { PermissionChecker.checkSuperuserPrivilege(fsOwner, supergroup); } } /** * Check whether current user have permissions to access the path. * For more details of the parameters, see * {@link FSPermissionChecker#checkPermission(String, INodeDirectory, boolean, FsAction, FsAction, FsAction, FsAction)}. */ private FSPermissionChecker checkPermission(String path, boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess, FsAction access, FsAction subAccess) throws AccessControlException { FSPermissionChecker pc = new FSPermissionChecker( fsOwner.getUserName(), supergroup); if (!pc.isSuper) { dir.waitForReady(); pc.checkPermission(path, dir.rootDir, doCheckOwner, ancestorAccess, parentAccess, access, subAccess); } return pc; } /** * Check to see if we have exceeded the limit on the number * of inodes. */ void checkFsObjectLimit() throws IOException { if (maxFsObjects != 0 && maxFsObjects <= dir.totalInodes() + getBlocksTotal()) { throw new IOException("Exceeded the configured number of objects " + maxFsObjects + " in the filesystem."); } } /** * Get the total number of objects in the system. */ long getMaxObjects() { return maxFsObjects; } public long getFilesTotal() { return this.dir.totalInodes(); } public long getPendingReplicationBlocks() { return pendingReplicationBlocksCount; } public long getUnderReplicatedBlocks() { return underReplicatedBlocksCount; } /** Returns number of blocks with corrupt replicas */ public long getCorruptReplicaBlocks() { return corruptReplicaBlocksCount; } public long getScheduledReplicationBlocks() { return scheduledReplicationBlocksCount; } public long getPendingDeletionBlocks() { return pendingDeletionBlocksCount; } public long getExcessBlocks() { return excessBlocksCount; } public synchronized int getBlockCapacity() { return blocksMap.getCapacity(); } public String getFSState() { return isInSafeMode() ? 
"safeMode" : "Operational"; } private ObjectName mbeanName; /** * Register the FSNamesystem MBean using the name * "hadoop:service=NameNode,name=FSNamesystemState" */ void registerMBean(Configuration conf) { // We wrap to bypass standard mbean naming convention. // This wraping can be removed in java 6 as it is more flexible in // package naming for mbeans and their impl. StandardMBean bean; try { myFSMetrics = new FSNamesystemMetrics(conf); bean = new StandardMBean(this,FSNamesystemMBean.class); mbeanName = MBeanUtil.registerMBean("NameNode", "FSNamesystemState", bean); } catch (NotCompliantMBeanException e) { e.printStackTrace(); } LOG.info("Registered FSNamesystemStatusMBean"); } /** * get FSNamesystemMetrics */ public FSNamesystemMetrics getFSNamesystemMetrics() { return myFSMetrics; } /** * shutdown FSNamesystem */ public void shutdown() { if (mbeanName != null) MBeanUtil.unregisterMBean(mbeanName); } /** * Number of live data nodes * @return Number of live data nodes */ public int numLiveDataNodes() { int numLive = 0; synchronized (datanodeMap) { for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext();) { DatanodeDescriptor dn = it.next(); if (!isDatanodeDead(dn) ) { numLive++; } } } return numLive; } /** * Number of dead data nodes * @return Number of dead data nodes */ public int numDeadDataNodes() { int numDead = 0; synchronized (datanodeMap) { for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext();) { DatanodeDescriptor dn = it.next(); if (isDatanodeDead(dn) ) { numDead++; } } } return numDead; } /** * Sets the generation stamp for this filesystem */ public void setGenerationStamp(long stamp) { generationStamp.setStamp(stamp); } /** * Gets the generation stamp for this filesystem */ public long getGenerationStamp() { return generationStamp.getStamp(); } /** * Increments, logs and then returns the stamp */ long nextGenerationStamp() { long gs = generationStamp.nextStamp(); 
getEditLog().logGenerationStamp(gs); return gs; } /** * Verifies that the block is associated with a file that has a lease. * Increments, logs and then returns the stamp */ synchronized long nextGenerationStampForBlock(Block block) throws IOException { BlockInfo storedBlock = blocksMap.getStoredBlock(block); if (storedBlock == null) { String msg = block + " is already commited, storedBlock == null."; LOG.info(msg); throw new IOException(msg); } INodeFile fileINode = storedBlock.getINode(); if (!fileINode.isUnderConstruction()) { String msg = block + " is already commited, !fileINode.isUnderConstruction()."; LOG.info(msg); throw new IOException(msg); } if (!((INodeFileUnderConstruction)fileINode).setLastRecoveryTime(now())) { String msg = block + " is beening recovered, ignoring this request."; LOG.info(msg); throw new IOException(msg); } return nextGenerationStamp(); } // rename was successful. If any part of the renamed subtree had // files that were being written to, update with new filename. // void changeLease(String src, String dst, FileStatus dinfo) throws IOException { String overwrite; String replaceBy; boolean destinationExisted = true; if (dinfo == null) { destinationExisted = false; } if (destinationExisted && dinfo.isDir()) { Path spath = new Path(src); overwrite = spath.getParent().toString() + Path.SEPARATOR; replaceBy = dst + Path.SEPARATOR; } else { overwrite = src; replaceBy = dst; } leaseManager.changeLease(src, dst, overwrite, replaceBy); } /** * Serializes leases. 
*/ void saveFilesUnderConstruction(DataOutputStream out) throws IOException { synchronized (leaseManager) { out.writeInt(leaseManager.countPath()); // write the size for (Lease lease : leaseManager.getSortedLeases()) { for(String path : lease.getPaths()) { // verify that path exists in namespace INode node = dir.getFileINode(path); if (node == null) { throw new IOException("saveLeases found path " + path + " but no matching entry in namespace."); } if (!node.isUnderConstruction()) { throw new IOException("saveLeases found path " + path + " but is not under construction."); } INodeFileUnderConstruction cons = (INodeFileUnderConstruction) node; FSImage.writeINodeUnderConstruction(out, cons, path); } } } } + + public synchronized ArrayList<DatanodeDescriptor> getDecommissioningNodes() { + ArrayList<DatanodeDescriptor> decommissioningNodes = new ArrayList<DatanodeDescriptor>(); + ArrayList<DatanodeDescriptor> results = getDatanodeListForReport(DatanodeReportType.LIVE); + for (Iterator<DatanodeDescriptor> it = results.iterator(); it.hasNext();) { + DatanodeDescriptor node = it.next(); + if (node.isDecommissionInProgress()) { + decommissioningNodes.add(node); + } + } + return decommissioningNodes; + } } diff --git a/src/test/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/src/test/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java new file mode 100644 index 0000000..60327e0 --- /dev/null +++ b/src/test/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java @@ -0,0 +1,228 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Random; + +import org.junit.BeforeClass; +import org.junit.AfterClass; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; + +/** + * This class tests the decommissioning of nodes. 
+ */ +public class TestDecommissioningStatus { + private static final long seed = 0xDEADBEEFL; + private static final int blockSize = 8192; + private static final int fileSize = 16384; + private static final int numDatanodes = 2; + private static MiniDFSCluster cluster; + private static FileSystem fileSys; + private static Path excludeFile; + private static FileSystem localFileSys; + private static Configuration conf; + private static Path dir; + + ArrayList<String> decommissionedNodes = new ArrayList<String>(numDatanodes); + + @BeforeClass + public static void setUp() throws Exception { + conf = new Configuration(); + conf.setBoolean("dfs.replication.considerLoad", false); + + // Set up the hosts/exclude files. + localFileSys = FileSystem.getLocal(conf); + Path workingDir = localFileSys.getWorkingDirectory(); + dir = new Path(workingDir, "build/test/data/work-dir/decommission"); + assertTrue(localFileSys.mkdirs(dir)); + excludeFile = new Path(dir, "exclude"); + conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath()); + conf.setInt("heartbeat.recheck.interval", 2000); + conf.setInt("dfs.heartbeat.interval", 1); + conf.setInt("dfs.replication.pending.timeout.sec", 4); + conf.setInt("dfs.replication.interval", 1000); + conf.setInt("dfs.namenode.decommission.interval", 1); + writeConfigFile(localFileSys, excludeFile, null); + + cluster = new MiniDFSCluster(conf, numDatanodes, true, null); + cluster.waitActive(); + fileSys = cluster.getFileSystem(); + } + + @AfterClass + public static void tearDown() throws Exception { + if(fileSys != null) fileSys.close(); + if(cluster != null) cluster.shutdown(); + } + + private static void writeConfigFile(FileSystem fs, Path name, + ArrayList<String> nodes) throws IOException { + + // delete if it already exists + if (fs.exists(name)) { + fs.delete(name, true); + } + + FSDataOutputStream stm = fs.create(name); + + if (nodes != null) { + for (Iterator<String> it = nodes.iterator(); it.hasNext();) { + String node = it.next(); + 
stm.writeBytes(node); + stm.writeBytes("\n"); + } + } + stm.close(); + } + + private void writeFile(FileSystem fileSys, Path name, short repl) + throws IOException { + // create and write a file that contains three blocks of data + FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() + .getInt("io.file.buffer.size", 4096), repl, (long) blockSize); + byte[] buffer = new byte[fileSize]; + Random rand = new Random(seed); + rand.nextBytes(buffer); + stm.write(buffer); + stm.close(); + } + + private FSDataOutputStream writeIncompleteFile(FileSystem fileSys, Path name, + short repl) throws IOException { + // create and write a file that contains three blocks of data + FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() + .getInt("io.file.buffer.size", 4096), repl, (long) blockSize); + byte[] buffer = new byte[fileSize]; + Random rand = new Random(seed); + rand.nextBytes(buffer); + stm.write(buffer); + // Do not close stream, return it + // so that it is not garbage collected + return stm; + } + + private void cleanupFile(FileSystem fileSys, Path name) throws IOException { + assertTrue(fileSys.exists(name)); + fileSys.delete(name, true); + assertTrue(!fileSys.exists(name)); + } + + /* + * Decommissions the node at the given index + */ + private String decommissionNode(FSNamesystem namesystem, Configuration conf, + DFSClient client, FileSystem localFileSys, int nodeIndex) + throws IOException { + DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE); + + String nodename = info[nodeIndex].getName(); + System.out.println("Decommissioning node: " + nodename); + + // write nodename into the exclude file. 
+ ArrayList<String> nodes = new ArrayList<String>(decommissionedNodes); + nodes.add(nodename); + writeConfigFile(localFileSys, excludeFile, nodes); + namesystem.refreshNodes(conf); + return nodename; + } + + private void checkDecommissionStatus(DatanodeDescriptor decommNode, + int expectedUnderRep, int expectedDecommissionOnly, + int expectedUnderRepInOpenFiles) { + assertEquals(decommNode.decommissioningStatus.getUnderReplicatedBlocks(), + expectedUnderRep); + assertEquals( + decommNode.decommissioningStatus.getDecommissionOnlyReplicas(), + expectedDecommissionOnly); + assertEquals(decommNode.decommissioningStatus + .getUnderReplicatedInOpenFiles(), expectedUnderRepInOpenFiles); + } + + /** + * Tests Decommissioning Status in DFS. + */ + + @Test + public void testDecommissionStatus() throws IOException, InterruptedException { + InetSocketAddress addr = new InetSocketAddress("localhost", cluster + .getNameNodePort()); + DFSClient client = new DFSClient(addr, conf); + DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE); + assertEquals("Number of Datanodes ", 2, info.length); + FileSystem fileSys = cluster.getFileSystem(); + + short replicas = 2; + // + // Decommission one node. 
Verify the decommission status + // + Path file1 = new Path("decommission.dat"); + writeFile(fileSys, file1, replicas); + + Path file2 = new Path("decommission1.dat"); + FSDataOutputStream st1 = writeIncompleteFile(fileSys, file2, replicas); + Thread.sleep(5000); + + FSNamesystem fsn = cluster.getNameNode().getNamesystem(); + for (int iteration = 0; iteration < numDatanodes; iteration++) { + String downnode = decommissionNode(fsn, conf, client, localFileSys, + iteration); + decommissionedNodes.add(downnode); + Thread.sleep(5000); + ArrayList<DatanodeDescriptor> decommissioningNodes = fsn + .getDecommissioningNodes(); + if (iteration == 0) { + assertEquals(decommissioningNodes.size(), 1); + DatanodeDescriptor decommNode = decommissioningNodes.get(0); + checkDecommissionStatus(decommNode, 4, 0, 2); + } else { + assertEquals(decommissioningNodes.size(), 2); + DatanodeDescriptor decommNode1 = decommissioningNodes.get(0); + DatanodeDescriptor decommNode2 = decommissioningNodes.get(1); + checkDecommissionStatus(decommNode1, 4, 4, 2); + checkDecommissionStatus(decommNode2, 4, 4, 2); + } + } + // Call refreshNodes on FSNamesystem with empty exclude file. + // This will remove the datanodes from decommissioning list and + // make them available again. 
+ writeConfigFile(localFileSys, excludeFile, null); + fsn.refreshNodes(conf); + st1.close(); + cleanupFile(fileSys, file1); + cleanupFile(fileSys, file2); + cleanupFile(localFileSys, dir); + } +} diff --git a/src/webapps/hdfs/dfshealth.jsp b/src/webapps/hdfs/dfshealth.jsp index 2b190bc..c9431f6 100644 --- a/src/webapps/hdfs/dfshealth.jsp +++ b/src/webapps/hdfs/dfshealth.jsp @@ -1,265 +1,274 @@ <%@ page contentType="text/html; charset=UTF-8" import="javax.servlet.*" import="javax.servlet.http.*" import="java.io.*" import="java.util.*" import="org.apache.hadoop.fs.*" import="org.apache.hadoop.hdfs.*" import="org.apache.hadoop.hdfs.server.namenode.*" import="org.apache.hadoop.hdfs.server.datanode.*" import="org.apache.hadoop.hdfs.server.common.Storage" import="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory" import="org.apache.hadoop.hdfs.protocol.*" import="org.apache.hadoop.util.*" import="java.text.DateFormat" import="java.lang.Math" import="java.net.URLEncoder" %> <%! JspHelper jspHelper = new JspHelper(); int rowNum = 0; int colNum = 0; String rowTxt() { colNum = 0; return "<tr class=\"" + (((rowNum++)%2 == 0)? 
"rowNormal" : "rowAlt") + "\"> "; } String colTxt() { return "<td id=\"col" + ++colNum + "\"> "; } void counterReset () { colNum = 0; rowNum = 0 ; } long diskBytes = 1024 * 1024 * 1024; String diskByteStr = "GB"; String sorterField = null; String sorterOrder = null; String NodeHeaderStr(String name) { String ret = "class=header"; String order = "ASC"; if ( name.equals( sorterField ) ) { ret += sorterOrder; if ( sorterOrder.equals("ASC") ) order = "DSC"; } ret += " onClick=\"window.document.location=" + "'/dfshealth.jsp?sorter/field=" + name + "&sorter/order=" + order + "'\" title=\"sort on this column\""; return ret; } public void generateNodeData( JspWriter out, DatanodeDescriptor d, String suffix, boolean alive, int nnHttpPort ) throws IOException { /* Say the datanode is dn1.hadoop.apache.org with ip 192.168.0.5 we use: 1) d.getHostName():d.getPort() to display. Domain and port are stripped if they are common across the nodes. i.e. "dn1" 2) d.getHost():d.Port() for "title". i.e. "192.168.0.5:50010" 3) d.getHostName():d.getInfoPort() for url. i.e. "http://dn1.hadoop.apache.org:50075/..." Note that "d.getHost():d.getPort()" is what DFS clients use to interact with datanodes. */ // from nn_browsedfscontent.jsp: String url = "http://" + d.getHostName() + ":" + d.getInfoPort() + "/browseDirectory.jsp?namenodeInfoPort=" + nnHttpPort + "&dir=" + URLEncoder.encode("/", "UTF-8"); String name = d.getHostName() + ":" + d.getPort(); if ( !name.matches( "\\d+\\.\\d+.\\d+\\.\\d+.*" ) ) name = name.replaceAll( "\\.[^.:]*", "" ); int idx = (suffix != null && name.endsWith( suffix )) ? name.indexOf( suffix ) : -1; out.print( rowTxt() + "<td class=\"name\"><a title=\"" + d.getHost() + ":" + d.getPort() + "\" href=\"" + url + "\">" + (( idx > 0 ) ? name.substring(0, idx) : name) + "</a>" + (( alive ) ? 
"" : "\n") ); if ( !alive ) return; long c = d.getCapacity(); long u = d.getDfsUsed(); long nu = d.getNonDfsUsed(); long r = d.getRemaining(); String percentUsed = StringUtils.limitDecimalTo2(d.getDfsUsedPercent()); String percentRemaining = StringUtils.limitDecimalTo2(d.getRemainingPercent()); String adminState = (d.isDecommissioned() ? "Decommissioned" : (d.isDecommissionInProgress() ? "Decommission In Progress": "In Service")); long timestamp = d.getLastUpdate(); long currentTime = System.currentTimeMillis(); out.print("<td class=\"lastcontact\"> " + ((currentTime - timestamp)/1000) + "<td class=\"adminstate\">" + adminState + "<td align=\"right\" class=\"capacity\">" + StringUtils.limitDecimalTo2(c*1.0/diskBytes) + "<td align=\"right\" class=\"used\">" + StringUtils.limitDecimalTo2(u*1.0/diskBytes) + "<td align=\"right\" class=\"nondfsused\">" + StringUtils.limitDecimalTo2(nu*1.0/diskBytes) + "<td align=\"right\" class=\"remaining\">" + StringUtils.limitDecimalTo2(r*1.0/diskBytes) + "<td align=\"right\" class=\"pcused\">" + percentUsed + "<td class=\"pcused\">" + ServletUtil.percentageGraph( (int)Double.parseDouble(percentUsed) , 100) + "<td align=\"right\" class=\"pcremaining`\">" + percentRemaining + "<td title=" + "\"blocks scheduled : " + d.getBlocksScheduled() + "\" class=\"blocks\">" + d.numBlocks() + "\n"); } public void generateConfReport( JspWriter out, FSNamesystem fsn, HttpServletRequest request) throws IOException { long underReplicatedBlocks = fsn.getUnderReplicatedBlocks(); FSImage fsImage = fsn.getFSImage(); List<Storage.StorageDirectory> removedStorageDirs = fsImage.getRemovedStorageDirs(); String storageDirsSizeStr="", removedStorageDirsSizeStr="", storageDirsStr="", removedStorageDirsStr="", storageDirsDiv="", removedStorageDirsDiv=""; //FS Image storage configuration out.print("<h3> NameNode Storage: </h3>"); out.print("<div id=\"dfstable\"> <table border=1 cellpadding=10 cellspacing=0 title=\"NameNode Storage\">\n"+ 
"<thead><tr><td><b>Storage Directory</b></td><td><b>Type</b></td><td><b>State</b></td></tr></thead>"); StorageDirectory st =null; for (Iterator<StorageDirectory> it = fsImage.dirIterator(); it.hasNext();) { st = it.next(); String dir = "" + st.getRoot(); String type = "" + st.getStorageDirType(); out.print("<tr><td>"+dir+"</td><td>"+type+"</td><td>Active</td></tr>"); } long storageDirsSize = removedStorageDirs.size(); for(int i=0; i< storageDirsSize; i++){ st = removedStorageDirs.get(i); String dir = "" + st.getRoot(); String type = "" + st.getStorageDirType(); out.print("<tr><td>"+dir+"</td><td>"+type+"</td><td><font color=red>Failed</font></td></tr>"); } out.print("</table></div><br>\n"); } public void generateDFSHealthReport(JspWriter out, NameNode nn, HttpServletRequest request) throws IOException { FSNamesystem fsn = nn.getNamesystem(); ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>(); jspHelper.DFSNodesStatus(live, dead); + ArrayList<DatanodeDescriptor> decommissioning = fsn + .getDecommissioningNodes(); + sorterField = request.getParameter("sorter/field"); sorterOrder = request.getParameter("sorter/order"); if ( sorterField == null ) sorterField = "name"; if ( sorterOrder == null ) sorterOrder = "ASC"; // Find out common suffix. Should this be before or after the sort? 
String port_suffix = null; if ( live.size() > 0 ) { String name = live.get(0).getName(); int idx = name.indexOf(':'); if ( idx > 0 ) { port_suffix = name.substring( idx ); } for ( int i=1; port_suffix != null && i < live.size(); i++ ) { if ( live.get(i).getName().endsWith( port_suffix ) == false ) { port_suffix = null; break; } } } counterReset(); long total = fsn.getCapacityTotal(); long remaining = fsn.getCapacityRemaining(); long used = fsn.getCapacityUsed(); long nonDFS = fsn.getCapacityUsedNonDFS(); float percentUsed = fsn.getCapacityUsedPercent(); float percentRemaining = fsn.getCapacityRemainingPercent(); out.print( "<div id=\"dfstable\"> <table>\n" + rowTxt() + colTxt() + "Configured Capacity" + colTxt() + ":" + colTxt() + StringUtils.byteDesc( total ) + rowTxt() + colTxt() + "DFS Used" + colTxt() + ":" + colTxt() + StringUtils.byteDesc( used ) + rowTxt() + colTxt() + "Non DFS Used" + colTxt() + ":" + colTxt() + StringUtils.byteDesc( nonDFS ) + rowTxt() + colTxt() + "DFS Remaining" + colTxt() + ":" + colTxt() + StringUtils.byteDesc( remaining ) + rowTxt() + colTxt() + "DFS Used%" + colTxt() + ":" + colTxt() + StringUtils.limitDecimalTo2(percentUsed) + " %" + rowTxt() + colTxt() + "DFS Remaining%" + colTxt() + ":" + colTxt() + StringUtils.limitDecimalTo2(percentRemaining) + " %" + rowTxt() + colTxt() + "<a href=\"dfsnodelist.jsp?whatNodes=LIVE\">Live Nodes</a> " + colTxt() + ":" + colTxt() + live.size() + rowTxt() + colTxt() + "<a href=\"dfsnodelist.jsp?whatNodes=DEAD\">Dead Nodes</a> " + - colTxt() + ":" + colTxt() + dead.size() + - "</table></div><br>\n" ); + colTxt() + ":" + colTxt() + dead.size() + rowTxt() + colTxt() + + "<a href=\"dfsnodelist.jsp?whatNodes=DECOMMISSIONING\">" + + "Decommissioning Nodes</a> " + + colTxt() + ":" + colTxt() + decommissioning.size() + + rowTxt() + colTxt() + + "Number of Under-Replicated Blocks" + colTxt() + ":" + colTxt() + + fsn.getUnderReplicatedBlocks() + + "</table></div><br>\n" ); if (live.isEmpty() && 
dead.isEmpty()) { out.print("There are no datanodes in the cluster"); } }%> <% NameNode nn = (NameNode)application.getAttribute("name.node"); FSNamesystem fsn = nn.getNamesystem(); String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort(); %> <html> <link rel="stylesheet" type="text/css" href="/static/hadoop.css"> <title>Hadoop NameNode <%=namenodeLabel%></title> <body> <h1>NameNode '<%=namenodeLabel%>'</h1> <div id="dfstable"> <table> <tr> <td id="col1"> Started: <td> <%= fsn.getStartTime()%> <tr> <td id="col1"> Version: <td> <%= VersionInfo.getVersion()%>, r<%= VersionInfo.getRevision()%> <tr> <td id="col1"> Compiled: <td> <%= VersionInfo.getDate()%> by <%= VersionInfo.getUser()%> <tr> <td id="col1"> Upgrades: <td> <%= jspHelper.getUpgradeStatusText()%> </table></div><br> <b><a href="/nn_browsedfscontent.jsp">Browse the filesystem</a></b><br> <b><a href="/logs/">Namenode Logs</a></b> <hr> <h3>Cluster Summary</h3> <b> <%= jspHelper.getSafeModeText()%> </b> <b> <%= jspHelper.getInodeLimitText()%> </b> <a class="warning"> <%= JspHelper.getWarningText(fsn)%></a> <% generateDFSHealthReport(out, nn, request); %> <hr> <% generateConfReport(out, fsn, request); %> <% out.println(ServletUtil.htmlFooter()); %> diff --git a/src/webapps/hdfs/dfsnodelist.jsp b/src/webapps/hdfs/dfsnodelist.jsp index 60fa887..a588b03 100644 --- a/src/webapps/hdfs/dfsnodelist.jsp +++ b/src/webapps/hdfs/dfsnodelist.jsp @@ -1,265 +1,332 @@ <%@ page contentType="text/html; charset=UTF-8" import="javax.servlet.*" import="javax.servlet.http.*" import="java.io.*" import="java.util.*" import="org.apache.hadoop.fs.*" import="org.apache.hadoop.hdfs.*" import="org.apache.hadoop.hdfs.server.common.*" import="org.apache.hadoop.hdfs.server.namenode.*" import="org.apache.hadoop.hdfs.server.datanode.*" import="org.apache.hadoop.hdfs.protocol.*" import="org.apache.hadoop.util.*" import="java.text.DateFormat" import="java.lang.Math" import="java.net.URLEncoder" %> <%! 
JspHelper jspHelper = new JspHelper(); int rowNum = 0; int colNum = 0; String rowTxt() { colNum = 0; return "<tr class=\"" + (((rowNum++)%2 == 0)? "rowNormal" : "rowAlt") + "\"> "; } String colTxt() { return "<td id=\"col" + ++colNum + "\"> "; } void counterReset () { colNum = 0; rowNum = 0 ; } long diskBytes = 1024 * 1024 * 1024; String diskByteStr = "GB"; String sorterField = null; String sorterOrder = null; String whatNodes = "LIVE"; String NodeHeaderStr(String name) { String ret = "class=header"; String order = "ASC"; if ( name.equals( sorterField ) ) { ret += sorterOrder; if ( sorterOrder.equals("ASC") ) order = "DSC"; } ret += " onClick=\"window.document.location=" + "'/dfsnodelist.jsp?whatNodes="+whatNodes+"&sorter/field=" + name + "&sorter/order=" + order + "'\" title=\"sort on this column\""; return ret; } +void generateDecommissioningNodeData(JspWriter out, DatanodeDescriptor d, + String suffix, boolean alive, int nnHttpPort) throws IOException { + String url = "http://" + d.getHostName() + ":" + d.getInfoPort() + + "/browseDirectory.jsp?namenodeInfoPort=" + nnHttpPort + "&dir=" + + URLEncoder.encode("/", "UTF-8"); + + String name = d.getHostName() + ":" + d.getPort(); + if (!name.matches("\\d+\\.\\d+.\\d+\\.\\d+.*")) + name = name.replaceAll("\\.[^.:]*", ""); + int idx = (suffix != null && name.endsWith(suffix)) ? name + .indexOf(suffix) : -1; + + out.print(rowTxt() + "<td class=\"name\"><a title=\"" + d.getHost() + ":" + + d.getPort() + "\" href=\"" + url + "\">" + + ((idx > 0) ? name.substring(0, idx) : name) + "</a>" + + ((alive) ? 
"" : "\n")); + if (!alive) { + return; + } + + long decommRequestTime = d.decommissioningStatus.getStartTime(); + long timestamp = d.getLastUpdate(); + long currentTime = System.currentTimeMillis(); + long hoursSinceDecommStarted = (currentTime - decommRequestTime)/3600000; + long remainderMinutes = ((currentTime - decommRequestTime)/60000) % 60; + out.print("<td class=\"lastcontact\"> " + + ((currentTime - timestamp) / 1000) + + "<td class=\"underreplicatedblocks\">" + + d.decommissioningStatus.getUnderReplicatedBlocks() + + "<td class=\"blockswithonlydecommissioningreplicas\">" + + d.decommissioningStatus.getDecommissionOnlyReplicas() + + "<td class=\"underrepblocksinfilesunderconstruction\">" + + d.decommissioningStatus.getUnderReplicatedInOpenFiles() + + "<td class=\"timesincedecommissionrequest\">" + + hoursSinceDecommStarted + " hrs " + remainderMinutes + " mins" + + "\n"); +} + + public void generateNodeData( JspWriter out, DatanodeDescriptor d, String suffix, boolean alive, int nnHttpPort ) throws IOException { /* Say the datanode is dn1.hadoop.apache.org with ip 192.168.0.5 we use: 1) d.getHostName():d.getPort() to display. Domain and port are stripped if they are common across the nodes. i.e. "dn1" 2) d.getHost():d.Port() for "title". i.e. "192.168.0.5:50010" 3) d.getHostName():d.getInfoPort() for url. i.e. "http://dn1.hadoop.apache.org:50075/..." Note that "d.getHost():d.getPort()" is what DFS clients use to interact with datanodes. */ // from nn_browsedfscontent.jsp: String url = "http://" + d.getHostName() + ":" + d.getInfoPort() + "/browseDirectory.jsp?namenodeInfoPort=" + nnHttpPort + "&dir=" + URLEncoder.encode("/", "UTF-8"); String name = d.getHostName() + ":" + d.getPort(); if ( !name.matches( "\\d+\\.\\d+.\\d+\\.\\d+.*" ) ) name = name.replaceAll( "\\.[^.:]*", "" ); int idx = (suffix != null && name.endsWith( suffix )) ? 
name.indexOf( suffix ) : -1; out.print( rowTxt() + "<td class=\"name\"><a title=\"" + d.getHost() + ":" + d.getPort() + "\" href=\"" + url + "\">" + (( idx > 0 ) ? name.substring(0, idx) : name) + "</a>" + (( alive ) ? "" : "\n") ); if ( !alive ) return; long c = d.getCapacity(); long u = d.getDfsUsed(); long nu = d.getNonDfsUsed(); long r = d.getRemaining(); String percentUsed = StringUtils.limitDecimalTo2(d.getDfsUsedPercent()); String percentRemaining = StringUtils.limitDecimalTo2(d.getRemainingPercent()); String adminState = (d.isDecommissioned() ? "Decommissioned" : (d.isDecommissionInProgress() ? "Decommission In Progress": "In Service")); long timestamp = d.getLastUpdate(); long currentTime = System.currentTimeMillis(); out.print("<td class=\"lastcontact\"> " + ((currentTime - timestamp)/1000) + "<td class=\"adminstate\">" + adminState + "<td align=\"right\" class=\"capacity\">" + StringUtils.limitDecimalTo2(c*1.0/diskBytes) + "<td align=\"right\" class=\"used\">" + StringUtils.limitDecimalTo2(u*1.0/diskBytes) + "<td align=\"right\" class=\"nondfsused\">" + StringUtils.limitDecimalTo2(nu*1.0/diskBytes) + "<td align=\"right\" class=\"remaining\">" + StringUtils.limitDecimalTo2(r*1.0/diskBytes) + "<td align=\"right\" class=\"pcused\">" + percentUsed + "<td class=\"pcused\">" + ServletUtil.percentageGraph( (int)Double.parseDouble(percentUsed) , 100) + "<td align=\"right\" class=\"pcremaining`\">" + percentRemaining + "<td title=" + "\"blocks scheduled : " + d.getBlocksScheduled() + "\" class=\"blocks\">" + d.numBlocks() + "\n"); } public void generateDFSNodesList(JspWriter out, NameNode nn, HttpServletRequest request) throws IOException { ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>(); jspHelper.DFSNodesStatus(live, dead); whatNodes = request.getParameter("whatNodes"); // show only live or only dead nodes sorterField = request.getParameter("sorter/field"); 
sorterOrder = request.getParameter("sorter/order"); if ( sorterField == null ) sorterField = "name"; if ( sorterOrder == null ) sorterOrder = "ASC"; jspHelper.sortNodeList(live, sorterField, sorterOrder); jspHelper.sortNodeList(dead, "name", "ASC"); // Find out common suffix. Should this be before or after the sort? String port_suffix = null; if ( live.size() > 0 ) { String name = live.get(0).getName(); int idx = name.indexOf(':'); if ( idx > 0 ) { port_suffix = name.substring( idx ); } for ( int i=1; port_suffix != null && i < live.size(); i++ ) { if ( live.get(i).getName().endsWith( port_suffix ) == false ) { port_suffix = null; break; } } } counterReset(); try { Thread.sleep(1000); } catch (InterruptedException e) {} if (live.isEmpty() && dead.isEmpty()) { out.print("There are no datanodes in the cluster"); } else { int nnHttpPort = nn.getHttpAddress().getPort(); out.print( "<div id=\"dfsnodetable\"> "); if(whatNodes.equals("LIVE")) { out.print( "<a name=\"LiveNodes\" id=\"title\">" + "Live Datanodes : " + live.size() + "</a>" + "<br><br>\n<table border=1 cellspacing=0>\n" ); counterReset(); if ( live.size() > 0 ) { if ( live.get(0).getCapacity() > 1024 * diskBytes ) { diskBytes *= 1024; diskByteStr = "TB"; } out.print( "<tr class=\"headerRow\"> <th " + NodeHeaderStr("name") + "> Node <th " + NodeHeaderStr("lastcontact") + "> Last <br>Contact <th " + NodeHeaderStr("adminstate") + "> Admin State <th " + NodeHeaderStr("capacity") + "> Configured <br>Capacity (" + diskByteStr + ") <th " + NodeHeaderStr("used") + "> Used <br>(" + diskByteStr + ") <th " + NodeHeaderStr("nondfsused") + "> Non DFS <br>Used (" + diskByteStr + ") <th " + NodeHeaderStr("remaining") + "> Remaining <br>(" + diskByteStr + ") <th " + NodeHeaderStr("pcused") + "> Used <br>(%) <th " + NodeHeaderStr("pcused") + "> Used <br>(%) <th " + NodeHeaderStr("pcremaining") + "> Remaining <br>(%) <th " + NodeHeaderStr("blocks") + "> Blocks\n" ); jspHelper.sortNodeList(live, sorterField, sorterOrder); for ( 
int i=0; i < live.size(); i++ ) { generateNodeData(out, live.get(i), port_suffix, true, nnHttpPort); } } out.print("</table>\n"); - } else { + } else if (whatNodes.equals("DEAD")) { out.print("<br> <a name=\"DeadNodes\" id=\"title\"> " + " Dead Datanodes : " +dead.size() + "</a><br><br>\n"); if ( dead.size() > 0 ) { out.print( "<table border=1 cellspacing=0> <tr id=\"row1\"> " + "<td> Node \n" ); jspHelper.sortNodeList(dead, "name", "ASC"); for ( int i=0; i < dead.size() ; i++ ) { generateNodeData(out, dead.get(i), port_suffix, false, nnHttpPort); } out.print("</table>\n"); } - } - out.print("</div>"); + } else if (whatNodes.equals("DECOMMISSIONING")) { + // Decommissioning Nodes + ArrayList<DatanodeDescriptor> decommissioning = nn.getNamesystem() + .getDecommissioningNodes(); + out.print("<br> <a name=\"DecommissioningNodes\" id=\"title\"> " + + " Decommissioning Datanodes : " + decommissioning.size() + + "</a><br><br>\n"); + if (decommissioning.size() > 0) { + out.print("<table border=1 cellspacing=0> <tr class=\"headRow\"> " + + "<th " + NodeHeaderStr("name") + + "> Node <th " + NodeHeaderStr("lastcontact") + + "> Last <br>Contact <th " + + NodeHeaderStr("underreplicatedblocks") + + "> Under Replicated Blocks <th " + + NodeHeaderStr("blockswithonlydecommissioningreplicas") + + "> Blocks With No <br> Live Replicas <th " + + NodeHeaderStr("underrepblocksinfilesunderconstruction") + + "> Under Replicated Blocks <br> In Files Under Construction" + + " <th " + NodeHeaderStr("timesincedecommissionrequest") + + "> Time Since Decommissioning Started" + ); + jspHelper.sortNodeList(decommissioning, "name", "ASC"); + for (int i = 0; i < decommissioning.size(); i++) { + generateDecommissioningNodeData(out, decommissioning.get(i), + port_suffix, true, nnHttpPort); + } + out.print("</table>\n"); + } + out.print("</div>"); + } } }%> <% NameNode nn = (NameNode)application.getAttribute("name.node"); FSNamesystem fsn = nn.getNamesystem(); String namenodeLabel = 
nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort(); %> <html> <link rel="stylesheet" type="text/css" href="/static/hadoop.css"> <title>Hadoop NameNode <%=namenodeLabel%></title> <body> <h1>NameNode '<%=namenodeLabel%>'</h1> <div id="dfstable"> <table> <tr> <td id="col1"> Started: <td> <%= fsn.getStartTime()%> <tr> <td id="col1"> Version: <td> <%= VersionInfo.getVersion()%>, r<%= VersionInfo.getRevision()%> <tr> <td id="col1"> Compiled: <td> <%= VersionInfo.getDate()%> by <%= VersionInfo.getUser()%> <tr> <td id="col1"> Upgrades: <td> <%= jspHelper.getUpgradeStatusText()%> </table></div><br> <b><a href="/nn_browsedfscontent.jsp">Browse the filesystem</a></b><br> <b><a href="/logs/">Namenode Logs</a></b><br> <b><a href=/dfshealth.jsp> Go back to DFS home</a></b> <hr> <% generateDFSNodesList(out, nn, request); %> <% out.println(ServletUtil.htmlFooter()); %>
jaxlaw/hadoop-common
cc85c61702d9955cb53c39147c30c4fea0980246
HADOOP:6234 from https://issues.apache.org/jira/secure/attachment/12425635/COMMON-6234.rel20.1.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 3a22338..64fa509 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,402 +1,404 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383000 + HADOOP-6234. Add new option dfs.umaskmode to set umask in configuration + to use octal or symbolic instead of decimal. (Jakob Homan via suresh) HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. (Amareshwari Sriramadasu via acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. 
Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. 
(suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. 
Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. 
HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. 
http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. 
http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). 
http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/core/org/apache/hadoop/fs/FsShellPermissions.java b/src/core/org/apache/hadoop/fs/FsShellPermissions.java index b602dda..65d5c70 100644 --- a/src/core/org/apache/hadoop/fs/FsShellPermissions.java +++ b/src/core/org/apache/hadoop/fs/FsShellPermissions.java @@ -1,291 +1,179 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs; import java.io.IOException; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.hadoop.fs.FsShell.CmdHandler; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.ChmodParser; /** * This class is the home for file permissions related commands. * Moved to this seperate class since FsShell is getting too large. */ class FsShellPermissions { /*========== chmod ==========*/ /* The pattern is alsmost as flexible as mode allowed by * chmod shell command. The main restriction is that we recognize only rwxX. * To reduce errors we also enforce 3 digits for octal mode. */ - private static Pattern chmodNormalPattern = - Pattern.compile("\\G\\s*([ugoa]*)([+=-]+)([rwxX]+)([,\\s]*)\\s*"); - private static Pattern chmodOctalPattern = - Pattern.compile("^\\s*[+]?([0-7]{3})\\s*$"); static String CHMOD_USAGE = "-chmod [-R] <MODE[,MODE]... 
| OCTALMODE> PATH..."; + private static ChmodParser pp; + private static class ChmodHandler extends CmdHandler { - private short userMode, groupMode, othersMode; - private char userType = '+', groupType = '+', othersType='+'; - - private void applyNormalPattern(String modeStr, Matcher matcher) - throws IOException { - boolean commaSeperated = false; - - for(int i=0; i < 1 || matcher.end() < modeStr.length(); i++) { - if (i>0 && (!commaSeperated || !matcher.find())) { - patternError(modeStr); - } - - /* groups : 1 : [ugoa]* - * 2 : [+-=] - * 3 : [rwxX]+ - * 4 : [,\s]* - */ - - String str = matcher.group(2); - char type = str.charAt(str.length() - 1); - - boolean user, group, others; - user = group = others = false; - - for(char c : matcher.group(1).toCharArray()) { - switch (c) { - case 'u' : user = true; break; - case 'g' : group = true; break; - case 'o' : others = true; break; - case 'a' : break; - default : throw new RuntimeException("Unexpected"); - } - } - - if (!(user || group || others)) { // same as specifying 'a' - user = group = others = true; - } - - short mode = 0; - for(char c : matcher.group(3).toCharArray()) { - switch (c) { - case 'r' : mode |= 4; break; - case 'w' : mode |= 2; break; - case 'x' : mode |= 1; break; - case 'X' : mode |= 8; break; - default : throw new RuntimeException("Unexpected"); - } - } - - if ( user ) { - userMode = mode; - userType = type; - } - - if ( group ) { - groupMode = mode; - groupType = type; - } - - if ( others ) { - othersMode = mode; - othersType = type; - } - - commaSeperated = matcher.group(4).contains(","); - } - } - - private void applyOctalPattern(String modeStr, Matcher matcher) { - userType = groupType = othersType = '='; - String str = matcher.group(1); - userMode = Short.valueOf(str.substring(0, 1)); - groupMode = Short.valueOf(str.substring(1, 2)); - othersMode = Short.valueOf(str.substring(2, 3)); - } - - private void patternError(String mode) throws IOException { - throw new IOException("chmod : mode '" 
+ mode + - "' does not match the expected pattern."); - } - ChmodHandler(FileSystem fs, String modeStr) throws IOException { super("chmod", fs); - Matcher matcher = null; - - if ((matcher = chmodNormalPattern.matcher(modeStr)).find()) { - applyNormalPattern(modeStr, matcher); - } else if ((matcher = chmodOctalPattern.matcher(modeStr)).matches()) { - applyOctalPattern(modeStr, matcher); - } else { - patternError(modeStr); + try { + pp = new ChmodParser(modeStr); + } catch(IllegalArgumentException iea) { + patternError(iea.getMessage()); } } - private int applyChmod(char type, int mode, int existing, boolean exeOk) { - boolean capX = false; - - if ((mode&8) != 0) { // convert X to x; - capX = true; - mode &= ~8; - mode |= 1; - } - - switch (type) { - case '+' : mode = mode | existing; break; - case '-' : mode = (~mode) & existing; break; - case '=' : break; - default : throw new RuntimeException("Unexpected"); - } - - // if X is specified add 'x' only if exeOk or x was already set. - if (capX && !exeOk && (mode&1) != 0 && (existing&1) == 0) { - mode &= ~1; // remove x - } - - return mode; + private void patternError(String mode) throws IOException { + throw new IOException("chmod : mode '" + mode + + "' does not match the expected pattern."); } - + @Override public void run(FileStatus file, FileSystem srcFs) throws IOException { - FsPermission perms = file.getPermission(); - int existing = perms.toShort(); - boolean exeOk = file.isDir() || (existing & 0111) != 0; - int newperms = ( applyChmod(userType, userMode, - (existing>>>6)&7, exeOk) << 6 | - applyChmod(groupType, groupMode, - (existing>>>3)&7, exeOk) << 3 | - applyChmod(othersType, othersMode, existing&7, exeOk) ); + int newperms = pp.applyNewPermission(file); - if (existing != newperms) { + if (file.getPermission().toShort() != newperms) { try { srcFs.setPermission(file.getPath(), new FsPermission((short)newperms)); } catch (IOException e) { System.err.println(getName() + ": changing permissions of '" + 
file.getPath() + "':" + e.getMessage()); } } } } /*========== chown ==========*/ static private String allowedChars = "[-_./@a-zA-Z0-9]"; ///allows only "allowedChars" above in names for owner and group static private Pattern chownPattern = Pattern.compile("^\\s*(" + allowedChars + "+)?" + "([:](" + allowedChars + "*))?\\s*$"); static private Pattern chgrpPattern = Pattern.compile("^\\s*(" + allowedChars + "+)\\s*$"); static String CHOWN_USAGE = "-chown [-R] [OWNER][:[GROUP]] PATH..."; static String CHGRP_USAGE = "-chgrp [-R] GROUP PATH..."; private static class ChownHandler extends CmdHandler { protected String owner = null; protected String group = null; protected ChownHandler(String cmd, FileSystem fs) { //for chgrp super(cmd, fs); } ChownHandler(FileSystem fs, String ownerStr) throws IOException { super("chown", fs); Matcher matcher = chownPattern.matcher(ownerStr); if (!matcher.matches()) { throw new IOException("'" + ownerStr + "' does not match " + "expected pattern for [owner][:group]."); } owner = matcher.group(1); group = matcher.group(3); if (group != null && group.length() == 0) { group = null; } if (owner == null && group == null) { throw new IOException("'" + ownerStr + "' does not specify " + " onwer or group."); } } @Override public void run(FileStatus file, FileSystem srcFs) throws IOException { //Should we do case insensitive match? String newOwner = (owner == null || owner.equals(file.getOwner())) ? null : owner; String newGroup = (group == null || group.equals(file.getGroup())) ? 
null : group; if (newOwner != null || newGroup != null) { try { srcFs.setOwner(file.getPath(), newOwner, newGroup); } catch (IOException e) { System.err.println(getName() + ": changing ownership of '" + file.getPath() + "':" + e.getMessage()); } } } } /*========== chgrp ==========*/ private static class ChgrpHandler extends ChownHandler { ChgrpHandler(FileSystem fs, String groupStr) throws IOException { super("chgrp", fs); Matcher matcher = chgrpPattern.matcher(groupStr); if (!matcher.matches()) { throw new IOException("'" + groupStr + "' does not match " + "expected pattern for group"); } group = matcher.group(1); } } static void changePermissions(FileSystem fs, String cmd, String argv[], int startIndex, FsShell shell) throws IOException { CmdHandler handler = null; boolean recursive = false; // handle common arguments, currently only "-R" for (; startIndex < argv.length && argv[startIndex].equals("-R"); startIndex++) { recursive = true; } if ( startIndex >= argv.length ) { throw new IOException("Not enough arguments for the command"); } if (cmd.equals("-chmod")) { handler = new ChmodHandler(fs, argv[startIndex++]); } else if (cmd.equals("-chown")) { handler = new ChownHandler(fs, argv[startIndex++]); } else if (cmd.equals("-chgrp")) { handler = new ChgrpHandler(fs, argv[startIndex++]); } shell.runCmdHandler(handler, argv, startIndex, recursive); } } diff --git a/src/core/org/apache/hadoop/fs/permission/ChmodParser.java b/src/core/org/apache/hadoop/fs/permission/ChmodParser.java new file mode 100644 index 0000000..b8ca668 --- /dev/null +++ b/src/core/org/apache/hadoop/fs/permission/ChmodParser.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.permission; + +import java.util.regex.Pattern; + +import org.apache.hadoop.fs.FileStatus; + +/** + * Parse a permission mode passed in from a chmod command and apply that + * mode against an existing file. + */ +public class ChmodParser extends PermissionParser { + private static Pattern chmodOctalPattern = + Pattern.compile("^\\s*[+]?([0-7]{3})\\s*$"); + private static Pattern chmodNormalPattern = + Pattern.compile("\\G\\s*([ugoa]*)([+=-]+)([rwxX]+)([,\\s]*)\\s*"); + + public ChmodParser(String modeStr) throws IllegalArgumentException { + super(modeStr, chmodNormalPattern, chmodOctalPattern); + } + + /** + * Apply permission against specified file and determine what the + * new mode would be + * @param file File against which to apply mode + * @return File's new mode if applied. 
+ */ + public short applyNewPermission(FileStatus file) { + FsPermission perms = file.getPermission(); + int existing = perms.toShort(); + boolean exeOk = file.isDir() || (existing & 0111) != 0; + + return (short)combineModes(existing, exeOk); + } +} diff --git a/src/core/org/apache/hadoop/fs/permission/FsPermission.java b/src/core/org/apache/hadoop/fs/permission/FsPermission.java index d395a20..fdb2616 100644 --- a/src/core/org/apache/hadoop/fs/permission/FsPermission.java +++ b/src/core/org/apache/hadoop/fs/permission/FsPermission.java @@ -1,198 +1,220 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.*; - import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableFactories; +import org.apache.hadoop.io.WritableFactory; + /** * A class for file/directory permissions. 
*/ public class FsPermission implements Writable { + private static final Log LOG = LogFactory.getLog(FsPermission.class); + static final WritableFactory FACTORY = new WritableFactory() { public Writable newInstance() { return new FsPermission(); } }; static { // register a ctor WritableFactories.setFactory(FsPermission.class, FACTORY); } /** Create an immutable {@link FsPermission} object. */ public static FsPermission createImmutable(short permission) { return new FsPermission(permission) { public FsPermission applyUMask(FsPermission umask) { throw new UnsupportedOperationException(); } public void readFields(DataInput in) throws IOException { throw new UnsupportedOperationException(); } }; } //POSIX permission style private FsAction useraction = null; private FsAction groupaction = null; private FsAction otheraction = null; private FsPermission() {} /** * Construct by the given {@link FsAction}. * @param u user action * @param g group action * @param o other action */ public FsPermission(FsAction u, FsAction g, FsAction o) {set(u, g, o);} /** * Construct by the given mode. * @param mode * @see #toShort() */ public FsPermission(short mode) { fromShort(mode); } /** * Copy constructor * * @param other other permission */ public FsPermission(FsPermission other) { this.useraction = other.useraction; this.groupaction = other.groupaction; this.otheraction = other.otheraction; } /** Return user {@link FsAction}. */ public FsAction getUserAction() {return useraction;} /** Return group {@link FsAction}. */ public FsAction getGroupAction() {return groupaction;} /** Return other {@link FsAction}. 
*/ public FsAction getOtherAction() {return otheraction;} private void set(FsAction u, FsAction g, FsAction o) { useraction = u; groupaction = g; otheraction = o; } public void fromShort(short n) { FsAction[] v = FsAction.values(); set(v[(n >>> 6) & 7], v[(n >>> 3) & 7], v[n & 7]); } /** {@inheritDoc} */ public void write(DataOutput out) throws IOException { out.writeShort(toShort()); } /** {@inheritDoc} */ public void readFields(DataInput in) throws IOException { fromShort(in.readShort()); } /** * Create and initialize a {@link FsPermission} from {@link DataInput}. */ public static FsPermission read(DataInput in) throws IOException { FsPermission p = new FsPermission(); p.readFields(in); return p; } /** * Encode the object to a short. */ public short toShort() { int s = (useraction.ordinal() << 6) | (groupaction.ordinal() << 3) | otheraction.ordinal(); return (short)s; } /** {@inheritDoc} */ public boolean equals(Object obj) { if (obj instanceof FsPermission) { FsPermission that = (FsPermission)obj; return this.useraction == that.useraction && this.groupaction == that.groupaction && this.otheraction == that.otheraction; } return false; } /** {@inheritDoc} */ public int hashCode() {return toShort();} /** {@inheritDoc} */ public String toString() { return useraction.SYMBOL + groupaction.SYMBOL + otheraction.SYMBOL; } /** Apply a umask to this permission and return a new one */ public FsPermission applyUMask(FsPermission umask) { return new FsPermission(useraction.and(umask.useraction.not()), groupaction.and(umask.groupaction.not()), otheraction.and(umask.otheraction.not())); } /** umask property label */ - public static final String UMASK_LABEL = "dfs.umask"; + public static final String DEPRECATED_UMASK_LABEL = "dfs.umask"; + public static final String UMASK_LABEL = "dfs.umaskmode"; public static final int DEFAULT_UMASK = 0022; /** Get the user file creation mask (umask) */ public static FsPermission getUMask(Configuration conf) { int umask = DEFAULT_UMASK; - if 
(conf != null) { - umask = conf.getInt(UMASK_LABEL, DEFAULT_UMASK); + + // Attempt to pull value from configuration, trying new key first and then + // deprecated key, along with a warning, if not present + if(conf != null) { + String confUmask = conf.get(UMASK_LABEL); + if(confUmask != null) { // UMASK_LABEL is set + umask = new UmaskParser(confUmask).getUMask(); + } else { // check for deprecated key label + int oldStyleValue = conf.getInt(DEPRECATED_UMASK_LABEL, Integer.MIN_VALUE); + if(oldStyleValue != Integer.MIN_VALUE) { // Property was set with old key + LOG.warn(DEPRECATED_UMASK_LABEL + " configuration key is deprecated. " + + "Convert to " + UMASK_LABEL + ", using octal or symbolic umask " + + "specifications."); + umask = oldStyleValue; + } + } } + return new FsPermission((short)umask); } /** Set the user file creation mask (umask) */ public static void setUMask(Configuration conf, FsPermission umask) { conf.setInt(UMASK_LABEL, umask.toShort()); } /** Get the default permission. */ public static FsPermission getDefault() { return new FsPermission((short)0777); } /** * Create a FsPermission from a Unix symbolic permission string * @param unixSymbolicPermission e.g. "-rw-rw-rw-" */ public static FsPermission valueOf(String unixSymbolicPermission) { if (unixSymbolicPermission == null) { return null; } else if (unixSymbolicPermission.length() != 10) { throw new IllegalArgumentException("length != 10(unixSymbolicPermission=" + unixSymbolicPermission + ")"); } int n = 0; for(int i = 1; i < unixSymbolicPermission.length(); i++) { n = n << 1; char c = unixSymbolicPermission.charAt(i); n += (c == '-' || c == 'T' || c == 'S') ? 
0: 1; } return new FsPermission((short)n); } } diff --git a/src/core/org/apache/hadoop/fs/permission/PermissionParser.java b/src/core/org/apache/hadoop/fs/permission/PermissionParser.java new file mode 100644 index 0000000..a4c5245 --- /dev/null +++ b/src/core/org/apache/hadoop/fs/permission/PermissionParser.java @@ -0,0 +1,178 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.permission; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Base class for parsing either chmod permissions or umask permissions. + * Includes common code needed by either operation as implemented in + * UmaskParser and ChmodParser classes. 
+ */ +class PermissionParser { + protected short userMode; + protected short groupMode; + protected short othersMode; + protected char userType = '+'; + protected char groupType = '+'; + protected char othersType = '+'; + + /** + * Begin parsing permission stored in modeStr + * + * @param modeStr Permission mode, either octal or symbolic + * @param symbolic Use-case specific symbolic pattern to match against + * @throws IllegalArgumentException if unable to parse modeStr + */ + public PermissionParser(String modeStr, Pattern symbolic, Pattern octal) + throws IllegalArgumentException { + Matcher matcher = null; + + if ((matcher = symbolic.matcher(modeStr)).find()) { + applyNormalPattern(modeStr, matcher); + } else if ((matcher = octal.matcher(modeStr)).matches()) { + applyOctalPattern(modeStr, matcher); + } else { + throw new IllegalArgumentException(modeStr); + } + } + + private void applyNormalPattern(String modeStr, Matcher matcher) { + // Are there multiple permissions stored in one chmod? 
+ boolean commaSeperated = false; + + for (int i = 0; i < 1 || matcher.end() < modeStr.length(); i++) { + if (i > 0 && (!commaSeperated || !matcher.find())) { + throw new IllegalArgumentException(modeStr); + } + + /* + * groups : 1 : [ugoa]* 2 : [+-=] 3 : [rwxX]+ 4 : [,\s]* + */ + + String str = matcher.group(2); + char type = str.charAt(str.length() - 1); + + boolean user, group, others; + user = group = others = false; + + for (char c : matcher.group(1).toCharArray()) { + switch (c) { + case 'u': + user = true; + break; + case 'g': + group = true; + break; + case 'o': + others = true; + break; + case 'a': + break; + default: + throw new RuntimeException("Unexpected"); + } + } + + if (!(user || group || others)) { // same as specifying 'a' + user = group = others = true; + } + + short mode = 0; + + for (char c : matcher.group(3).toCharArray()) { + switch (c) { + case 'r': + mode |= 4; + break; + case 'w': + mode |= 2; + break; + case 'x': + mode |= 1; + break; + case 'X': + mode |= 8; + break; + default: + throw new RuntimeException("Unexpected"); + } + } + + if (user) { + userMode = mode; + userType = type; + } + + if (group) { + groupMode = mode; + groupType = type; + } + + if (others) { + othersMode = mode; + othersType = type; + } + + commaSeperated = matcher.group(4).contains(","); + } + } + + private void applyOctalPattern(String modeStr, Matcher matcher) { + userType = groupType = othersType = '='; + + String str = matcher.group(1); + userMode = Short.valueOf(str.substring(0, 1)); + groupMode = Short.valueOf(str.substring(1, 2)); + othersMode = Short.valueOf(str.substring(2, 3)); + } + + protected int combineModes(int existing, boolean exeOk) { + return combineModeSegments(userType, userMode, + (existing>>>6)&7, exeOk) << 6 | + combineModeSegments(groupType, groupMode, + (existing>>>3)&7, exeOk) << 3 | + combineModeSegments(othersType, othersMode, existing&7, exeOk); + } + + protected int combineModeSegments(char type, int mode, + int existing, boolean 
exeOk) { + boolean capX = false; + + if ((mode&8) != 0) { // convert X to x; + capX = true; + mode &= ~8; + mode |= 1; + } + + switch (type) { + case '+' : mode = mode | existing; break; + case '-' : mode = (~mode) & existing; break; + case '=' : break; + default : throw new RuntimeException("Unexpected"); + } + + // if X is specified add 'x' only if exeOk or x was already set. + if (capX && !exeOk && (mode&1) != 0 && (existing&1) == 0) { + mode &= ~1; // remove x + } + + return mode; + } +} diff --git a/src/core/org/apache/hadoop/fs/permission/UmaskParser.java b/src/core/org/apache/hadoop/fs/permission/UmaskParser.java new file mode 100644 index 0000000..ea894c5 --- /dev/null +++ b/src/core/org/apache/hadoop/fs/permission/UmaskParser.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.permission; + +import java.util.regex.Pattern; + +/** + * Parse umask value provided as a string, either in octal or symbolic + * format and return it as a short value. Umask values are slightly + * different from standard modes as they cannot specify X. 
+ */ +class UmaskParser extends PermissionParser { + private static Pattern chmodOctalPattern = + Pattern.compile("^\\s*[+]?([0-7]{3})\\s*$"); + private static Pattern umaskSymbolicPattern = /* not allow X */ + Pattern.compile("\\G\\s*([ugoa]*)([+=-]+)([rwx]+)([,\\s]*)\\s*"); + final short umaskMode; + + public UmaskParser(String modeStr) throws IllegalArgumentException { + super(modeStr, umaskSymbolicPattern, chmodOctalPattern); + + umaskMode = (short)combineModes(0, false); + } + + public short getUMask() { + return umaskMode; + } +} diff --git a/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml b/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml index 3e00ebb..8899c37 100644 --- a/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml +++ b/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml @@ -1,191 +1,191 @@ <?xml version="1.0"?> <!-- Copyright 2008 The Apache Software Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN" "http://forrest.apache.org/dtd/document-v20.dtd"> <document> <header> <title> HDFS Permissions Guide </title> </header> <body> <section> <title>Overview</title> <p> The Hadoop Distributed File System (HDFS) implements a permissions model for files and directories that shares much of the POSIX model. Each file and directory is associated with an <em>owner</em> and a <em>group</em>. 
The file or directory has separate permissions for the user that is the owner, for other users that are members of the group, and for all other users. For files, the <em>r</em> permission is required to read the file, and the <em>w</em> permission is required to write or append to the file. For directories, the <em>r</em> permission is required to list the contents of the directory, the <em>w</em> permission is required to create or delete files or directories, and the <em>x</em> permission is required to access a child of the directory. In contrast to the POSIX model, there are no <em>sticky</em>, <em>setuid</em> or <em>setgid</em> bits for files as there is no notion of executable files. For directories, there no <em>sticky</em>, <em>setuid</em> or <em>setgid</em> bits directory as a simplification. Collectively, the permissions of a file or directory are its <em>mode</em>. In general, Unix customs for representing and displaying modes will be used, including the use of octal numbers in this description. When a file or directory is created, its owner is the user identity of the client process, and its group is the group of the parent directory (the BSD rule). </p> <p> Each client process that accesses HDFS has a two-part identity composed of the <em>user name</em>, and <em>groups list</em>. Whenever HDFS must do a permissions check for a file or directory <code>foo</code> accessed by a client process, </p> <ul> <li> If the user name matches the owner of <code>foo</code>, then the owner permissions are tested; </li> <li> Else if the group of <code>foo</code> matches any of member of the groups list, then the group permissions are tested; </li> <li> Otherwise the other permissions of <code>foo</code> are tested. </li> </ul> <p> If a permissions check fails, the client operation fails. </p> </section> <section><title>User Identity</title> <p> In this release of Hadoop the identity of a client process is just whatever the host operating system says it is. 
For Unix-like systems, </p> <ul> <li> The user name is the equivalent of <code>`whoami`</code>; </li> <li> The group list is the equivalent of <code>`bash -c groups`</code>. </li> </ul> <p> In the future there will be other ways of establishing user identity (think Kerberos, LDAP, and others). There is no expectation that this first method is secure in protecting one user from impersonating another. This user identity mechanism combined with the permissions model allows a cooperative community to share file system resources in an organized fashion. </p> <p> In any case, the user identity mechanism is extrinsic to HDFS itself. There is no provision within HDFS for creating user identities, establishing groups, or processing user credentials. </p> </section> <section> <title>Understanding the Implementation</title> <p> Each file or directory operation passes the full path name to the name node, and the permissions checks are applied along the path for each operation. The client framework will implicitly associate the user identity with the connection to the name node, reducing the need for changes to the existing client API. It has always been the case that when one operation on a file succeeds, the operation might fail when repeated because the file, or some directory on the path, no longer exists. For instance, when the client first begins reading a file, it makes a first request to the name node to discover the location of the first blocks of the file. A second request made to find additional blocks may fail. On the other hand, deleting a file does not revoke access by a client that already knows the blocks of the file. With the addition of permissions, a client's access to a file may be withdrawn between requests. Again, changing permissions does not revoke the access of a client that already knows the file's blocks. </p> <p> The map-reduce framework delegates the user identity by passing strings without special concern for confidentiality. 
The owner and group of a file or directory are stored as strings; there is no conversion from user and group identity numbers as is conventional in Unix. </p> <p> The permissions features of this release did not require any changes to the behavior of data nodes. Blocks on the data nodes do not have any of the <em>Hadoop</em> ownership or permissions attributes associated with them. </p> </section> <section> <title>Changes to the File System API</title> <p> All methods that use a path parameter will throw <code>AccessControlException</code> if permission checking fails. </p> <p>New methods:</p> <ul> <li> <code>public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException;</code> </li> <li> <code>public boolean mkdirs(Path f, FsPermission permission) throws IOException;</code> </li> <li> <code>public void setPermission(Path p, FsPermission permission) throws IOException;</code> </li> <li> <code>public void setOwner(Path p, String username, String groupname) throws IOException;</code> </li> <li> <code>public FileStatus getFileStatus(Path f) throws IOException;</code> will additionally return the user, group and mode associated with the path. </li> </ul> <p> The mode of a new file or directory is restricted my the <code>umask</code> set as a configuration parameter. When the existing <code>create(path, &hellip;)</code> method (<em>without</em> the permission parameter) is used, the mode of the new file is <code>666&thinsp;&amp;&thinsp;^umask</code>. When the new <code>create(path, </code><em>permission</em><code>, &hellip;)</code> method (<em>with</em> the permission parameter <em>P</em>) is used, the mode of the new file is <code>P&thinsp;&amp;&thinsp;^umask&thinsp;&amp;&thinsp;666</code>. 
When a new directory is created with the existing <code>mkdirs(path)</code> method (<em>without</em> the permission parameter), the mode of the new directory is <code>777&thinsp;&amp;&thinsp;^umask</code>. When the new <code>mkdirs(path, </code><em>permission</em> <code>)</code> method (<em>with</em> the permission parameter <em>P</em>) is used, the mode of new directory is <code>P&thinsp;&amp;&thinsp;^umask&thinsp;&amp;&thinsp;777</code>. </p> </section> <section> <title>Changes to the Application Shell</title> <p>New operations:</p> <dl> <dt><code>chmod [-R]</code> <em>mode file &hellip;</em></dt> <dd> Only the owner of a file or the super-user is permitted to change the mode of a file. </dd> <dt><code>chgrp [-R]</code> <em>group file &hellip;</em></dt> <dd> The user invoking <code>chgrp</code> must belong to the specified group and be the owner of the file, or be the super-user. </dd> <dt><code>chown [-R]</code> <em>[owner][:[group]] file &hellip;</em></dt> <dd> The owner of a file may only be altered by a super-user. </dd> <dt><code>ls </code> <em>file &hellip;</em></dt><dd></dd> <dt><code>lsr </code> <em>file &hellip;</em></dt> <dd> The output is reformatted to display the owner, group and mode. </dd> </dl></section> <section> <title>The Super-User</title> <p> The super-user is the user with the same identity as name node process itself. Loosely, if you started the name node, then you are the super-user. The super-user can do anything in that permissions checks never fail for the super-user. There is no persistent notion of who <em>was</em> the super-user; when the name node is started the process identity determines who is the super-user <em>for now</em>. The HDFS super-user does not have to be the super-user of the name node host, nor is it necessary that all clusters have the same super-user. Also, an experimenter running HDFS on a personal workstation, conveniently becomes that installation's super-user without any configuration. 
</p> <p> In addition, the administrator my identify a distinguished group using a configuration parameter. If set, members of this group are also super-users. </p> </section> <section> <title>The Web Server</title> <p> The identity of the web server is a configuration parameter. That is, the name node has no notion of the identity of the <em>real</em> user, but the web server behaves as if it has the identity (user and groups) of a user chosen by the administrator. Unless the chosen identity matches the super-user, parts of the name space may be invisible to the web server.</p> </section> <section> <title>On-line Upgrade</title> <p> If a cluster starts with a version 0.15 data set (<code>fsimage</code>), all files and directories will have owner <em>O</em>, group <em>G</em>, and mode <em>M</em>, where <em>O</em> and <em>G</em> are the user and group identity of the super-user, and <em>M</em> is a configuration parameter. </p> </section> <section> <title>Configuration Parameters</title> <dl> <dt><code>dfs.permissions = true </code></dt> <dd> If <code>yes</code> use the permissions system as described here. If <code>no</code>, permission <em>checking</em> is turned off, but all other behavior is unchanged. Switching from one parameter value to the other does not change the mode, owner or group of files or directories. <p> </p> Regardless of whether permissions are on or off, <code>chmod</code>, <code>chgrp</code> and <code>chown</code> <em>always</em> check permissions. These functions are only useful in the permissions context, and so there is no backwards compatibility issue. Furthermore, this allows administrators to reliably set owners and permissions in advance of turning on regular permissions checking. </dd> <dt><code>dfs.web.ugi = webuser,webgroup</code></dt> <dd> The user name to be used by the web server. Setting this to the name of the super-user allows any web client to see everything. 
Changing this to an otherwise unused identity allows web clients to see only those things visible using "other" permissions. Additional groups may be added to the comma-separated list. </dd> <dt><code>dfs.permissions.supergroup = supergroup</code></dt> <dd> The name of the group of super-users. </dd> <dt><code>dfs.upgrade.permission = 777</code></dt> <dd> The choice of initial mode during upgrade. The <em>x</em> permission is <em>never</em> set for files. For configuration files, the decimal value <em>511<sub>10</sub></em> may be used. </dd> - <dt><code>dfs.umask = 022</code></dt> + <dt><code>dfs.umaskmode = 022</code></dt> <dd> - The <code>umask</code> used when creating files and directories. For configuration files, the decimal value <em>18<sub>10</sub></em> may be used. + The <code>umask</code> used when creating files and directories. May be specified either via three octal digits or symbolic values, with the same constraints as the dfs chmod command. </dd> </dl> </section> </body> </document> diff --git a/src/test/org/apache/hadoop/fs/permission/TestFsPermission.java b/src/test/org/apache/hadoop/fs/permission/TestFsPermission.java index a563740..8107efe 100644 --- a/src/test/org/apache/hadoop/fs/permission/TestFsPermission.java +++ b/src/test/org/apache/hadoop/fs/permission/TestFsPermission.java @@ -1,70 +1,130 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; + import junit.framework.TestCase; import static org.apache.hadoop.fs.permission.FsAction.*; public class TestFsPermission extends TestCase { public void testFsAction() { //implies for(FsAction a : FsAction.values()) { assertTrue(ALL.implies(a)); } for(FsAction a : FsAction.values()) { assertTrue(a == NONE? NONE.implies(a): !NONE.implies(a)); } for(FsAction a : FsAction.values()) { assertTrue(a == READ_EXECUTE || a == READ || a == EXECUTE || a == NONE? READ_EXECUTE.implies(a): !READ_EXECUTE.implies(a)); } //masks assertEquals(EXECUTE, EXECUTE.and(READ_EXECUTE)); assertEquals(READ, READ.and(READ_EXECUTE)); assertEquals(NONE, WRITE.and(READ_EXECUTE)); assertEquals(READ, READ_EXECUTE.and(READ_WRITE)); assertEquals(NONE, READ_EXECUTE.and(WRITE)); assertEquals(WRITE_EXECUTE, ALL.and(WRITE_EXECUTE)); } public void testFsPermission() { for(short s = 0; s < (1<<9); s++) { assertEquals(s, new FsPermission(s).toShort()); } String symbolic = "-rwxrwxrwx"; StringBuilder b = new StringBuilder("-123456789"); for(int i = 0; i < (1<<9); i++) { for(int j = 1; j < 10; j++) { b.setCharAt(j, '-'); } String binary = Integer.toBinaryString(i); int len = binary.length(); for(int j = 0; j < len; j++) { if (binary.charAt(j) == '1') { int k = 9 - (len - 1 - j); b.setCharAt(k, symbolic.charAt(k)); } } assertEquals(i, FsPermission.valueOf(b.toString()).toShort()); } } + + public void testUMaskParser() throws IOException { + Configuration conf = new Configuration(); + + // 
Ensure that we get the right octal values back for all legal values + for(FsAction u : FsAction.values()) { + for(FsAction g : FsAction.values()) { + for(FsAction o : FsAction.values()) { + FsPermission f = new FsPermission(u, g, o); + String asOctal = String.format("%1$03o", f.toShort()); + conf.set(FsPermission.UMASK_LABEL, asOctal); + FsPermission fromConf = FsPermission.getUMask(conf); + assertEquals(f, fromConf); + } + } + } + } + + public void TestSymbolicUmasks() { + Configuration conf = new Configuration(); + + // Test some symbolic settings Setting Octal result + String [] symbolic = new String [] { "a+rw", "666", + "u=x,g=r,o=w", "142", + "u=x", "100" }; + + for(int i = 0; i < symbolic.length; i += 2) { + conf.set(FsPermission.UMASK_LABEL, symbolic[i]); + short val = Short.valueOf(symbolic[i + 1], 8); + assertEquals(val, FsPermission.getUMask(conf).toShort()); + } + } + + public void testBadUmasks() { + Configuration conf = new Configuration(); + + for(String b : new String [] {"1777", "22", "99", "foo", ""}) { + conf.set(FsPermission.UMASK_LABEL, b); + try { + FsPermission.getUMask(conf); + fail("Shouldn't have been able to parse bad umask"); + } catch(IllegalArgumentException iae) { + assertEquals(iae.getMessage(), b); + } + } + } + + // Ensure that when the deprecated decimal umask key is used, it is correctly + // parsed as such and converted correctly to an FsPermission value + public void testDeprecatedUmask() { + Configuration conf = new Configuration(); + conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "302"); // 302 = 0456 + FsPermission umask = FsPermission.getUMask(conf); + + assertEquals(0456, umask.toShort()); + } }
jaxlaw/hadoop-common
cd26bf531279347155898611cab392fe401aff46
MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus easing load on it. Amareshwari Sriramadasu.
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 1e06804..3a22338 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,399 +1,402 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl HDFS-625. Fix NullPointerException thrown from ListPathServlet. Contributed by Suresh Srinivas. HADOOP-6343. Log unexpected throwable object caught in RPC. Contributed by Jitendra Nath Pandey MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) + MAPREDUCE-1219. Fixed JobTracker to not collect per-job metrics, thus + easing load on it. (Amareshwari Sriramadasu via acmurthy) + yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. 
Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. 
(Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. 
Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. 
MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. 
http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. 
http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. 
Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. 
(Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/mapred/org/apache/hadoop/mapred/JobTrackerMetricsInst.java b/src/mapred/org/apache/hadoop/mapred/JobTrackerMetricsInst.java index e5da65a..ec54bcc 100644 --- a/src/mapred/org/apache/hadoop/mapred/JobTrackerMetricsInst.java +++ b/src/mapred/org/apache/hadoop/mapred/JobTrackerMetricsInst.java @@ -1,408 +1,402 @@ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.mapred; import org.apache.hadoop.metrics.MetricsContext; import org.apache.hadoop.metrics.MetricsRecord; import org.apache.hadoop.metrics.MetricsUtil; import org.apache.hadoop.metrics.Updater; import org.apache.hadoop.metrics.jvm.JvmMetrics; class JobTrackerMetricsInst extends JobTrackerInstrumentation implements Updater { private final MetricsRecord metricsRecord; private int numMapTasksLaunched = 0; private int numMapTasksCompleted = 0; private int numMapTasksFailed = 0; private int numReduceTasksLaunched = 0; private int numReduceTasksCompleted = 0; private int numReduceTasksFailed = 0; private int numJobsSubmitted = 0; private int numJobsCompleted = 0; private int numWaitingMaps = 0; private int numWaitingReduces = 0; //Cluster status fields. private volatile int numMapSlots = 0; private volatile int numReduceSlots = 0; private int numBlackListedMapSlots = 0; private int numBlackListedReduceSlots = 0; private int numReservedMapSlots = 0; private int numReservedReduceSlots = 0; private int numOccupiedMapSlots = 0; private int numOccupiedReduceSlots = 0; private int numJobsFailed = 0; private int numJobsKilled = 0; private int numJobsPreparing = 0; private int numJobsRunning = 0; private int numRunningMaps = 0; private int numRunningReduces = 0; private int numMapTasksKilled = 0; private int numReduceTasksKilled = 0; private int numTrackers = 0; private int numTrackersBlackListed = 0; private int numTrackersDecommissioned = 0; public JobTrackerMetricsInst(JobTracker tracker, JobConf conf) { super(tracker, conf); String sessionId = conf.getSessionId(); // Initiate JVM Metrics JvmMetrics.init("JobTracker", sessionId); // Create a record for map-reduce metrics MetricsContext context = MetricsUtil.getContext("mapred"); metricsRecord = MetricsUtil.createRecord(context, "jobtracker"); metricsRecord.setTag("sessionId", sessionId); context.registerUpdater(this); } /** * Since this object is a registered updater, this method will be called * 
periodically, e.g. every 5 seconds. */ public void doUpdates(MetricsContext unused) { synchronized (this) { metricsRecord.setMetric("map_slots", numMapSlots); metricsRecord.setMetric("reduce_slots", numReduceSlots); metricsRecord.incrMetric("blacklisted_maps", numBlackListedMapSlots); metricsRecord.incrMetric("blacklisted_reduces", numBlackListedReduceSlots); metricsRecord.incrMetric("maps_launched", numMapTasksLaunched); metricsRecord.incrMetric("maps_completed", numMapTasksCompleted); metricsRecord.incrMetric("maps_failed", numMapTasksFailed); metricsRecord.incrMetric("reduces_launched", numReduceTasksLaunched); metricsRecord.incrMetric("reduces_completed", numReduceTasksCompleted); metricsRecord.incrMetric("reduces_failed", numReduceTasksFailed); metricsRecord.incrMetric("jobs_submitted", numJobsSubmitted); metricsRecord.incrMetric("jobs_completed", numJobsCompleted); metricsRecord.incrMetric("waiting_maps", numWaitingMaps); metricsRecord.incrMetric("waiting_reduces", numWaitingReduces); metricsRecord.incrMetric("reserved_map_slots", numReservedMapSlots); metricsRecord.incrMetric("reserved_reduce_slots", numReservedReduceSlots); metricsRecord.incrMetric("occupied_map_slots", numOccupiedMapSlots); metricsRecord.incrMetric("occupied_reduce_slots", numOccupiedReduceSlots); metricsRecord.incrMetric("jobs_failed", numJobsFailed); metricsRecord.incrMetric("jobs_killed", numJobsKilled); metricsRecord.incrMetric("jobs_preparing", numJobsPreparing); metricsRecord.incrMetric("jobs_running", numJobsRunning); metricsRecord.incrMetric("running_maps", numRunningMaps); metricsRecord.incrMetric("running_reduces", numRunningReduces); metricsRecord.incrMetric("maps_killed", numMapTasksKilled); metricsRecord.incrMetric("reduces_killed", numReduceTasksKilled); metricsRecord.incrMetric("trackers", numTrackers); metricsRecord.incrMetric("trackers_blacklisted", numTrackersBlackListed); metricsRecord.setMetric("trackers_decommissioned", numTrackersDecommissioned); numMapTasksLaunched = 
0; numMapTasksCompleted = 0; numMapTasksFailed = 0; numReduceTasksLaunched = 0; numReduceTasksCompleted = 0; numReduceTasksFailed = 0; numJobsSubmitted = 0; numJobsCompleted = 0; numWaitingMaps = 0; numWaitingReduces = 0; numBlackListedMapSlots = 0; numBlackListedReduceSlots = 0; numReservedMapSlots = 0; numReservedReduceSlots = 0; numOccupiedMapSlots = 0; numOccupiedReduceSlots = 0; numJobsFailed = 0; numJobsKilled = 0; numJobsPreparing = 0; numJobsRunning = 0; numRunningMaps = 0; numRunningReduces = 0; numMapTasksKilled = 0; numReduceTasksKilled = 0; numTrackers = 0; numTrackersBlackListed = 0; } metricsRecord.update(); - - if (tracker != null) { - for (JobInProgress jip : tracker.getRunningJobs()) { - jip.updateMetrics(); - } - } } @Override public synchronized void launchMap(TaskAttemptID taskAttemptID) { ++numMapTasksLaunched; decWaitingMaps(taskAttemptID.getJobID(), 1); } @Override public synchronized void completeMap(TaskAttemptID taskAttemptID) { ++numMapTasksCompleted; } @Override public synchronized void failedMap(TaskAttemptID taskAttemptID) { ++numMapTasksFailed; addWaitingMaps(taskAttemptID.getJobID(), 1); } @Override public synchronized void launchReduce(TaskAttemptID taskAttemptID) { ++numReduceTasksLaunched; decWaitingReduces(taskAttemptID.getJobID(), 1); } @Override public synchronized void completeReduce(TaskAttemptID taskAttemptID) { ++numReduceTasksCompleted; } @Override public synchronized void failedReduce(TaskAttemptID taskAttemptID) { ++numReduceTasksFailed; addWaitingReduces(taskAttemptID.getJobID(), 1); } @Override public synchronized void submitJob(JobConf conf, JobID id) { ++numJobsSubmitted; } @Override public synchronized void completeJob(JobConf conf, JobID id) { ++numJobsCompleted; } @Override public synchronized void addWaitingMaps(JobID id, int task) { numWaitingMaps += task; } @Override public synchronized void decWaitingMaps(JobID id, int task) { numWaitingMaps -= task; } @Override public synchronized void addWaitingReduces(JobID 
id, int task) { numWaitingReduces += task; } @Override public synchronized void decWaitingReduces(JobID id, int task){ numWaitingReduces -= task; } @Override public synchronized void setMapSlots(int slots) { numMapSlots = slots; } @Override public synchronized void setReduceSlots(int slots) { numReduceSlots = slots; } @Override public synchronized void addBlackListedMapSlots(int slots){ numBlackListedMapSlots += slots; } @Override public synchronized void decBlackListedMapSlots(int slots){ numBlackListedMapSlots -= slots; } @Override public synchronized void addBlackListedReduceSlots(int slots){ numBlackListedReduceSlots += slots; } @Override public synchronized void decBlackListedReduceSlots(int slots){ numBlackListedReduceSlots -= slots; } @Override public synchronized void addReservedMapSlots(int slots) { numReservedMapSlots += slots; } @Override public synchronized void decReservedMapSlots(int slots) { numReservedMapSlots -= slots; } @Override public synchronized void addReservedReduceSlots(int slots) { numReservedReduceSlots += slots; } @Override public synchronized void decReservedReduceSlots(int slots) { numReservedReduceSlots -= slots; } @Override public synchronized void addOccupiedMapSlots(int slots) { numOccupiedMapSlots += slots; } @Override public synchronized void decOccupiedMapSlots(int slots) { numOccupiedMapSlots -= slots; } @Override public synchronized void addOccupiedReduceSlots(int slots) { numOccupiedReduceSlots += slots; } @Override public synchronized void decOccupiedReduceSlots(int slots) { numOccupiedReduceSlots -= slots; } @Override public synchronized void failedJob(JobConf conf, JobID id) { numJobsFailed++; } @Override public synchronized void killedJob(JobConf conf, JobID id) { numJobsKilled++; } @Override public synchronized void addPrepJob(JobConf conf, JobID id) { numJobsPreparing++; } @Override public synchronized void decPrepJob(JobConf conf, JobID id) { numJobsPreparing--; } @Override public synchronized void 
addRunningJob(JobConf conf, JobID id) { numJobsRunning++; } @Override public synchronized void decRunningJob(JobConf conf, JobID id) { numJobsRunning--; } @Override public synchronized void addRunningMaps(int task) { numRunningMaps += task; } @Override public synchronized void decRunningMaps(int task) { numRunningMaps -= task; } @Override public synchronized void addRunningReduces(int task) { numRunningReduces += task; } @Override public synchronized void decRunningReduces(int task) { numRunningReduces -= task; } @Override public synchronized void killedMap(TaskAttemptID taskAttemptID) { numMapTasksKilled++; } @Override public synchronized void killedReduce(TaskAttemptID taskAttemptID) { numReduceTasksKilled++; } @Override public synchronized void addTrackers(int trackers) { numTrackers += trackers; } @Override public synchronized void decTrackers(int trackers) { numTrackers -= trackers; } @Override public synchronized void addBlackListedTrackers(int trackers) { numTrackersBlackListed += trackers; } @Override public synchronized void decBlackListedTrackers(int trackers) { numTrackersBlackListed -= trackers; } @Override public synchronized void setDecommissionedTrackers(int trackers) { numTrackersDecommissioned = trackers; } }
jaxlaw/hadoop-common
6c70692ce2683295e214621aacee666a94f429c9
Updating the build.xml to 'genericize' the version string and YAHOO-CHANGES.txt to include the correct list of fixes. Going forward build versions will be managed by the build.properties file.
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 391bc9b..1e06804 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,393 +1,399 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. -yahoo-hadoop-0.20.1-3092118005: +yahoo-hadoop-0.20.1-3195383000 HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota and suggests using -skpTrash, when moving to trash fails. (Boris Shkolnik via suresh) HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl + HDFS-625. Fix NullPointerException thrown from ListPathServlet. + Contributed by Suresh Srinivas. + + HADOOP-6343. Log unexpected throwable object caught in RPC. + Contributed by Jitendra Nath Pandey + MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. 
Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. 
Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. 
Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. 
MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. 
http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. 
http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. 
Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. 
(Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/build.xml b/build.xml index 2f23584..e64ff2d 100644 --- a/build.xml +++ b/build.xml @@ -1,542 +1,542 @@ <?xml version="1.0"?> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <project name="Hadoop" default="compile" xmlns:ivy="antlib:org.apache.ivy.ant"> <!-- Load all the default properties, and any the user wants --> <!-- to contribute (without having to type -D or edit this file --> <property file="${user.home}/build.properties" /> <property file="${basedir}/build.properties" /> <property name="Name" value="Yahoo! 
Distribution of Hadoop"/> <property name="name" value="hadoop"/> - <property name="version" value="0.20.1.3092118005"/> + <property name="version" value="0.20.1-dev"/> <property name="final.name" value="${name}-${version}"/> <property name="year" value="2009"/> <property name="src.dir" value="${basedir}/src"/> <property name="core.src.dir" value="${src.dir}/core"/> <property name="mapred.src.dir" value="${src.dir}/mapred"/> <property name="hdfs.src.dir" value="${src.dir}/hdfs"/> <property name="native.src.dir" value="${basedir}/src/native"/> <property name="examples.dir" value="${basedir}/src/examples"/> <property name="anttasks.dir" value="${basedir}/src/ant"/> <property name="lib.dir" value="${basedir}/lib"/> <property name="conf.dir" value="${basedir}/conf"/> <property name="contrib.dir" value="${basedir}/src/contrib"/> <property name="docs.src" value="${basedir}/src/docs"/> <property name="src.docs.cn" value="${basedir}/src/docs/cn"/> <property name="changes.src" value="${docs.src}/changes"/> <property name="c++.src" value="${basedir}/src/c++"/> <property name="c++.utils.src" value="${c++.src}/utils"/> <property name="c++.pipes.src" value="${c++.src}/pipes"/> <property name="c++.examples.pipes.src" value="${examples.dir}/pipes"/> <property name="c++.libhdfs.src" value="${c++.src}/libhdfs"/> <property name="librecordio.src" value="${c++.src}/librecordio"/> <property name="tools.src" value="${basedir}/src/tools"/> <property name="xercescroot" value=""/> <property name="build.dir" value="${basedir}/build"/> <property name="build.classes" value="${build.dir}/classes"/> <property name="build.src" value="${build.dir}/src"/> <property name="build.tools" value="${build.dir}/tools"/> <property name="build.webapps" value="${build.dir}/webapps"/> <property name="build.examples" value="${build.dir}/examples"/> <property name="build.anttasks" value="${build.dir}/ant"/> <property name="build.librecordio" value="${build.dir}/librecordio"/> <!-- convert spaces to _ so that mac 
os doesn't break things --> <exec executable="sed" inputstring="${os.name}" outputproperty="nonspace.os"> <arg value="s/ /_/g"/> </exec> <property name="build.platform" value="${nonspace.os}-${os.arch}-${sun.arch.data.model}"/> <property name="jvm.arch" value="${sun.arch.data.model}"/> <property name="build.native" value="${build.dir}/native/${build.platform}"/> <property name="build.c++" value="${build.dir}/c++-build/${build.platform}"/> <property name="build.c++.utils" value="${build.c++}/utils"/> <property name="build.c++.pipes" value="${build.c++}/pipes"/> <property name="build.c++.libhdfs" value="${build.c++}/libhdfs"/> <property name="build.c++.examples.pipes" value="${build.c++}/examples/pipes"/> <property name="build.docs" value="${build.dir}/docs"/> <property name="build.docs.cn" value="${build.dir}/docs/cn"/> <property name="build.javadoc" value="${build.docs}/api"/> <property name="build.javadoc.dev" value="${build.docs}/dev-api"/> <property name="build.encoding" value="ISO-8859-1"/> <property name="install.c++" value="${build.dir}/c++/${build.platform}"/> <property name="install.c++.examples" value="${build.dir}/c++-examples/${build.platform}"/> <property name="test.src.dir" value="${basedir}/src/test"/> <property name="test.lib.dir" value="${basedir}/src/test/lib"/> <property name="test.build.dir" value="${build.dir}/test"/> <property name="test.generated.dir" value="${test.build.dir}/src"/> <property name="test.build.data" value="${test.build.dir}/data"/> <property name="test.cache.data" value="${test.build.dir}/cache"/> <property name="test.debug.data" value="${test.build.dir}/debug"/> <property name="test.log.dir" value="${test.build.dir}/logs"/> <property name="test.build.classes" value="${test.build.dir}/classes"/> <property name="test.build.testjar" value="${test.build.dir}/testjar"/> <property name="test.build.testshell" value="${test.build.dir}/testshell"/> <property name="test.build.extraconf" value="${test.build.dir}/extraconf"/> <property 
name="test.build.javadoc" value="${test.build.dir}/docs/api"/> <property name="test.build.javadoc.dev" value="${test.build.dir}/docs/dev-api"/> <property name="test.include" value="Test*"/> <property name="test.classpath.id" value="test.classpath"/> <property name="test.output" value="no"/> <property name="test.timeout" value="900000"/> <property name="test.junit.output.format" value="plain"/> <property name="test.junit.fork.mode" value="perTest" /> <property name="test.junit.printsummary" value="yes" /> <property name="test.junit.haltonfailure" value="no" /> <property name="test.junit.maxmemory" value="512m" /> <property name="test.libhdfs.conf.dir" value="${c++.libhdfs.src}/tests/conf"/> <property name="test.libhdfs.dir" value="${test.build.dir}/libhdfs"/> <property name="librecordio.test.dir" value="${test.build.dir}/librecordio"/> <property name="web.src.dir" value="${basedir}/src/web"/> <property name="src.webapps" value="${basedir}/src/webapps"/> <property name="javadoc.link.java" value="http://java.sun.com/javase/6/docs/api/"/> <property name="javadoc.packages" value="org.apache.hadoop.*"/> <property name="dist.dir" value="${build.dir}/${final.name}"/> <property name="javac.debug" value="on"/> <property name="javac.optimize" value="on"/> <property name="javac.deprecation" value="off"/> <property name="javac.version" value="1.6"/> <property name="javac.args" value=""/> <property name="javac.args.warnings" value="-Xlint:unchecked"/> <property name="clover.db.dir" location="${build.dir}/test/clover/db"/> <property name="clover.report.dir" location="${build.dir}/test/clover/reports"/> <property name="rat.reporting.classname" value="rat.Report"/> <property name="jdiff.build.dir" value="${build.docs}/jdiff"/> <property name="jdiff.xml.dir" value="${lib.dir}/jdiff"/> <property name="jdiff.stable" value="0.19.2"/> <property name="jdiff.stable.javadoc" value="http://hadoop.apache.org/core/docs/r${jdiff.stable}/api/"/> <property name="scratch.dir" 
value="${user.home}/tmp"/> <property name="svn.cmd" value="svn"/> <property name="grep.cmd" value="grep"/> <property name="patch.cmd" value="patch"/> <property name="make.cmd" value="make"/> <!-- task-controller properties set here --> <!-- Source directory from where configure is run and files are copied --> <property name="c++.task-controller.src" value="${basedir}/src/c++/task-controller" /> <!-- directory where autoconf files + temporary files and src is stored for compilation --> <property name="build.c++.task-controller" value="${build.c++}/task-controller" /> <!-- the default install dir is build directory override it using -Dtask-controller.install.dir=$HADOOP_HOME/bin --> <property name="task-controller.install.dir" value="${dist.dir}/bin" /> <!-- end of task-controller properties --> <!-- IVY properteis set here --> <property name="ivy.dir" location="ivy" /> <loadproperties srcfile="${ivy.dir}/libraries.properties"/> <property name="ivy.jar" location="${ivy.dir}/ivy-${ivy.version}.jar"/> <property name="ivy_repo_url" value="http://repo2.maven.org/maven2/org/apache/ivy/ivy/${ivy.version}/ivy-${ivy.version}.jar"/> <property name="ivysettings.xml" location="${ivy.dir}/ivysettings.xml" /> <property name="ivy.org" value="org.apache.hadoop"/> <property name="build.dir" location="build" /> <property name="dist.dir" value="${build.dir}/${final.name}"/> <property name="build.ivy.dir" location="${build.dir}/ivy" /> <property name="build.ivy.lib.dir" location="${build.ivy.dir}/lib" /> <property name="common.ivy.lib.dir" location="${build.ivy.lib.dir}/${ant.project.name}/common"/> <property name="build.ivy.report.dir" location="${build.ivy.dir}/report" /> <property name="build.ivy.maven.dir" location="${build.ivy.dir}/maven" /> <property name="build.ivy.maven.pom" location="${build.ivy.maven.dir}/hadoop-core-${hadoop.version}.pom" /> <property name="build.ivy.maven.jar" location="${build.ivy.maven.dir}/hadoop-core-${hadoop.version}.jar" /> <!--this is the naming 
policy for artifacts we want pulled down--> <property name="ivy.artifact.retrieve.pattern" value="${ant.project.name}/[conf]/[artifact]-[revision].[ext]"/> <!--this is how artifacts that get built are named--> <property name="ivy.publish.pattern" value="hadoop-[revision]-core.[ext]"/> <property name="hadoop.jar" location="${build.dir}/hadoop-${hadoop.version}-core.jar" /> <!-- jdiff.home property set --> <property name="jdiff.home" value="${build.ivy.lib.dir}/${ant.project.name}/jdiff"/> <property name="jdiff.jar" value="${jdiff.home}/jdiff-${jdiff.version}.jar"/> <property name="xerces.jar" value="${jdiff.home}/xerces-${xerces.version}.jar"/> <property name="clover.jar" location="${clover.home}/lib/clover.jar"/> <available property="clover.present" file="${clover.jar}" /> <!-- check if clover reports should be generated --> <condition property="clover.enabled"> <and> <isset property="run.clover"/> <isset property="clover.present"/> </and> </condition> <!-- the normal classpath --> <path id="classpath"> <pathelement location="${build.classes}"/> <fileset dir="${lib.dir}"> <include name="**/*.jar" /> <exclude name="**/excluded/" /> </fileset> <pathelement location="${conf.dir}"/> <path refid="ivy-common.classpath"/> </path> <!-- the unit test classpath: uses test.src.dir for configuration --> <path id="test.classpath"> <pathelement location="${test.build.extraconf}"/> <pathelement location="${test.build.classes}" /> <pathelement location="${test.src.dir}"/> <pathelement location="${build.dir}"/> <pathelement location="${build.examples}"/> <pathelement location="${build.tools}"/> <pathelement path="${clover.jar}"/> <fileset dir="${test.lib.dir}"> <include name="**/*.jar" /> <exclude name="**/excluded/" /> </fileset> <path refid="classpath"/> </path> <!-- the cluster test classpath: uses conf.dir for configuration --> <path id="test.cluster.classpath"> <path refid="classpath"/> <pathelement location="${test.build.classes}" /> <pathelement location="${test.src.dir}"/> 
<pathelement location="${build.dir}"/> </path> <!-- properties dependent on the items defined above. --> <!--<available classname="${rat.reporting.classname}" classpathref="classpath" property="rat.present" value="true"/> --> <!-- ====================================================== --> <!-- Macro definitions --> <!-- ====================================================== --> <macrodef name="macro_tar" description="Worker Macro for tar"> <attribute name="param.destfile"/> <element name="param.listofitems"/> <sequential> <tar compression="gzip" longfile="gnu" destfile="@{param.destfile}"> <param.listofitems/> </tar> </sequential> </macrodef> <!-- ====================================================== --> <!-- Stuff needed by all targets --> <!-- ====================================================== --> <target name="init" depends="ivy-retrieve-common"> <mkdir dir="${build.dir}"/> <mkdir dir="${build.classes}"/> <mkdir dir="${build.tools}"/> <mkdir dir="${build.src}"/> <mkdir dir="${build.webapps}/task/WEB-INF"/> <mkdir dir="${build.webapps}/job/WEB-INF"/> <mkdir dir="${build.webapps}/hdfs/WEB-INF"/> <mkdir dir="${build.webapps}/datanode/WEB-INF"/> <mkdir dir="${build.webapps}/secondary/WEB-INF"/> <mkdir dir="${build.examples}"/> <mkdir dir="${build.anttasks}"/> <mkdir dir="${build.dir}/c++"/> <mkdir dir="${test.build.dir}"/> <mkdir dir="${test.build.classes}"/> <mkdir dir="${test.build.testjar}"/> <mkdir dir="${test.build.testshell}"/> <mkdir dir="${test.build.extraconf}"/> <tempfile property="touch.temp.file" destDir="${java.io.tmpdir}"/> <touch millis="0" file="${touch.temp.file}"> <fileset dir="${conf.dir}" includes="**/*.template"/> <fileset dir="${contrib.dir}" includes="**/*.template"/> </touch> <delete file="${touch.temp.file}"/> <!-- copy all of the jsp and static files --> <copy todir="${build.webapps}"> <fileset dir="${src.webapps}"> <exclude name="**/*.jsp" /> </fileset> </copy> <copy todir="${conf.dir}" verbose="true"> <fileset dir="${conf.dir}" 
includes="**/*.template"/> <mapper type="glob" from="*.template" to="*"/> </copy> <copy todir="${contrib.dir}" verbose="true"> <fileset dir="${contrib.dir}" includes="**/*.template"/> <mapper type="glob" from="*.template" to="*"/> </copy> <exec executable="sh"> <arg line="src/saveVersion.sh ${version}"/> </exec> <exec executable="sh"> <arg line="src/fixFontsPath.sh ${src.docs.cn}"/> </exec> </target> <!-- ====================================================== --> <!-- Compile the Java files --> <!-- ====================================================== --> <target name="record-parser" depends="init" if="javacc.home"> <javacc target="${core.src.dir}/org/apache/hadoop/record/compiler/generated/rcc.jj" outputdirectory="${core.src.dir}/org/apache/hadoop/record/compiler/generated" javacchome="${javacc.home}" /> </target> <target name="compile-rcc-compiler" depends="init, record-parser"> <javac encoding="${build.encoding}" srcdir="${core.src.dir}" includes="org/apache/hadoop/record/compiler/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args}"/> <classpath refid="classpath"/> </javac> <taskdef name="recordcc" classname="org.apache.hadoop.record.compiler.ant.RccTask"> <classpath refid="classpath" /> </taskdef> </target> <target name="compile-core-classes" depends="init, compile-rcc-compiler"> <taskdef classname="org.apache.jasper.JspC" name="jsp-compile" > <classpath refid="test.classpath"/> </taskdef> <!-- Compile Java files (excluding JSPs) checking warnings --> <javac encoding="${build.encoding}" srcdir="${core.src.dir}" includes="org/apache/hadoop/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> 
</javac> <copy todir="${build.classes}"> <fileset dir="${core.src.dir}" includes="**/*.properties"/> <fileset dir="${core.src.dir}" includes="core-default.xml"/> </copy> </target> <target name="compile-mapred-classes" depends="compile-core-classes"> <jsp-compile uriroot="${src.webapps}/task" outputdir="${build.src}" package="org.apache.hadoop.mapred" webxml="${build.webapps}/task/WEB-INF/web.xml"> </jsp-compile> <jsp-compile uriroot="${src.webapps}/job" outputdir="${build.src}" package="org.apache.hadoop.mapred" webxml="${build.webapps}/job/WEB-INF/web.xml"> </jsp-compile> <!-- Compile Java files (excluding JSPs) checking warnings --> <javac encoding="${build.encoding}" srcdir="${mapred.src.dir};${build.src}" includes="org/apache/hadoop/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> </javac> <copy todir="${build.classes}"> <fileset dir="${mapred.src.dir}" includes="**/*.properties"/> <fileset dir="${mapred.src.dir}" includes="mapred-default.xml"/> </copy> </target> <target name="compile-hdfs-classes" depends="compile-core-classes"> <jsp-compile uriroot="${src.webapps}/hdfs" outputdir="${build.src}" package="org.apache.hadoop.hdfs.server.namenode" webxml="${build.webapps}/hdfs/WEB-INF/web.xml"> </jsp-compile> <jsp-compile uriroot="${src.webapps}/datanode" outputdir="${build.src}" package="org.apache.hadoop.hdfs.server.datanode" webxml="${build.webapps}/datanode/WEB-INF/web.xml"> </jsp-compile> <!-- Compile Java files (excluding JSPs) checking warnings --> <javac encoding="${build.encoding}" srcdir="${hdfs.src.dir};${build.src}" includes="org/apache/hadoop/**/*.java" destdir="${build.classes}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg 
line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> </javac> <copy todir="${build.classes}"> <fileset dir="${hdfs.src.dir}" includes="**/*.properties"/> <fileset dir="${hdfs.src.dir}" includes="hdfs-default.xml"/> </copy> </target> <target name="compile-tools" depends="init"> <javac encoding="${build.encoding}" srcdir="${tools.src}" includes="org/apache/hadoop/**/*.java" destdir="${build.tools}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath refid="classpath"/> </javac> <copy todir="${build.tools}"> <fileset dir="${tools.src}" includes="**/*.properties" /> </copy> </target> <target name="compile-native"> <antcall target="compile-core-native"> <param name="compile.native" value="true"/> </antcall> </target> <target name="compile-core-native" depends="compile-core-classes" if="compile.native"> <mkdir dir="${build.native}/lib"/> <mkdir dir="${build.native}/src/org/apache/hadoop/io/compress/zlib"/> <javah classpath="${build.classes}" destdir="${build.native}/src/org/apache/hadoop/io/compress/zlib" force="yes" verbose="yes" > <class name="org.apache.hadoop.io.compress.zlib.ZlibCompressor" /> <class name="org.apache.hadoop.io.compress.zlib.ZlibDecompressor" /> </javah> <exec dir="${build.native}" executable="sh" failonerror="true"> <env key="OS_NAME" value="${os.name}"/> <env key="OS_ARCH" value="${os.arch}"/> <env key="JVM_DATA_MODEL" value="${sun.arch.data.model}"/> <env key="HADOOP_NATIVE_SRCDIR" value="${native.src.dir}"/> <arg line="${native.src.dir}/configure"/> </exec> <exec dir="${build.native}" executable="${make.cmd}" failonerror="true"> <env key="OS_NAME" value="${os.name}"/> <env key="OS_ARCH" value="${os.arch}"/> <env key="JVM_DATA_MODEL" value="${sun.arch.data.model}"/> <env key="HADOOP_NATIVE_SRCDIR" value="${native.src.dir}"/> </exec> <exec dir="${build.native}" 
executable="sh" failonerror="true"> <arg line="${build.native}/libtool --mode=install cp ${build.native}/lib/libhadoop.la ${build.native}/lib"/> </exec> </target> <target name="compile-core" depends="clover,compile-core-classes,compile-mapred-classes, compile-hdfs-classes,compile-core-native,compile-c++" description="Compile core only"> </target> <target name="compile-contrib" depends="compile-core,compile-c++-libhdfs"> <subant target="compile"> <property name="version" value="${version}"/> <fileset file="${contrib.dir}/build.xml"/> </subant> </target> <target name="compile" depends="compile-core, compile-contrib, compile-ant-tasks, compile-tools" description="Compile core, contrib"> </target> <target name="compile-examples" depends="compile-core,compile-tools,compile-c++-examples"> <javac encoding="${build.encoding}" srcdir="${examples.dir}" includes="org/apache/hadoop/**/*.java" destdir="${build.examples}" debug="${javac.debug}" optimize="${javac.optimize}" target="${javac.version}" source="${javac.version}" deprecation="${javac.deprecation}"> <compilerarg line="${javac.args} ${javac.args.warnings}" /> <classpath> <path refid="classpath"/> <pathelement location="${build.tools}"/> </classpath> </javac> </target> <!-- ================================================================== --> <!-- Make hadoop.jar -->
jaxlaw/hadoop-common
9fda5fe4de0bf6a9b972941d8bfda60a7df01d77
HADOOP:6203 from https://issues.apache.org/jira/secure/attachment/12425243/c6203_20091116_0.20.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 5298834..391bc9b 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,389 +1,393 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3092118005: + HADOOP-6203. FsShell rm/rmr error message indicates exceeding Trash quota + and suggests using -skpTrash, when moving to trash fails. + (Boris Shkolnik via suresh) + HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo (Nicholas), SZE via cdouglas) HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. 
Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. 
(Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. 
yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. 
Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. 
http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. 
The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. 
Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. 
(Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/core/org/apache/hadoop/fs/FsShell.java b/src/core/org/apache/hadoop/fs/FsShell.java index 51fe6eb..3791522 100644 --- a/src/core/org/apache/hadoop/fs/FsShell.java +++ b/src/core/org/apache/hadoop/fs/FsShell.java @@ -551,1028 +551,1038 @@ public class FsShell extends Configured implements Tool { * @throws IOException */ private void setFileReplication(Path file, FileSystem srcFs, short newRep, List<Path> waitList) throws IOException { if (srcFs.setReplication(file, newRep)) { if (waitList != null) { waitList.add(file); } System.out.println("Replication " + newRep + " set: " + file); } else { System.err.println("Could not set replication for: " + file); } } /** * Get a listing of all files in that match the file pattern <i>srcf</i>. * @param srcf a file pattern specifying source files * @param recursive if need to list files in subdirs * @throws IOException * @see org.apache.hadoop.fs.FileSystem#globStatus(Path) */ private int ls(String srcf, boolean recursive) throws IOException { Path srcPath = new Path(srcf); FileSystem srcFs = srcPath.getFileSystem(this.getConf()); FileStatus[] srcs = srcFs.globStatus(srcPath); if (srcs==null || srcs.length==0) { throw new FileNotFoundException("Cannot access " + srcf + ": No such file or directory."); } boolean printHeader = (srcs.length == 1) ? true: false; int numOfErrors = 0; for(int i=0; i<srcs.length; i++) { numOfErrors += ls(srcs[i], srcFs, recursive, printHeader); } return numOfErrors == 0 ? 
0 : -1; } /* list all files under the directory <i>src</i> * ideally we should provide "-l" option, that lists like "ls -l". */ private int ls(FileStatus src, FileSystem srcFs, boolean recursive, boolean printHeader) throws IOException { final String cmd = recursive? "lsr": "ls"; final FileStatus[] items = shellListStatus(cmd, srcFs, src); if (items == null) { return 1; } else { int numOfErrors = 0; if (!recursive && printHeader) { if (items.length != 0) { System.out.println("Found " + items.length + " items"); } } int maxReplication = 3, maxLen = 10, maxOwner = 0,maxGroup = 0; for(int i = 0; i < items.length; i++) { FileStatus stat = items[i]; int replication = String.valueOf(stat.getReplication()).length(); int len = String.valueOf(stat.getLen()).length(); int owner = String.valueOf(stat.getOwner()).length(); int group = String.valueOf(stat.getGroup()).length(); if (replication > maxReplication) maxReplication = replication; if (len > maxLen) maxLen = len; if (owner > maxOwner) maxOwner = owner; if (group > maxGroup) maxGroup = group; } for (int i = 0; i < items.length; i++) { FileStatus stat = items[i]; Path cur = stat.getPath(); String mdate = dateForm.format(new Date(stat.getModificationTime())); System.out.print((stat.isDir() ? "d" : "-") + stat.getPermission() + " "); System.out.printf("%"+ maxReplication + "s ", (!stat.isDir() ? 
stat.getReplication() : "-")); if (maxOwner > 0) System.out.printf("%-"+ maxOwner + "s ", stat.getOwner()); if (maxGroup > 0) System.out.printf("%-"+ maxGroup + "s ", stat.getGroup()); System.out.printf("%"+ maxLen + "d ", stat.getLen()); System.out.print(mdate + " "); System.out.println(cur.toUri().getPath()); if (recursive && stat.isDir()) { numOfErrors += ls(stat,srcFs, recursive, printHeader); } } return numOfErrors; } } /** * Show the size of all files that match the file pattern <i>src</i> * @param src a file pattern specifying source files * @throws IOException * @see org.apache.hadoop.fs.FileSystem#globStatus(Path) */ void du(String src) throws IOException { Path srcPath = new Path(src); FileSystem srcFs = srcPath.getFileSystem(getConf()); Path[] pathItems = FileUtil.stat2Paths(srcFs.globStatus(srcPath), srcPath); FileStatus items[] = srcFs.listStatus(pathItems); if ((items == null) || ((items.length == 0) && (!srcFs.exists(srcPath)))){ throw new FileNotFoundException("Cannot access " + src + ": No such file or directory."); } else { System.out.println("Found " + items.length + " items"); int maxLength = 10; long length[] = new long[items.length]; for (int i = 0; i < items.length; i++) { length[i] = items[i].isDir() ? 
srcFs.getContentSummary(items[i].getPath()).getLength() : items[i].getLen(); int len = String.valueOf(length[i]).length(); if (len > maxLength) maxLength = len; } for(int i = 0; i < items.length; i++) { System.out.printf("%-"+ (maxLength + BORDER) +"d", length[i]); System.out.println(items[i].getPath()); } } } /** * Show the summary disk usage of each dir/file * that matches the file pattern <i>src</i> * @param src a file pattern specifying source files * @throws IOException * @see org.apache.hadoop.fs.FileSystem#globStatus(Path) */ void dus(String src) throws IOException { Path srcPath = new Path(src); FileSystem srcFs = srcPath.getFileSystem(getConf()); FileStatus status[] = srcFs.globStatus(new Path(src)); if (status==null || status.length==0) { throw new FileNotFoundException("Cannot access " + src + ": No such file or directory."); } for(int i=0; i<status.length; i++) { long totalSize = srcFs.getContentSummary(status[i].getPath()).getLength(); String pathStr = status[i].getPath().toString(); System.out.println(("".equals(pathStr)?".":pathStr) + "\t" + totalSize); } } /** * Create the given dir */ void mkdir(String src) throws IOException { Path f = new Path(src); FileSystem srcFs = f.getFileSystem(getConf()); FileStatus fstatus = null; try { fstatus = srcFs.getFileStatus(f); if (fstatus.isDir()) { throw new IOException("cannot create directory " + src + ": File exists"); } else { throw new IOException(src + " exists but " + "is not a directory"); } } catch(FileNotFoundException e) { if (!srcFs.mkdirs(f)) { throw new IOException("failed to create " + src); } } } /** * (Re)create zero-length file at the specified path. * This will be replaced by a more UNIX-like touch when files may be * modified. 
*/ void touchz(String src) throws IOException { Path f = new Path(src); FileSystem srcFs = f.getFileSystem(getConf()); FileStatus st; if (srcFs.exists(f)) { st = srcFs.getFileStatus(f); if (st.isDir()) { // TODO: handle this throw new IOException(src + " is a directory"); } else if (st.getLen() != 0) throw new IOException(src + " must be a zero-length file"); } FSDataOutputStream out = srcFs.create(f); out.close(); } /** * Check file types. */ int test(String argv[], int i) throws IOException { if (!argv[i].startsWith("-") || argv[i].length() > 2) throw new IOException("Not a flag: " + argv[i]); char flag = argv[i].toCharArray()[1]; Path f = new Path(argv[++i]); FileSystem srcFs = f.getFileSystem(getConf()); switch(flag) { case 'e': return srcFs.exists(f) ? 0 : 1; case 'z': return srcFs.getFileStatus(f).getLen() == 0 ? 0 : 1; case 'd': return srcFs.getFileStatus(f).isDir() ? 0 : 1; default: throw new IOException("Unknown flag: " + flag); } } /** * Print statistics about path in specified format. * Format sequences: * %b: Size of file in blocks * %n: Filename * %o: Block size * %r: replication * %y: UTC date as &quot;yyyy-MM-dd HH:mm:ss&quot; * %Y: Milliseconds since January 1, 1970 UTC */ void stat(char[] fmt, String src) throws IOException { Path srcPath = new Path(src); FileSystem srcFs = srcPath.getFileSystem(getConf()); FileStatus glob[] = srcFs.globStatus(srcPath); if (null == glob) throw new IOException("cannot stat `" + src + "': No such file or directory"); for (FileStatus f : glob) { StringBuilder buf = new StringBuilder(); for (int i = 0; i < fmt.length; ++i) { if (fmt[i] != '%') { buf.append(fmt[i]); } else { if (i + 1 == fmt.length) break; switch(fmt[++i]) { case 'b': buf.append(f.getLen()); break; case 'F': buf.append(f.isDir() ? 
"directory" : "regular file"); break; case 'n': buf.append(f.getPath().getName()); break; case 'o': buf.append(f.getBlockSize()); break; case 'r': buf.append(f.getReplication()); break; case 'y': buf.append(modifFmt.format(new Date(f.getModificationTime()))); break; case 'Y': buf.append(f.getModificationTime()); break; default: buf.append(fmt[i]); break; } } } System.out.println(buf.toString()); } } /** * Move files that match the file pattern <i>srcf</i> * to a destination file. * When moving mutiple files, the destination must be a directory. * Otherwise, IOException is thrown. * @param srcf a file pattern specifying source files * @param dstf a destination local file/directory * @throws IOException * @see org.apache.hadoop.fs.FileSystem#globStatus(Path) */ void rename(String srcf, String dstf) throws IOException { Path srcPath = new Path(srcf); Path dstPath = new Path(dstf); FileSystem srcFs = srcPath.getFileSystem(getConf()); FileSystem dstFs = dstPath.getFileSystem(getConf()); URI srcURI = srcFs.getUri(); URI dstURI = dstFs.getUri(); if (srcURI.compareTo(dstURI) != 0) { throw new IOException("src and destination filesystems do not match."); } Path[] srcs = FileUtil.stat2Paths(srcFs.globStatus(srcPath), srcPath); Path dst = new Path(dstf); if (srcs.length > 1 && !srcFs.isDirectory(dst)) { throw new IOException("When moving multiple files, " + "destination should be a directory."); } for(int i=0; i<srcs.length; i++) { if (!srcFs.rename(srcs[i], dst)) { FileStatus srcFstatus = null; FileStatus dstFstatus = null; try { srcFstatus = srcFs.getFileStatus(srcs[i]); } catch(FileNotFoundException e) { throw new FileNotFoundException(srcs[i] + ": No such file or directory"); } try { dstFstatus = dstFs.getFileStatus(dst); } catch(IOException e) { } if((srcFstatus!= null) && (dstFstatus!= null)) { if (srcFstatus.isDir() && !dstFstatus.isDir()) { throw new IOException("cannot overwrite non directory " + dst + " with directory " + srcs[i]); } } throw new IOException("Failed 
to rename " + srcs[i] + " to " + dst); } } } /** * Move/rename file(s) to a destination file. Multiple source * files can be specified. The destination is the last element of * the argvp[] array. * If multiple source files are specified, then the destination * must be a directory. Otherwise, IOException is thrown. * @exception: IOException */ private int rename(String argv[], Configuration conf) throws IOException { int i = 0; int exitCode = 0; String cmd = argv[i++]; String dest = argv[argv.length-1]; // // If the user has specified multiple source files, then // the destination has to be a directory // if (argv.length > 3) { Path dst = new Path(dest); FileSystem dstFs = dst.getFileSystem(getConf()); if (!dstFs.isDirectory(dst)) { throw new IOException("When moving multiple files, " + "destination " + dest + " should be a directory."); } } // // for each source file, issue the rename // for (; i < argv.length - 1; i++) { try { // // issue the rename to the fs // rename(argv[i], dest); } catch (RemoteException e) { // // This is a error returned by hadoop server. Print // out the first line of the error mesage. // exitCode = -1; try { String[] content; content = e.getLocalizedMessage().split("\n"); System.err.println(cmd.substring(1) + ": " + content[0]); } catch (Exception ex) { System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage()); } } catch (IOException e) { // // IO exception encountered locally. // exitCode = -1; System.err.println(cmd.substring(1) + ": " + e.getLocalizedMessage()); } } return exitCode; } /** * Copy files that match the file pattern <i>srcf</i> * to a destination file. * When copying mutiple files, the destination must be a directory. * Otherwise, IOException is thrown. 
* @param srcf a file pattern specifying source files * @param dstf a destination local file/directory * @throws IOException * @see org.apache.hadoop.fs.FileSystem#globStatus(Path) */ void copy(String srcf, String dstf, Configuration conf) throws IOException { Path srcPath = new Path(srcf); FileSystem srcFs = srcPath.getFileSystem(getConf()); Path dstPath = new Path(dstf); FileSystem dstFs = dstPath.getFileSystem(getConf()); Path [] srcs = FileUtil.stat2Paths(srcFs.globStatus(srcPath), srcPath); if (srcs.length > 1 && !dstFs.isDirectory(dstPath)) { throw new IOException("When copying multiple files, " + "destination should be a directory."); } for(int i=0; i<srcs.length; i++) { FileUtil.copy(srcFs, srcs[i], dstFs, dstPath, false, conf); } } /** * Copy file(s) to a destination file. Multiple source * files can be specified. The destination is the last element of * the argvp[] array. * If multiple source files are specified, then the destination * must be a directory. Otherwise, IOException is thrown. * @exception: IOException */ private int copy(String argv[], Configuration conf) throws IOException { int i = 0; int exitCode = 0; String cmd = argv[i++]; String dest = argv[argv.length-1]; // // If the user has specified multiple source files, then // the destination has to be a directory // if (argv.length > 3) { Path dst = new Path(dest); if (!fs.isDirectory(dst)) { throw new IOException("When copying multiple files, " + "destination " + dest + " should be a directory."); } } // // for each source file, issue the copy // for (; i < argv.length - 1; i++) { try { // // issue the copy to the fs // copy(argv[i], dest, conf); } catch (RemoteException e) { // // This is a error returned by hadoop server. Print // out the first line of the error mesage. 
// exitCode = -1; try { String[] content; content = e.getLocalizedMessage().split("\n"); System.err.println(cmd.substring(1) + ": " + content[0]); } catch (Exception ex) { System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage()); } } catch (IOException e) { // // IO exception encountered locally. // exitCode = -1; System.err.println(cmd.substring(1) + ": " + e.getLocalizedMessage()); } } return exitCode; } /** * Delete all files that match the file pattern <i>srcf</i>. * @param srcf a file pattern specifying source files * @param recursive if need to delete subdirs * @param skipTrash Should we skip the trash, if it's enabled? * @throws IOException * @see org.apache.hadoop.fs.FileSystem#globStatus(Path) */ void delete(String srcf, final boolean recursive, final boolean skipTrash) throws IOException { //rm behavior in Linux // [~/1207]$ ls ?.txt // x.txt z.txt // [~/1207]$ rm x.txt y.txt z.txt // rm: cannot remove `y.txt': No such file or directory Path srcPattern = new Path(srcf); new DelayedExceptionThrowing() { @Override void process(Path p, FileSystem srcFs) throws IOException { delete(p, srcFs, recursive, skipTrash); } }.globAndProcess(srcPattern, srcPattern.getFileSystem(getConf())); } /* delete a file */ private void delete(Path src, FileSystem srcFs, boolean recursive, boolean skipTrash) throws IOException { FileStatus fs = null; try { fs = srcFs.getFileStatus(src); } catch (FileNotFoundException fnfe) { // Have to re-throw so that console output is as expected throw new FileNotFoundException("cannot remove " + src + ": No such file or directory."); } if (fs.isDir() && !recursive) { throw new IOException("Cannot remove directory \"" + src + "\", use -rmr instead"); } if(!skipTrash) { - Trash trashTmp = new Trash(srcFs, getConf()); - if (trashTmp.moveToTrash(src)) { - System.out.println("Moved to trash: " + src); - return; + try { + Trash trashTmp = new Trash(srcFs, getConf()); + if (trashTmp.moveToTrash(src)) { + System.out.println("Moved to 
trash: " + src); + return; + } + } catch (IOException e) { + Exception cause = (Exception) e.getCause(); + String msg = ""; + if(cause != null) { + msg = cause.getLocalizedMessage(); + } + System.err.println("Problem with Trash." + msg +". Consider using -skipTrash option"); + throw e; } } if (srcFs.delete(src, true)) { System.out.println("Deleted " + src); } else { throw new IOException("Delete failed " + src); } } private void expunge() throws IOException { trash.expunge(); trash.checkpoint(); } /** * Returns the Trash object associated with this shell. */ public Path getCurrentTrashDir() { return trash.getCurrentTrashDir(); } /** * Parse the incoming command string * @param cmd * @param pos ignore anything before this pos in cmd * @throws IOException */ private void tail(String[] cmd, int pos) throws IOException { CommandFormat c = new CommandFormat("tail", 1, 1, "f"); String src = null; Path path = null; try { List<String> parameters = c.parse(cmd, pos); src = parameters.get(0); } catch(IllegalArgumentException iae) { System.err.println("Usage: java FsShell " + TAIL_USAGE); throw iae; } boolean foption = c.getOpt("f") ? true: false; path = new Path(src); FileSystem srcFs = path.getFileSystem(getConf()); if (srcFs.isDirectory(path)) { throw new IOException("Source must be a file."); } long fileSize = srcFs.getFileStatus(path).getLen(); long offset = (fileSize > 1024) ? fileSize - 1024: 0; while (true) { FSDataInputStream in = srcFs.open(path); in.seek(offset); IOUtils.copyBytes(in, System.out, 1024, false); offset = in.getPos(); in.close(); if (!foption) { break; } fileSize = srcFs.getFileStatus(path).getLen(); offset = (fileSize > offset) ? offset: fileSize; try { Thread.sleep(5000); } catch (InterruptedException e) { break; } } } /** * This class runs a command on a given FileStatus. This can be used for * running various commands like chmod, chown etc. 
*/ static abstract class CmdHandler { protected int errorCode = 0; protected boolean okToContinue = true; protected String cmdName; int getErrorCode() { return errorCode; } boolean okToContinue() { return okToContinue; } String getName() { return cmdName; } protected CmdHandler(String cmdName, FileSystem fs) { this.cmdName = cmdName; } public abstract void run(FileStatus file, FileSystem fs) throws IOException; } /** helper returns listStatus() */ private static FileStatus[] shellListStatus(String cmd, FileSystem srcFs, FileStatus src) { if (!src.isDir()) { FileStatus[] files = { src }; return files; } Path path = src.getPath(); try { FileStatus[] files = srcFs.listStatus(path); if ( files == null ) { System.err.println(cmd + ": could not get listing for '" + path + "'"); } return files; } catch (IOException e) { System.err.println(cmd + ": could not get get listing for '" + path + "' : " + e.getMessage().split("\n")[0]); } return null; } /** * Runs the command on a given file with the command handler. * If recursive is set, command is run recursively. 
*/ private static int runCmdHandler(CmdHandler handler, FileStatus stat, FileSystem srcFs, boolean recursive) throws IOException { int errors = 0; handler.run(stat, srcFs); if (recursive && stat.isDir() && handler.okToContinue()) { FileStatus[] files = shellListStatus(handler.getName(), srcFs, stat); if (files == null) { return 1; } for(FileStatus file : files ) { errors += runCmdHandler(handler, file, srcFs, recursive); } } return errors; } ///top level runCmdHandler int runCmdHandler(CmdHandler handler, String[] args, int startIndex, boolean recursive) throws IOException { int errors = 0; for (int i=startIndex; i<args.length; i++) { Path srcPath = new Path(args[i]); FileSystem srcFs = srcPath.getFileSystem(getConf()); Path[] paths = FileUtil.stat2Paths(srcFs.globStatus(srcPath), srcPath); for(Path path : paths) { try { FileStatus file = srcFs.getFileStatus(path); if (file == null) { System.err.println(handler.getName() + ": could not get status for '" + path + "'"); errors++; } else { errors += runCmdHandler(handler, file, srcFs, recursive); } } catch (IOException e) { String msg = (e.getMessage() != null ? e.getLocalizedMessage() : (e.getCause().getMessage() != null ? e.getCause().getLocalizedMessage() : "null")); System.err.println(handler.getName() + ": could not get status for '" + path + "': " + msg.split("\n")[0]); } } } return (errors > 0 || handler.getErrorCode() != 0) ? 1 : 0; } /** * Return an abbreviated English-language desc of the byte length * @deprecated Consider using {@link org.apache.hadoop.util.StringUtils#byteDesc} instead. */ @Deprecated public static String byteDesc(long len) { return StringUtils.byteDesc(len); } /** * @deprecated Consider using {@link org.apache.hadoop.util.StringUtils#limitDecimalTo2} instead. */ @Deprecated public static synchronized String limitDecimalTo2(double d) { return StringUtils.limitDecimalTo2(d); } private void printHelp(String cmd) { String summary = "hadoop fs is the command to execute fs commands. 
" + "The full syntax is: \n\n" + "hadoop fs [-fs <local | file system URI>] [-conf <configuration file>]\n\t" + "[-D <property=value>] [-ls <path>] [-lsr <path>] [-du <path>]\n\t" + "[-dus <path>] [-mv <src> <dst>] [-cp <src> <dst>] [-rm [-skipTrash] <src>]\n\t" + "[-rmr [-skipTrash] <src>] [-put <localsrc> ... <dst>] [-copyFromLocal <localsrc> ... <dst>]\n\t" + "[-moveFromLocal <localsrc> ... <dst>] [" + GET_SHORT_USAGE + "\n\t" + "[-getmerge <src> <localdst> [addnl]] [-cat <src>]\n\t" + "[" + COPYTOLOCAL_SHORT_USAGE + "] [-moveToLocal <src> <localdst>]\n\t" + "[-mkdir <path>] [-report] [" + SETREP_SHORT_USAGE + "]\n\t" + "[-touchz <path>] [-test -[ezd] <path>] [-stat [format] <path>]\n\t" + "[-tail [-f] <path>] [-text <path>]\n\t" + "[" + FsShellPermissions.CHMOD_USAGE + "]\n\t" + "[" + FsShellPermissions.CHOWN_USAGE + "]\n\t" + "[" + FsShellPermissions.CHGRP_USAGE + "]\n\t" + "[" + Count.USAGE + "]\n\t" + "[-help [cmd]]\n"; String conf ="-conf <configuration file>: Specify an application configuration file."; String D = "-D <property=value>: Use value for given property."; String fs = "-fs [local | <file system URI>]: \tSpecify the file system to use.\n" + "\t\tIf not specified, the current configuration is used, \n" + "\t\ttaken from the following, in increasing precedence: \n" + "\t\t\tcore-default.xml inside the hadoop jar file \n" + "\t\t\tcore-site.xml in $HADOOP_CONF_DIR \n" + "\t\t'local' means use the local file system as your DFS. \n" + "\t\t<file system URI> specifies a particular file system to \n" + "\t\tcontact. This argument is optional but if used must appear\n" + "\t\tappear first on the command line. Exactly one additional\n" + "\t\targument must be specified. \n"; String ls = "-ls <path>: \tList the contents that match the specified file pattern. If\n" + "\t\tpath is not specified, the contents of /user/<currentUser>\n" + "\t\twill be listed. 
Directory entries are of the form \n" + "\t\t\tdirName (full path) <dir> \n" + "\t\tand file entries are of the form \n" + "\t\t\tfileName(full path) <r n> size \n" + "\t\twhere n is the number of replicas specified for the file \n" + "\t\tand size is the size of the file, in bytes.\n"; String lsr = "-lsr <path>: \tRecursively list the contents that match the specified\n" + "\t\tfile pattern. Behaves very similarly to hadoop fs -ls,\n" + "\t\texcept that the data is shown for all the entries in the\n" + "\t\tsubtree.\n"; String du = "-du <path>: \tShow the amount of space, in bytes, used by the files that \n" + "\t\tmatch the specified file pattern. Equivalent to the unix\n" + "\t\tcommand \"du -sb <path>/*\" in case of a directory, \n" + "\t\tand to \"du -b <path>\" in case of a file.\n" + "\t\tThe output is in the form \n" + "\t\t\tname(full path) size (in bytes)\n"; String dus = "-dus <path>: \tShow the amount of space, in bytes, used by the files that \n" + "\t\tmatch the specified file pattern. Equivalent to the unix\n" + "\t\tcommand \"du -sb\" The output is in the form \n" + "\t\t\tname(full path) size (in bytes)\n"; String mv = "-mv <src> <dst>: Move files that match the specified file pattern <src>\n" + "\t\tto a destination <dst>. When moving multiple files, the \n" + "\t\tdestination must be a directory. \n"; String cp = "-cp <src> <dst>: Copy files that match the file pattern <src> to a \n" + "\t\tdestination. When copying multiple files, the destination\n" + "\t\tmust be a directory. \n"; String rm = "-rm [-skipTrash] <src>: \tDelete all files that match the specified file pattern.\n" + "\t\tEquivalent to the Unix command \"rm <src>\"\n" + "\t\t-skipTrash option bypasses trash, if enabled, and immediately\n" + "deletes <src>"; String rmr = "-rmr [-skipTrash] <src>: \tRemove all directories which match the specified file \n" + "\t\tpattern. 
Equivalent to the Unix command \"rm -rf <src>\"\n" + "\t\t-skipTrash option bypasses trash, if enabled, and immediately\n" + "deletes <src>"; String put = "-put <localsrc> ... <dst>: \tCopy files " + "from the local file system \n\t\tinto fs. \n"; String copyFromLocal = "-copyFromLocal <localsrc> ... <dst>:" + " Identical to the -put command.\n"; String moveFromLocal = "-moveFromLocal <localsrc> ... <dst>:" + " Same as -put, except that the source is\n\t\tdeleted after it's copied.\n"; String get = GET_SHORT_USAGE + ": Copy files that match the file pattern <src> \n" + "\t\tto the local name. <src> is kept. When copying mutiple, \n" + "\t\tfiles, the destination must be a directory. \n"; String getmerge = "-getmerge <src> <localdst>: Get all the files in the directories that \n" + "\t\tmatch the source file pattern and merge and sort them to only\n" + "\t\tone file on local fs. <src> is kept.\n"; String cat = "-cat <src>: \tFetch all files that match the file pattern <src> \n" + "\t\tand display their content on stdout.\n"; String text = "-text <src>: \tTakes a source file and outputs the file in text format.\n" + "\t\tThe allowed formats are zip and TextRecordInputStream.\n"; String copyToLocal = COPYTOLOCAL_SHORT_USAGE + ": Identical to the -get command.\n"; String moveToLocal = "-moveToLocal <src> <localdst>: Not implemented yet \n"; String mkdir = "-mkdir <path>: \tCreate a directory in specified location. \n"; String setrep = SETREP_SHORT_USAGE + ": Set the replication level of a file. \n" + "\t\tThe -R flag requests a recursive change of replication level \n" + "\t\tfor an entire tree.\n"; String touchz = "-touchz <path>: Write a timestamp in yyyy-MM-dd HH:mm:ss format\n" + "\t\tin a file at <path>. 
An error is returned if the file exists with non-zero length\n"; String test = "-test -[ezd] <path>: If file { exists, has zero length, is a directory\n" + "\t\tthen return 0, else return 1.\n"; String stat = "-stat [format] <path>: Print statistics about the file/directory at <path>\n" + "\t\tin the specified format. Format accepts filesize in blocks (%b), filename (%n),\n" + "\t\tblock size (%o), replication (%r), modification date (%y, %Y)\n"; String tail = TAIL_USAGE + ": Show the last 1KB of the file. \n" + "\t\tThe -f option shows apended data as the file grows. \n"; String chmod = FsShellPermissions.CHMOD_USAGE + "\n" + "\t\tChanges permissions of a file.\n" + "\t\tThis works similar to shell's chmod with a few exceptions.\n\n" + "\t-R\tmodifies the files recursively. This is the only option\n" + "\t\tcurrently supported.\n\n" + "\tMODE\tMode is same as mode used for chmod shell command.\n" + "\t\tOnly letters recognized are 'rwxX'. E.g. a+r,g-w,+rwx,o=r\n\n" + "\tOCTALMODE Mode specifed in 3 digits. Unlike shell command,\n" + "\t\tthis requires all three digits.\n" + "\t\tE.g. 754 is same as u=rwx,g=rx,o=r\n\n" + "\t\tIf none of 'augo' is specified, 'a' is assumed and unlike\n" + "\t\tshell command, no umask is applied.\n"; String chown = FsShellPermissions.CHOWN_USAGE + "\n" + "\t\tChanges owner and group of a file.\n" + "\t\tThis is similar to shell's chown with a few exceptions.\n\n" + "\t-R\tmodifies the files recursively. This is the only option\n" + "\t\tcurrently supported.\n\n" + "\t\tIf only owner or group is specified then only owner or\n" + "\t\tgroup is modified.\n\n" + "\t\tThe owner and group names may only cosists of digits, alphabet,\n"+ "\t\tand any of '-_.@/' i.e. [-_.@/a-zA-Z0-9]. The names are case\n" + "\t\tsensitive.\n\n" + "\t\tWARNING: Avoid using '.' to separate user name and group though\n" + "\t\tLinux allows it. 
If user names have dots in them and you are\n" + "\t\tusing local file system, you might see surprising results since\n" + "\t\tshell command 'chown' is used for local files.\n"; String chgrp = FsShellPermissions.CHGRP_USAGE + "\n" + "\t\tThis is equivalent to -chown ... :GROUP ...\n"; String help = "-help [cmd]: \tDisplays help for given command or all commands if none\n" + "\t\tis specified.\n"; if ("fs".equals(cmd)) { System.out.println(fs); } else if ("conf".equals(cmd)) { System.out.println(conf); } else if ("D".equals(cmd)) { System.out.println(D); } else if ("ls".equals(cmd)) { System.out.println(ls); } else if ("lsr".equals(cmd)) { System.out.println(lsr); } else if ("du".equals(cmd)) { System.out.println(du); } else if ("dus".equals(cmd)) { System.out.println(dus); } else if ("rm".equals(cmd)) { System.out.println(rm); } else if ("rmr".equals(cmd)) { System.out.println(rmr); } else if ("mkdir".equals(cmd)) { System.out.println(mkdir); } else if ("mv".equals(cmd)) { System.out.println(mv); } else if ("cp".equals(cmd)) { System.out.println(cp); } else if ("put".equals(cmd)) { System.out.println(put); } else if ("copyFromLocal".equals(cmd)) { System.out.println(copyFromLocal); } else if ("moveFromLocal".equals(cmd)) { System.out.println(moveFromLocal); } else if ("get".equals(cmd)) { System.out.println(get); } else if ("getmerge".equals(cmd)) { System.out.println(getmerge); } else if ("copyToLocal".equals(cmd)) { System.out.println(copyToLocal); } else if ("moveToLocal".equals(cmd)) { System.out.println(moveToLocal); } else if ("cat".equals(cmd)) { System.out.println(cat); } else if ("get".equals(cmd)) { System.out.println(get); } else if ("setrep".equals(cmd)) { System.out.println(setrep); } else if ("touchz".equals(cmd)) { System.out.println(touchz); } else if ("test".equals(cmd)) { System.out.println(test); } else if ("text".equals(cmd)) { System.out.println(text); } else if ("stat".equals(cmd)) { System.out.println(stat); } else if ("tail".equals(cmd)) { 
System.out.println(tail); } else if ("chmod".equals(cmd)) { System.out.println(chmod); } else if ("chown".equals(cmd)) { System.out.println(chown); } else if ("chgrp".equals(cmd)) { System.out.println(chgrp); } else if (Count.matches(cmd)) { System.out.println(Count.DESCRIPTION); } else if ("help".equals(cmd)) { System.out.println(help); } else { System.out.println(summary); System.out.println(fs); System.out.println(ls); System.out.println(lsr); System.out.println(du); System.out.println(dus); System.out.println(mv); System.out.println(cp); System.out.println(rm); System.out.println(rmr); System.out.println(put); System.out.println(copyFromLocal); System.out.println(moveFromLocal); System.out.println(get); System.out.println(getmerge); System.out.println(cat); System.out.println(copyToLocal); System.out.println(moveToLocal); System.out.println(mkdir); System.out.println(setrep); System.out.println(tail); System.out.println(touchz); System.out.println(test); System.out.println(text); System.out.println(stat); System.out.println(chmod); System.out.println(chown); System.out.println(chgrp); System.out.println(Count.DESCRIPTION); System.out.println(help); } } /** * Apply operation specified by 'cmd' on all parameters * starting from argv[startindex]. 
*/ private int doall(String cmd, String argv[], int startindex) { int exitCode = 0; int i = startindex; boolean rmSkipTrash = false; // Check for -skipTrash option in rm/rmr if(("-rm".equals(cmd) || "-rmr".equals(cmd)) && "-skipTrash".equals(argv[i])) { rmSkipTrash = true; i++; } // // for each source file, issue the command // for (; i < argv.length; i++) { try { // // issue the command to the fs // if ("-cat".equals(cmd)) { cat(argv[i], true); } else if ("-mkdir".equals(cmd)) { mkdir(argv[i]); } else if ("-rm".equals(cmd)) { delete(argv[i], false, rmSkipTrash); } else if ("-rmr".equals(cmd)) { delete(argv[i], true, rmSkipTrash); } else if ("-du".equals(cmd)) { du(argv[i]); } else if ("-dus".equals(cmd)) { dus(argv[i]); } else if (Count.matches(cmd)) { new Count(argv, i, getConf()).runAll(); } else if ("-ls".equals(cmd)) { exitCode = ls(argv[i], false); } else if ("-lsr".equals(cmd)) { exitCode = ls(argv[i], true); } else if ("-touchz".equals(cmd)) { touchz(argv[i]); } else if ("-text".equals(cmd)) { text(argv[i]); } } catch (RemoteException e) { // // This is a error returned by hadoop server. Print // out the first line of the error message. // exitCode = -1; try { String[] content; content = e.getLocalizedMessage().split("\n"); System.err.println(cmd.substring(1) + ": " + content[0]); } catch (Exception ex) { System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage()); diff --git a/src/core/org/apache/hadoop/fs/Trash.java b/src/core/org/apache/hadoop/fs/Trash.java index f4f93f8..986cf43 100644 --- a/src/core/org/apache/hadoop/fs/Trash.java +++ b/src/core/org/apache/hadoop/fs/Trash.java @@ -1,288 +1,288 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. 
The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs; import java.text.*; import java.io.*; import java.util.Date; import org.apache.commons.logging.*; import org.apache.hadoop.conf.*; import org.apache.hadoop.fs.permission.*; import org.apache.hadoop.util.StringUtils; /** Provides a <i>trash</i> feature. Files are moved to a user's trash * directory, a subdirectory of their home directory named ".Trash". Files are * initially moved to a <i>current</i> sub-directory of the trash directory. * Within that sub-directory their original path is preserved. Periodically * one may checkpoint the current trash and remove older checkpoints. (This * design permits trash management without enumeration of the full trash * content, without date support in the filesystem, and without clock * synchronization.) 
*/ public class Trash extends Configured { private static final Log LOG = LogFactory.getLog(Trash.class); private static final Path CURRENT = new Path("Current"); private static final Path TRASH = new Path(".Trash/"); private static final Path HOMES = new Path("/user/"); private static final FsPermission PERMISSION = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE); private static final DateFormat CHECKPOINT = new SimpleDateFormat("yyMMddHHmm"); private static final int MSECS_PER_MINUTE = 60*1000; private final FileSystem fs; private final Path trash; private final Path current; private final long interval; /** Construct a trash can accessor. * @param conf a Configuration */ public Trash(Configuration conf) throws IOException { this(FileSystem.get(conf), conf); } /** * Construct a trash can accessor for the FileSystem provided. */ public Trash(FileSystem fs, Configuration conf) throws IOException { super(conf); this.fs = fs; this.trash = new Path(fs.getHomeDirectory(), TRASH); this.current = new Path(trash, CURRENT); this.interval = conf.getLong("fs.trash.interval", 60) * MSECS_PER_MINUTE; } private Trash(Path home, Configuration conf) throws IOException { super(conf); this.fs = home.getFileSystem(conf); this.trash = new Path(home, TRASH); this.current = new Path(trash, CURRENT); this.interval = conf.getLong("fs.trash.interval", 60) * MSECS_PER_MINUTE; } private Path makeTrashRelativePath(Path basePath, Path rmFilePath) { return new Path(basePath + rmFilePath.toUri().getPath()); } /** Move a file or directory to the current trash directory. 
* @return false if the item is already in the trash or trash is disabled */ public boolean moveToTrash(Path path) throws IOException { if (interval == 0) return false; if (!path.isAbsolute()) // make path absolute path = new Path(fs.getWorkingDirectory(), path); if (!fs.exists(path)) // check that path exists throw new FileNotFoundException(path.toString()); String qpath = path.makeQualified(fs).toString(); if (qpath.startsWith(trash.toString())) { return false; // already in trash } if (trash.getParent().toString().startsWith(qpath)) { throw new IOException("Cannot move \"" + path + "\" to the trash, as it contains the trash"); } Path trashPath = makeTrashRelativePath(current, path); Path baseTrashPath = makeTrashRelativePath(current, path.getParent()); IOException cause = null; // try twice, in case checkpoint between the mkdirs() & rename() for (int i = 0; i < 2; i++) { try { if (!fs.mkdirs(baseTrashPath, PERMISSION)) { // create current - LOG.warn("Can't create trash directory: "+baseTrashPath); + LOG.warn("Can't create(mkdir) trash directory: "+baseTrashPath); return false; } } catch (IOException e) { LOG.warn("Can't create trash directory: "+baseTrashPath); cause = e; break; } try { // // if the target path in Trash already exists, then append with // a number. Start from 1. // String orig = trashPath.toString(); for (int j = 1; fs.exists(trashPath); j++) { trashPath = new Path(orig + "." + j); } if (fs.rename(path, trashPath)) // move to current trash return true; } catch (IOException e) { cause = e; } } throw (IOException) new IOException("Failed to move to trash: "+path).initCause(cause); } /** Create a trash checkpoint. 
*/ public void checkpoint() throws IOException { if (!fs.exists(current)) // no trash, no checkpoint return; Path checkpoint; synchronized (CHECKPOINT) { checkpoint = new Path(trash, CHECKPOINT.format(new Date())); } if (fs.rename(current, checkpoint)) { LOG.info("Created trash checkpoint: "+checkpoint.toUri().getPath()); } else { throw new IOException("Failed to checkpoint trash: "+checkpoint); } } /** Delete old checkpoints. */ public void expunge() throws IOException { FileStatus[] dirs = fs.listStatus(trash); // scan trash sub-directories if( dirs == null){ return; } long now = System.currentTimeMillis(); for (int i = 0; i < dirs.length; i++) { Path path = dirs[i].getPath(); String dir = path.toUri().getPath(); String name = path.getName(); if (name.equals(CURRENT.getName())) // skip current continue; long time; try { synchronized (CHECKPOINT) { time = CHECKPOINT.parse(name).getTime(); } } catch (ParseException e) { LOG.warn("Unexpected item in trash: "+dir+". Ignoring."); continue; } if ((now - interval) > time) { if (fs.delete(path, true)) { LOG.info("Deleted trash checkpoint: "+dir); } else { LOG.warn("Couldn't delete checkpoint: "+dir+" Ignoring."); } } } } // // get the current working directory // Path getCurrentTrashDir() { return current; } /** Return a {@link Runnable} that periodically empties the trash of all * users, intended to be run by the superuser. Only one checkpoint is kept * at a time. 
*/ public Runnable getEmptier() throws IOException { return new Emptier(getConf()); } private static class Emptier implements Runnable { private Configuration conf; private FileSystem fs; private long interval; public Emptier(Configuration conf) throws IOException { this.conf = conf; this.interval = conf.getLong("fs.trash.interval", 60) * MSECS_PER_MINUTE; this.fs = FileSystem.get(conf); } public void run() { if (interval == 0) return; // trash disabled long now = System.currentTimeMillis(); long end; while (true) { end = ceiling(now, interval); try { // sleep for interval Thread.sleep(end - now); } catch (InterruptedException e) { return; // exit on interrupt } try { now = System.currentTimeMillis(); if (now >= end) { FileStatus[] homes = null; try { homes = fs.listStatus(HOMES); // list all home dirs } catch (IOException e) { LOG.warn("Trash can't list homes: "+e+" Sleeping."); continue; } if (homes == null) continue; for (FileStatus home : homes) { // dump each trash if (!home.isDir()) continue; try { Trash trash = new Trash(home.getPath(), conf); trash.expunge(); trash.checkpoint(); } catch (IOException e) { LOG.warn("Trash caught: "+e+". Skipping "+home.getPath()+"."); } } } } catch (Exception e) { LOG.warn("RuntimeException during Trash.Emptier.run() " + StringUtils.stringifyException(e)); } } } private long ceiling(long time, long interval) { return floor(time, interval) + interval; } private long floor(long time, long interval) { return (time / interval) * interval; } } /** Run an emptier.*/ public static void main(String[] args) throws Exception { new Trash(new Configuration()).getEmptier().run(); } }
jaxlaw/hadoop-common
6215da0b06c3803ab271f838fc84ff46bb97cbf9
HADOOP:5675 from https://issues.apache.org/jira/secure/attachment/12406687/5675_20090428.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index a0355fa..5298834 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,386 +1,389 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3092118005: + HADOOP-5675. Do not launch a job if DistCp has no work to do. (Tsz Wo + (Nicholas), SZE via cdouglas) + HDFS-761. Fix failure to process rename operation from edits log due to quota verification. (suresh) HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. 
(Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. 
(Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. 
Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. 
Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. 
Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. 
HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. 
Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. 
(Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/tools/org/apache/hadoop/tools/DistCp.java b/src/tools/org/apache/hadoop/tools/DistCp.java index fcb0b8f..0a332b1 100644 --- a/src/tools/org/apache/hadoop/tools/DistCp.java +++ b/src/tools/org/apache/hadoop/tools/DistCp.java @@ -138,1213 +138,1218 @@ public class DistCp implements Tool { throw new IllegalArgumentException("<n> not specified in " + cmd); } long n = StringUtils.TraditionalBinaryPrefix.string2long(args[offset]); if (n <= 0) { throw new IllegalArgumentException("n = " + n + " <= 0 in " + cmd); } return n; } } static enum FileAttribute { BLOCK_SIZE, REPLICATION, USER, GROUP, PERMISSION; final char symbol; private FileAttribute() {symbol = toString().toLowerCase().charAt(0);} static EnumSet<FileAttribute> parse(String s) { if (s == null || s.length() == 0) { return EnumSet.allOf(FileAttribute.class); } EnumSet<FileAttribute> set = EnumSet.noneOf(FileAttribute.class); FileAttribute[] attributes = values(); for(char c : s.toCharArray()) { int i = 0; for(; i < attributes.length && c != attributes[i].symbol; i++); if (i < attributes.length) { if (!set.contains(attributes[i])) { set.add(attributes[i]); } else { throw new IllegalArgumentException("There are more than one '" + attributes[i].symbol + "' in " + s); } } else { throw new IllegalArgumentException("'" + c + "' in " + s + " is undefined."); } } return set; } } static final String TMP_DIR_LABEL = NAME + ".tmp.dir"; static final String DST_DIR_LABEL = NAME + ".dest.path"; static final String JOB_DIR_LABEL = NAME + ".job.dir"; static final String MAX_MAPS_LABEL = NAME + ".max.map.tasks"; static final String SRC_LIST_LABEL = NAME + ".src.list"; static final String 
SRC_COUNT_LABEL = NAME + ".src.count"; static final String TOTAL_SIZE_LABEL = NAME + ".total.size"; static final String DST_DIR_LIST_LABEL = NAME + ".dst.dir.list"; static final String BYTES_PER_MAP_LABEL = NAME + ".bytes.per.map"; static final String PRESERVE_STATUS_LABEL = Options.PRESERVE_STATUS.propertyname + ".value"; private JobConf conf; public void setConf(Configuration conf) { if (conf instanceof JobConf) { this.conf = (JobConf) conf; } else { this.conf = new JobConf(conf); } } public Configuration getConf() { return conf; } public DistCp(Configuration conf) { setConf(conf); } /** * An input/output pair of filenames. */ static class FilePair implements Writable { FileStatus input = new FileStatus(); String output; FilePair() { } FilePair(FileStatus input, String output) { this.input = input; this.output = output; } public void readFields(DataInput in) throws IOException { input.readFields(in); output = Text.readString(in); } public void write(DataOutput out) throws IOException { input.write(out); Text.writeString(out, output); } public String toString() { return input + " : " + output; } } /** * InputFormat of a distcp job responsible for generating splits of the src * file list. */ static class CopyInputFormat implements InputFormat<Text, Text> { /** * Produce splits such that each is no greater than the quotient of the * total size and the number of splits requested. 
* @param job The handle to the JobConf object * @param numSplits Number of splits requested */ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { int cnfiles = job.getInt(SRC_COUNT_LABEL, -1); long cbsize = job.getLong(TOTAL_SIZE_LABEL, -1); String srcfilelist = job.get(SRC_LIST_LABEL, ""); if (cnfiles < 0 || cbsize < 0 || "".equals(srcfilelist)) { throw new RuntimeException("Invalid metadata: #files(" + cnfiles + ") total_size(" + cbsize + ") listuri(" + srcfilelist + ")"); } Path src = new Path(srcfilelist); FileSystem fs = src.getFileSystem(job); FileStatus srcst = fs.getFileStatus(src); ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits); LongWritable key = new LongWritable(); FilePair value = new FilePair(); final long targetsize = cbsize / numSplits; long pos = 0L; long last = 0L; long acc = 0L; long cbrem = srcst.getLen(); SequenceFile.Reader sl = null; try { sl = new SequenceFile.Reader(fs, src, job); for (; sl.next(key, value); last = sl.getPosition()) { // if adding this split would put this split past the target size, // cut the last split and put this next file in the next split. if (acc + key.get() > targetsize && acc != 0) { long splitsize = last - pos; splits.add(new FileSplit(src, pos, splitsize, (String[])null)); cbrem -= splitsize; pos = last; acc = 0L; } acc += key.get(); } } finally { checkAndClose(sl); } if (cbrem != 0) { splits.add(new FileSplit(src, pos, cbrem, (String[])null)); } return splits.toArray(new FileSplit[splits.size()]); } /** * Returns a reader for this split of the src file list. */ public RecordReader<Text, Text> getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException { return new SequenceFileRecordReader<Text, Text>(job, (FileSplit)split); } } /** * FSCopyFilesMapper: The mapper for copying files between FileSystems. 
*/ static class CopyFilesMapper implements Mapper<LongWritable, FilePair, WritableComparable<?>, Text> { // config private int sizeBuf = 128 * 1024; private FileSystem destFileSys = null; private boolean ignoreReadFailures; private boolean preserve_status; private EnumSet<FileAttribute> preseved; private boolean overwrite; private boolean update; private Path destPath = null; private byte[] buffer = null; private JobConf job; // stats private int failcount = 0; private int skipcount = 0; private int copycount = 0; private String getCountString() { return "Copied: " + copycount + " Skipped: " + skipcount + " Failed: " + failcount; } private void updateStatus(Reporter reporter) { reporter.setStatus(getCountString()); } /** * Return true if dst should be replaced by src and the update flag is set. * Right now, this merely checks that the src and dst len are not equal. * This should be improved on once modification times, CRCs, etc. can * be meaningful in this context. * @throws IOException */ private boolean needsUpdate(FileStatus srcstatus, FileSystem dstfs, Path dstpath) throws IOException { return update && !sameFile(srcstatus.getPath().getFileSystem(job), srcstatus, dstfs, dstpath); } private FSDataOutputStream create(Path f, Reporter reporter, FileStatus srcstat) throws IOException { if (destFileSys.exists(f)) { destFileSys.delete(f, false); } if (!preserve_status) { return destFileSys.create(f, true, sizeBuf, reporter); } FsPermission permission = preseved.contains(FileAttribute.PERMISSION)? srcstat.getPermission(): null; short replication = preseved.contains(FileAttribute.REPLICATION)? srcstat.getReplication(): destFileSys.getDefaultReplication(); long blockSize = preseved.contains(FileAttribute.BLOCK_SIZE)? srcstat.getBlockSize(): destFileSys.getDefaultBlockSize(); return destFileSys.create(f, permission, true, sizeBuf, replication, blockSize, reporter); } /** * Copy a file to a destination. 
* @param srcstat src path and metadata * @param dstpath dst path * @param reporter */ private void copy(FileStatus srcstat, Path relativedst, OutputCollector<WritableComparable<?>, Text> outc, Reporter reporter) throws IOException { Path absdst = new Path(destPath, relativedst); int totfiles = job.getInt(SRC_COUNT_LABEL, -1); assert totfiles >= 0 : "Invalid file count " + totfiles; // if a directory, ensure created even if empty if (srcstat.isDir()) { if (destFileSys.exists(absdst)) { if (!destFileSys.getFileStatus(absdst).isDir()) { throw new IOException("Failed to mkdirs: " + absdst+" is a file."); } } else if (!destFileSys.mkdirs(absdst)) { throw new IOException("Failed to mkdirs " + absdst); } // TODO: when modification times can be set, directories should be // emitted to reducers so they might be preserved. Also, mkdirs does // not currently return an error when the directory already exists; // if this changes, all directory work might as well be done in reduce return; } if (destFileSys.exists(absdst) && !overwrite && !needsUpdate(srcstat, destFileSys, absdst)) { outc.collect(null, new Text("SKIP: " + srcstat.getPath())); ++skipcount; reporter.incrCounter(Counter.SKIP, 1); updateStatus(reporter); return; } Path tmpfile = new Path(job.get(TMP_DIR_LABEL), relativedst); long cbcopied = 0L; FSDataInputStream in = null; FSDataOutputStream out = null; try { // open src file in = srcstat.getPath().getFileSystem(job).open(srcstat.getPath()); reporter.incrCounter(Counter.BYTESEXPECTED, srcstat.getLen()); // open tmp file out = create(tmpfile, reporter, srcstat); // copy file for(int cbread; (cbread = in.read(buffer)) >= 0; ) { out.write(buffer, 0, cbread); cbcopied += cbread; reporter.setStatus( String.format("%.2f ", cbcopied*100.0/srcstat.getLen()) + absdst + " [ " + StringUtils.humanReadableInt(cbcopied) + " / " + StringUtils.humanReadableInt(srcstat.getLen()) + " ]"); } } finally { checkAndClose(in); checkAndClose(out); } if (cbcopied != srcstat.getLen()) { throw 
new IOException("File size not matched: copied " + bytesString(cbcopied) + " to tmpfile (=" + tmpfile + ") but expected " + bytesString(srcstat.getLen()) + " from " + srcstat.getPath()); } else { if (totfiles == 1) { // Copying a single file; use dst path provided by user as destination // rather than destination directory, if a file Path dstparent = absdst.getParent(); if (!(destFileSys.exists(dstparent) && destFileSys.getFileStatus(dstparent).isDir())) { absdst = dstparent; } } if (destFileSys.exists(absdst) && destFileSys.getFileStatus(absdst).isDir()) { throw new IOException(absdst + " is a directory"); } if (!destFileSys.mkdirs(absdst.getParent())) { throw new IOException("Failed to craete parent dir: " + absdst.getParent()); } rename(tmpfile, absdst); FileStatus dststat = destFileSys.getFileStatus(absdst); if (dststat.getLen() != srcstat.getLen()) { destFileSys.delete(absdst, false); throw new IOException("File size not matched: copied " + bytesString(dststat.getLen()) + " to dst (=" + absdst + ") but expected " + bytesString(srcstat.getLen()) + " from " + srcstat.getPath()); } updatePermissions(srcstat, dststat); } // report at least once for each file ++copycount; reporter.incrCounter(Counter.BYTESCOPIED, cbcopied); reporter.incrCounter(Counter.COPY, 1); updateStatus(reporter); } /** rename tmp to dst, delete dst if already exists */ private void rename(Path tmp, Path dst) throws IOException { try { if (destFileSys.exists(dst)) { destFileSys.delete(dst, true); } if (!destFileSys.rename(tmp, dst)) { throw new IOException(); } } catch(IOException cause) { throw (IOException)new IOException("Fail to rename tmp file (=" + tmp + ") to destination file (=" + dst + ")").initCause(cause); } } private void updatePermissions(FileStatus src, FileStatus dst ) throws IOException { if (preserve_status) { DistCp.updatePermissions(src, dst, preseved, destFileSys); } } static String bytesString(long b) { return b + " bytes (" + StringUtils.humanReadableInt(b) + ")"; } /** 
Mapper configuration. * Extracts source and destination file system, as well as * top-level paths on source and destination directories. * Gets the named file systems, to be used later in map. */ public void configure(JobConf job) { destPath = new Path(job.get(DST_DIR_LABEL, "/")); try { destFileSys = destPath.getFileSystem(job); } catch (IOException ex) { throw new RuntimeException("Unable to get the named file system.", ex); } sizeBuf = job.getInt("copy.buf.size", 128 * 1024); buffer = new byte[sizeBuf]; ignoreReadFailures = job.getBoolean(Options.IGNORE_READ_FAILURES.propertyname, false); preserve_status = job.getBoolean(Options.PRESERVE_STATUS.propertyname, false); if (preserve_status) { preseved = FileAttribute.parse(job.get(PRESERVE_STATUS_LABEL)); } update = job.getBoolean(Options.UPDATE.propertyname, false); overwrite = !update && job.getBoolean(Options.OVERWRITE.propertyname, false); this.job = job; } /** Map method. Copies one file from source file system to destination. * @param key src len * @param value FilePair (FileStatus src, Path dst) * @param out Log of failed copies * @param reporter */ public void map(LongWritable key, FilePair value, OutputCollector<WritableComparable<?>, Text> out, Reporter reporter) throws IOException { final FileStatus srcstat = value.input; final Path relativedst = new Path(value.output); try { copy(srcstat, relativedst, out, reporter); } catch (IOException e) { ++failcount; reporter.incrCounter(Counter.FAIL, 1); updateStatus(reporter); final String sfailure = "FAIL " + relativedst + " : " + StringUtils.stringifyException(e); out.collect(null, new Text(sfailure)); LOG.info(sfailure); try { for (int i = 0; i < 3; ++i) { try { final Path tmp = new Path(job.get(TMP_DIR_LABEL), relativedst); if (destFileSys.delete(tmp, true)) break; } catch (Throwable ex) { // ignore, we are just cleaning up LOG.debug("Ignoring cleanup exception", ex); } // update status, so we don't get timed out updateStatus(reporter); Thread.sleep(3 * 1000); 
} } catch (InterruptedException inte) { throw (IOException)new IOException().initCause(inte); } } finally { updateStatus(reporter); } } public void close() throws IOException { if (0 == failcount || ignoreReadFailures) { return; } throw new IOException(getCountString()); } } private static List<Path> fetchFileList(Configuration conf, Path srcList) throws IOException { List<Path> result = new ArrayList<Path>(); FileSystem fs = srcList.getFileSystem(conf); BufferedReader input = null; try { input = new BufferedReader(new InputStreamReader(fs.open(srcList))); String line = input.readLine(); while (line != null) { result.add(new Path(line)); line = input.readLine(); } } finally { checkAndClose(input); } return result; } @Deprecated public static void copy(Configuration conf, String srcPath, String destPath, Path logPath, boolean srcAsList, boolean ignoreReadFailures) throws IOException { final Path src = new Path(srcPath); List<Path> tmp = new ArrayList<Path>(); if (srcAsList) { tmp.addAll(fetchFileList(conf, src)); } else { tmp.add(src); } EnumSet<Options> flags = ignoreReadFailures ? EnumSet.of(Options.IGNORE_READ_FAILURES) : EnumSet.noneOf(Options.class); final Path dst = new Path(destPath); copy(conf, new Arguments(tmp, dst, logPath, flags, null, Long.MAX_VALUE, Long.MAX_VALUE, null)); } /** Sanity check for srcPath */ private static void checkSrcPath(Configuration conf, List<Path> srcPaths ) throws IOException { List<IOException> rslt = new ArrayList<IOException>(); for (Path p : srcPaths) { FileSystem fs = p.getFileSystem(conf); if (!fs.exists(p)) { rslt.add(new IOException("Input source " + p + " does not exist.")); } } if (!rslt.isEmpty()) { throw new InvalidInputException(rslt); } } /** * Driver to copy srcPath to destPath depending on required protocol. 
* @param args arguments */ static void copy(final Configuration conf, final Arguments args ) throws IOException { LOG.info("srcPaths=" + args.srcs); LOG.info("destPath=" + args.dst); checkSrcPath(conf, args.srcs); JobConf job = createJobConf(conf); if (args.preservedAttributes != null) { job.set(PRESERVE_STATUS_LABEL, args.preservedAttributes); } if (args.mapredSslConf != null) { job.set("dfs.https.client.keystore.resource", args.mapredSslConf); } //Initialize the mapper try { - setup(conf, job, args); - JobClient.runJob(job); + if (setup(conf, job, args)) { + JobClient.runJob(job); + } finalize(conf, job, args.dst, args.preservedAttributes); } finally { //delete tmp fullyDelete(job.get(TMP_DIR_LABEL), job); //delete jobDirectory fullyDelete(job.get(JOB_DIR_LABEL), job); } } private static void updatePermissions(FileStatus src, FileStatus dst, EnumSet<FileAttribute> preseved, FileSystem destFileSys ) throws IOException { String owner = null; String group = null; if (preseved.contains(FileAttribute.USER) && !src.getOwner().equals(dst.getOwner())) { owner = src.getOwner(); } if (preseved.contains(FileAttribute.GROUP) && !src.getGroup().equals(dst.getGroup())) { group = src.getGroup(); } if (owner != null || group != null) { destFileSys.setOwner(dst.getPath(), owner, group); } if (preseved.contains(FileAttribute.PERMISSION) && !src.getPermission().equals(dst.getPermission())) { destFileSys.setPermission(dst.getPath(), src.getPermission()); } } static private void finalize(Configuration conf, JobConf jobconf, final Path destPath, String presevedAttributes) throws IOException { if (presevedAttributes == null) { return; } EnumSet<FileAttribute> preseved = FileAttribute.parse(presevedAttributes); if (!preseved.contains(FileAttribute.USER) && !preseved.contains(FileAttribute.GROUP) && !preseved.contains(FileAttribute.PERMISSION)) { return; } FileSystem dstfs = destPath.getFileSystem(conf); Path dstdirlist = new Path(jobconf.get(DST_DIR_LIST_LABEL)); SequenceFile.Reader in 
= null; try { in = new SequenceFile.Reader(dstdirlist.getFileSystem(jobconf), dstdirlist, jobconf); Text dsttext = new Text(); FilePair pair = new FilePair(); for(; in.next(dsttext, pair); ) { Path absdst = new Path(destPath, pair.output); updatePermissions(pair.input, dstfs.getFileStatus(absdst), preseved, dstfs); } } finally { checkAndClose(in); } } static private class Arguments { final List<Path> srcs; final Path dst; final Path log; final EnumSet<Options> flags; final String preservedAttributes; final long filelimit; final long sizelimit; final String mapredSslConf; /** * Arguments for distcp * @param srcs List of source paths * @param dst Destination path * @param log Log output directory * @param flags Command-line flags * @param preservedAttributes Preserved attributes * @param filelimit File limit * @param sizelimit Size limit */ Arguments(List<Path> srcs, Path dst, Path log, EnumSet<Options> flags, String preservedAttributes, long filelimit, long sizelimit, String mapredSslConf) { this.srcs = srcs; this.dst = dst; this.log = log; this.flags = flags; this.preservedAttributes = preservedAttributes; this.filelimit = filelimit; this.sizelimit = sizelimit; this.mapredSslConf = mapredSslConf; if (LOG.isTraceEnabled()) { LOG.trace("this = " + this); } } static Arguments valueOf(String[] args, Configuration conf ) throws IOException { List<Path> srcs = new ArrayList<Path>(); Path dst = null; Path log = null; EnumSet<Options> flags = EnumSet.noneOf(Options.class); String presevedAttributes = null; String mapredSslConf = null; long filelimit = Long.MAX_VALUE; long sizelimit = Long.MAX_VALUE; for (int idx = 0; idx < args.length; idx++) { Options[] opt = Options.values(); int i = 0; for(; i < opt.length && !args[idx].startsWith(opt[i].cmd); i++); if (i < opt.length) { flags.add(opt[i]); if (opt[i] == Options.PRESERVE_STATUS) { presevedAttributes = args[idx].substring(2); FileAttribute.parse(presevedAttributes); //validation } else if (opt[i] == Options.FILE_LIMIT) { 
filelimit = Options.FILE_LIMIT.parseLong(args, ++idx); } else if (opt[i] == Options.SIZE_LIMIT) { sizelimit = Options.SIZE_LIMIT.parseLong(args, ++idx); } } else if ("-f".equals(args[idx])) { if (++idx == args.length) { throw new IllegalArgumentException("urilist_uri not specified in -f"); } srcs.addAll(fetchFileList(conf, new Path(args[idx]))); } else if ("-log".equals(args[idx])) { if (++idx == args.length) { throw new IllegalArgumentException("logdir not specified in -log"); } log = new Path(args[idx]); } else if ("-mapredSslConf".equals(args[idx])) { if (++idx == args.length) { throw new IllegalArgumentException("ssl conf file not specified in -mapredSslConf"); } mapredSslConf = args[idx]; } else if ("-m".equals(args[idx])) { if (++idx == args.length) { throw new IllegalArgumentException("num_maps not specified in -m"); } try { conf.setInt(MAX_MAPS_LABEL, Integer.valueOf(args[idx])); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid argument to -m: " + args[idx]); } } else if ('-' == args[idx].codePointAt(0)) { throw new IllegalArgumentException("Invalid switch " + args[idx]); } else if (idx == args.length -1) { dst = new Path(args[idx]); } else { srcs.add(new Path(args[idx])); } } // mandatory command-line parameters if (srcs.isEmpty() || dst == null) { throw new IllegalArgumentException("Missing " + (dst == null ? 
"dst path" : "src")); } // incompatible command-line flags final boolean isOverwrite = flags.contains(Options.OVERWRITE); final boolean isUpdate = flags.contains(Options.UPDATE); final boolean isDelete = flags.contains(Options.DELETE); if (isOverwrite && isUpdate) { throw new IllegalArgumentException("Conflicting overwrite policies"); } if (isDelete && !isOverwrite && !isUpdate) { throw new IllegalArgumentException(Options.DELETE.cmd + " must be specified with " + Options.OVERWRITE + " or " + Options.UPDATE + "."); } return new Arguments(srcs, dst, log, flags, presevedAttributes, filelimit, sizelimit, mapredSslConf); } /** {@inheritDoc} */ public String toString() { return getClass().getName() + "{" + "\n srcs = " + srcs + "\n dst = " + dst + "\n log = " + log + "\n flags = " + flags + "\n preservedAttributes = " + preservedAttributes + "\n filelimit = " + filelimit + "\n sizelimit = " + sizelimit + "\n mapredSslConf = " + mapredSslConf + "\n}"; } } /** * This is the main driver for recursively copying directories * across file systems. It takes at least two cmdline parameters. A source * URL and a destination URL. It then essentially does an "ls -lR" on the * source URL, and writes the output in a round-robin manner to all the map * input files. The mapper actually copies the files allotted to it. The * reduce is empty. 
*/ public int run(String[] args) { try { copy(conf, Arguments.valueOf(args, conf)); return 0; } catch (IllegalArgumentException e) { System.err.println(StringUtils.stringifyException(e) + "\n" + usage); ToolRunner.printGenericCommandUsage(System.err); return -1; } catch (DuplicationException e) { System.err.println(StringUtils.stringifyException(e)); return DuplicationException.ERROR_CODE; } catch (RemoteException e) { final IOException unwrapped = e.unwrapRemoteException( FileNotFoundException.class, AccessControlException.class, QuotaExceededException.class); System.err.println(StringUtils.stringifyException(unwrapped)); return -3; } catch (Exception e) { System.err.println("With failures, global counters are inaccurate; " + "consider running with -i"); System.err.println("Copy failed: " + StringUtils.stringifyException(e)); return -999; } } public static void main(String[] args) throws Exception { JobConf job = new JobConf(DistCp.class); DistCp distcp = new DistCp(job); int res = ToolRunner.run(distcp, args); System.exit(res); } /** * Make a path relative with respect to a root path. * absPath is always assumed to descend from root. * Otherwise returned path is null. */ static String makeRelative(Path root, Path absPath) { if (!absPath.isAbsolute()) { throw new IllegalArgumentException("!absPath.isAbsolute(), absPath=" + absPath); } String p = absPath.toUri().getPath(); StringTokenizer pathTokens = new StringTokenizer(p, "/"); for(StringTokenizer rootTokens = new StringTokenizer( root.toUri().getPath(), "/"); rootTokens.hasMoreTokens(); ) { if (!rootTokens.nextToken().equals(pathTokens.nextToken())) { return null; } } StringBuilder sb = new StringBuilder(); for(; pathTokens.hasMoreTokens(); ) { sb.append(pathTokens.nextToken()); if (pathTokens.hasMoreTokens()) { sb.append(Path.SEPARATOR); } } return sb.length() == 0? ".": sb.toString(); } /** * Calculate how many maps to run. 
* Number of maps is bounded by a minimum of the cumulative size of the * copy / (distcp.bytes.per.map, default BYTES_PER_MAP or -m on the * command line) and at most (distcp.max.map.tasks, default * MAX_MAPS_PER_NODE * nodes in the cluster). * @param totalBytes Count of total bytes for job * @param job The job to configure * @return Count of maps to run. */ private static void setMapCount(long totalBytes, JobConf job) throws IOException { int numMaps = (int)(totalBytes / job.getLong(BYTES_PER_MAP_LABEL, BYTES_PER_MAP)); numMaps = Math.min(numMaps, job.getInt(MAX_MAPS_LABEL, MAX_MAPS_PER_NODE * new JobClient(job).getClusterStatus().getTaskTrackers())); job.setNumMapTasks(Math.max(numMaps, 1)); } /** Fully delete dir */ static void fullyDelete(String dir, Configuration conf) throws IOException { if (dir != null) { Path tmp = new Path(dir); tmp.getFileSystem(conf).delete(tmp, true); } } //Job configuration private static JobConf createJobConf(Configuration conf) { JobConf jobconf = new JobConf(conf, DistCp.class); jobconf.setJobName(NAME); // turn off speculative execution, because DFS doesn't handle // multiple writers to the same file. jobconf.setMapSpeculativeExecution(false); jobconf.setInputFormat(CopyInputFormat.class); jobconf.setOutputKeyClass(Text.class); jobconf.setOutputValueClass(Text.class); jobconf.setMapperClass(CopyFilesMapper.class); jobconf.setNumReduceTasks(0); return jobconf; } private static final Random RANDOM = new Random(); public static String getRandomId() { return Integer.toString(RANDOM.nextInt(Integer.MAX_VALUE), 36); } /** * Initialize DFSCopyFileMapper specific job-configuration. * @param conf : The dfs/mapred configuration. * @param jobConf : The handle to the jobConf object to be initialized. * @param args Arguments + * @return true if it is necessary to launch a job. 
*/ - private static void setup(Configuration conf, JobConf jobConf, + private static boolean setup(Configuration conf, JobConf jobConf, final Arguments args) throws IOException { jobConf.set(DST_DIR_LABEL, args.dst.toUri().toString()); //set boolean values final boolean update = args.flags.contains(Options.UPDATE); final boolean overwrite = !update && args.flags.contains(Options.OVERWRITE); jobConf.setBoolean(Options.UPDATE.propertyname, update); jobConf.setBoolean(Options.OVERWRITE.propertyname, overwrite); jobConf.setBoolean(Options.IGNORE_READ_FAILURES.propertyname, args.flags.contains(Options.IGNORE_READ_FAILURES)); jobConf.setBoolean(Options.PRESERVE_STATUS.propertyname, args.flags.contains(Options.PRESERVE_STATUS)); final String randomId = getRandomId(); JobClient jClient = new JobClient(jobConf); Path jobDirectory = new Path(jClient.getSystemDir(), NAME + "_" + randomId); jobConf.set(JOB_DIR_LABEL, jobDirectory.toString()); FileSystem dstfs = args.dst.getFileSystem(conf); boolean dstExists = dstfs.exists(args.dst); boolean dstIsDir = false; if (dstExists) { dstIsDir = dstfs.getFileStatus(args.dst).isDir(); } // default logPath Path logPath = args.log; if (logPath == null) { String filename = "_distcp_logs_" + randomId; if (!dstExists || !dstIsDir) { Path parent = args.dst.getParent(); if (!dstfs.exists(parent)) { dstfs.mkdirs(parent); } logPath = new Path(parent, filename); } else { logPath = new Path(args.dst, filename); } } FileOutputFormat.setOutputPath(jobConf, logPath); // create src list, dst list FileSystem jobfs = jobDirectory.getFileSystem(jobConf); Path srcfilelist = new Path(jobDirectory, "_distcp_src_files"); jobConf.set(SRC_LIST_LABEL, srcfilelist.toString()); SequenceFile.Writer src_writer = SequenceFile.createWriter(jobfs, jobConf, srcfilelist, LongWritable.class, FilePair.class, SequenceFile.CompressionType.NONE); Path dstfilelist = new Path(jobDirectory, "_distcp_dst_files"); SequenceFile.Writer dst_writer = SequenceFile.createWriter(jobfs, 
jobConf, dstfilelist, Text.class, Text.class, SequenceFile.CompressionType.NONE); Path dstdirlist = new Path(jobDirectory, "_distcp_dst_dirs"); jobConf.set(DST_DIR_LIST_LABEL, dstdirlist.toString()); SequenceFile.Writer dir_writer = SequenceFile.createWriter(jobfs, jobConf, dstdirlist, Text.class, FilePair.class, SequenceFile.CompressionType.NONE); // handle the case where the destination directory doesn't exist // and we've only a single src directory OR we're updating/overwriting // the contents of the destination directory. final boolean special = (args.srcs.size() == 1 && !dstExists) || update || overwrite; int srcCount = 0, cnsyncf = 0, dirsyn = 0; long fileCount = 0L, byteCount = 0L, cbsyncs = 0L; try { for(Iterator<Path> srcItr = args.srcs.iterator(); srcItr.hasNext(); ) { final Path src = srcItr.next(); FileSystem srcfs = src.getFileSystem(conf); FileStatus srcfilestat = srcfs.getFileStatus(src); Path root = special && srcfilestat.isDir()? src: src.getParent(); if (srcfilestat.isDir()) { ++srcCount; } Stack<FileStatus> pathstack = new Stack<FileStatus>(); for(pathstack.push(srcfilestat); !pathstack.empty(); ) { FileStatus cur = pathstack.pop(); FileStatus[] children = srcfs.listStatus(cur.getPath()); for(int i = 0; i < children.length; i++) { boolean skipfile = false; final FileStatus child = children[i]; final String dst = makeRelative(root, child.getPath()); ++srcCount; if (child.isDir()) { pathstack.push(child); } else { //skip file if the src and the dst files are the same. 
skipfile = update && sameFile(srcfs, child, dstfs, new Path(args.dst, dst)); //skip file if it exceed file limit or size limit skipfile |= fileCount == args.filelimit || byteCount + child.getLen() > args.sizelimit; if (!skipfile) { ++fileCount; byteCount += child.getLen(); if (LOG.isTraceEnabled()) { LOG.trace("adding file " + child.getPath()); } ++cnsyncf; cbsyncs += child.getLen(); if (cnsyncf > SYNC_FILE_MAX || cbsyncs > BYTES_PER_MAP) { src_writer.sync(); dst_writer.sync(); cnsyncf = 0; cbsyncs = 0L; } } } if (!skipfile) { src_writer.append(new LongWritable(child.isDir()? 0: child.getLen()), new FilePair(child, dst)); } dst_writer.append(new Text(dst), new Text(child.getPath().toString())); } if (cur.isDir()) { String dst = makeRelative(root, cur.getPath()); dir_writer.append(new Text(dst), new FilePair(cur, dst)); if (++dirsyn > SYNC_FILE_MAX) { dirsyn = 0; dir_writer.sync(); } } } } } finally { checkAndClose(src_writer); checkAndClose(dst_writer); checkAndClose(dir_writer); } FileStatus dststatus = null; try { dststatus = dstfs.getFileStatus(args.dst); } catch(FileNotFoundException fnfe) { LOG.info(args.dst + " does not exist."); } // create dest path dir if copying > 1 file if (dststatus == null) { if (srcCount > 1 && !dstfs.mkdirs(args.dst)) { throw new IOException("Failed to create" + args.dst); } } final Path sorted = new Path(jobDirectory, "_distcp_sorted"); checkDuplication(jobfs, dstfilelist, sorted, conf); if (dststatus != null && args.flags.contains(Options.DELETE)) { deleteNonexisting(dstfs, dststatus, sorted, jobfs, jobDirectory, jobConf, conf); } Path tmpDir = new Path( (dstExists && !dstIsDir) || (!dstExists && srcCount == 1)? 
args.dst.getParent(): args.dst, "_distcp_tmp_" + randomId); jobConf.set(TMP_DIR_LABEL, tmpDir.toUri().toString()); - LOG.info("srcCount=" + srcCount); + LOG.info("sourcePathsCount=" + srcCount); + LOG.info("filesToCopyCount=" + fileCount); + LOG.info("bytesToCopyCount=" + StringUtils.humanReadableInt(byteCount)); jobConf.setInt(SRC_COUNT_LABEL, srcCount); jobConf.setLong(TOTAL_SIZE_LABEL, byteCount); setMapCount(byteCount, jobConf); + return fileCount > 0; } /** * Check whether the contents of src and dst are the same. * * Return false if dstpath does not exist * * If the files have different sizes, return false. * * If the files have the same sizes, the file checksums will be compared. * * When file checksum is not supported in any of file systems, * two files are considered as the same if they have the same size. */ static private boolean sameFile(FileSystem srcfs, FileStatus srcstatus, FileSystem dstfs, Path dstpath) throws IOException { FileStatus dststatus; try { dststatus = dstfs.getFileStatus(dstpath); } catch(FileNotFoundException fnfe) { return false; } //same length? if (srcstatus.getLen() != dststatus.getLen()) { return false; } //get src checksum final FileChecksum srccs; try { srccs = srcfs.getFileChecksum(srcstatus.getPath()); } catch(FileNotFoundException fnfe) { /* * Two possible cases: * (1) src existed once but was deleted between the time period that * srcstatus was obtained and the try block above. * (2) srcfs does not support file checksum and (incorrectly) throws * FNFE, e.g. some previous versions of HftpFileSystem. * For case (1), it is okay to return true since src was already deleted. * For case (2), true should be returned. */ return true; } //compare checksums try { final FileChecksum dstcs = dstfs.getFileChecksum(dststatus.getPath()); //return true if checksum is not supported //(i.e. 
some of the checksums is null) return srccs == null || dstcs == null || srccs.equals(dstcs); } catch(FileNotFoundException fnfe) { return false; } } /** Delete the dst files/dirs which do not exist in src */ static private void deleteNonexisting( FileSystem dstfs, FileStatus dstroot, Path dstsorted, FileSystem jobfs, Path jobdir, JobConf jobconf, Configuration conf ) throws IOException { if (!dstroot.isDir()) { throw new IOException("dst must be a directory when option " + Options.DELETE.cmd + " is set, but dst (= " + dstroot.getPath() + ") is not a directory."); } //write dst lsr results final Path dstlsr = new Path(jobdir, "_distcp_dst_lsr"); final SequenceFile.Writer writer = SequenceFile.createWriter(jobfs, jobconf, dstlsr, Text.class, FileStatus.class, SequenceFile.CompressionType.NONE); try { //do lsr to get all file statuses in dstroot final Stack<FileStatus> lsrstack = new Stack<FileStatus>(); for(lsrstack.push(dstroot); !lsrstack.isEmpty(); ) { final FileStatus status = lsrstack.pop(); if (status.isDir()) { for(FileStatus child : dstfs.listStatus(status.getPath())) { String relative = makeRelative(dstroot.getPath(), child.getPath()); writer.append(new Text(relative), child); lsrstack.push(child); } } } } finally { checkAndClose(writer); } //sort lsr results final Path sortedlsr = new Path(jobdir, "_distcp_dst_lsr_sorted"); SequenceFile.Sorter sorter = new SequenceFile.Sorter(jobfs, new Text.Comparator(), Text.class, FileStatus.class, jobconf); sorter.sort(dstlsr, sortedlsr); //compare lsr list and dst list SequenceFile.Reader lsrin = null; SequenceFile.Reader dstin = null; try { lsrin = new SequenceFile.Reader(jobfs, sortedlsr, jobconf); dstin = new SequenceFile.Reader(jobfs, dstsorted, jobconf); //compare sorted lsr list and sorted dst list final Text lsrpath = new Text(); final FileStatus lsrstatus = new FileStatus(); final Text dstpath = new Text(); final Text dstfrom = new Text(); final FsShell shell = new FsShell(conf); final String[] shellargs = 
{"-rmr", null}; boolean hasnext = dstin.next(dstpath, dstfrom); for(; lsrin.next(lsrpath, lsrstatus); ) { int dst_cmp_lsr = dstpath.compareTo(lsrpath); for(; hasnext && dst_cmp_lsr < 0; ) { hasnext = dstin.next(dstpath, dstfrom); dst_cmp_lsr = dstpath.compareTo(lsrpath); } if (dst_cmp_lsr == 0) { //lsrpath exists in dst, skip it hasnext = dstin.next(dstpath, dstfrom); } else { //lsrpath does not exist, delete it String s = new Path(dstroot.getPath(), lsrpath.toString()).toString(); if (shellargs[1] == null || !isAncestorPath(shellargs[1], s)) { shellargs[1] = s; int r = 0; try { r = shell.run(shellargs); } catch(Exception e) { throw new IOException("Exception from shell.", e); } if (r != 0) { throw new IOException("\"" + shellargs[0] + " " + shellargs[1] + "\" returns non-zero value " + r); } } } } } finally { checkAndClose(lsrin); checkAndClose(dstin); } } //is x an ancestor path of y? static private boolean isAncestorPath(String x, String y) { if (!y.startsWith(x)) { return false; } final int len = x.length(); return y.length() == len || y.charAt(len) == Path.SEPARATOR_CHAR; } /** Check whether the file list have duplication. 
*/ static private void checkDuplication(FileSystem fs, Path file, Path sorted, Configuration conf) throws IOException { SequenceFile.Reader in = null; try { SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs, new Text.Comparator(), Text.class, Text.class, conf); sorter.sort(file, sorted); in = new SequenceFile.Reader(fs, sorted, conf); Text prevdst = null, curdst = new Text(); Text prevsrc = null, cursrc = new Text(); for(; in.next(curdst, cursrc); ) { if (prevdst != null && curdst.equals(prevdst)) { throw new DuplicationException( "Invalid input, there are duplicated files in the sources: " + prevsrc + ", " + cursrc); } prevdst = curdst; curdst = new Text(); prevsrc = cursrc; cursrc = new Text(); } } finally { checkAndClose(in); } } static boolean checkAndClose(java.io.Closeable io) { if (io != null) { try { io.close(); } catch(IOException ioe) { LOG.warn(StringUtils.stringifyException(ioe)); return false; } } return true; } /** An exception class for duplicated source files. */ public static class DuplicationException extends IOException { private static final long serialVersionUID = 1L; /** Error code for this exception */ public static final int ERROR_CODE = -2; DuplicationException(String message) {super(message);} } }
jaxlaw/hadoop-common
4005da3d0d343196ca8353f99148f9d9be56bab2
HDFS:761 from https://issues.apache.org/jira/secure/attachment/12424549/hdfs-761.1.rel20.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index 9b468f6..a0355fa 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,383 +1,386 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3092118005: + HDFS-761. Fix failure to process rename operation from edits log + due to quota verification. (suresh) + HDFS-457. Better handling of volume failure in Data Node storage, This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. Contributed by Erik Steffl MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. 
(Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. 
(Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. 
(Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. 
yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. 
Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. 
HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. 
Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. 
(Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 68056bc..fad88d8 100644 --- a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -499,844 +499,852 @@ class FSDirectory implements FSConstants, Closeable { } return fileBlocks; } /** * Get the blocksize of a file * @param filename the filename * @return the number of bytes * @throws IOException if it is a directory or does not exist. */ long getPreferredBlockSize(String filename) throws IOException { synchronized (rootDir) { INode fileNode = rootDir.getNode(filename); if (fileNode == null) { throw new IOException("Unknown file: " + filename); } if (fileNode.isDirectory()) { throw new IOException("Getting block size of a directory: " + filename); } return ((INodeFile)fileNode).getPreferredBlockSize(); } } boolean exists(String src) { src = normalizePath(src); synchronized(rootDir) { INode inode = rootDir.getNode(src); if (inode == null) { return false; } return inode.isDirectory()? 
true: ((INodeFile)inode).getBlocks() != null; } } void setPermission(String src, FsPermission permission ) throws IOException { unprotectedSetPermission(src, permission); fsImage.getEditLog().logSetPermissions(src, permission); } void unprotectedSetPermission(String src, FsPermission permissions) throws FileNotFoundException { synchronized(rootDir) { INode inode = rootDir.getNode(src); if(inode == null) throw new FileNotFoundException("File does not exist: " + src); inode.setPermission(permissions); } } void setOwner(String src, String username, String groupname ) throws IOException { unprotectedSetOwner(src, username, groupname); fsImage.getEditLog().logSetOwner(src, username, groupname); } void unprotectedSetOwner(String src, String username, String groupname) throws FileNotFoundException { synchronized(rootDir) { INode inode = rootDir.getNode(src); if(inode == null) throw new FileNotFoundException("File does not exist: " + src); if (username != null) { inode.setUser(username); } if (groupname != null) { inode.setGroup(groupname); } } } /** * Remove the file from management, return blocks */ INode delete(String src) { if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: "+src); } waitForReady(); long now = FSNamesystem.now(); INode deletedNode = unprotectedDelete(src, now); if (deletedNode != null) { fsImage.getEditLog().logDelete(src, now); } return deletedNode; } /** Return if a directory is empty or not **/ boolean isDirEmpty(String src) { boolean dirNotEmpty = true; if (!isDir(src)) { return true; } synchronized(rootDir) { INode targetNode = rootDir.getNode(src); assert targetNode != null : "should be taken care in isDir() above"; if (((INodeDirectory)targetNode).getChildren().size() != 0) { dirNotEmpty = false; } } return dirNotEmpty; } /** * Delete a path from the name space * Update the count at each ancestor directory with quota * @param src a string representation of a path to an inode * @param 
modificationTime the time the inode is removed * @param deletedBlocks the place holder for the blocks to be removed * @return if the deletion succeeds */ INode unprotectedDelete(String src, long modificationTime) { src = normalizePath(src); synchronized (rootDir) { INode[] inodes = rootDir.getExistingPathINodes(src); INode targetNode = inodes[inodes.length-1]; if (targetNode == null) { // non-existent src NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: " +"failed to remove "+src+" because it does not exist"); return null; } else if (inodes.length == 1) { // src is the root NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " + "failed to remove " + src + " because the root is not allowed to be deleted"); return null; } else { try { // Remove the node from the namespace removeChild(inodes, inodes.length-1); // set the parent's modification time inodes[inodes.length-2].setModificationTime(modificationTime); // GC all the blocks underneath the node. ArrayList<Block> v = new ArrayList<Block>(); int filesRemoved = targetNode.collectSubtreeBlocksAndClear(v); incrDeletedFileCount(filesRemoved); namesystem.removePathAndBlocks(src, v); if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: " +src+" is removed"); } return targetNode; } catch (IOException e) { NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " + "failed to remove " + src + " because " + e.getMessage()); return null; } } } } /** * Replaces the specified inode with the specified one. 
*/ void replaceNode(String path, INodeFile oldnode, INodeFile newnode) throws IOException { replaceNode(path, oldnode, newnode, true); } /** * @see #replaceNode(String, INodeFile, INodeFile) */ private void replaceNode(String path, INodeFile oldnode, INodeFile newnode, boolean updateDiskspace) throws IOException { synchronized (rootDir) { long dsOld = oldnode.diskspaceConsumed(); // // Remove the node from the namespace // if (!oldnode.removeNode()) { NameNode.stateChangeLog.warn("DIR* FSDirectory.replaceNode: " + "failed to remove " + path); throw new IOException("FSDirectory.replaceNode: " + "failed to remove " + path); } /* Currently oldnode and newnode are assumed to contain the same * blocks. Otherwise, blocks need to be removed from the blocksMap. */ rootDir.addNode(path, newnode); //check if disk space needs to be updated. long dsNew = 0; if (updateDiskspace && (dsNew = newnode.diskspaceConsumed()) != dsOld) { try { updateSpaceConsumed(path, 0, dsNew-dsOld); } catch (QuotaExceededException e) { // undo replaceNode(path, newnode, oldnode, false); throw e; } } int index = 0; for (Block b : newnode.getBlocks()) { BlockInfo info = namesystem.blocksMap.addINode(b, newnode); newnode.setBlock(index, info); // inode refers to the block in BlocksMap index++; } } } /** * Get a listing of files given path 'src' * * This function is admittedly very inefficient right now. We'll * make it better later. */ FileStatus[] getListing(String src) { String srcs = normalizePath(src); synchronized (rootDir) { INode targetNode = rootDir.getNode(srcs); if (targetNode == null) return null; if (!targetNode.isDirectory()) { return new FileStatus[]{createFileStatus(srcs, targetNode)}; } List<INode> contents = ((INodeDirectory)targetNode).getChildren(); FileStatus listing[] = new FileStatus[contents.size()]; if(! 
srcs.endsWith(Path.SEPARATOR)) srcs += Path.SEPARATOR; int i = 0; for (INode cur : contents) { listing[i] = createFileStatus(srcs+cur.getLocalName(), cur); i++; } return listing; } } /** Get the file info for a specific file. * @param src The string representation of the path to the file * @return object containing information regarding the file * or null if file not found */ FileStatus getFileInfo(String src) { String srcs = normalizePath(src); synchronized (rootDir) { INode targetNode = rootDir.getNode(srcs); if (targetNode == null) { return null; } else { return createFileStatus(srcs, targetNode); } } } /** * Get the blocks associated with the file. */ Block[] getFileBlocks(String src) { waitForReady(); synchronized (rootDir) { INode targetNode = rootDir.getNode(src); if (targetNode == null) return null; if(targetNode.isDirectory()) return null; return ((INodeFile)targetNode).getBlocks(); } } /** * Get {@link INode} associated with the file. */ INodeFile getFileINode(String src) { synchronized (rootDir) { INode inode = rootDir.getNode(src); if (inode == null || inode.isDirectory()) return null; return (INodeFile)inode; } } /** * Retrieve the existing INodes along the given path. * * @param path the path to explore * @return INodes array containing the existing INodes in the order they * appear when following the path from the root INode to the * deepest INodes. 
The array size will be the number of expected * components in the path, and non existing components will be * filled with null * * @see INodeDirectory#getExistingPathINodes(byte[][], INode[]) */ INode[] getExistingPathINodes(String path) { synchronized (rootDir){ return rootDir.getExistingPathINodes(path); } } /** * Check whether the filepath could be created */ boolean isValidToCreate(String src) { String srcs = normalizePath(src); synchronized (rootDir) { if (srcs.startsWith("/") && !srcs.endsWith("/") && rootDir.getNode(srcs) == null) { return true; } else { return false; } } } /** * Check whether the path specifies a directory */ boolean isDir(String src) { synchronized (rootDir) { INode node = rootDir.getNode(normalizePath(src)); return node != null && node.isDirectory(); } } /** Updates namespace and diskspace consumed for all * directories until the parent directory of file represented by path. * * @param path path for the file. * @param nsDelta the delta change of namespace * @param dsDelta the delta change of diskspace * @throws QuotaExceededException if the new count violates any quota limit * @throws FileNotFound if path does not exist. 
*/ void updateSpaceConsumed(String path, long nsDelta, long dsDelta) throws QuotaExceededException, FileNotFoundException { synchronized (rootDir) { INode[] inodes = rootDir.getExistingPathINodes(path); int len = inodes.length; if (inodes[len - 1] == null) { throw new FileNotFoundException(path + " does not exist under rootDir."); } updateCount(inodes, len-1, nsDelta, dsDelta, true); } } /** update count of each inode with quota * * @param inodes an array of inodes on a path * @param numOfINodes the number of inodes to update starting from index 0 * @param nsDelta the delta change of namespace * @param dsDelta the delta change of diskspace * @param checkQuota if true then check if quota is exceeded * @throws QuotaExceededException if the new count violates any quota limit */ private void updateCount(INode[] inodes, int numOfINodes, long nsDelta, long dsDelta, boolean checkQuota) throws QuotaExceededException { if (!ready) { //still intializing. do not check or update quotas. return; } if (numOfINodes>inodes.length) { numOfINodes = inodes.length; } if (checkQuota) { verifyQuota(inodes, numOfINodes, nsDelta, dsDelta, null); } for(int i = 0; i < numOfINodes; i++) { if (inodes[i].isQuotaSet()) { // a directory with quota INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i]; node.updateNumItemsInTree(nsDelta, dsDelta); } } } /** * update quota of each inode and check to see if quota is exceeded. 
* See {@link #updateCount(INode[], int, long, long, boolean)} */ private void updateCountNoQuotaCheck(INode[] inodes, int numOfINodes, long nsDelta, long dsDelta) { try { updateCount(inodes, numOfINodes, nsDelta, dsDelta, false); } catch (QuotaExceededException e) { NameNode.LOG.warn("FSDirectory.updateCountNoQuotaCheck - unexpected ", e); } } /** Return the name of the path represented by inodes at [0, pos] */ private static String getFullPathName(INode[] inodes, int pos) { StringBuilder fullPathName = new StringBuilder(); for (int i=1; i<=pos; i++) { fullPathName.append(Path.SEPARATOR_CHAR).append(inodes[i].getLocalName()); } return fullPathName.toString(); } /** * Create a directory * If ancestor directories do not exist, automatically create them. * @param src string representation of the path to the directory * @param permissions the permission of the directory * @param inheritPermission if the permission of the directory should inherit * from its parent or not. The automatically created * ones always inherit its permission from its parent * @param now creation time * @return true if the operation succeeds false otherwise * @throws FileNotFoundException if an ancestor or itself is a file * @throws QuotaExceededException if directory creation violates * any quota limit */ boolean mkdirs(String src, PermissionStatus permissions, boolean inheritPermission, long now) throws FileNotFoundException, QuotaExceededException { src = normalizePath(src); String[] names = INode.getPathNames(src); byte[][] components = INode.getPathComponents(names); INode[] inodes = new INode[components.length]; synchronized(rootDir) { rootDir.getExistingPathINodes(components, inodes); // find the index of the first null in inodes[] StringBuilder pathbuilder = new StringBuilder(); int i = 1; for(; i < inodes.length && inodes[i] != null; i++) { pathbuilder.append(Path.SEPARATOR + names[i]); if (!inodes[i].isDirectory()) { throw new FileNotFoundException("Parent path is not a directory: " + 
pathbuilder); } } // create directories beginning from the first null index for(; i < inodes.length; i++) { pathbuilder.append(Path.SEPARATOR + names[i]); String cur = pathbuilder.toString(); unprotectedMkdir(inodes, i, components[i], permissions, inheritPermission || i != components.length-1, now); if (inodes[i] == null) { return false; } // Directory creation also count towards FilesCreated // to match count of files_deleted metric. if (namesystem != null) NameNode.getNameNodeMetrics().numFilesCreated.inc(); fsImage.getEditLog().logMkDir(cur, inodes[i]); NameNode.stateChangeLog.debug( "DIR* FSDirectory.mkdirs: created directory " + cur); } } return true; } /** */ INode unprotectedMkdir(String src, PermissionStatus permissions, long timestamp) throws QuotaExceededException { byte[][] components = INode.getPathComponents(src); INode[] inodes = new INode[components.length]; synchronized (rootDir) { rootDir.getExistingPathINodes(components, inodes); unprotectedMkdir(inodes, inodes.length-1, components[inodes.length-1], permissions, false, timestamp); return inodes[inodes.length-1]; } } /** create a directory at index pos. * The parent path to the directory is at [0, pos-1]. * All ancestors exist. Newly created one stored at index pos. */ private void unprotectedMkdir(INode[] inodes, int pos, byte[] name, PermissionStatus permission, boolean inheritPermission, long timestamp) throws QuotaExceededException { inodes[pos] = addChild(inodes, pos, new INodeDirectory(name, permission, timestamp), -1, inheritPermission ); } /** Add a node child to the namespace. The full path name of the node is src. * childDiskspace should be -1, if unknown. 
* QuotaExceededException is thrown if it violates quota limit */ private <T extends INode> T addNode(String src, T child, long childDiskspace, boolean inheritPermission) throws QuotaExceededException { byte[][] components = INode.getPathComponents(src); child.setLocalName(components[components.length-1]); INode[] inodes = new INode[components.length]; synchronized (rootDir) { rootDir.getExistingPathINodes(components, inodes); return addChild(inodes, inodes.length-1, child, childDiskspace, inheritPermission); } } /** * Verify quota for adding or moving a new INode with required * namespace and diskspace to a given position. * * @param inodes INodes corresponding to a path * @param pos position where a new INode will be added * @param nsDelta needed namespace * @param dsDelta needed diskspace * @param commonAncestor Last node in inodes array that is a common ancestor * for a INode that is being moved from one location to the other. * Pass null if a node is not being moved. * @throws QuotaExceededException if quota limit is exceeded. */ private void verifyQuota(INode[] inodes, int pos, long nsDelta, long dsDelta, INode commonAncestor) throws QuotaExceededException { + if (!ready) { + // Do not check quota if edits log is still being processed + return; + } if (pos>inodes.length) { pos = inodes.length; } int i = pos - 1; try { // check existing components in the path for(; i >= 0; i--) { if (commonAncestor == inodes[i]) { // Moving an existing node. Stop checking for quota when common // ancestor is reached return; } if (inodes[i].isQuotaSet()) { // a directory with quota INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i]; node.verifyQuota(nsDelta, dsDelta); } } } catch (QuotaExceededException e) { e.setPathName(getFullPathName(inodes, i)); throw e; } } /** * Verify quota for rename operation where srcInodes[srcInodes.length-1] moves * dstInodes[dstInodes.length-1] * * @param srcInodes directory from where node is being moved. 
* @param dstInodes directory to where node is moved to. * @throws QuotaExceededException if quota limit is exceeded. */ private void verifyQuotaForRename(INode[] srcInodes, INode[]dstInodes) throws QuotaExceededException { + if (!ready) { + // Do not check quota if edits log is still being processed + return; + } INode srcInode = srcInodes[srcInodes.length - 1]; INode commonAncestor = null; for(int i =0;srcInodes[i] == dstInodes[i]; i++) { commonAncestor = srcInodes[i]; } INode.DirCounts counts = new INode.DirCounts(); srcInode.spaceConsumedInTree(counts); verifyQuota(dstInodes, dstInodes.length - 1, counts.getNsCount(), counts.getDsCount(), commonAncestor); } /** Add a node child to the inodes at index pos. * Its ancestors are stored at [0, pos-1]. * QuotaExceededException is thrown if it violates quota limit */ private <T extends INode> T addChild(INode[] pathComponents, int pos, T child, long childDiskspace, boolean inheritPermission, boolean checkQuota) throws QuotaExceededException { INode.DirCounts counts = new INode.DirCounts(); child.spaceConsumedInTree(counts); if (childDiskspace < 0) { childDiskspace = counts.getDsCount(); } updateCount(pathComponents, pos, counts.getNsCount(), childDiskspace, checkQuota); T addedNode = ((INodeDirectory)pathComponents[pos-1]).addChild( child, inheritPermission); if (addedNode == null) { updateCount(pathComponents, pos, -counts.getNsCount(), -childDiskspace, true); } return addedNode; } private <T extends INode> T addChild(INode[] pathComponents, int pos, T child, long childDiskspace, boolean inheritPermission) throws QuotaExceededException { return addChild(pathComponents, pos, child, childDiskspace, inheritPermission, true); } private <T extends INode> T addChildNoQuotaCheck(INode[] pathComponents, int pos, T child, long childDiskspace, boolean inheritPermission) { T inode = null; try { inode = addChild(pathComponents, pos, child, childDiskspace, inheritPermission, false); } catch (QuotaExceededException e) { 
NameNode.LOG.warn("FSDirectory.addChildNoQuotaCheck - unexpected", e); } return inode; } /** Remove an inode at index pos from the namespace. * Its ancestors are stored at [0, pos-1]. * Count of each ancestor with quota is also updated. * Return the removed node; null if the removal fails. */ private INode removeChild(INode[] pathComponents, int pos) { INode removedNode = ((INodeDirectory)pathComponents[pos-1]).removeChild(pathComponents[pos]); if (removedNode != null) { INode.DirCounts counts = new INode.DirCounts(); removedNode.spaceConsumedInTree(counts); updateCountNoQuotaCheck(pathComponents, pos, -counts.getNsCount(), -counts.getDsCount()); } return removedNode; } /** */ String normalizePath(String src) { if (src.length() > 1 && src.endsWith("/")) { src = src.substring(0, src.length() - 1); } return src; } ContentSummary getContentSummary(String src) throws IOException { String srcs = normalizePath(src); synchronized (rootDir) { INode targetNode = rootDir.getNode(srcs); if (targetNode == null) { throw new FileNotFoundException("File does not exist: " + srcs); } else { return targetNode.computeContentSummary(); } } } /** Update the count of each directory with quota in the namespace * A directory's count is defined as the total number inodes in the tree * rooted at the directory. * * This is an update of existing state of the filesystem and does not * throw QuotaExceededException. */ void updateCountForINodeWithQuota() { updateCountForINodeWithQuota(rootDir, new INode.DirCounts(), new ArrayList<INode>(50)); } /** * Update the count of the directory if it has a quota and return the count * * This does not throw a QuotaExceededException. This is just an update * of of existing state and throwing QuotaExceededException does not help * with fixing the state, if there is a problem. 
* * @param dir the root of the tree that represents the directory * @param counters counters for name space and disk space * @param nodesInPath INodes for the each of components in the path. * @return the size of the tree */ private static void updateCountForINodeWithQuota(INodeDirectory dir, INode.DirCounts counts, ArrayList<INode> nodesInPath) { long parentNamespace = counts.nsCount; long parentDiskspace = counts.dsCount; counts.nsCount = 1L;//for self. should not call node.spaceConsumedInTree() counts.dsCount = 0L; /* We don't need nodesInPath if we could use 'parent' field in * INode. using 'parent' is not currently recommended. */ nodesInPath.add(dir); for (INode child : dir.getChildren()) { if (child.isDirectory()) { updateCountForINodeWithQuota((INodeDirectory)child, counts, nodesInPath); } else { // reduce recursive calls counts.nsCount += 1; counts.dsCount += ((INodeFile)child).diskspaceConsumed(); } } if (dir.isQuotaSet()) { ((INodeDirectoryWithQuota)dir).setSpaceConsumed(counts.nsCount, counts.dsCount); // check if quota is violated for some reason. if ((dir.getNsQuota() >= 0 && counts.nsCount > dir.getNsQuota()) || (dir.getDsQuota() >= 0 && counts.dsCount > dir.getDsQuota())) { // can only happen because of a software bug. the bug should be fixed. StringBuilder path = new StringBuilder(512); for (INode n : nodesInPath) { path.append('/'); path.append(n.getLocalName()); } NameNode.LOG.warn("Quota violation in image for " + path + " (Namespace quota : " + dir.getNsQuota() + " consumed : " + counts.nsCount + ")" + " (Diskspace quota : " + dir.getDsQuota() + " consumed : " + counts.dsCount + ")."); } } // pop nodesInPath.remove(nodesInPath.size()-1); counts.nsCount += parentNamespace; counts.dsCount += parentDiskspace; } /** * See {@link ClientProtocol#setQuota(String, long, long)} for the contract. * Sets quota for for a directory. * @returns INodeDirectory if any of the quotas have changed. null other wise. 
* @throws FileNotFoundException if the path does not exist or is a file * @throws QuotaExceededException if the directory tree size is * greater than the given quota */ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota) throws FileNotFoundException, QuotaExceededException { // sanity check if ((nsQuota < 0 && nsQuota != FSConstants.QUOTA_DONT_SET && nsQuota < FSConstants.QUOTA_RESET) || (dsQuota < 0 && dsQuota != FSConstants.QUOTA_DONT_SET && dsQuota < FSConstants.QUOTA_RESET)) { throw new IllegalArgumentException("Illegal value for nsQuota or " + "dsQuota : " + nsQuota + " and " + dsQuota); } String srcs = normalizePath(src); INode[] inodes = rootDir.getExistingPathINodes(src); INode targetNode = inodes[inodes.length-1]; if (targetNode == null) { throw new FileNotFoundException("Directory does not exist: " + srcs); } else if (!targetNode.isDirectory()) { throw new FileNotFoundException("Cannot set quota on a file: " + srcs); } else { // a directory inode INodeDirectory dirNode = (INodeDirectory)targetNode; long oldNsQuota = dirNode.getNsQuota(); long oldDsQuota = dirNode.getDsQuota(); if (nsQuota == FSConstants.QUOTA_DONT_SET) { nsQuota = oldNsQuota; } if (dsQuota == FSConstants.QUOTA_DONT_SET) { dsQuota = oldDsQuota; } if (dirNode instanceof INodeDirectoryWithQuota) { // a directory with quota; so set the quota to the new value ((INodeDirectoryWithQuota)dirNode).setQuota(nsQuota, dsQuota); } else { // a non-quota directory; so replace it with a directory with quota INodeDirectoryWithQuota newNode = new INodeDirectoryWithQuota(nsQuota, dsQuota, dirNode); // non-root directory node; parent != null INodeDirectory parent = (INodeDirectory)inodes[inodes.length-2]; dirNode = newNode; parent.replaceChild(newNode); } return (oldNsQuota != nsQuota || oldDsQuota != dsQuota) ? dirNode : null; } } /** * See {@link ClientProtocol#setQuota(String, long, long)} for the * contract. 
* @see #unprotectedSetQuota(String, long, long) */ void setQuota(String src, long nsQuota, long dsQuota) throws FileNotFoundException, QuotaExceededException { synchronized (rootDir) { INodeDirectory dir = unprotectedSetQuota(src, nsQuota, dsQuota); if (dir != null) { fsImage.getEditLog().logSetQuota(src, dir.getNsQuota(), dir.getDsQuota()); } } } long totalInodes() { synchronized (rootDir) { return rootDir.numItemsInTree(); } } /** * Sets the access time on the file. Logs it in the transaction log */ void setTimes(String src, INodeFile inode, long mtime, long atime, boolean force) throws IOException { if (unprotectedSetTimes(src, inode, mtime, atime, force)) { fsImage.getEditLog().logTimes(src, mtime, atime); } } boolean unprotectedSetTimes(String src, long mtime, long atime, boolean force) throws IOException { INodeFile inode = getFileINode(src); return unprotectedSetTimes(src, inode, mtime, atime, force); } private boolean unprotectedSetTimes(String src, INodeFile inode, long mtime, long atime, boolean force) throws IOException { boolean status = false; if (mtime != -1) { inode.setModificationTimeForce(mtime); status = true; } if (atime != -1) { long inodeTime = inode.getAccessTime(); // if the last access time update was within the last precision interval, then // no need to store access time if (atime <= inodeTime + namesystem.getAccessTimePrecision() && !force) { status = false; } else { inode.setAccessTime(atime); status = true; } } return status; } /** * Create FileStatus by file INode */ private static FileStatus createFileStatus(String path, INode node) { // length is zero for directories return new FileStatus(node.isDirectory() ? 0 : node.computeContentSummary().getLength(), node.isDirectory(), node.isDirectory() ? 0 : ((INodeFile)node).getReplication(), node.isDirectory() ? 
0 : ((INodeFile)node).getPreferredBlockSize(), node.getModificationTime(), node.getAccessTime(), node.getFsPermission(), node.getUserName(), node.getGroupName(), new Path(path)); } } diff --git a/src/test/org/apache/hadoop/hdfs/TestDFSRename.java b/src/test/org/apache/hadoop/hdfs/TestDFSRename.java index 0d76a52..84071c6 100644 --- a/src/test/org/apache/hadoop/hdfs/TestDFSRename.java +++ b/src/test/org/apache/hadoop/hdfs/TestDFSRename.java @@ -1,170 +1,205 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.hdfs; import java.io.*; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.FSConstants; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; public class TestDFSRename extends junit.framework.TestCase { - MiniDFSCluster cluster = null; + static Configuration CONF = new Configuration(); + static MiniDFSCluster cluster = null; static int countLease(MiniDFSCluster cluster) { return cluster.getNameNode().namesystem.leaseManager.countLease(); } final Path dir = new Path("/test/rename/"); @Override protected void setUp() throws Exception { - Configuration conf = new Configuration(); - cluster = new MiniDFSCluster(conf, 2, true, null); + cluster = new MiniDFSCluster(CONF, 2, true, null); + } + + private void restartCluster() throws IOException { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + cluster = new MiniDFSCluster(CONF, 1, false, null); + cluster.waitClusterUp(); } @Override protected void tearDown() throws Exception { if (cluster != null) {cluster.shutdown();} } void list(FileSystem fs, String name) throws IOException { FileSystem.LOG.info("\n\n" + name); for(FileStatus s : fs.listStatus(dir)) { FileSystem.LOG.info("" + s.getPath()); } } static void createFile(FileSystem fs, Path f) throws IOException { DataOutputStream a_out = fs.create(f); a_out.writeBytes("something"); a_out.close(); } public void testRename() throws Exception { FileSystem fs = cluster.getFileSystem(); assertTrue(fs.mkdirs(dir)); { //test lease Path a = new Path(dir, "a"); Path aa = new Path(dir, "aa"); Path b = new Path(dir, "b"); createFile(fs, a); //should not have any lease assertEquals(0, countLease(cluster)); createFile(fs, aa); DataOutputStream aa_out = fs.create(aa); aa_out.writeBytes("something"); //should have 1 lease assertEquals(1, countLease(cluster)); list(fs, "rename0"); 
fs.rename(a, b); list(fs, "rename1"); aa_out.writeBytes(" more"); aa_out.close(); list(fs, "rename2"); //should not have any lease assertEquals(0, countLease(cluster)); } { // test non-existent destination Path dstPath = new Path("/c/d"); assertFalse(fs.exists(dstPath)); assertFalse(fs.rename(dir, dstPath)); } { // dst cannot be a file or directory under src // test rename /a/b/foo to /a/b/c Path src = new Path("/a/b"); Path dst = new Path("/a/b/c"); createFile(fs, new Path(src, "foo")); // dst cannot be a file under src assertFalse(fs.rename(src, dst)); // dst cannot be a directory under src assertFalse(fs.rename(src.getParent(), dst.getParent())); } { // dst can start with src, if it is not a directory or file under src // test rename /test /testfile Path src = new Path("/testPrefix"); Path dst = new Path("/testPrefixfile"); createFile(fs, src); assertTrue(fs.rename(src, dst)); } { // dst should not be same as src test rename /a/b/c to /a/b/c Path src = new Path("/a/b/c"); createFile(fs, src); assertTrue(fs.rename(src, src)); assertFalse(fs.rename(new Path("/a/b"), new Path("/a/b/"))); assertTrue(fs.rename(src, new Path("/a/b/c/"))); } fs.delete(dir, true); } public void testRenameWithQuota() throws Exception { DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem(); Path src1 = new Path(dir, "testRenameWithQuota/srcdir/src1"); Path src2 = new Path(dir, "testRenameWithQuota/srcdir/src2"); Path dst1 = new Path(dir, "testRenameWithQuota/dstdir/dst1"); Path dst2 = new Path(dir, "testRenameWithQuota/dstdir/dst2"); createFile(fs, src1); createFile(fs, src2); fs.setQuota(src1.getParent(), FSConstants.QUOTA_DONT_SET, FSConstants.QUOTA_DONT_SET); fs.mkdirs(dst1.getParent()); fs.setQuota(dst1.getParent(), FSConstants.QUOTA_DONT_SET, FSConstants.QUOTA_DONT_SET); // Test1: src does not exceed quota and dst has quota to accommodate rename rename(src1, dst1, true, false); // Test2: src does not exceed quota and dst has *no* quota to accommodate // rename 
fs.setQuota(dst1.getParent(), 1, FSConstants.QUOTA_DONT_SET); rename(src2, dst2, false, true); // Test3: src exceeds quota and dst has *no* quota to accommodate rename fs.setQuota(src1.getParent(), 1, FSConstants.QUOTA_DONT_SET); rename(dst1, src1, false, true); } + /** + * Perform operations such as setting quota, deletion of files, rename and + * ensure system can apply edits log during startup. + */ + public void testEditsLog() throws Exception { + DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem(); + Path src1 = new Path(dir, "testEditsLog/srcdir/src1"); + Path dst1 = new Path(dir, "testEditsLog/dstdir/dst1"); + createFile(fs, src1); + fs.mkdirs(dst1.getParent()); + createFile(fs, dst1); + + // Set quota so that dst1 parent cannot allow under it new files/directories + fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET); + // Free up quota for a subsequent rename + fs.delete(dst1, true); + rename(src1, dst1, true, false); + + // Restart the cluster and ensure the above operations can be + // loaded from the edits log + restartCluster(); + fs = (DistributedFileSystem)cluster.getFileSystem(); + assertFalse(fs.exists(src1)); // ensure src1 is already renamed + assertTrue(fs.exists(dst1)); // ensure rename dst exists + } + private void rename(Path src, Path dst, boolean renameSucceeds, boolean quotaException) throws Exception { DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem(); try { assertEquals(renameSucceeds, fs.rename(src, dst)); } catch (QuotaExceededException ex) { assertTrue(quotaException); } assertEquals(renameSucceeds, !fs.exists(src)); assertEquals(renameSucceeds, fs.exists(dst)); } }
jaxlaw/hadoop-common
9d44defec43bdc4aa4faa2000d9463f8b5bdd0f4
HDFS:457 from https://issues.apache.org/jira/secure/attachment/12424673/jira.HDFS-457.branch-0.20-internal.patch
diff --git a/YAHOO-CHANGES.txt b/YAHOO-CHANGES.txt index fac9f06..9b468f6 100644 --- a/YAHOO-CHANGES.txt +++ b/YAHOO-CHANGES.txt @@ -1,379 +1,383 @@ Yahoo! Distribution of Hadoop Change Log Patches from the following Apache Jira issues have been applied to this release in the order indicated. This is in addition to the patches applied from issues referenced in CHANGES.txt. yahoo-hadoop-0.20.1-3092118005: + HDFS-457. Better handling of volume failure in Data Node storage, + This fix is a port from hdfs-0.22 to common-0.20 by Boris Shkolnik. + Contributed by Erik Steffl + MAPREDUCE-1196. Fix FileOutputCommitter to use the deprecated cleanupJob api correctly. (acmurthy) yahoo-hadoop-0.20.1-3092118004: HADOOP-6344. rm and rmr immediately delete files rather than sending to trash, despite trash being enabled, if a user is over-quota. (jhoman) MAPREDUCE-1160. Reduce verbosity of log lines in some Map/Reduce classes to avoid filling up jobtracker logs on a busy cluster. (Ravi Gummadi and Hong Tang via yhemanth) HDFS-587. Add ability to run HDFS with MR test on non-default queue, also updated junit dependendcy from junit-3.8.1 to junit-4.5 (to make it possible to use Configured and Tool to process command line to be able to specify a queue). Contributed by Erik Steffl. MAPREDUCE-1158. Fix JT running maps and running reduces metrics. (sharad) MAPREDUCE-947. Fix bug in earlier implementation that was causing unit tests to fail. (Ravi Gummadi via yhemanth) MAPREDUCE-1062. Fix MRReliabilityTest to work with retired jobs (Contributed by Sreekanth Ramakrishnan) MAPREDUCE-1090. Modified log statement in TaskMemoryManagerThread to include task attempt id. (yhemanth) MAPREDUCE-1098. Fixed the distributed-cache to not do i/o while holding a global lock. (Amareshwari Sriramadasu via acmurthy) MAPREDUCE-1048. Add occupied/reserved slot usage summary on jobtracker UI. (Amareshwari Sriramadasu via sharad) MAPREDUCE-1103. Added more metrics to Jobtracker. (sharad) MAPREDUCE-947. 
Added commitJob and abortJob apis to OutputCommitter. Enhanced FileOutputCommitter to create a _SUCCESS file for successful jobs. (Amar Kamat & Jothi Padmanabhan via acmurthy) MAPREDUCE-1105. Remove max limit configuration in capacity scheduler in favor of max capacity percentage thus allowing the limit to go over queue capacity. (Rahul Kumar Singh via yhemanth) MAPREDUCE-1086. Setup Hadoop logging environment for tasks to point to task related parameters. (Ravi Gummadi via yhemanth) MAPREDUCE-739. Allow relative paths to be created inside archives. (mahadev) HADOOP-6097. Multiple bugs w/ Hadoop archives (mahadev) HADOOP-6231. Allow caching of filesystem instances to be disabled on a per-instance basis (ben slusky via mahadev) MAPREDUCE-826. harchive doesn't use ToolRunner / harchive returns 0 even if the job fails with exception (koji via mahadev) HDFS-686. NullPointerException is thrown while merging edit log and image. (hairong) HDFS-709. Fix TestDFSShell failure due to rename bug introduced by HDFS-677. (suresh) HDFS-677. Rename failure when both source and destination quota exceeds results in deletion of source. (suresh) HADOOP-6284. Add a new parameter, HADOOP_JAVA_PLATFORM_OPTS, to hadoop-config.sh so that it allows setting java command options for JAVA_PLATFORM. (Koji Noguchi via szetszwo) MAPREDUCE-732. Removed spurious log statements in the node blacklisting logic. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-144. Includes dump of the process tree in task diagnostics when a task is killed due to exceeding memory limits. (Vinod Kumar Vavilapalli via yhemanth) MAPREDUCE-979. Fixed JobConf APIs related to memory parameters to return values of new configuration variables when deprecated variables are disabled. (Sreekanth Ramakrishnan via yhemanth) MAPREDUCE-277. Makes job history counters available on the job history viewers. (Jothi Padmanabhan via ddas) HADOOP-5625. Add operation duration to clienttrace. (Lei Xu via cdouglas) HADOOP-5222. 
Add offset to datanode clienttrace. (Lei Xu via cdouglas) HADOOP-6218. Adds a feature where TFile can be split by Record Sequence number. Contributed by Hong Tang and Raghu Angadi. yahoo-hadoop-0.20.1-3041192001 MAPREDUCE-1088. Changed permissions on JobHistory files on local disk to 0744. Contributed by Arun C. Murthy. HADOOP-6304. Use java.io.File.set{Readable|Writable|Executable} where possible in RawLocalFileSystem. Contributed by Arun C. Murthy. yahoo-hadoop-0.20.1-3041192000 MAPREDUCE-270. Fix the tasktracker to optionally send an out-of-band heartbeat on task-completion for better job-latency. Contributed by Arun C. Murthy Configuration changes: add mapreduce.tasktracker.outofband.heartbeat MAPREDUCE-1030. Fix capacity-scheduler to assign a map and a reduce task per-heartbeat. Contributed by Rahuk K Singh. MAPREDUCE-1028. Fixed number of slots occupied by cleanup tasks to one irrespective of slot size for the job. Contributed by Ravi Gummadi. MAPREDUCE-964. Fixed start and finish times of TaskStatus to be consistent, thereby fixing inconsistencies in metering tasks. Contributed by Sreekanth Ramakrishnan. HADOOP-5976. Add a new command, classpath, to the hadoop script. Contributed by Owen O'Malley and Gary Murry HADOOP-5784. Makes the number of heartbeats that should arrive a second at the JobTracker configurable. Contributed by Amareshwari Sriramadasu. MAPREDUCE-945. Modifies MRBench and TestMapRed to use ToolRunner so that options such as queue name can be passed via command line. Contributed by Sreekanth Ramakrishnan. yahoo-hadoop-0.20.0-3006291003 HADOOP:5420 Correct bug in earlier implementation by Arun C. Murthy HADOOP-5363 Add support for proxying connections to multiple clusters with different versions to hdfsproxy. Contributed by Zhiyong Zhang HADOOP-5780. Improve per block message prited by -metaSave in HDFS. (Raghu Angadi) yahoo-hadoop-0.20.0-2957040010 HADOOP-6227. 
Fix Configuration to allow final parameters to be set to null and prevent them from being overridden. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0-2957040007 MAPREDUCE-430 Added patch supplied by Amar Kamat to allow roll forward on branch to includ externally committed patch. yahoo-hadoop-0.20.0-2957040006 MAPREDUCE-768. Provide an option to dump jobtracker configuration in JSON format to standard output. Contributed by V.V.Chaitanya yahoo-hadoop-0.20.0-2957040004 MAPREDUCE-834 Correct an issue created by merging this issue with patch attached to external Jira. yahoo-hadoop-0.20.0-2957040003 HADOOP-6184 Provide an API to dump Configuration in a JSON format. Contributed by V.V.Chaitanya Krishna. MAPREDUCE-745 Patch added for this issue to allow branch-0.20 to merge cleanly. yahoo-hadoop-0.20.0-2957040000 MAPREDUCE:478 Allow map and reduce jvm parameters, environment variables and ulimit to be set separately. MAPREDUCE:682 Removes reservations on tasktrackers which are blacklisted. Contributed by Sreekanth Ramakrishnan. HADOOP:5420 Support killing of process groups in LinuxTaskController binary HADOOP-5488 Removes the pidfile management for the Task JVM from the framework and instead passes the PID back and forth between the TaskTracker and the Task processes. Contributed by Ravi Gummadi. MAPREDUCE:467 Provide ability to collect statistics about total tasks and succeeded tasks in different time windows. yahoo-hadoop-0.20.0.2949784002: MAPREDUCE-817. Add a cache for retired jobs with minimal job info and provide a way to access history file url MAPREDUCE-814. Provide a way to configure completed job history files to be on HDFS. MAPREDUCE-838 Fixes a problem in the way commit of task outputs happens. The bug was that even if commit failed, the task would be declared as successful. Contributed by Amareshwari Sriramadasu. yahoo-hadoop-0.20.0.2902658004: MAPREDUCE-809 Fix job-summary logs to correctly record final status of FAILED and KILLED jobs. 
http://issues.apache.org/jira/secure/attachment/12414726/MAPREDUCE-809_0_20090728_yhadoop20.patch MAPREDUCE-740 Log a job-summary at the end of a job, while allowing it to be configured to use a custom appender if desired. http://issues.apache.org/jira/secure/attachment/12413941/MAPREDUCE-740_2_20090717_yhadoop20.patch MAPREDUCE-771 Fixes a bug which delays normal jobs in favor of high-ram jobs. http://issues.apache.org/jira/secure/attachment/12413990/MAPREDUCE-771-20.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt MAPREDUCE-733 Fixes a bug that when a task tracker is killed , it throws exception. Instead it should catch it and process it and allow the rest of the flow to go through http://issues.apache.org/jira/secure/attachment/12413015/MAPREDUCE-733-ydist.patch MAPREDUCE-734 Fixes a bug which prevented hi ram jobs from being removed from the scheduler queue. http://issues.apache.org/jira/secure/attachment/12413035/MAPREDUCE-734-20.patch MAPREDUCE-693 Fixes a bug that when a job is submitted and the JT is restarted (before job files have been written) and the job is killed after recovery, the conf files fail to be moved to the "done" subdirectory. http://issues.apache.org/jira/secure/attachment/12412823/MAPREDUCE-693-v1.2-branch-0.20.patch MAPREDUCE-722 Fixes a bug where more slots are getting reserved for HiRAM job tasks than required. http://issues.apache.org/jira/secure/attachment/12412744/MAPREDUCE-722.1.txt MAPREDUCE-683 TestJobTrackerRestart failed because of stale filemanager cache (which was created once per jvm). This patch makes sure that the filemanager is inited upon every JobHistory.init() and hence upon every restart. Note that this wont happen in production as upon a restart the new jobtracker will start in a new jvm and hence a new cache will be created. 
http://issues.apache.org/jira/secure/attachment/12412743/MAPREDUCE-683-v1.2.1-branch-0.20.patch MAPREDUCE-709 Fixes a bug where node health check script does not display the correct message on timeout. http://issues.apache.org/jira/secure/attachment/12412711/mapred-709-ydist.patch MAPREDUCE-708 Fixes a bug where node health check script does not refresh the "reason for blacklisting". http://issues.apache.org/jira/secure/attachment/12412706/MAPREDUCE-708-ydist.patch MAPREDUCE-522 Rewrote TestQueueCapacities to make it simpler and avoid timeout errors. http://issues.apache.org/jira/secure/attachment/12412472/mapred-522-ydist.patch MAPREDUCE-532 Provided ability in the capacity scheduler to limit the number of slots that can be concurrently used per queue at any given time. http://issues.apache.org/jira/secure/attachment/12412592/MAPREDUCE-532-20.patch MAPREDUCE-211 Provides ability to run a health check script on the tasktracker nodes and blacklist nodes if they are unhealthy. Contributed by Sreekanth Ramakrishnan. http://issues.apache.org/jira/secure/attachment/12412161/mapred-211-internal.patch MAPREDUCE-516 Remove .orig file included by mistake. http://issues.apache.org/jira/secure/attachment/12412108/HADOOP-5964_2_20090629_yhadoop.patch MAPREDUCE-416 Moves the history file to a "done" folder whenever a job completes. http://issues.apache.org/jira/secure/attachment/12411938/MAPREDUCE-416-v1.6-branch-0.20.patch HADOOP-5980 Previously, task spawned off by LinuxTaskController didn't get LD_LIBRARY_PATH in their environment. The tasks will now get same LD_LIBRARY_PATH value as when spawned off by DefaultTaskController. http://issues.apache.org/jira/secure/attachment/12410825/hadoop-5980-v20.patch HADOOP-5981 This issue completes the feature mentioned in HADOOP-2838. HADOOP-2838 provided a way to set env variables in child process. This issue provides a way to inherit tt's env variables and append or reset it. So now X=$X:y will inherit X (if there) and append y to it. 
http://issues.apache.org/jira/secure/attachment/12410454/hadoop5981-branch-20-example.patch HADOOP-5419 This issue is to provide an improvement on the existing M/R framework to let users know which queues they have access to, and for what operations. One use case for this would that currently there is no easy way to know if the user has access to submit jobs to a queue, until it fails with an access control exception. http://issues.apache.org/jira/secure/attachment/12410824/hadoop-5419-v20.2.patch HADOOP-5420 Support setsid based kill in LinuxTaskController. http://issues.apache.org/jira/secure/attachment/12414735/5420-ydist.patch.txt HADOOP-5643 Added the functionality to refresh jobtrackers node list via command line (bin/hadoop mradmin -refreshNodes). The command should be run as the jobtracker owner (jobtracker process owner) or from a super group (mapred.permissions.supergroup). http://issues.apache.org/jira/secure/attachment/12410619/Fixed%2B5643-0.20-final HADOOP-2838 Now the users can set environment variables using mapred.child.env. They can do the following X=Y : set X to Y X=$X:Y : Append Y to X (which should be taken from the tasktracker) http://issues.apache.org/jira/secure/attachment/12409895/HADOOP-2838-v2.2-branch-20-example.patch HADOOP-5818. Revert the renaming from FSNamesystem.checkSuperuserPrivilege to checkAccess by HADOOP-5643. (Amar Kamat via szetszwo) https://issues.apache.org/jira/secure/attachment/12409835/5818for0.20.patch HADOOP-5801. Fixes the problem: If the hosts file is changed across restart then it should be refreshed upon recovery so that the excluded hosts are lost and the maps are re-executed. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409834/5801-0.20.patch HADOOP-5643. HADOOP-5643. Adds a way to decommission TaskTrackers while the JobTracker is running. (Amar Kamat via ddas) https://issues.apache.org/jira/secure/attachment/12409833/Fixed+5643-0.20 HADOOP-5419. 
Provide a facility to query the Queue ACLs for the current user. (Rahul Kumar Singh via yhemanth) http://issues.apache.org/jira/secure/attachment/12409323/hadoop-5419-v20.patch HADOOP-5733. Add map/reduce slot capacity and blacklisted capacity to JobTracker metrics. (Sreekanth Ramakrishnan via cdouglas) http://issues.apache.org/jira/secure/attachment/12409322/hadoop-5733-v20.patch HADOOP-5738. Split "waiting_tasks" JobTracker metric into waiting maps and waiting reduces. (Sreekanth Ramakrishnan via cdouglas) https://issues.apache.org/jira/secure/attachment/12409321/5738-y20.patch HADOOP-4842. Streaming now allows specifiying a command for the combiner. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12402355/patch-4842-3.txt HADOOP-4490. Provide ability to run tasks as job owners. (Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409318/hadoop-4490-br20-3.patch https://issues.apache.org/jira/secure/attachment/12410170/hadoop-4490-br20-3.2.patch HADOOP-5442. Paginate jobhistory display and added some search capabilities. (Amar Kamat via acmurthy) http://issues.apache.org/jira/secure/attachment/12402301/HADOOP-5442-v1.12.patch HADOOP-3327. Improves handling of READ_TIMEOUT during map output copying. (Amareshwari Sriramadasu via ddas) http://issues.apache.org/jira/secure/attachment/12399449/patch-3327-2.txt HADOOP-5113. Fixed logcondense to remove files for usernames beginning with characters specified in the -l option. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409317/hadoop-5113-0.18.txt HADOOP-2898. Provide an option to specify a port range for Hadoop services provisioned by HOD. (Peeyush Bishnoi via yhemanth) http://issues.apache.org/jira/secure/attachment/12409316/hadoop-2898-0.20.txt HADOOP-4930. Implement a Linux native executable that can be used to launch tasks as users. 
(Sreekanth Ramakrishnan via yhemanth) http://issues.apache.org/jira/secure/attachment/12409402/hadoop-4930v20.patch diff --git a/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java b/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java index 5d826bf..dca8b6d 100644 --- a/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java +++ b/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java @@ -1,86 +1,87 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.hdfs.protocol; import org.apache.hadoop.conf.Configuration; /************************************ * Some handy constants * ************************************/ public interface FSConstants { public static int MIN_BLOCKS_FOR_WRITE = 5; // Chunk the block Invalidate message public static final int BLOCK_INVALIDATE_CHUNK = 100; // Long that indicates "leave current quota unchanged" public static final long QUOTA_DONT_SET = Long.MAX_VALUE; public static final long QUOTA_RESET = -1L; // // Timeouts, constants // public static long HEARTBEAT_INTERVAL = 3; public static long BLOCKREPORT_INTERVAL = 60 * 60 * 1000; public static long BLOCKREPORT_INITIAL_DELAY = 0; public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000; public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD; public static final long LEASE_RECOVER_PERIOD = 10 * 1000; //in ms // We need to limit the length and depth of a path in the filesystem. HADOOP-438 // Currently we set the maximum length to 8k characters and the maximum depth to 1k. public static int MAX_PATH_LENGTH = 8000; public static int MAX_PATH_DEPTH = 1000; public static final int BUFFER_SIZE = new Configuration().getInt("io.file.buffer.size", 4096); //Used for writing header etc. public static final int SMALL_BUFFER_SIZE = Math.min(BUFFER_SIZE/2, 512); //TODO [email protected]: should be conf injected? public static final long DEFAULT_BLOCK_SIZE = 64 * 1024 * 1024; public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024; public static final int SIZE_OF_INTEGER = Integer.SIZE / Byte.SIZE; + public static final int MIN_NUM_OF_VALID_VOLUMES = 1;// for a DN to run // SafeMode actions public enum SafeModeAction{ SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET; } // type of the datanode report public static enum DatanodeReportType {ALL, LIVE, DEAD } /** * Distributed upgrade actions: * * 1. Get upgrade status. * 2. Get detailed upgrade status. * 3. 
Proceed with the upgrade if it is stuck, no matter what the status is. */ public static enum UpgradeAction { GET_STATUS, DETAILED_STATUS, FORCE_PROCEED; } // Version is reflected in the dfs image and edit log files. // Version is reflected in the data storage file. // Versions are negative. // Decrement LAYOUT_VERSION to define a new version. public static final int LAYOUT_VERSION = -18; // Current version: // Support disk space quotas } diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index c6ca96d..d8c04b5 100644 --- a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -1,1014 +1,1030 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.hdfs.server.datanode; import java.io.BufferedOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.EOFException; import java.io.IOException; import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.LinkedList; import java.util.zip.CRC32; import java.util.zip.Checksum; import org.apache.commons.logging.Log; import org.apache.hadoop.fs.FSInputChecker; import org.apache.hadoop.fs.FSOutputSummer; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.FSConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.StringUtils; import static org.apache.hadoop.hdfs.server.datanode.DataNode.DN_CLIENTTRACE_FORMAT; /** A class that receives a block and writes to its own disk, meanwhile * may copies it to another site. If a throttler is provided, * streaming throttling is also supported. **/ class BlockReceiver implements java.io.Closeable, FSConstants { public static final Log LOG = DataNode.LOG; static final Log ClientTraceLog = DataNode.ClientTraceLog; private Block block; // the block to receive protected boolean finalized; private DataInputStream in = null; // from where data are read private DataChecksum checksum; // from where chunks of a block can be read private OutputStream out = null; // to block file at local disk private DataOutputStream checksumOut = null; // to crc file at local disk private int bytesPerChecksum; private int checksumSize; private ByteBuffer buf; // contains one full packet. 
private int bufRead; //amount of valid data in the buf private int maxPacketReadLen; protected long offsetInBlock; protected final String inAddr; protected final String myAddr; private String mirrorAddr; private DataOutputStream mirrorOut; private Daemon responder = null; private BlockTransferThrottler throttler; private FSDataset.BlockWriteStreams streams; private boolean isRecovery = false; private String clientName; DatanodeInfo srcDataNode = null; private Checksum partialCrc = null; private DataNode datanode = null; BlockReceiver(Block block, DataInputStream in, String inAddr, String myAddr, boolean isRecovery, String clientName, DatanodeInfo srcDataNode, DataNode datanode) throws IOException { try{ this.block = block; this.in = in; this.inAddr = inAddr; this.myAddr = myAddr; this.isRecovery = isRecovery; this.clientName = clientName; this.offsetInBlock = 0; this.srcDataNode = srcDataNode; this.datanode = datanode; this.checksum = DataChecksum.newDataChecksum(in); this.bytesPerChecksum = checksum.getBytesPerChecksum(); this.checksumSize = checksum.getChecksumSize(); // // Open local disk out // streams = datanode.data.writeToBlock(block, isRecovery); this.finalized = datanode.data.isValidBlock(block); if (streams != null) { this.out = streams.dataOut; this.checksumOut = new DataOutputStream(new BufferedOutputStream( streams.checksumOut, SMALL_BUFFER_SIZE)); // If this block is for appends, then remove it from periodic // validation. if (datanode.blockScanner != null && isRecovery) { datanode.blockScanner.deleteBlock(block); } } } catch (BlockAlreadyExistsException bae) { throw bae; } catch(IOException ioe) { IOUtils.closeStream(this); cleanupBlock(); // check if there is a disk error IOException cause = FSDataset.getCauseIfDiskError(ioe); + DataNode.LOG.warn("IOException in BlockReceiver constructor. 
Cause is ", + cause); + if (cause != null) { // possible disk error ioe = cause; datanode.checkDiskError(ioe); // may throw an exception here } throw ioe; } } /** * close files. */ public void close() throws IOException { IOException ioe = null; // close checksum file try { if (checksumOut != null) { checksumOut.flush(); checksumOut.close(); checksumOut = null; } } catch(IOException e) { ioe = e; } // close block file try { if (out != null) { out.flush(); out.close(); out = null; } } catch (IOException e) { ioe = e; } // disk check if(ioe != null) { datanode.checkDiskError(ioe); throw ioe; } } /** * Flush block data and metadata files to disk. * @throws IOException */ void flush() throws IOException { if (checksumOut != null) { checksumOut.flush(); } if (out != null) { out.flush(); } } /** * While writing to mirrorOut, failure to write to mirror should not * affect this datanode unless a client is writing the block. */ private void handleMirrorOutError(IOException ioe) throws IOException { LOG.info(datanode.dnRegistration + ":Exception writing block " + block + " to mirror " + mirrorAddr + "\n" + StringUtils.stringifyException(ioe)); mirrorOut = null; // // If stream-copy fails, continue // writing to disk for replication requests. For client // writes, return error so that the client can do error // recovery. // if (clientName.length() > 0) { throw ioe; } } /** * Verify multiple CRC chunks. 
*/ private void verifyChunks( byte[] dataBuf, int dataOff, int len, byte[] checksumBuf, int checksumOff ) throws IOException { while (len > 0) { int chunkLen = Math.min(len, bytesPerChecksum); checksum.update(dataBuf, dataOff, chunkLen); if (!checksum.compare(checksumBuf, checksumOff)) { if (srcDataNode != null) { try { LOG.info("report corrupt block " + block + " from datanode " + srcDataNode + " to namenode"); LocatedBlock lb = new LocatedBlock(block, new DatanodeInfo[] {srcDataNode}); datanode.namenode.reportBadBlocks(new LocatedBlock[] {lb}); } catch (IOException e) { LOG.warn("Failed to report bad block " + block + " from datanode " + srcDataNode + " to namenode"); } } throw new IOException("Unexpected checksum mismatch " + "while writing " + block + " from " + inAddr); } checksum.reset(); dataOff += chunkLen; checksumOff += checksumSize; len -= chunkLen; } } /** * Makes sure buf.position() is zero without modifying buf.remaining(). * It moves the data if position needs to be changed. */ private void shiftBufData() { if (bufRead != buf.limit()) { throw new IllegalStateException("bufRead should be same as " + "buf.limit()"); } //shift the remaining data on buf to the front if (buf.position() > 0) { int dataLeft = buf.remaining(); if (dataLeft > 0) { byte[] b = buf.array(); System.arraycopy(b, buf.position(), b, 0, dataLeft); } buf.position(0); bufRead = dataLeft; buf.limit(bufRead); } } /** * reads upto toRead byte to buf at buf.limit() and increments the limit. * throws an IOException if read does not succeed. */ private int readToBuf(int toRead) throws IOException { if (toRead < 0) { toRead = (maxPacketReadLen > 0 ? maxPacketReadLen : buf.capacity()) - buf.limit(); } int nRead = in.read(buf.array(), buf.limit(), toRead); if (nRead < 0) { throw new EOFException("while trying to read " + toRead + " bytes"); } bufRead = buf.limit() + nRead; buf.limit(bufRead); return nRead; } /** * Reads (at least) one packet and returns the packet length. 
* buf.position() points to the start of the packet and * buf.limit() point to the end of the packet. There could * be more data from next packet in buf.<br><br> * * It tries to read a full packet with single read call. * Consecutive packets are usually of the same length. */ private int readNextPacket() throws IOException { /* This dances around buf a little bit, mainly to read * full packet with single read and to accept arbitarary size * for next packet at the same time. */ if (buf == null) { /* initialize buffer to the best guess size: * 'chunksPerPacket' calculation here should match the same * calculation in DFSClient to make the guess accurate. */ int chunkSize = bytesPerChecksum + checksumSize; int chunksPerPacket = (datanode.writePacketSize - DataNode.PKT_HEADER_LEN - SIZE_OF_INTEGER + chunkSize - 1)/chunkSize; buf = ByteBuffer.allocate(DataNode.PKT_HEADER_LEN + SIZE_OF_INTEGER + Math.max(chunksPerPacket, 1) * chunkSize); buf.limit(0); } // See if there is data left in the buffer : if (bufRead > buf.limit()) { buf.limit(bufRead); } while (buf.remaining() < SIZE_OF_INTEGER) { if (buf.position() > 0) { shiftBufData(); } readToBuf(-1); } /* We mostly have the full packet or at least enough for an int */ buf.mark(); int payloadLen = buf.getInt(); buf.reset(); if (payloadLen == 0) { //end of stream! buf.limit(buf.position() + SIZE_OF_INTEGER); return 0; } // check corrupt values for pktLen, 100MB upper limit should be ok? if (payloadLen < 0 || payloadLen > (100*1024*1024)) { throw new IOException("Incorrect value for packet payload : " + payloadLen); } int pktSize = payloadLen + DataNode.PKT_HEADER_LEN; if (buf.remaining() < pktSize) { //we need to read more data int toRead = pktSize - buf.remaining(); // first make sure buf has enough space. 
int spaceLeft = buf.capacity() - buf.limit(); if (toRead > spaceLeft && buf.position() > 0) { shiftBufData(); spaceLeft = buf.capacity() - buf.limit(); } if (toRead > spaceLeft) { byte oldBuf[] = buf.array(); int toCopy = buf.limit(); buf = ByteBuffer.allocate(toCopy + toRead); System.arraycopy(oldBuf, 0, buf.array(), 0, toCopy); buf.limit(toCopy); } //now read: while (toRead > 0) { toRead -= readToBuf(toRead); } } if (buf.remaining() > pktSize) { buf.limit(buf.position() + pktSize); } if (pktSize > maxPacketReadLen) { maxPacketReadLen = pktSize; } return payloadLen; } /** * Receives and processes a packet. It can contain many chunks. * returns size of the packet. */ private int receivePacket() throws IOException { int payloadLen = readNextPacket(); if (payloadLen <= 0) { return payloadLen; } buf.mark(); //read the header buf.getInt(); // packet length offsetInBlock = buf.getLong(); // get offset of packet in block long seqno = buf.getLong(); // get seqno boolean lastPacketInBlock = (buf.get() != 0); int endOfHeader = buf.position(); buf.reset(); if (LOG.isDebugEnabled()){ LOG.debug("Receiving one packet for block " + block + " of length " + payloadLen + " seqno " + seqno + " offsetInBlock " + offsetInBlock + " lastPacketInBlock " + lastPacketInBlock); } setBlockPosition(offsetInBlock); //First write the packet to the mirror: if (mirrorOut != null) { try { mirrorOut.write(buf.array(), buf.position(), buf.remaining()); mirrorOut.flush(); } catch (IOException e) { handleMirrorOutError(e); } } buf.position(endOfHeader); int len = buf.getInt(); if (len < 0) { throw new IOException("Got wrong length during writeBlock(" + block + ") from " + inAddr + " at offset " + offsetInBlock + ": " + len); } if (len == 0) { LOG.debug("Receiving empty packet for block " + block); } else { offsetInBlock += len; int checksumLen = ((len + bytesPerChecksum - 1)/bytesPerChecksum)* checksumSize; if ( buf.remaining() != (checksumLen + len)) { throw new IOException("Data remaining in packet 
does not match " + "sum of checksumLen and dataLen"); } int checksumOff = buf.position(); int dataOff = checksumOff + checksumLen; byte pktBuf[] = buf.array(); buf.position(buf.limit()); // move to the end of the data. /* skip verifying checksum iff this is not the last one in the * pipeline and clientName is non-null. i.e. Checksum is verified * on all the datanodes when the data is being written by a * datanode rather than a client. Whe client is writing the data, * protocol includes acks and only the last datanode needs to verify * checksum. */ if (mirrorOut == null || clientName.length() == 0) { verifyChunks(pktBuf, dataOff, len, pktBuf, checksumOff); } try { if (!finalized) { //finally write to the disk : out.write(pktBuf, dataOff, len); // If this is a partial chunk, then verify that this is the only // chunk in the packet. Calculate new crc for this chunk. if (partialCrc != null) { if (len > bytesPerChecksum) { throw new IOException("Got wrong length during writeBlock(" + block + ") from " + inAddr + " " + "A packet can have only one partial chunk."+ " len = " + len + " bytesPerChecksum " + bytesPerChecksum); } partialCrc.update(pktBuf, dataOff, len); byte[] buf = FSOutputSummer.convertToByteStream(partialCrc, checksumSize); checksumOut.write(buf); LOG.debug("Writing out partial crc for data len " + len); partialCrc = null; } else { checksumOut.write(pktBuf, checksumOff, checksumLen); } datanode.myMetrics.bytesWritten.inc(len); } } catch (IOException iex) { datanode.checkDiskError(iex); throw iex; } } /// flush entire packet before sending ack flush(); // put in queue for pending acks if (responder != null) { ((PacketResponder)responder.getRunnable()).enqueue(seqno, lastPacketInBlock); } if (throttler != null) { // throttle I/O throttler.throttle(payloadLen); } return payloadLen; } void writeChecksumHeader(DataOutputStream mirrorOut) throws IOException { checksum.writeHeader(mirrorOut); } void receiveBlock( DataOutputStream mirrOut, // output to next 
datanode DataInputStream mirrIn, // input from next datanode DataOutputStream replyOut, // output to previous datanode String mirrAddr, BlockTransferThrottler throttlerArg, int numTargets) throws IOException { mirrorOut = mirrOut; mirrorAddr = mirrAddr; throttler = throttlerArg; try { // write data chunk header if (!finalized) { BlockMetadataHeader.writeHeader(checksumOut, checksum); } if (clientName.length() > 0) { responder = new Daemon(datanode.threadGroup, new PacketResponder(this, block, mirrIn, replyOut, numTargets)); responder.start(); // start thread to processes reponses } /* * Receive until packet length is zero. */ while (receivePacket() > 0) {} // flush the mirror out if (mirrorOut != null) { try { mirrorOut.writeInt(0); // mark the end of the block mirrorOut.flush(); } catch (IOException e) { handleMirrorOutError(e); } } // wait for all outstanding packet responses. And then // indicate responder to gracefully shutdown. if (responder != null) { ((PacketResponder)responder.getRunnable()).close(); } // if this write is for a replication request (and not // from a client), then finalize block. For client-writes, // the block is finalized in the PacketResponder. if (clientName.length() == 0) { // close the block/crc files close(); // Finalize the block. Does this fsync()? 
block.setNumBytes(offsetInBlock); datanode.data.finalizeBlock(block); datanode.myMetrics.blocksWritten.inc(); } } catch (IOException ioe) { LOG.info("Exception in receiveBlock for block " + block + " " + ioe); IOUtils.closeStream(this); if (responder != null) { responder.interrupt(); } cleanupBlock(); throw ioe; } finally { if (responder != null) { try { responder.join(); } catch (InterruptedException e) { throw new IOException("Interrupted receiveBlock"); } responder = null; } } } /** Cleanup a partial block * if this write is for a replication request (and not from a client) */ private void cleanupBlock() throws IOException { if (clientName.length() == 0) { // not client write datanode.data.unfinalizeBlock(block); } } /** * Sets the file pointer in the local block file to the specified value. */ private void setBlockPosition(long offsetInBlock) throws IOException { if (finalized) { if (!isRecovery) { throw new IOException("Write to offset " + offsetInBlock + " of block " + block + " that is already finalized."); } if (offsetInBlock > datanode.data.getLength(block)) { throw new IOException("Write to offset " + offsetInBlock + " of block " + block + " that is already finalized and is of size " + datanode.data.getLength(block)); } return; } if (datanode.data.getChannelPosition(block, streams) == offsetInBlock) { return; // nothing to do } long offsetInChecksum = BlockMetadataHeader.getHeaderSize() + offsetInBlock / bytesPerChecksum * checksumSize; if (out != null) { out.flush(); } if (checksumOut != null) { checksumOut.flush(); } // If this is a partial chunk, then read in pre-existing checksum if (offsetInBlock % bytesPerChecksum != 0) { LOG.info("setBlockPosition trying to set position to " + offsetInBlock + " for block " + block + " which is not a multiple of bytesPerChecksum " + bytesPerChecksum); computePartialChunkCrc(offsetInBlock, offsetInChecksum, bytesPerChecksum); } LOG.info("Changing block file offset of block " + block + " from " + 
datanode.data.getChannelPosition(block, streams) + " to " + offsetInBlock + " meta file offset to " + offsetInChecksum); // set the position of the block file datanode.data.setChannelPosition(block, streams, offsetInBlock, offsetInChecksum); } /** * reads in the partial crc chunk and computes checksum * of pre-existing data in partial chunk. */ private void computePartialChunkCrc(long blkoff, long ckoff, int bytesPerChecksum) throws IOException { // find offset of the beginning of partial chunk. // int sizePartialChunk = (int) (blkoff % bytesPerChecksum); int checksumSize = checksum.getChecksumSize(); blkoff = blkoff - sizePartialChunk; LOG.info("computePartialChunkCrc sizePartialChunk " + sizePartialChunk + " block " + block + " offset in block " + blkoff + " offset in metafile " + ckoff); // create an input stream from the block file // and read in partial crc chunk into temporary buffer // byte[] buf = new byte[sizePartialChunk]; byte[] crcbuf = new byte[checksumSize]; FSDataset.BlockInputStreams instr = null; try { instr = datanode.data.getTmpInputStreams(block, blkoff, ckoff); IOUtils.readFully(instr.dataIn, buf, 0, sizePartialChunk); // open meta file and read in crc value computer earlier IOUtils.readFully(instr.checksumIn, crcbuf, 0, crcbuf.length); } finally { IOUtils.closeStream(instr); } // compute crc of partial chunk from data read in the block file. partialCrc = new CRC32(); partialCrc.update(buf, 0, sizePartialChunk); LOG.info("Read in partial CRC chunk from disk for block " + block); // paranoia! 
verify that the pre-computed crc matches what we // recalculated just now if (partialCrc.getValue() != FSInputChecker.checksum2long(crcbuf)) { String msg = "Partial CRC " + partialCrc.getValue() + " does not match value computed the " + " last time file was closed " + FSInputChecker.checksum2long(crcbuf); throw new IOException(msg); } //LOG.debug("Partial CRC matches 0x" + // Long.toHexString(partialCrc.getValue())); } /** * Processed responses from downstream datanodes in the pipeline * and sends back replies to the originator. */ class PacketResponder implements Runnable, FSConstants { //packet waiting for ack private LinkedList<Packet> ackQueue = new LinkedList<Packet>(); private volatile boolean running = true; private Block block; DataInputStream mirrorIn; // input from downstream datanode DataOutputStream replyOut; // output to upstream datanode private int numTargets; // number of downstream datanodes including myself private BlockReceiver receiver; // The owner of this responder. public String toString() { return "PacketResponder " + numTargets + " for Block " + this.block; } PacketResponder(BlockReceiver receiver, Block b, DataInputStream in, DataOutputStream out, int numTargets) { this.receiver = receiver; this.block = b; mirrorIn = in; replyOut = out; this.numTargets = numTargets; } /** * enqueue the seqno that is still be to acked by the downstream datanode. * @param seqno * @param lastPacketInBlock */ synchronized void enqueue(long seqno, boolean lastPacketInBlock) { if (running) { LOG.debug("PacketResponder " + numTargets + " adding seqno " + seqno + " to ack queue."); ackQueue.addLast(new Packet(seqno, lastPacketInBlock)); notifyAll(); } } /** * wait for all pending packets to be acked. Then shutdown thread. 
*/ synchronized void close() { while (running && ackQueue.size() != 0 && datanode.shouldRun) { try { wait(); } catch (InterruptedException e) { running = false; } } LOG.debug("PacketResponder " + numTargets + " for block " + block + " Closing down."); running = false; notifyAll(); } private synchronized void lastDataNodeRun() { long lastHeartbeat = System.currentTimeMillis(); boolean lastPacket = false; final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0; while (running && datanode.shouldRun && !lastPacket) { long now = System.currentTimeMillis(); try { // wait for a packet to be sent to downstream datanode while (running && datanode.shouldRun && ackQueue.size() == 0) { long idle = now - lastHeartbeat; long timeout = (datanode.socketTimeout/2) - idle; if (timeout <= 0) { timeout = 1000; } try { wait(timeout); } catch (InterruptedException e) { if (running) { LOG.info("PacketResponder " + numTargets + " for block " + block + " Interrupted."); running = false; } break; } // send a heartbeat if it is time. now = System.currentTimeMillis(); if (now - lastHeartbeat > datanode.socketTimeout/2) { replyOut.writeLong(-1); // send heartbeat replyOut.flush(); lastHeartbeat = now; } } if (!running || !datanode.shouldRun) { break; } Packet pkt = ackQueue.removeFirst(); long expected = pkt.seqno; notifyAll(); LOG.debug("PacketResponder " + numTargets + " for block " + block + " acking for packet " + expected); // If this is the last packet in block, then close block // file and finalize the block before responding success if (pkt.lastPacketInBlock) { if (!receiver.finalized) { receiver.close(); final long endTime = ClientTraceLog.isInfoEnabled() ? 
System.nanoTime() : 0; block.setNumBytes(receiver.offsetInBlock); datanode.data.finalizeBlock(block); datanode.myMetrics.blocksWritten.inc(); datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT); if (ClientTraceLog.isInfoEnabled() && receiver.clientName.length() > 0) { long offset = 0; ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, receiver.inAddr, receiver.myAddr, block.getNumBytes(), "HDFS_WRITE", receiver.clientName, offset, datanode.dnRegistration.getStorageID(), block, endTime-startTime)); } else { LOG.info("Received block " + block + " of size " + block.getNumBytes() + " from " + receiver.inAddr); } } lastPacket = true; } replyOut.writeLong(expected); replyOut.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS); replyOut.flush(); } catch (Exception e) { + LOG.warn("IOException in BlockReceiver.lastNodeRun: ", e); if (running) { + try { + datanode.checkDiskError(e); // may throw an exception here + } catch (IOException ioe) { + LOG.warn("DataNode.chekDiskError failed in lastDataNodeRun with: ", + ioe); + } LOG.info("PacketResponder " + block + " " + numTargets + " Exception " + StringUtils.stringifyException(e)); running = false; } } } LOG.info("PacketResponder " + numTargets + " for block " + block + " terminating"); } /** * Thread to process incoming acks. * @see java.lang.Runnable#run() */ public void run() { // If this is the last datanode in pipeline, then handle differently if (numTargets == 0) { lastDataNodeRun(); return; } boolean lastPacketInBlock = false; final long startTime = ClientTraceLog.isInfoEnabled() ? 
System.nanoTime() : 0; while (running && datanode.shouldRun && !lastPacketInBlock) { try { short op = DataTransferProtocol.OP_STATUS_SUCCESS; boolean didRead = false; long expected = -2; try { // read seqno from downstream datanode long seqno = mirrorIn.readLong(); didRead = true; if (seqno == -1) { replyOut.writeLong(-1); // send keepalive replyOut.flush(); LOG.debug("PacketResponder " + numTargets + " got -1"); continue; } else if (seqno == -2) { LOG.debug("PacketResponder " + numTargets + " got -2"); } else { LOG.debug("PacketResponder " + numTargets + " got seqno = " + seqno); Packet pkt = null; synchronized (this) { while (running && datanode.shouldRun && ackQueue.size() == 0) { if (LOG.isDebugEnabled()) { LOG.debug("PacketResponder " + numTargets + " seqno = " + seqno + " for block " + block + " waiting for local datanode to finish write."); } wait(); } pkt = ackQueue.removeFirst(); expected = pkt.seqno; notifyAll(); LOG.debug("PacketResponder " + numTargets + " seqno = " + seqno); if (seqno != expected) { throw new IOException("PacketResponder " + numTargets + " for block " + block + " expected seqno:" + expected + " received:" + seqno); } lastPacketInBlock = pkt.lastPacketInBlock; } } } catch (Throwable e) { if (running) { LOG.info("PacketResponder " + block + " " + numTargets + " Exception " + StringUtils.stringifyException(e)); running = false; } } if (Thread.interrupted()) { /* The receiver thread cancelled this thread. * We could also check any other status updates from the * receiver thread (e.g. if it is ok to write to replyOut). * It is prudent to not send any more status back to the client * because this datanode has a problem. The upstream datanode * will detect a timout on heartbeats and will declare that * this datanode is bad, and rightly so. 
*/ LOG.info("PacketResponder " + block + " " + numTargets + " : Thread is interrupted."); running = false; continue; } if (!didRead) { op = DataTransferProtocol.OP_STATUS_ERROR; } // If this is the last packet in block, then close block // file and finalize the block before responding success if (lastPacketInBlock && !receiver.finalized) { receiver.close(); final long endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0; block.setNumBytes(receiver.offsetInBlock); datanode.data.finalizeBlock(block); datanode.myMetrics.blocksWritten.inc(); datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT); if (ClientTraceLog.isInfoEnabled() && receiver.clientName.length() > 0) { long offset = 0; ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, receiver.inAddr, receiver.myAddr, block.getNumBytes(), "HDFS_WRITE", receiver.clientName, offset, datanode.dnRegistration.getStorageID(), block, endTime-startTime)); } else { LOG.info("Received block " + block + " of size " + block.getNumBytes() + " from " + receiver.inAddr); } } // send my status back to upstream datanode replyOut.writeLong(expected); // send seqno upstream replyOut.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS); LOG.debug("PacketResponder " + numTargets + " for block " + block + " responded my status " + " for seqno " + expected); // forward responses from downstream datanodes. 
for (int i = 0; i < numTargets && datanode.shouldRun; i++) { try { if (op == DataTransferProtocol.OP_STATUS_SUCCESS) { op = mirrorIn.readShort(); if (op != DataTransferProtocol.OP_STATUS_SUCCESS) { LOG.debug("PacketResponder for block " + block + ": error code received from downstream " + " datanode[" + i + "] " + op); } } } catch (Throwable e) { op = DataTransferProtocol.OP_STATUS_ERROR; } replyOut.writeShort(op); } replyOut.flush(); LOG.debug("PacketResponder " + block + " " + numTargets + " responded other status " + " for seqno " + expected); // If we were unable to read the seqno from downstream, then stop. if (expected == -2) { running = false; } // If we forwarded an error response from a downstream datanode // and we are acting on behalf of a client, then we quit. The // client will drive the recovery mechanism. if (op == DataTransferProtocol.OP_STATUS_ERROR && receiver.clientName.length() > 0) { running = false; } } catch (IOException e) { + LOG.warn("IOException in BlockReceiver.run(): ", e); if (running) { + try { + datanode.checkDiskError(e); // may throw an exception here + } catch (IOException ioe) { + LOG.warn("DataNode.chekDiskError failed in run() with: ", ioe); + } LOG.info("PacketResponder " + block + " " + numTargets + " Exception " + StringUtils.stringifyException(e)); running = false; } } catch (RuntimeException e) { if (running) { LOG.info("PacketResponder " + block + " " + numTargets + " Exception " + StringUtils.stringifyException(e)); running = false; } } } LOG.info("PacketResponder " + numTargets + " for block " + block + " terminating"); } } /** * This information is cached by the Datanode in the ackQueue. 
*/ static private class Packet { long seqno; boolean lastPacketInBlock; Packet(long seqno, boolean lastPacketInBlock) { this.seqno = seqno; this.lastPacketInBlock = lastPacketInBlock; } } } diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 872d4b6..12ca2c1 100644 --- a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -127,1479 +127,1506 @@ import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; public class DataNode extends Configured implements InterDatanodeProtocol, ClientDatanodeProtocol, FSConstants, Runnable { public static final Log LOG = LogFactory.getLog(DataNode.class); static{ Configuration.addDefaultResource("hdfs-default.xml"); Configuration.addDefaultResource("hdfs-site.xml"); } public static final String DN_CLIENTTRACE_FORMAT = "src: %s" + // src IP ", dest: %s" + // dst IP ", bytes: %s" + // byte count ", op: %s" + // operation ", cliID: %s" + // DFSClient id ", offset: %s" + // offset ", srvID: %s" + // DatanodeRegistration ", blockid: %s" + // block id ", duration: %s"; // duration time static final Log ClientTraceLog = LogFactory.getLog(DataNode.class.getName() + ".clienttrace"); /** * Use {@link NetUtils#createSocketAddr(String)} instead. 
*/ @Deprecated public static InetSocketAddress createSocketAddr(String target ) throws IOException { return NetUtils.createSocketAddr(target); } public DatanodeProtocol namenode = null; public FSDatasetInterface data = null; public DatanodeRegistration dnRegistration = null; volatile boolean shouldRun = true; private LinkedList<Block> receivedBlockList = new LinkedList<Block>(); /** list of blocks being recovered */ private final Map<Block, Block> ongoingRecovery = new HashMap<Block, Block>(); private LinkedList<String> delHints = new LinkedList<String>(); public final static String EMPTY_DEL_HINT = ""; AtomicInteger xmitsInProgress = new AtomicInteger(); Daemon dataXceiverServer = null; ThreadGroup threadGroup = null; long blockReportInterval; //disallow the sending of BR before instructed to do so long lastBlockReport = 0; boolean resetBlockReportTime = true; long initialBlockReportDelay = BLOCKREPORT_INITIAL_DELAY * 1000L; long lastHeartbeat = 0; long heartBeatInterval; private DataStorage storage = null; private HttpServer infoServer = null; DataNodeMetrics myMetrics; private static InetSocketAddress nameNodeAddr; private InetSocketAddress selfAddr; private static DataNode datanodeObject = null; private Thread dataNodeThread = null; String machineName; private static String dnThreadName; int socketTimeout; int socketWriteTimeout = 0; boolean transferToAllowed = true; int writePacketSize = 0; public DataBlockScanner blockScanner = null; public Daemon blockScannerThread = null; private static final Random R = new Random(); // For InterDataNodeProtocol public Server ipcServer; /** * Current system time. * @return current time in msec. */ static long now() { return System.currentTimeMillis(); } /** * Create the DataNode given a configuration and an array of dataDirs. * 'dataDirs' is where the blocks are stored. 
*/ DataNode(Configuration conf, AbstractList<File> dataDirs) throws IOException { super(conf); datanodeObject = this; try { startDataNode(conf, dataDirs); } catch (IOException ie) { shutdown(); throw ie; } } /** * This method starts the data node with the specified conf. * * @param conf - the configuration * if conf's CONFIG_PROPERTY_SIMULATED property is set * then a simulated storage based data node is created. * * @param dataDirs - only for a non-simulated storage data node * @throws IOException */ void startDataNode(Configuration conf, AbstractList<File> dataDirs ) throws IOException { // use configured nameserver & interface to get local hostname if (conf.get("slave.host.name") != null) { machineName = conf.get("slave.host.name"); } if (machineName == null) { machineName = DNS.getDefaultHost( conf.get("dfs.datanode.dns.interface","default"), conf.get("dfs.datanode.dns.nameserver","default")); } InetSocketAddress nameNodeAddr = NameNode.getAddress(conf); this.socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT); this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", HdfsConstants.WRITE_TIMEOUT); /* Based on results on different platforms, we might need set the default * to false on some of them. 
*/ this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true); this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024); String address = NetUtils.getServerAddress(conf, "dfs.datanode.bindAddress", "dfs.datanode.port", "dfs.datanode.address"); InetSocketAddress socAddr = NetUtils.createSocketAddr(address); int tmpPort = socAddr.getPort(); storage = new DataStorage(); // construct registration this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort); // connect to name node this.namenode = (DatanodeProtocol) RPC.waitForProxy(DatanodeProtocol.class, DatanodeProtocol.versionID, nameNodeAddr, conf); // get version and id info from the name-node NamespaceInfo nsInfo = handshake(); StartupOption startOpt = getStartupOption(conf); assert startOpt != null : "Startup option must be set."; boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false); if (simulatedFSDataset) { setNewStorageID(dnRegistration); dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION; dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID; // it would have been better to pass storage as a parameter to // constructor below - need to augment ReflectionUtils used below. 
conf.set("StorageId", dnRegistration.getStorageID()); try { //Equivalent of following (can't do because Simulated is in test dir) // this.data = new SimulatedFSDataset(conf); this.data = (FSDatasetInterface) ReflectionUtils.newInstance( Class.forName("org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"), conf); } catch (ClassNotFoundException e) { throw new IOException(StringUtils.stringifyException(e)); } } else { // real storage // read storage info, lock data dirs and transition fs state if necessary storage.recoverTransitionRead(nsInfo, dataDirs, startOpt); // adjust this.dnRegistration.setStorageInfo(storage); // initialize data node internal structure this.data = new FSDataset(storage, conf); } // find free port ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket(); Server.bind(ss, socAddr, 0); ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE); // adjust machine name with the actual port tmpPort = ss.getLocalPort(); selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort); this.dnRegistration.setName(machineName + ":" + tmpPort); LOG.info("Opened info server at " + tmpPort); this.threadGroup = new ThreadGroup("dataXceiverServer"); this.dataXceiverServer = new Daemon(threadGroup, new DataXceiverServer(ss, conf, this)); this.threadGroup.setDaemon(true); // auto destroy when empty this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL); this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY)* 1000L; if (this.initialBlockReportDelay >= blockReportInterval) { this.initialBlockReportDelay = 0; LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec." 
+ " Setting initial delay to 0 msec:"); } this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L; DataNode.nameNodeAddr = nameNodeAddr; //initialize periodic block scanner String reason = null; if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) { reason = "verification is turned off by configuration"; } else if ( !(data instanceof FSDataset) ) { reason = "verifcation is supported only with FSDataset"; } if ( reason == null ) { blockScanner = new DataBlockScanner(this, (FSDataset)data, conf); } else { LOG.info("Periodic Block Verification is disabled because " + reason + "."); } //create a servlet to serve full-file content String infoAddr = NetUtils.getServerAddress(conf, "dfs.datanode.info.bindAddress", "dfs.datanode.info.port", "dfs.datanode.http.address"); InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr); String infoHost = infoSocAddr.getHostName(); int tmpInfoPort = infoSocAddr.getPort(); this.infoServer = new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf); if (conf.getBoolean("dfs.https.enable", false)) { boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false); InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get( "dfs.datanode.https.address", infoHost + ":" + 0)); Configuration sslConf = new Configuration(false); sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml")); this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth); } this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class); this.infoServer.addInternalServlet(null, "/getFileChecksum/*", FileChecksumServlets.GetServlet.class); this.infoServer.setAttribute("datanode.blockScanner", blockScanner); this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class); this.infoServer.start(); // adjust info port this.dnRegistration.setInfoPort(this.infoServer.getPort()); myMetrics = new 
DataNodeMetrics(conf, dnRegistration.getStorageID()); // set service-level authorization security policy if (conf.getBoolean( ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) { PolicyProvider policyProvider = (PolicyProvider)(ReflectionUtils.newInstance( conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, HDFSPolicyProvider.class, PolicyProvider.class), conf)); SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider)); } //init ipc server InetSocketAddress ipcAddr = NetUtils.createSocketAddr( conf.get("dfs.datanode.ipc.address")); ipcServer = RPC.getServer(this, ipcAddr.getHostName(), ipcAddr.getPort(), conf.getInt("dfs.datanode.handler.count", 3), false, conf); ipcServer.start(); dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort()); LOG.info("dnRegistration = " + dnRegistration); } /** * Creates either NIO or regular depending on socketWriteTimeout. */ protected Socket newSocket() throws IOException { return (socketWriteTimeout > 0) ? SocketChannel.open().socket() : new Socket(); } private NamespaceInfo handshake() throws IOException { NamespaceInfo nsInfo = new NamespaceInfo(); while (shouldRun) { try { nsInfo = namenode.versionRequest(); break; } catch(SocketTimeoutException e) { // namenode is busy LOG.info("Problem connecting to server: " + getNameNodeAddr()); try { Thread.sleep(1000); } catch (InterruptedException ie) {} } } String errorMsg = null; // verify build version if( ! 
nsInfo.getBuildVersion().equals( Storage.getBuildVersion() )) { errorMsg = "Incompatible build versions: namenode BV = " + nsInfo.getBuildVersion() + "; datanode BV = " + Storage.getBuildVersion(); LOG.fatal( errorMsg ); try { namenode.errorReport( dnRegistration, DatanodeProtocol.NOTIFY, errorMsg ); } catch( SocketTimeoutException e ) { // namenode is busy LOG.info("Problem connecting to server: " + getNameNodeAddr()); } throw new IOException( errorMsg ); } assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : "Data-node and name-node layout versions must be the same." + "Expected: "+ FSConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion(); return nsInfo; } /** Return the DataNode object * */ public static DataNode getDataNode() { return datanodeObject; } public static InterDatanodeProtocol createInterDataNodeProtocolProxy( DatanodeID datanodeid, Configuration conf) throws IOException { InetSocketAddress addr = NetUtils.createSocketAddr( datanodeid.getHost() + ":" + datanodeid.getIpcPort()); if (InterDatanodeProtocol.LOG.isDebugEnabled()) { InterDatanodeProtocol.LOG.info("InterDatanodeProtocol addr=" + addr); } return (InterDatanodeProtocol)RPC.getProxy(InterDatanodeProtocol.class, InterDatanodeProtocol.versionID, addr, conf); } public InetSocketAddress getNameNodeAddr() { return nameNodeAddr; } public InetSocketAddress getSelfAddr() { return selfAddr; } DataNodeMetrics getMetrics() { return myMetrics; } /** * Return the namenode's identifier */ public String getNamenode() { //return namenode.toString(); return "<namenode>"; } public static void setNewStorageID(DatanodeRegistration dnReg) { /* Return * "DS-randInt-ipaddr-currentTimeMillis" * It is considered extermely rare for all these numbers to match * on a different machine accidentally for the following * a) SecureRandom(INT_MAX) is pretty much random (1 in 2 billion), and * b) Good chance ip address would be different, and * c) Even on the same machine, Datanode is designed to use 
different ports. * d) Good chance that these are started at different times. * For a confict to occur all the 4 above have to match!. * The format of this string can be changed anytime in future without * affecting its functionality. */ String ip = "unknownIP"; try { ip = DNS.getDefaultIP("default"); } catch (UnknownHostException ignored) { LOG.warn("Could not find ip address of \"default\" inteface."); } int rand = 0; try { rand = SecureRandom.getInstance("SHA1PRNG").nextInt(Integer.MAX_VALUE); } catch (NoSuchAlgorithmException e) { LOG.warn("Could not use SecureRandom"); rand = R.nextInt(Integer.MAX_VALUE); } dnReg.storageID = "DS-" + rand + "-"+ ip + "-" + dnReg.getPort() + "-" + System.currentTimeMillis(); } /** * Register datanode * <p> * The datanode needs to register with the namenode on startup in order * 1) to report which storage it is serving now and * 2) to receive a registrationID * issued by the namenode to recognize registered datanodes. * * @see FSNamesystem#registerDatanode(DatanodeRegistration) * @throws IOException */ private void register() throws IOException { if (dnRegistration.getStorageID().equals("")) { setNewStorageID(dnRegistration); } while(shouldRun) { try { // reset name to machineName. Mainly for web interface. 
dnRegistration.name = machineName + ":" + dnRegistration.getPort(); dnRegistration = namenode.register(dnRegistration); break; } catch(SocketTimeoutException e) { // namenode is busy LOG.info("Problem connecting to server: " + getNameNodeAddr()); try { Thread.sleep(1000); } catch (InterruptedException ie) {} } } assert ("".equals(storage.getStorageID()) && !"".equals(dnRegistration.getStorageID())) || storage.getStorageID().equals(dnRegistration.getStorageID()) : "New storageID can be assigned only if data-node is not formatted"; if (storage.getStorageID().equals("")) { storage.setStorageID(dnRegistration.getStorageID()); storage.writeAll(); LOG.info("New storage id " + dnRegistration.getStorageID() + " is assigned to data-node " + dnRegistration.getName()); } if(! storage.getStorageID().equals(dnRegistration.getStorageID())) { throw new IOException("Inconsistent storage IDs. Name-node returned " + dnRegistration.getStorageID() + ". Expecting " + storage.getStorageID()); } // random short delay - helps scatter the BR from all DNs scheduleBlockReport(initialBlockReportDelay); } /** * Shut down this instance of the datanode. * Returns only after shutdown is complete. * This method can only be called by the offerService thread. * Otherwise, deadlock might occur. 
*/ public void shutdown() { if (infoServer != null) { try { infoServer.stop(); } catch (Exception e) { LOG.warn("Exception shutting down DataNode", e); } } if (ipcServer != null) { ipcServer.stop(); } this.shouldRun = false; if (dataXceiverServer != null) { ((DataXceiverServer) this.dataXceiverServer.getRunnable()).kill(); this.dataXceiverServer.interrupt(); // wait for all data receiver threads to exit if (this.threadGroup != null) { while (true) { this.threadGroup.interrupt(); LOG.info("Waiting for threadgroup to exit, active threads is " + this.threadGroup.activeCount()); if (this.threadGroup.activeCount() == 0) { break; } try { Thread.sleep(1000); } catch (InterruptedException e) {} } } // wait for dataXceiveServer to terminate try { this.dataXceiverServer.join(); } catch (InterruptedException ie) { } } RPC.stopProxy(namenode); // stop the RPC threads if(upgradeManager != null) upgradeManager.shutdownUpgrade(); if (blockScannerThread != null) { blockScannerThread.interrupt(); try { blockScannerThread.join(3600000L); // wait for at most 1 hour } catch (InterruptedException ie) { } } if (storage != null) { try { this.storage.unlockAll(); } catch (IOException ie) { } } if (dataNodeThread != null) { dataNodeThread.interrupt(); try { dataNodeThread.join(); } catch (InterruptedException ie) { } } if (data != null) { data.shutdown(); } if (myMetrics != null) { myMetrics.shutdown(); } } - /* Check if there is no space in disk or the disk is read-only - * when IOException occurs. 
- * If so, handle the error */ - protected void checkDiskError( IOException e ) throws IOException { - if (e.getMessage() != null && + /** Check if there is no space in disk + * @param e that caused this checkDiskError call + **/ + protected void checkDiskError(Exception e ) throws IOException { + + LOG.warn("checkDiskError: exception: ", e); + + if (e.getMessage() != null && e.getMessage().startsWith("No space left on device")) { throw new DiskOutOfSpaceException("No space left on device"); } else { checkDiskError(); } } - /* Check if there is no disk space and if so, handle the error*/ - protected void checkDiskError( ) throws IOException { + /** + * Check if there is a disk failure and if so, handle the error + * + **/ + protected void checkDiskError( ) { try { data.checkDataDir(); } catch(DiskErrorException de) { handleDiskError(de.getMessage()); } } private void handleDiskError(String errMsgr) { - LOG.warn("DataNode is shutting down.\n" + errMsgr); - shouldRun = false; + boolean hasEnoughResource = data.hasEnoughResource(); + LOG.warn("DataNode.handleDiskError: Keep Running: " + hasEnoughResource); + + //if hasEnoughtResource = true - more volumes are available, so we don't want + // to shutdown DN completely and don't want NN to remove it. + int dp_error = DatanodeProtocol.DISK_ERROR; + if(hasEnoughResource == false) { + // DN will be shutdown and NN should remove it + dp_error = DatanodeProtocol.FATAL_DISK_ERROR; + } + //inform NameNode try { namenode.errorReport( - dnRegistration, DatanodeProtocol.DISK_ERROR, errMsgr); + dnRegistration, dp_error, errMsgr); } catch(IOException ignored) { } + + + if(hasEnoughResource) { + scheduleBlockReport(0); + return; // do not shutdown + } + + LOG.warn("DataNode is shutting down.\n" + errMsgr); + shouldRun = false; } /** Number of concurrent xceivers per node. */ int getXceiverCount() { return threadGroup == null ? 0 : threadGroup.activeCount(); } /** * Main loop for the DataNode. 
Runs until shutdown, * forever calling remote NameNode functions. */ public void offerService() throws Exception { LOG.info("using BLOCKREPORT_INTERVAL of " + blockReportInterval + "msec" + " Initial delay: " + initialBlockReportDelay + "msec"); // // Now loop for a long time.... // while (shouldRun) { try { long startTime = now(); // // Every so often, send heartbeat or block-report // if (startTime - lastHeartbeat > heartBeatInterval) { // // All heartbeat messages include following info: // -- Datanode name // -- data transfer port // -- Total capacity // -- Bytes remaining // lastHeartbeat = startTime; DatanodeCommand[] cmds = namenode.sendHeartbeat(dnRegistration, data.getCapacity(), data.getDfsUsed(), data.getRemaining(), xmitsInProgress.get(), getXceiverCount()); myMetrics.heartbeats.inc(now() - startTime); //LOG.info("Just sent heartbeat, with name " + localName); if (!processCommand(cmds)) continue; } // check if there are newly received blocks Block [] blockArray=null; String [] delHintArray=null; synchronized(receivedBlockList) { synchronized(delHints) { int numBlocks = receivedBlockList.size(); if (numBlocks > 0) { if(numBlocks!=delHints.size()) { LOG.warn("Panic: receiveBlockList and delHints are not of the same length" ); } // // Send newly-received blockids to namenode // blockArray = receivedBlockList.toArray(new Block[numBlocks]); delHintArray = delHints.toArray(new String[numBlocks]); } } } if (blockArray != null) { if(delHintArray == null || delHintArray.length != blockArray.length ) { LOG.warn("Panic: block array & delHintArray are not the same" ); } namenode.blockReceived(dnRegistration, blockArray, delHintArray); synchronized (receivedBlockList) { synchronized (delHints) { for(int i=0; i<blockArray.length; i++) { receivedBlockList.remove(blockArray[i]); delHints.remove(delHintArray[i]); } } } } // send block report if (startTime - lastBlockReport > blockReportInterval) { // // Send latest blockinfo report if timer has expired. 
// Get back a list of local block(s) that are obsolete // and can be safely GC'ed. // long brStartTime = now(); Block[] bReport = data.getBlockReport(); DatanodeCommand cmd = namenode.blockReport(dnRegistration, BlockListAsLongs.convertToArrayLongs(bReport)); long brTime = now() - brStartTime; myMetrics.blockReports.inc(brTime); LOG.info("BlockReport of " + bReport.length + " blocks got processed in " + brTime + " msecs"); // // If we have sent the first block report, then wait a random // time before we start the periodic block reports. // if (resetBlockReportTime) { lastBlockReport = startTime - R.nextInt((int)(blockReportInterval)); resetBlockReportTime = false; } else { /* say the last block report was at 8:20:14. The current report * should have started around 9:20:14 (default 1 hour interval). * If current time is : * 1) normal like 9:20:18, next report should be at 10:20:14 * 2) unexpected like 11:35:43, next report should be at 12:20:14 */ lastBlockReport += (now() - lastBlockReport) / blockReportInterval * blockReportInterval; } processCommand(cmd); } // start block scanner if (blockScanner != null && blockScannerThread == null && upgradeManager.isUpgradeCompleted()) { LOG.info("Starting Periodic block scanner."); blockScannerThread = new Daemon(blockScanner); blockScannerThread.start(); } // // There is no work to do; sleep until hearbeat timer elapses, // or work arrives, and then iterate again. 
// long waitTime = heartBeatInterval - (System.currentTimeMillis() - lastHeartbeat); synchronized(receivedBlockList) { if (waitTime > 0 && receivedBlockList.size() == 0) { try { receivedBlockList.wait(waitTime); } catch (InterruptedException ie) { } } } // synchronized } catch(RemoteException re) { String reClass = re.getClassName(); if (UnregisteredDatanodeException.class.getName().equals(reClass) || DisallowedDatanodeException.class.getName().equals(reClass) || IncorrectVersionException.class.getName().equals(reClass)) { LOG.warn("DataNode is shutting down: " + StringUtils.stringifyException(re)); shutdown(); return; } LOG.warn(StringUtils.stringifyException(re)); } catch (IOException e) { LOG.warn(StringUtils.stringifyException(e)); } } // while (shouldRun) } // offerService /** * Process an array of datanode commands * * @param cmds an array of datanode commands * @return true if further processing may be required or false otherwise. */ private boolean processCommand(DatanodeCommand[] cmds) { if (cmds != null) { for (DatanodeCommand cmd : cmds) { try { if (processCommand(cmd) == false) { return false; } } catch (IOException ioe) { LOG.warn("Error processing datanode Command", ioe); } } } return true; } /** * * @param cmd * @return true if further processing may be required or false otherwise. * @throws IOException */ private boolean processCommand(DatanodeCommand cmd) throws IOException { if (cmd == null) return true; final BlockCommand bcmd = cmd instanceof BlockCommand? (BlockCommand)cmd: null; switch(cmd.getAction()) { case DatanodeProtocol.DNA_TRANSFER: // Send a copy of a block to another datanode transferBlocks(bcmd.getBlocks(), bcmd.getTargets()); myMetrics.blocksReplicated.inc(bcmd.getBlocks().length); break; case DatanodeProtocol.DNA_INVALIDATE: // // Some local block(s) are obsolete and can be // safely garbage-collected. 
// Block toDelete[] = bcmd.getBlocks(); try { if (blockScanner != null) { blockScanner.deleteBlocks(toDelete); } data.invalidate(toDelete); } catch(IOException e) { checkDiskError(); throw e; } myMetrics.blocksRemoved.inc(toDelete.length); break; case DatanodeProtocol.DNA_SHUTDOWN: // shut down the data node this.shutdown(); return false; case DatanodeProtocol.DNA_REGISTER: // namenode requested a registration - at start or if NN lost contact LOG.info("DatanodeCommand action: DNA_REGISTER"); if (shouldRun) { register(); } break; case DatanodeProtocol.DNA_FINALIZE: storage.finalizeUpgrade(); break; case UpgradeCommand.UC_ACTION_START_UPGRADE: // start distributed upgrade here processDistributedUpgradeCommand((UpgradeCommand)cmd); break; case DatanodeProtocol.DNA_RECOVERBLOCK: recoverBlocks(bcmd.getBlocks(), bcmd.getTargets()); break; default: LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction()); } return true; } // Distributed upgrade manager UpgradeManagerDatanode upgradeManager = new UpgradeManagerDatanode(this); private void processDistributedUpgradeCommand(UpgradeCommand comm ) throws IOException { assert upgradeManager != null : "DataNode.upgradeManager is null."; upgradeManager.processUpgradeCommand(comm); } /** * Start distributed upgrade if it should be initiated by the data-node. 
*/ private void startDistributedUpgradeIfNeeded() throws IOException { UpgradeManagerDatanode um = DataNode.getDataNode().upgradeManager; assert um != null : "DataNode.upgradeManager is null."; if(!um.getUpgradeState()) return; um.setUpgradeState(false, um.getUpgradeVersion()); um.startUpgrade(); return; } private void transferBlock( Block block, DatanodeInfo xferTargets[] ) throws IOException { if (!data.isValidBlock(block)) { // block does not exist or is under-construction String errStr = "Can't send invalid block " + block; LOG.info(errStr); namenode.errorReport(dnRegistration, DatanodeProtocol.INVALID_BLOCK, errStr); return; } // Check if NN recorded length matches on-disk length long onDiskLength = data.getLength(block); if (block.getNumBytes() > onDiskLength) { // Shorter on-disk len indicates corruption so report NN the corrupt block namenode.reportBadBlocks(new LocatedBlock[]{ new LocatedBlock(block, new DatanodeInfo[] { new DatanodeInfo(dnRegistration)})}); LOG.info("Can't replicate block " + block + " because on-disk length " + onDiskLength + " is shorter than NameNode recorded length " + block.getNumBytes()); return; } int numTargets = xferTargets.length; if (numTargets > 0) { if (LOG.isInfoEnabled()) { StringBuilder xfersBuilder = new StringBuilder(); for (int i = 0; i < numTargets; i++) { xfersBuilder.append(xferTargets[i].getName()); xfersBuilder.append(" "); } LOG.info(dnRegistration + " Starting thread to transfer block " + block + " to " + xfersBuilder); } new Daemon(new DataTransfer(xferTargets, block, this)).start(); } } private void transferBlocks( Block blocks[], DatanodeInfo xferTargets[][] ) { for (int i = 0; i < blocks.length; i++) { try { transferBlock(blocks[i], xferTargets[i]); } catch (IOException ie) { LOG.warn("Failed to transfer block " + blocks[i], ie); } } } /* * Informing the name node could take a long long time! Should we wait * till namenode is informed before responding with success to the * client? For now we don't. 
*/ protected void notifyNamenodeReceivedBlock(Block block, String delHint) { if(block==null || delHint==null) { throw new IllegalArgumentException(block==null?"Block is null":"delHint is null"); } synchronized (receivedBlockList) { synchronized (delHints) { receivedBlockList.add(block); delHints.add(delHint); receivedBlockList.notifyAll(); } } } /* ******************************************************************** Protocol when a client reads data from Datanode (Cur Ver: 9): Client's Request : ================= Processed in DataXceiver: +----------------------------------------------+ | Common Header | 1 byte OP == OP_READ_BLOCK | +----------------------------------------------+ Processed in readBlock() : +-------------------------------------------------------------------------+ | 8 byte Block ID | 8 byte genstamp | 8 byte start offset | 8 byte length | +-------------------------------------------------------------------------+ | vInt length | <DFSClient id> | +-----------------------------------+ Client sends optional response only at the end of receiving data. DataNode Response : =================== In readBlock() : If there is an error while initializing BlockSender : +---------------------------+ | 2 byte OP_STATUS_ERROR | and connection will be closed. +---------------------------+ Otherwise +---------------------------+ | 2 byte OP_STATUS_SUCCESS | +---------------------------+ Actual data, sent by BlockSender.sendBlock() : ChecksumHeader : +--------------------------------------------------+ | 1 byte CHECKSUM_TYPE | 4 byte BYTES_PER_CHECKSUM | +--------------------------------------------------+ Followed by actual data in the form of PACKETS: +------------------------------------+ | Sequence of data PACKETs .... | +------------------------------------+ A "PACKET" is defined further below. The client reads data until it receives a packet with "LastPacketInBlock" set to true or with a zero length. 
If there is no checksum error, it replies to DataNode with OP_STATUS_CHECKSUM_OK: Client optional response at the end of data transmission : +------------------------------+ | 2 byte OP_STATUS_CHECKSUM_OK | +------------------------------+ PACKET : Contains a packet header, checksum and data. Amount of data ======== carried is set by BUFFER_SIZE. +-----------------------------------------------------+ | 4 byte packet length (excluding packet header) | +-----------------------------------------------------+ | 8 byte offset in the block | 8 byte sequence number | +-----------------------------------------------------+ | 1 byte isLastPacketInBlock | +-----------------------------------------------------+ | 4 byte Length of actual data | +-----------------------------------------------------+ | x byte checksum data. x is defined below | +-----------------------------------------------------+ | actual data ...... | +-----------------------------------------------------+ x = (length of data + BYTE_PER_CHECKSUM - 1)/BYTES_PER_CHECKSUM * CHECKSUM_SIZE CHECKSUM_SIZE depends on CHECKSUM_TYPE (usually, 4 for CRC32) The above packet format is used while writing data to DFS also. Not all the fields might be used while reading. ************************************************************************ */ /** Header size for a packet */ public static final int PKT_HEADER_LEN = ( 4 + /* Packet payload length */ 8 + /* offset in block */ 8 + /* seqno */ 1 /* isLastPacketInBlock */); /** * Used for transferring a block of data. This class * sends a piece of data to another DataNode. */ class DataTransfer implements Runnable { DatanodeInfo targets[]; Block b; DataNode datanode; /** * Connect to the first item in the target list. Pass along the * entire target list, the block, and the data. 
*/ public DataTransfer(DatanodeInfo targets[], Block b, DataNode datanode) throws IOException { this.targets = targets; this.b = b; this.datanode = datanode; } /** * Do the deed, write the bytes */ public void run() { xmitsInProgress.getAndIncrement(); Socket sock = null; DataOutputStream out = null; BlockSender blockSender = null; try { InetSocketAddress curTarget = NetUtils.createSocketAddr(targets[0].getName()); sock = newSocket(); NetUtils.connect(sock, curTarget, socketTimeout); sock.setSoTimeout(targets.length * socketTimeout); long writeTimeout = socketWriteTimeout + HdfsConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1); OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout); out = new DataOutputStream(new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE)); blockSender = new BlockSender(b, 0, b.getNumBytes(), false, false, false, datanode); DatanodeInfo srcNode = new DatanodeInfo(dnRegistration); // // Header info // out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); out.writeByte(DataTransferProtocol.OP_WRITE_BLOCK); out.writeLong(b.getBlockId()); out.writeLong(b.getGenerationStamp()); out.writeInt(0); // no pipelining out.writeBoolean(false); // not part of recovery Text.writeString(out, ""); // client out.writeBoolean(true); // sending src node information srcNode.write(out); // Write src node DatanodeInfo // write targets out.writeInt(targets.length - 1); for (int i = 1; i < targets.length; i++) { targets[i].write(out); } // send data & checksum blockSender.sendBlock(out, baseStream, null); // no response necessary LOG.info(dnRegistration + ":Transmitted block " + b + " to " + curTarget); } catch (IOException ie) { LOG.warn(dnRegistration + ":Failed to transfer " + b + " to " + targets[0].getName() + " got " + StringUtils.stringifyException(ie)); + // check if there are any disk problem + datanode.checkDiskError(); + } finally { xmitsInProgress.getAndDecrement(); IOUtils.closeStream(blockSender); IOUtils.closeStream(out); 
IOUtils.closeSocket(sock); } } } /** * No matter what kind of exception we get, keep retrying to offerService(). * That's the loop that connects to the NameNode and provides basic DataNode * functionality. * * Only stop when "shouldRun" is turned off (which can only happen at shutdown). */ public void run() { LOG.info(dnRegistration + "In DataNode.run, data = " + data); // start dataXceiveServer dataXceiverServer.start(); while (shouldRun) { try { startDistributedUpgradeIfNeeded(); offerService(); } catch (Exception ex) { LOG.error("Exception: " + StringUtils.stringifyException(ex)); if (shouldRun) { try { Thread.sleep(5000); } catch (InterruptedException ie) { } } } } LOG.info(dnRegistration + ":Finishing DataNode in: "+data); shutdown(); } /** Start a single datanode daemon and wait for it to finish. * If this thread is specifically interrupted, it will stop waiting. */ public static void runDatanodeDaemon(DataNode dn) throws IOException { if (dn != null) { //register datanode dn.register(); dn.dataNodeThread = new Thread(dn, dnThreadName); dn.dataNodeThread.setDaemon(true); // needed for JUnit testing dn.dataNodeThread.start(); } } static boolean isDatanodeUp(DataNode dn) { return dn.dataNodeThread != null && dn.dataNodeThread.isAlive(); } /** Instantiate a single datanode object. This must be run by invoking * {@link DataNode#runDatanodeDaemon(DataNode)} subsequently. */ public static DataNode instantiateDataNode(String args[], Configuration conf) throws IOException { if (conf == null) conf = new Configuration(); if (!parseArguments(args, conf)) { printUsage(); return null; } if (conf.get("dfs.network.script") != null) { LOG.error("This configuration for rack identification is not supported" + " anymore. 
RackID resolution is handled by the NameNode."); System.exit(-1); } String[] dataDirs = conf.getStrings("dfs.data.dir"); dnThreadName = "DataNode: [" + StringUtils.arrayToString(dataDirs) + "]"; return makeInstance(dataDirs, conf); } /** Instantiate & Start a single datanode daemon and wait for it to finish. * If this thread is specifically interrupted, it will stop waiting. */ public static DataNode createDataNode(String args[], Configuration conf) throws IOException { DataNode dn = instantiateDataNode(args, conf); runDatanodeDaemon(dn); return dn; } void join() { if (dataNodeThread != null) { try { dataNodeThread.join(); } catch (InterruptedException e) {} } } /** * Make an instance of DataNode after ensuring that at least one of the * given data directories (and their parent directories, if necessary) * can be created. * @param dataDirs List of directories, where the new DataNode instance should * keep its files. * @param conf Configuration instance to use. * @return DataNode instance for given list of data dirs and conf, or null if * no directory from this directory list can be created. 
* @throws IOException */ public static DataNode makeInstance(String[] dataDirs, Configuration conf) throws IOException { ArrayList<File> dirs = new ArrayList<File>(); for (int i = 0; i < dataDirs.length; i++) { File data = new File(dataDirs[i]); try { DiskChecker.checkDir(data); dirs.add(data); } catch(DiskErrorException e) { LOG.warn("Invalid directory in dfs.data.dir: " + e.getMessage()); } } if (dirs.size() > 0) return new DataNode(conf, dirs); LOG.error("All directories in dfs.data.dir are invalid."); return null; } @Override public String toString() { return "DataNode{" + "data=" + data + ", localName='" + dnRegistration.getName() + "'" + ", storageID='" + dnRegistration.getStorageID() + "'" + ", xmitsInProgress=" + xmitsInProgress.get() + "}"; } private static void printUsage() { System.err.println("Usage: java DataNode"); System.err.println(" [-rollback]"); } /** * Parse and verify command line arguments and set configuration parameters. * * @return false if passed argements are incorrect */ private static boolean parseArguments(String args[], Configuration conf) { int argsLen = (args == null) ? 0 : args.length; StartupOption startOpt = StartupOption.REGULAR; for(int i=0; i < argsLen; i++) { String cmd = args[i]; if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) { LOG.error("-r, --rack arguments are not supported anymore. 
RackID " + "resolution is handled by the NameNode."); System.exit(-1); } else if ("-rollback".equalsIgnoreCase(cmd)) { startOpt = StartupOption.ROLLBACK; } else if ("-regular".equalsIgnoreCase(cmd)) { startOpt = StartupOption.REGULAR; } else return false; } setStartupOption(conf, startOpt); return true; } private static void setStartupOption(Configuration conf, StartupOption opt) { conf.set("dfs.datanode.startup", opt.toString()); } static StartupOption getStartupOption(Configuration conf) { return StartupOption.valueOf(conf.get("dfs.datanode.startup", StartupOption.REGULAR.toString())); } /** * This methods arranges for the data node to send the block report at the next heartbeat. */ public void scheduleBlockReport(long delay) { if (delay > 0) { // send BR after random delay lastBlockReport = System.currentTimeMillis() - ( blockReportInterval - R.nextInt((int)(delay))); } else { // send at next heartbeat lastBlockReport = lastHeartbeat - blockReportInterval; } resetBlockReportTime = true; // reset future BRs for randomness } /** * This method is used for testing. * Examples are adding and deleting blocks directly. * The most common usage will be when the data node's storage is similated. 
* * @return the fsdataset that stores the blocks */ public FSDatasetInterface getFSDataset() { return data; } /** */ public static void main(String args[]) { try { StringUtils.startupShutdownMessage(DataNode.class, args, LOG); DataNode datanode = createDataNode(args, null); if (datanode != null) datanode.join(); } catch (Throwable e) { LOG.error(StringUtils.stringifyException(e)); System.exit(-1); } } // InterDataNodeProtocol implementation /** {@inheritDoc} */ public BlockMetaDataInfo getBlockMetaDataInfo(Block block ) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("block=" + block); } Block stored = data.getStoredBlock(block.getBlockId()); if (stored == null) { return null; } BlockMetaDataInfo info = new BlockMetaDataInfo(stored, blockScanner.getLastScanTime(stored)); if (LOG.isDebugEnabled()) { LOG.debug("getBlockMetaDataInfo successful block=" + stored + " length " + stored.getNumBytes() + " genstamp " + stored.getGenerationStamp()); } // paranoia! verify that the contents of the stored block // matches the block file on disk. data.validateBlockMetadata(stored); return info; } public Daemon recoverBlocks(final Block[] blocks, final DatanodeInfo[][] targets) { Daemon d = new Daemon(threadGroup, new Runnable() { /** Recover a list of blocks. It is run by the primary datanode. 
*/ public void run() { for(int i = 0; i < blocks.length; i++) { try { logRecoverBlock("NameNode", blocks[i], targets[i]); recoverBlock(blocks[i], false, targets[i], true); } catch (IOException e) { LOG.warn("recoverBlocks FAILED, blocks[" + i + "]=" + blocks[i], e); } } } }); d.start(); return d; } /** {@inheritDoc} */ public void updateBlock(Block oldblock, Block newblock, boolean finalize) throws IOException { LOG.info("oldblock=" + oldblock + "(length=" + oldblock.getNumBytes() + "), newblock=" + newblock + "(length=" + newblock.getNumBytes() + "), datanode=" + dnRegistration.getName()); data.updateBlock(oldblock, newblock); if (finalize) { data.finalizeBlock(newblock); myMetrics.blocksWritten.inc(); notifyNamenodeReceivedBlock(newblock, EMPTY_DEL_HINT); LOG.info("Received block " + newblock + " of size " + newblock.getNumBytes() + " as part of lease recovery."); } } /** {@inheritDoc} */ public long getProtocolVersion(String protocol, long clientVersion ) throws IOException { if (protocol.equals(InterDatanodeProtocol.class.getName())) { return InterDatanodeProtocol.versionID; } else if (protocol.equals(ClientDatanodeProtocol.class.getName())) { return ClientDatanodeProtocol.versionID; } throw new IOException("Unknown protocol to " + getClass().getSimpleName() + ": " + protocol); } /** A convenient class used in lease recovery */ private static class BlockRecord { final DatanodeID id; final InterDatanodeProtocol datanode; final Block block; BlockRecord(DatanodeID id, InterDatanodeProtocol datanode, Block block) { this.id = id; this.datanode = datanode; this.block = block; } /** {@inheritDoc} */ public String toString() { return "block:" + block + " node:" + id; } } /** Recover a block */ private LocatedBlock recoverBlock(Block block, boolean keepLength, DatanodeID[] datanodeids, boolean closeFile) throws IOException { // If the block is already being recovered, then skip recovering it. 
// This can happen if the namenode and client start recovering the same // file at the same time. synchronized (ongoingRecovery) { Block tmp = new Block(); tmp.set(block.getBlockId(), block.getNumBytes(), GenerationStamp.WILDCARD_STAMP); if (ongoingRecovery.get(tmp) != null) { String msg = "Block " + block + " is already being recovered, " + " ignoring this request to recover it."; LOG.info(msg); throw new IOException(msg); } ongoingRecovery.put(block, block); } try { List<BlockRecord> syncList = new ArrayList<BlockRecord>(); long minlength = Long.MAX_VALUE; int errorCount = 0; //check generation stamps for(DatanodeID id : datanodeids) { try { InterDatanodeProtocol datanode = dnRegistration.equals(id)? this: DataNode.createInterDataNodeProtocolProxy(id, getConf()); BlockMetaDataInfo info = datanode.getBlockMetaDataInfo(block); if (info != null && info.getGenerationStamp() >= block.getGenerationStamp()) { if (keepLength) { if (info.getNumBytes() == block.getNumBytes()) { syncList.add(new BlockRecord(id, datanode, new Block(info))); } } else { syncList.add(new BlockRecord(id, datanode, new Block(info))); if (info.getNumBytes() < minlength) { minlength = info.getNumBytes(); } } } } catch (IOException e) { ++errorCount; InterDatanodeProtocol.LOG.warn( "Failed to getBlockMetaDataInfo for block (=" + block + ") from datanode (=" + id + ")", e); } } if (syncList.isEmpty() && errorCount > 0) { throw new IOException("All datanodes failed: block=" + block + ", datanodeids=" + Arrays.asList(datanodeids)); } if (!keepLength) { block.setNumBytes(minlength); } return syncBlock(block, syncList, closeFile); } finally { synchronized (ongoingRecovery) { ongoingRecovery.remove(block); } } } /** Block synchronization */ private LocatedBlock syncBlock(Block block, List<BlockRecord> syncList, boolean closeFile) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("block=" + block + ", (length=" + block.getNumBytes() + "), syncList=" + syncList + ", closeFile=" + closeFile); } 
//syncList.isEmpty() that all datanodes do not have the block //so the block can be deleted. if (syncList.isEmpty()) { namenode.commitBlockSynchronization(block, 0, 0, closeFile, true, DatanodeID.EMPTY_ARRAY); return null; } List<DatanodeID> successList = new ArrayList<DatanodeID>(); long generationstamp = namenode.nextGenerationStamp(block); Block newblock = new Block(block.getBlockId(), block.getNumBytes(), generationstamp); for(BlockRecord r : syncList) { try { r.datanode.updateBlock(r.block, newblock, closeFile); successList.add(r.id); } catch (IOException e) { InterDatanodeProtocol.LOG.warn("Failed to updateBlock (newblock=" + newblock + ", datanode=" + r.id + ")", e); } } if (!successList.isEmpty()) { DatanodeID[] nlist = successList.toArray(new DatanodeID[successList.size()]); namenode.commitBlockSynchronization(block, newblock.getGenerationStamp(), newblock.getNumBytes(), closeFile, false, nlist); DatanodeInfo[] info = new DatanodeInfo[nlist.length]; for (int i = 0; i < nlist.length; i++) { info[i] = new DatanodeInfo(nlist[i]); } return new LocatedBlock(newblock, info); // success } //failed StringBuilder b = new StringBuilder(); for(BlockRecord r : syncList) { b.append("\n " + r.id); } throw new IOException("Cannot recover " + block + ", none of these " + syncList.size() + " datanodes success {" + b + "\n}"); } // ClientDataNodeProtocol implementation /** {@inheritDoc} */ public LocatedBlock recoverBlock(Block block, boolean keepLength, DatanodeInfo[] targets ) throws IOException { logRecoverBlock("Client", block, targets); return recoverBlock(block, keepLength, targets, false); } private static void logRecoverBlock(String who, Block block, DatanodeID[] targets) { StringBuilder msg = new StringBuilder(targets[0].getName()); for (int i = 1; i < targets.length; i++) { msg.append(", " + targets[i].getName()); } LOG.info(who + " calls recoverBlock(block=" + block + ", targets=[" + msg + "])"); } } diff --git 
a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java index d41950f..6a324a4 100644 --- a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java @@ -1,1433 +1,1564 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.hdfs.server.datanode; -import java.io.*; -import java.util.*; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.FilenameFilter; +import java.io.IOException; +import java.io.InputStream; +import java.io.RandomAccessFile; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.TreeSet; import javax.management.NotCompliantMBeanException; import javax.management.ObjectName; import javax.management.StandardMBean; -import org.apache.hadoop.fs.*; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.DF; +import org.apache.hadoop.fs.DU; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean; +import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.metrics.util.MBeanUtil; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.DiskChecker; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; -import org.apache.hadoop.conf.*; -import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean; -import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; +import org.mortbay.log.Log; /************************************************** * FSDataset manages a set of data blocks. Each block * has a unique name and an extent on disk. * ***************************************************/ public class FSDataset implements FSConstants, FSDatasetInterface { /** * A node type that can be built into a tree reflecting the * hierarchy of blocks on the local disk. 
*/ class FSDir { File dir; int numBlocks = 0; FSDir children[]; int lastChildIdx = 0; /** */ public FSDir(File dir) throws IOException { this.dir = dir; this.children = null; if (!dir.exists()) { if (!dir.mkdirs()) { throw new IOException("Mkdirs failed to create " + dir.toString()); } } else { File[] files = dir.listFiles(); int numChildren = 0; for (int idx = 0; idx < files.length; idx++) { if (files[idx].isDirectory()) { numChildren++; } else if (Block.isBlockFilename(files[idx])) { numBlocks++; } } if (numChildren > 0) { children = new FSDir[numChildren]; int curdir = 0; for (int idx = 0; idx < files.length; idx++) { if (files[idx].isDirectory()) { children[curdir] = new FSDir(files[idx]); curdir++; } } } } } public File addBlock(Block b, File src) throws IOException { //First try without creating subdirectories File file = addBlock(b, src, false, false); return (file != null) ? file : addBlock(b, src, true, true); } private File addBlock(Block b, File src, boolean createOk, boolean resetIdx) throws IOException { if (numBlocks < maxBlocksPerDir) { File dest = new File(dir, b.getBlockName()); File metaData = getMetaFile( src, b ); File newmeta = getMetaFile(dest, b); if ( ! metaData.renameTo( newmeta ) || ! src.renameTo( dest ) ) { throw new IOException( "could not move files for " + b + " from tmp to " + dest.getAbsolutePath() ); } if (DataNode.LOG.isDebugEnabled()) { DataNode.LOG.debug("addBlock: Moved " + metaData + " to " + newmeta); DataNode.LOG.debug("addBlock: Moved " + src + " to " + dest); } numBlocks += 1; return dest; } if (lastChildIdx < 0 && resetIdx) { //reset so that all children will be checked lastChildIdx = random.nextInt(children.length); } if (lastChildIdx >= 0 && children != null) { //Check if any child-tree has room for a block. 
for (int i=0; i < children.length; i++) { int idx = (lastChildIdx + i)%children.length; File file = children[idx].addBlock(b, src, false, resetIdx); if (file != null) { lastChildIdx = idx; return file; } } lastChildIdx = -1; } if (!createOk) { return null; } if (children == null || children.length == 0) { children = new FSDir[maxBlocksPerDir]; for (int idx = 0; idx < maxBlocksPerDir; idx++) { children[idx] = new FSDir(new File(dir, DataStorage.BLOCK_SUBDIR_PREFIX+idx)); } } //now pick a child randomly for creating a new set of subdirs. lastChildIdx = random.nextInt(children.length); return children[ lastChildIdx ].addBlock(b, src, true, false); } /** Find the metadata file for the specified block file. * Return the generation stamp from the name of the metafile. */ long getGenerationStampFromFile(File[] listdir, File blockFile) { String blockName = blockFile.getName(); for (int j = 0; j < listdir.length; j++) { String path = listdir[j].getName(); if (!path.startsWith(blockName)) { continue; } String[] vals = path.split("_"); if (vals.length != 3) { // blk, blkid, genstamp.meta continue; } String[] str = vals[2].split("\\."); if (str.length != 2) { continue; } return Long.parseLong(str[0]); } DataNode.LOG.warn("Block " + blockFile + " does not have a metafile!"); return Block.GRANDFATHER_GENERATION_STAMP; } /** * Populate the given blockSet with any child blocks * found at this node. 
*/ public void getBlockInfo(TreeSet<Block> blockSet) { if (children != null) { for (int i = 0; i < children.length; i++) { children[i].getBlockInfo(blockSet); } } File blockFiles[] = dir.listFiles(); for (int i = 0; i < blockFiles.length; i++) { if (Block.isBlockFilename(blockFiles[i])) { long genStamp = getGenerationStampFromFile(blockFiles, blockFiles[i]); blockSet.add(new Block(blockFiles[i], blockFiles[i].length(), genStamp)); } } } void getVolumeMap(HashMap<Block, DatanodeBlockInfo> volumeMap, FSVolume volume) { if (children != null) { for (int i = 0; i < children.length; i++) { children[i].getVolumeMap(volumeMap, volume); } } File blockFiles[] = dir.listFiles(); for (int i = 0; i < blockFiles.length; i++) { if (Block.isBlockFilename(blockFiles[i])) { long genStamp = getGenerationStampFromFile(blockFiles, blockFiles[i]); volumeMap.put(new Block(blockFiles[i], blockFiles[i].length(), genStamp), new DatanodeBlockInfo(volume, blockFiles[i])); } } } /** * check if a data diretory is healthy * @throws DiskErrorException */ public void checkDirTree() throws DiskErrorException { DiskChecker.checkDir(dir); if (children != null) { for (int i = 0; i < children.length; i++) { children[i].checkDirTree(); } } } void clearPath(File f) { String root = dir.getAbsolutePath(); String dir = f.getAbsolutePath(); if (dir.startsWith(root)) { String[] dirNames = dir.substring(root.length()). split(File.separator + "subdir"); if (clearPath(f, dirNames, 1)) return; } clearPath(f, null, -1); } /* * dirNames is an array of string integers derived from * usual directory structure data/subdirN/subdirXY/subdirM ... * If dirName array is non-null, we only check the child at * the children[dirNames[idx]]. This avoids iterating over * children in common case. If directory structure changes * in later versions, we need to revisit this. 
*/ private boolean clearPath(File f, String[] dirNames, int idx) { if ((dirNames == null || idx == dirNames.length) && dir.compareTo(f) == 0) { numBlocks--; return true; } if (dirNames != null) { //guess the child index from the directory name if (idx > (dirNames.length - 1) || children == null) { return false; } int childIdx; try { childIdx = Integer.parseInt(dirNames[idx]); } catch (NumberFormatException ignored) { // layout changed? we could print a warning. return false; } return (childIdx >= 0 && childIdx < children.length) ? children[childIdx].clearPath(f, dirNames, idx+1) : false; } //guesses failed. back to blind iteration. if (children != null) { for(int i=0; i < children.length; i++) { if (children[i].clearPath(f, null, -1)){ return true; } } } return false; } public String toString() { return "FSDir{" + "dir=" + dir + ", children=" + (children == null ? null : Arrays.asList(children)) + "}"; } } class FSVolume { private FSDir dataDir; private File tmpDir; private File detachDir; // copy on write for blocks in snapshot private DF usage; private DU dfsUsage; private long reserved; FSVolume(File currentDir, Configuration conf) throws IOException { this.reserved = conf.getLong("dfs.datanode.du.reserved", 0); boolean supportAppends = conf.getBoolean("dfs.support.append", false); File parent = currentDir.getParentFile(); this.detachDir = new File(parent, "detach"); if (detachDir.exists()) { recoverDetachedBlocks(currentDir, detachDir); } // Files that were being written when the datanode was last shutdown // are now moved back to the data directory. It is possible that // in the future, we might want to do some sort of datanode-local // recovery for these blocks. For example, crc validation. 
// this.tmpDir = new File(parent, "tmp"); if (tmpDir.exists()) { if (supportAppends) { recoverDetachedBlocks(currentDir, tmpDir); } else { FileUtil.fullyDelete(tmpDir); } } this.dataDir = new FSDir(currentDir); if (!tmpDir.mkdirs()) { if (!tmpDir.isDirectory()) { throw new IOException("Mkdirs failed to create " + tmpDir.toString()); } } if (!detachDir.mkdirs()) { if (!detachDir.isDirectory()) { throw new IOException("Mkdirs failed to create " + detachDir.toString()); } } this.usage = new DF(parent, conf); this.dfsUsage = new DU(parent, conf); this.dfsUsage.start(); } void decDfsUsed(long value) { dfsUsage.decDfsUsed(value); } long getDfsUsed() throws IOException { return dfsUsage.getUsed(); } long getCapacity() throws IOException { if (reserved > usage.getCapacity()) { return 0; } return usage.getCapacity()-reserved; } long getAvailable() throws IOException { long remaining = getCapacity()-getDfsUsed(); long available = usage.getAvailable(); if (remaining>available) { remaining = available; } return (remaining > 0) ? remaining : 0; } String getMount() throws IOException { return usage.getMount(); } File getDir() { return dataDir.dir; } /** * Temporary files. They get moved to the real block directory either when * the block is finalized or the datanode restarts. */ File createTmpFile(Block b) throws IOException { File f = new File(tmpDir, b.getBlockName()); return createTmpFile(b, f); } /** * Returns the name of the temporary file for this block. */ File getTmpFile(Block b) throws IOException { File f = new File(tmpDir, b.getBlockName()); return f; } /** * Files used for copy-on-write. They need recovery when datanode * restarts. */ File createDetachFile(Block b, String filename) throws IOException { File f = new File(detachDir, filename); return createTmpFile(b, f); } private File createTmpFile(Block b, File f) throws IOException { if (f.exists()) { throw new IOException("Unexpected problem in creating temporary file for "+ b + ". 
File " + f + " should not be present, but is."); } // Create the zero-length temp file // boolean fileCreated = false; try { fileCreated = f.createNewFile(); } catch (IOException ioe) { throw (IOException)new IOException(DISK_ERROR +f).initCause(ioe); } if (!fileCreated) { throw new IOException("Unexpected problem in creating temporary file for "+ b + ". File " + f + " should be creatable, but is already present."); } return f; } File addBlock(Block b, File f) throws IOException { File blockFile = dataDir.addBlock(b, f); File metaFile = getMetaFile( blockFile , b); dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length()); return blockFile; } void checkDirs() throws DiskErrorException { dataDir.checkDirTree(); DiskChecker.checkDir(tmpDir); } void getBlockInfo(TreeSet<Block> blockSet) { dataDir.getBlockInfo(blockSet); } void getVolumeMap(HashMap<Block, DatanodeBlockInfo> volumeMap) { dataDir.getVolumeMap(volumeMap, this); } void clearPath(File f) { dataDir.clearPath(f); } public String toString() { return dataDir.dir.getAbsolutePath(); } /** * Recover detached files on datanode restart. If a detached block * does not exist in the original directory, then it is moved to the * original directory. */ private void recoverDetachedBlocks(File dataDir, File dir) throws IOException { File contents[] = dir.listFiles(); if (contents == null) { return; } for (int i = 0; i < contents.length; i++) { if (!contents[i].isFile()) { throw new IOException ("Found " + contents[i] + " in " + dir + " but it is not a file."); } // // If the original block file still exists, then no recovery // is needed. 
// File blk = new File(dataDir, contents[i].getName()); if (!blk.exists()) { if (!contents[i].renameTo(blk)) { throw new IOException("Unable to recover detached file " + contents[i]); } continue; } if (!contents[i].delete()) { throw new IOException("Unable to cleanup detached file " + contents[i]); } } } } static class FSVolumeSet { FSVolume[] volumes = null; int curVolume = 0; FSVolumeSet(FSVolume[] volumes) { this.volumes = volumes; } + + private int numberOfVolumes() { + return volumes.length; + } synchronized FSVolume getNextVolume(long blockSize) throws IOException { + + if(volumes.length < 1) { + throw new DiskOutOfSpaceException("No more available volumes"); + } + + // since volumes could've been removed because of the failure + // make sure we are not out of bounds + if(curVolume >= volumes.length) { + curVolume = 0; + } + int startVolume = curVolume; + while (true) { FSVolume volume = volumes[curVolume]; curVolume = (curVolume + 1) % volumes.length; if (volume.getAvailable() > blockSize) { return volume; } if (curVolume == startVolume) { throw new DiskOutOfSpaceException("Insufficient space for an additional block"); } } } long getDfsUsed() throws IOException { long dfsUsed = 0L; for (int idx = 0; idx < volumes.length; idx++) { dfsUsed += volumes[idx].getDfsUsed(); } return dfsUsed; } synchronized long getCapacity() throws IOException { long capacity = 0L; for (int idx = 0; idx < volumes.length; idx++) { capacity += volumes[idx].getCapacity(); } return capacity; } synchronized long getRemaining() throws IOException { long remaining = 0L; for (int idx = 0; idx < volumes.length; idx++) { remaining += volumes[idx].getAvailable(); } return remaining; } synchronized void getBlockInfo(TreeSet<Block> blockSet) { for (int idx = 0; idx < volumes.length; idx++) { volumes[idx].getBlockInfo(blockSet); } } synchronized void getVolumeMap(HashMap<Block, DatanodeBlockInfo> volumeMap) { for (int idx = 0; idx < volumes.length; idx++) { volumes[idx].getVolumeMap(volumeMap); 
} } - synchronized void checkDirs() throws DiskErrorException { + /** + * goes over all the volumes and checkDir eachone of them + * if one throws DiskErrorException - removes from the list of active + * volumes. + * @return list of all the removed volumes + */ + synchronized List<FSVolume> checkDirs() { + + ArrayList<FSVolume> removed_vols = null; + for (int idx = 0; idx < volumes.length; idx++) { - volumes[idx].checkDirs(); + FSVolume fsv = volumes[idx]; + try { + fsv.checkDirs(); + } catch (DiskErrorException e) { + DataNode.LOG.warn("Removing failed volume " + fsv + ": ",e); + if(removed_vols == null) { + removed_vols = new ArrayList<FSVolume>(1); + } + removed_vols.add(volumes[idx]); + volumes[idx] = null; //remove the volume + } + } + + // repair array - copy non null elements + int removed_size = (removed_vols==null)? 0 : removed_vols.size(); + if(removed_size > 0) { + FSVolume fsvs[] = new FSVolume [volumes.length-removed_size]; + for(int idx=0,idy=0; idx<volumes.length; idx++) { + if(volumes[idx] != null) { + fsvs[idy] = volumes[idx]; + idy++; + } + } + volumes = fsvs; // replace array of volumes } + Log.info("Completed FSVolumeSet.checkDirs. Removed=" + removed_size + + "volumes. List of current volumes: " + toString()); + + return removed_vols; } public String toString() { StringBuffer sb = new StringBuffer(); for (int idx = 0; idx < volumes.length; idx++) { sb.append(volumes[idx].toString()); if (idx != volumes.length - 1) { sb.append(","); } } return sb.toString(); } } ////////////////////////////////////////////////////// // // FSDataSet // ////////////////////////////////////////////////////// //Find better place? 
public static final String METADATA_EXTENSION = ".meta"; public static final short METADATA_VERSION = 1; static class ActiveFile { final File file; final List<Thread> threads = new ArrayList<Thread>(2); ActiveFile(File f, List<Thread> list) { file = f; if (list != null) { threads.addAll(list); } threads.add(Thread.currentThread()); } public String toString() { return getClass().getSimpleName() + "(file=" + file + ", threads=" + threads + ")"; } } static String getMetaFileName(String blockFileName, long genStamp) { return blockFileName + "_" + genStamp + METADATA_EXTENSION; } static File getMetaFile(File f , Block b) { return new File(getMetaFileName(f.getAbsolutePath(), b.getGenerationStamp())); } protected File getMetaFile(Block b) throws IOException { return getMetaFile(getBlockFile(b), b); } /** Find the corresponding meta data file from a given block file */ private static File findMetaFile(final File blockFile) throws IOException { final String prefix = blockFile.getName() + "_"; final File parent = blockFile.getParentFile(); File[] matches = parent.listFiles(new FilenameFilter() { public boolean accept(File dir, String name) { return dir.equals(parent) && name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION); } }); if (matches == null || matches.length == 0) { throw new IOException("Meta file not found, blockFile=" + blockFile); } else if (matches.length > 1) { throw new IOException("Found more than one meta files: " + Arrays.asList(matches)); } return matches[0]; } /** Find the corresponding meta data file from a given block file */ private static long parseGenerationStamp(File blockFile, File metaFile ) throws IOException { String metaname = metaFile.getName(); String gs = metaname.substring(blockFile.getName().length() + 1, metaname.length() - METADATA_EXTENSION.length()); try { return Long.parseLong(gs); } catch(NumberFormatException nfe) { throw (IOException)new IOException("blockFile=" + blockFile + ", metaFile=" + metaFile).initCause(nfe); } } 
/** Return the block file for the given ID */ public File findBlockFile(long blockId) { final Block b = new Block(blockId); File blockfile = null; ActiveFile activefile = ongoingCreates.get(b); if (activefile != null) { blockfile = activefile.file; } if (blockfile == null) { blockfile = getFile(b); } if (blockfile == null) { if (DataNode.LOG.isDebugEnabled()) { DataNode.LOG.debug("ongoingCreates=" + ongoingCreates); DataNode.LOG.debug("volumeMap=" + volumeMap); } } return blockfile; } /** {@inheritDoc} */ public synchronized Block getStoredBlock(long blkid) throws IOException { File blockfile = findBlockFile(blkid); if (blockfile == null) { return null; } File metafile = findMetaFile(blockfile); return new Block(blkid, blockfile.length(), parseGenerationStamp(blockfile, metafile)); } public boolean metaFileExists(Block b) throws IOException { return getMetaFile(b).exists(); } public long getMetaDataLength(Block b) throws IOException { File checksumFile = getMetaFile( b ); return checksumFile.length(); } public MetaDataInputStream getMetaDataInputStream(Block b) throws IOException { File checksumFile = getMetaFile( b ); return new MetaDataInputStream(new FileInputStream(checksumFile), checksumFile.length()); } FSVolumeSet volumes; private HashMap<Block,ActiveFile> ongoingCreates = new HashMap<Block,ActiveFile>(); private int maxBlocksPerDir = 0; private HashMap<Block,DatanodeBlockInfo> volumeMap = null; static Random random = new Random(); /** * An FSDataset has a directory where it loads its data files. 
*/ public FSDataset(DataStorage storage, Configuration conf) throws IOException { this.maxBlocksPerDir = conf.getInt("dfs.datanode.numblocks", 64); FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()]; for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) { volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(), conf); } volumes = new FSVolumeSet(volArray); volumeMap = new HashMap<Block, DatanodeBlockInfo>(); volumes.getVolumeMap(volumeMap); registerMBean(storage.getStorageID()); } /** * Return the total space used by dfs datanode */ public long getDfsUsed() throws IOException { return volumes.getDfsUsed(); } - + /** + * Return true - if there are still valid volumes + * on the DataNode + */ + public boolean hasEnoughResource(){ + return volumes.numberOfVolumes() >= MIN_NUM_OF_VALID_VOLUMES; + } + /** * Return total capacity, used and unused */ public long getCapacity() throws IOException { return volumes.getCapacity(); } /** * Return how many bytes can still be stored in the FSDataset */ public long getRemaining() throws IOException { return volumes.getRemaining(); } /** * Find the block's on-disk length */ public long getLength(Block b) throws IOException { return getBlockFile(b).length(); } /** * Get File name for a given block. 
*/ public synchronized File getBlockFile(Block b) throws IOException { File f = validateBlockFile(b); if(f == null) { if (InterDatanodeProtocol.LOG.isDebugEnabled()) { InterDatanodeProtocol.LOG.debug("b=" + b + ", volumeMap=" + volumeMap); } throw new IOException("Block " + b + " is not valid."); } return f; } public synchronized InputStream getBlockInputStream(Block b) throws IOException { return new FileInputStream(getBlockFile(b)); } public synchronized InputStream getBlockInputStream(Block b, long seekOffset) throws IOException { File blockFile = getBlockFile(b); RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r"); if (seekOffset > 0) { blockInFile.seek(seekOffset); } return new FileInputStream(blockInFile.getFD()); } /** * Returns handles to the block file and its metadata file */ public synchronized BlockInputStreams getTmpInputStreams(Block b, long blkOffset, long ckoff) throws IOException { DatanodeBlockInfo info = volumeMap.get(b); if (info == null) { throw new IOException("Block " + b + " does not exist in volumeMap."); } FSVolume v = info.getVolume(); File blockFile = v.getTmpFile(b); RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r"); if (blkOffset > 0) { blockInFile.seek(blkOffset); } File metaFile = getMetaFile(blockFile, b); RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r"); if (ckoff > 0) { metaInFile.seek(ckoff); } return new BlockInputStreams(new FileInputStream(blockInFile.getFD()), new FileInputStream(metaInFile.getFD())); } private BlockWriteStreams createBlockWriteStreams( File f , File metafile) throws IOException { return new BlockWriteStreams(new FileOutputStream(new RandomAccessFile( f , "rw" ).getFD()), new FileOutputStream( new RandomAccessFile( metafile , "rw" ).getFD() )); } /** * Make a copy of the block if this block is linked to an existing * snapshot. This ensures that modifying this block does not modify * data in any existing snapshots. 
* @param block Block * @param numLinks Detach if the number of links exceed this value * @throws IOException * @return - true if the specified block was detached */ public boolean detachBlock(Block block, int numLinks) throws IOException { DatanodeBlockInfo info = null; synchronized (this) { info = volumeMap.get(block); } return info.detachBlock(block, numLinks); } static private <T> void updateBlockMap(Map<Block, T> blockmap, Block oldblock, Block newblock) throws IOException { if (blockmap.containsKey(oldblock)) { T value = blockmap.remove(oldblock); blockmap.put(newblock, value); } } /** {@inheritDoc} */ public void updateBlock(Block oldblock, Block newblock) throws IOException { if (oldblock.getBlockId() != newblock.getBlockId()) { throw new IOException("Cannot update oldblock (=" + oldblock + ") to newblock (=" + newblock + ")."); } for(;;) { final List<Thread> threads = tryUpdateBlock(oldblock, newblock); if (threads == null) { return; } // interrupt and wait for all ongoing create threads for(Thread t : threads) { t.interrupt(); } for(Thread t : threads) { try { t.join(); } catch (InterruptedException e) { DataNode.LOG.warn("interruptOngoingCreates: t=" + t, e); } } } } /** * Try to update an old block to a new block. * If there are ongoing create threads running for the old block, * the threads will be returned without updating the block. * * @return ongoing create threads if there is any. Otherwise, return null. 
*/ private synchronized List<Thread> tryUpdateBlock( Block oldblock, Block newblock) throws IOException { //check ongoing create threads final ActiveFile activefile = ongoingCreates.get(oldblock); if (activefile != null && !activefile.threads.isEmpty()) { //remove dead threads for(Iterator<Thread> i = activefile.threads.iterator(); i.hasNext(); ) { final Thread t = i.next(); if (!t.isAlive()) { i.remove(); } } //return living threads if (!activefile.threads.isEmpty()) { return new ArrayList<Thread>(activefile.threads); } } //No ongoing create threads is alive. Update block. File blockFile = findBlockFile(oldblock.getBlockId()); if (blockFile == null) { throw new IOException("Block " + oldblock + " does not exist."); } File oldMetaFile = findMetaFile(blockFile); long oldgs = parseGenerationStamp(blockFile, oldMetaFile); //rename meta file to a tmp file File tmpMetaFile = new File(oldMetaFile.getParent(), oldMetaFile.getName()+"_tmp" + newblock.getGenerationStamp()); if (!oldMetaFile.renameTo(tmpMetaFile)){ throw new IOException("Cannot rename block meta file to " + tmpMetaFile); } //update generation stamp if (oldgs > newblock.getGenerationStamp()) { throw new IOException("Cannot update block (id=" + newblock.getBlockId() + ") generation stamp from " + oldgs + " to " + newblock.getGenerationStamp()); } //update length if (newblock.getNumBytes() > oldblock.getNumBytes()) { throw new IOException("Cannot update block file (=" + blockFile + ") length from " + oldblock.getNumBytes() + " to " + newblock.getNumBytes()); } if (newblock.getNumBytes() < oldblock.getNumBytes()) { truncateBlock(blockFile, tmpMetaFile, oldblock.getNumBytes(), newblock.getNumBytes()); } //rename the tmp file to the new meta file (with new generation stamp) File newMetaFile = getMetaFile(blockFile, newblock); if (!tmpMetaFile.renameTo(newMetaFile)) { throw new IOException("Cannot rename tmp meta file to " + newMetaFile); } updateBlockMap(ongoingCreates, oldblock, newblock); 
updateBlockMap(volumeMap, oldblock, newblock); // paranoia! verify that the contents of the stored block // matches the block file on disk. validateBlockMetadata(newblock); return null; } static private void truncateBlock(File blockFile, File metaFile, long oldlen, long newlen) throws IOException { if (newlen == oldlen) { return; } if (newlen > oldlen) { throw new IOException("Cannout truncate block to from oldlen (=" + oldlen + ") to newlen (=" + newlen + ")"); } DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum(); int checksumsize = dcs.getChecksumSize(); int bpc = dcs.getBytesPerChecksum(); long n = (newlen - 1)/bpc + 1; long newmetalen = BlockMetadataHeader.getHeaderSize() + n*checksumsize; long lastchunkoffset = (n - 1)*bpc; int lastchunksize = (int)(newlen - lastchunkoffset); byte[] b = new byte[Math.max(lastchunksize, checksumsize)]; RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw"); try { //truncate blockFile blockRAF.setLength(newlen); //read last chunk blockRAF.seek(lastchunkoffset); blockRAF.readFully(b, 0, lastchunksize); } finally { blockRAF.close(); } //compute checksum dcs.update(b, 0, lastchunksize); dcs.writeValue(b, 0, false); //update metaFile RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw"); try { metaRAF.setLength(newmetalen); metaRAF.seek(newmetalen - checksumsize); metaRAF.write(b, 0, checksumsize); } finally { metaRAF.close(); } } private final static String DISK_ERROR = "Possible disk error on file creation: "; /** Get the cause of an I/O exception if caused by a possible disk error * @param ioe an I/O exception * @return cause if the I/O exception is caused by a possible disk error; * null otherwise. 
*/ static IOException getCauseIfDiskError(IOException ioe) { if (ioe.getMessage()!=null && ioe.getMessage().startsWith(DISK_ERROR)) { return (IOException)ioe.getCause(); } else { return null; } } /** * Start writing to a block file * If isRecovery is true and the block pre-exists, then we kill all volumeMap.put(b, v); volumeMap.put(b, v); * other threads that might be writing to this block, and then reopen the file. */ public BlockWriteStreams writeToBlock(Block b, boolean isRecovery) throws IOException { // // Make sure the block isn't a valid one - we're still creating it! // if (isValidBlock(b)) { if (!isRecovery) { throw new BlockAlreadyExistsException("Block " + b + " is valid, and cannot be written to."); } // If the block was successfully finalized because all packets // were successfully processed at the Datanode but the ack for // some of the packets were not received by the client. The client // re-opens the connection and retries sending those packets. // The other reason is that an "append" is occurring to this block. detachBlock(b, 1); } long blockSize = b.getNumBytes(); // // Serialize access to /tmp, and check if file already there. // File f = null; List<Thread> threads = null; synchronized (this) { // // Is it already in the create process? 
// ActiveFile activeFile = ongoingCreates.get(b); if (activeFile != null) { f = activeFile.file; threads = activeFile.threads; if (!isRecovery) { throw new BlockAlreadyExistsException("Block " + b + " has already been started (though not completed), and thus cannot be created."); } else { for (Thread thread:threads) { thread.interrupt(); } } ongoingCreates.remove(b); } FSVolume v = null; if (!isRecovery) { v = volumes.getNextVolume(blockSize); // create temporary file to hold block in the designated volume f = createTmpFile(v, b); volumeMap.put(b, new DatanodeBlockInfo(v)); } else if (f != null) { DataNode.LOG.info("Reopen already-open Block for append " + b); // create or reuse temporary file to hold block in the designated volume v = volumeMap.get(b).getVolume(); volumeMap.put(b, new DatanodeBlockInfo(v)); } else { // reopening block for appending to it. DataNode.LOG.info("Reopen Block for append " + b); v = volumeMap.get(b).getVolume(); f = createTmpFile(v, b); File blkfile = getBlockFile(b); File oldmeta = getMetaFile(b); File newmeta = getMetaFile(f, b); // rename meta file to tmp directory DataNode.LOG.debug("Renaming " + oldmeta + " to " + newmeta); if (!oldmeta.renameTo(newmeta)) { throw new IOException("Block " + b + " reopen failed. " + " Unable to move meta file " + oldmeta + " to tmp dir " + newmeta); } // rename block file to tmp directory DataNode.LOG.debug("Renaming " + blkfile + " to " + f); if (!blkfile.renameTo(f)) { if (!f.delete()) { throw new IOException("Block " + b + " reopen failed. " + " Unable to remove file " + f); } if (!blkfile.renameTo(f)) { throw new IOException("Block " + b + " reopen failed. 
" + " Unable to move block file " + blkfile + " to tmp dir " + f); } } volumeMap.put(b, new DatanodeBlockInfo(v)); } if (f == null) { DataNode.LOG.warn("Block " + b + " reopen failed " + " Unable to locate tmp file."); throw new IOException("Block " + b + " reopen failed " + " Unable to locate tmp file."); } ongoingCreates.put(b, new ActiveFile(f, threads)); } try { if (threads != null) { for (Thread thread:threads) { thread.join(); } } } catch (InterruptedException e) { throw new IOException("Recovery waiting for thread interrupted."); } // // Finally, allow a writer to the block file // REMIND - mjc - make this a filter stream that enforces a max // block size, so clients can't go crazy // File metafile = getMetaFile(f, b); DataNode.LOG.debug("writeTo blockfile is " + f + " of size " + f.length()); DataNode.LOG.debug("writeTo metafile is " + metafile + " of size " + metafile.length()); return createBlockWriteStreams( f , metafile); } /** * Retrieves the offset in the block to which the * the next write will write data to. */ public long getChannelPosition(Block b, BlockWriteStreams streams) throws IOException { FileOutputStream file = (FileOutputStream) streams.dataOut; return file.getChannel().position(); } /** * Sets the offset in the block to which the * the next write will write data to. 
*/ public void setChannelPosition(Block b, BlockWriteStreams streams, long dataOffset, long ckOffset) throws IOException { long size = 0; synchronized (this) { FSVolume vol = volumeMap.get(b).getVolume(); size = vol.getTmpFile(b).length(); } if (size < dataOffset) { String msg = "Trying to change block file offset of block " + b + " to " + dataOffset + " but actual size of file is " + size; throw new IOException(msg); } FileOutputStream file = (FileOutputStream) streams.dataOut; file.getChannel().position(dataOffset); file = (FileOutputStream) streams.checksumOut; file.getChannel().position(ckOffset); } synchronized File createTmpFile( FSVolume vol, Block blk ) throws IOException { if ( vol == null ) { vol = volumeMap.get( blk ).getVolume(); if ( vol == null ) { throw new IOException("Could not find volume for block " + blk); } } return vol.createTmpFile(blk); } // // REMIND - mjc - eventually we should have a timeout system // in place to clean up block files left by abandoned clients. // We should have some timer in place, so that if a blockfile // is created but non-valid, and has been idle for >48 hours, // we can GC it safely. // /** * Complete the block write! 
*/ public synchronized void finalizeBlock(Block b) throws IOException { ActiveFile activeFile = ongoingCreates.get(b); if (activeFile == null) { throw new IOException("Block " + b + " is already finalized."); } File f = activeFile.file; if (f == null || !f.exists()) { throw new IOException("No temporary file " + f + " for block " + b); } FSVolume v = volumeMap.get(b).getVolume(); if (v == null) { throw new IOException("No volume for temporary file " + f + " for block " + b); } File dest = null; dest = v.addBlock(b, f); volumeMap.put(b, new DatanodeBlockInfo(v, dest)); ongoingCreates.remove(b); } /** * Remove the temporary block file (if any) */ public synchronized void unfinalizeBlock(Block b) throws IOException { // remove the block from in-memory data structure ActiveFile activefile = ongoingCreates.remove(b); if (activefile == null) { return; } volumeMap.remove(b); // delete the on-disk temp file if (delBlockFromDisk(activefile.file, getMetaFile(activefile.file, b), b)) { DataNode.LOG.warn("Block " + b + " unfinalized and removed. 
" ); } } /** * Remove a block from disk * @param blockFile block file * @param metaFile block meta file * @param b a block * @return true if on-disk files are deleted; false otherwise */ private boolean delBlockFromDisk(File blockFile, File metaFile, Block b) { if (blockFile == null) { DataNode.LOG.warn("No file exists for block: " + b); return true; } if (!blockFile.delete()) { DataNode.LOG.warn("Not able to delete the block file: " + blockFile); return false; } else { // remove the meta file if (metaFile != null && !metaFile.delete()) { DataNode.LOG.warn( "Not able to delete the meta block file: " + metaFile); return false; } } return true; } /** * Return a table of block data */ public Block[] getBlockReport() { TreeSet<Block> blockSet = new TreeSet<Block>(); volumes.getBlockInfo(blockSet); Block blockTable[] = new Block[blockSet.size()]; int i = 0; for (Iterator<Block> it = blockSet.iterator(); it.hasNext(); i++) { blockTable[i] = it.next(); } return blockTable; } /** * Check whether the given block is a valid one. */ public boolean isValidBlock(Block b) { - return validateBlockFile(b) != null; + File f = null;; + try { + f = validateBlockFile(b); + } catch(IOException e) { + Log.warn("Block " + b + " is not valid:",e); + } + + return f != null; } /** * Find the file corresponding to the block and return it if it exists. */ - File validateBlockFile(Block b) { + File validateBlockFile(Block b) throws IOException { //Should we check for metadata file too? 
File f = getFile(b); - if(f != null && f.exists()) - return f; + + if(f != null ) { + if(f.exists()) + return f; + + // if file is not null, but doesn't exist - possibly disk failed + DataNode datanode = DataNode.getDataNode(); + datanode.checkDiskError(); + } + if (InterDatanodeProtocol.LOG.isDebugEnabled()) { InterDatanodeProtocol.LOG.debug("b=" + b + ", f=" + f); } return null; } /** {@inheritDoc} */ public void validateBlockMetadata(Block b) throws IOException { DatanodeBlockInfo info = volumeMap.get(b); if (info == null) { throw new IOException("Block " + b + " does not exist in volumeMap."); } FSVolume v = info.getVolume(); File tmp = v.getTmpFile(b); File f = getFile(b); if (f == null) { f = tmp; } if (f == null) { throw new IOException("Block " + b + " does not exist on disk."); } if (!f.exists()) { throw new IOException("Block " + b + " block file " + f + " does not exist on disk."); } if (b.getNumBytes() != f.length()) { throw new IOException("Block " + b + " length is " + b.getNumBytes() + " does not match block file length " + f.length()); } File meta = getMetaFile(f, b); if (meta == null) { throw new IOException("Block " + b + " metafile does not exist."); } if (!meta.exists()) { throw new IOException("Block " + b + " metafile " + meta + " does not exist on disk."); } if (meta.length() == 0) { throw new IOException("Block " + b + " metafile " + meta + " is empty."); } long stamp = parseGenerationStamp(f, meta); if (stamp != b.getGenerationStamp()) { throw new IOException("Block " + b + " genstamp is " + b.getGenerationStamp() + " does not match meta file stamp " + stamp); } } /** * We're informed that a block is no longer valid. We * could lazily garbage-collect the block, but why bother? * just get rid of it. 
*/ public void invalidate(Block invalidBlks[]) throws IOException { boolean error = false; for (int i = 0; i < invalidBlks.length; i++) { File f = null; FSVolume v; synchronized (this) { f = getFile(invalidBlks[i]); DatanodeBlockInfo dinfo = volumeMap.get(invalidBlks[i]); if (dinfo == null) { DataNode.LOG.warn("Unexpected error trying to delete block " + invalidBlks[i] + ". BlockInfo not found in volumeMap."); error = true; continue; } v = dinfo.getVolume(); if (f == null) { DataNode.LOG.warn("Unexpected error trying to delete block " + invalidBlks[i] + ". Block not found in blockMap." + ((v == null) ? " " : " Block found in volumeMap.")); error = true; continue; } if (v == null) { DataNode.LOG.warn("Unexpected error trying to delete block " + invalidBlks[i] + ". No volume for this block." + " Block found in blockMap. " + f + "."); error = true; continue; } File parent = f.getParentFile(); if (parent == null) { DataNode.LOG.warn("Unexpected error trying to delete block " + invalidBlks[i] + ". Parent not found for file " + f + "."); error = true; continue; } v.clearPath(parent); volumeMap.remove(invalidBlks[i]); } File metaFile = getMetaFile( f, invalidBlks[i] ); long blockSize = f.length()+metaFile.length(); if ( !f.delete() || ( !metaFile.delete() && metaFile.exists() ) ) { DataNode.LOG.warn("Unexpected error trying to delete block " + invalidBlks[i] + " at file " + f); error = true; continue; } v.decDfsUsed(blockSize); DataNode.LOG.info("Deleting block " + invalidBlks[i] + " file " + f); if (f.exists()) { // // This is a temporary check especially for hadoop-1220. // This will go away in the future. // DataNode.LOG.info("File " + f + " was deleted but still exists!"); } } if (error) { throw new IOException("Error in deleting blocks."); } } /** * Turn the block identifier into a filename. 
*/ public synchronized File getFile(Block b) { DatanodeBlockInfo info = volumeMap.get(b); if (info != null) { return info.getFile(); } return null; } /** * check if a data directory is healthy + * if some volumes failed - make sure to remove all the blocks that belong + * to these volumes * @throws DiskErrorException */ public void checkDataDir() throws DiskErrorException { - volumes.checkDirs(); + long total_blocks=0, removed_blocks=0; + List<FSVolume> failed_vols = volumes.checkDirs(); + + //if there no failed volumes return + if(failed_vols == null) + return; + + // else + // remove related blocks + long mlsec = System.currentTimeMillis(); + synchronized (this) { + Iterator<Block> ib = volumeMap.keySet().iterator(); + while(ib.hasNext()) { + Block b = ib.next(); + total_blocks ++; + // check if the volume block belongs to still valid + FSVolume vol = volumeMap.get(b).getVolume(); + for(FSVolume fv: failed_vols) { + if(vol == fv) { + DataNode.LOG.warn("removing block " + b.getBlockId() + " from vol " + + vol.dataDir.dir.getAbsolutePath()); + ib.remove(); + removed_blocks++; + break; + } + } + } + } // end of sync + mlsec = System.currentTimeMillis() - mlsec; + DataNode.LOG.warn(">>>>>>>>>>>>Removed " + removed_blocks + " out of " + total_blocks + + "(took " + mlsec + " millisecs)"); + + // report the error + StringBuilder sb = new StringBuilder(); + for(FSVolume fv : failed_vols) { + sb.append(fv.dataDir.dir.getAbsolutePath() + ";"); + } + + throw new DiskErrorException("DataNode failed volumes:" + sb); + } public String toString() { return "FSDataset{dirpath='"+volumes+"'}"; } private ObjectName mbeanName; private Random rand = new Random(); /** * Register the FSDataset MBean using the name * "hadoop:service=DataNode,name=FSDatasetState-<storageid>" */ void registerMBean(final String storageId) { // We wrap to bypass standard mbean naming convetion. // This wraping can be removed in java 6 as it is more flexible in // package naming for mbeans and their impl. 
StandardMBean bean; String storageName; if (storageId == null || storageId.equals("")) {// Temp fix for the uninitialized storage storageName = "UndefinedStorageId" + rand.nextInt(); } else { storageName = storageId; } try { bean = new StandardMBean(this,FSDatasetMBean.class); mbeanName = MBeanUtil.registerMBean("DataNode", "FSDatasetState-" + storageName, bean); } catch (NotCompliantMBeanException e) { e.printStackTrace(); } DataNode.LOG.info("Registered FSDatasetStatusMBean"); } public void shutdown() { if (mbeanName != null) MBeanUtil.unregisterMBean(mbeanName); if(volumes != null) { for (FSVolume volume : volumes.volumes) { if(volume != null) { volume.dfsUsage.shutdown(); } } } } public String getStorageInfo() { return toString(); } } diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java index 56070ff..60be87a 100644 --- a/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java +++ b/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java @@ -1,267 +1,273 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.hdfs.server.datanode; import java.io.Closeable; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.DiskChecker.DiskErrorException; /** * This is an interface for the underlying storage that stores blocks for * a data node. * Examples are the FSDataset (which stores blocks on dirs) and * SimulatedFSDataset (which simulates data). * */ public interface FSDatasetInterface extends FSDatasetMBean { /** * Returns the length of the metadata file of the specified block * @param b - the block for which the metadata length is desired * @return the length of the metadata file for the specified block. * @throws IOException */ public long getMetaDataLength(Block b) throws IOException; /** * This class provides the input stream and length of the metadata * of a block * */ static class MetaDataInputStream extends FilterInputStream { MetaDataInputStream(InputStream stream, long len) { super(stream); length = len; } private long length; public long getLength() { return length; } } /** * Returns metaData of block b as an input stream (and its length) * @param b - the block * @return the metadata input stream; * @throws IOException */ public MetaDataInputStream getMetaDataInputStream(Block b) throws IOException; /** * Does the meta file exist for this block? * @param b - the block * @return true of the metafile for specified block exits * @throws IOException */ public boolean metaFileExists(Block b) throws IOException; /** * Returns the specified block's on-disk length (excluding metadata) * @param b * @return the specified block's on-disk length (excluding metadta) * @throws IOException */ public long getLength(Block b) throws IOException; /** * @return the generation stamp stored with the block. 
*/ public Block getStoredBlock(long blkid) throws IOException; /** * Returns an input stream to read the contents of the specified block * @param b * @return an input stream to read the contents of the specified block * @throws IOException */ public InputStream getBlockInputStream(Block b) throws IOException; /** * Returns an input stream at specified offset of the specified block * @param b * @param seekOffset * @return an input stream to read the contents of the specified block, * starting at the offset * @throws IOException */ public InputStream getBlockInputStream(Block b, long seekOffset) throws IOException; /** * Returns an input stream at specified offset of the specified block * The block is still in the tmp directory and is not finalized * @param b * @param blkoff * @param ckoff * @return an input stream to read the contents of the specified block, * starting at the offset * @throws IOException */ public BlockInputStreams getTmpInputStreams(Block b, long blkoff, long ckoff) throws IOException; /** * * This class contains the output streams for the data and checksum * of a block * */ static class BlockWriteStreams { OutputStream dataOut; OutputStream checksumOut; BlockWriteStreams(OutputStream dOut, OutputStream cOut) { dataOut = dOut; checksumOut = cOut; } } /** * This class contains the input streams for the data and checksum * of a block */ static class BlockInputStreams implements Closeable { final InputStream dataIn; final InputStream checksumIn; BlockInputStreams(InputStream dataIn, InputStream checksumIn) { this.dataIn = dataIn; this.checksumIn = checksumIn; } /** {@inheritDoc} */ public void close() { IOUtils.closeStream(dataIn); IOUtils.closeStream(checksumIn); } } /** * Creates the block and returns output streams to write data and CRC * @param b * @param isRecovery True if this is part of erro recovery, otherwise false * @return a BlockWriteStreams object to allow writing the block data * and CRC * @throws IOException */ public BlockWriteStreams 
writeToBlock(Block b, boolean isRecovery) throws IOException; /** * Update the block to the new generation stamp and length. */ public void updateBlock(Block oldblock, Block newblock) throws IOException; /** * Finalizes the block previously opened for writing using writeToBlock. * The block size is what is in the parameter b and it must match the amount * of data written * @param b * @throws IOException */ public void finalizeBlock(Block b) throws IOException; /** * Unfinalizes the block previously opened for writing using writeToBlock. * The temporary file associated with this block is deleted. * @param b * @throws IOException */ public void unfinalizeBlock(Block b) throws IOException; /** * Returns the block report - the full list of blocks stored * @return - the block report - the full list of blocks stored */ public Block[] getBlockReport(); /** * Is the block valid? * @param b * @return - true if the specified block is valid */ public boolean isValidBlock(Block b); /** * Invalidates the specified blocks * @param invalidBlks - the blocks to be invalidated * @throws IOException */ public void invalidate(Block invalidBlks[]) throws IOException; /** * Check if all the data directories are healthy * @throws DiskErrorException */ public void checkDataDir() throws DiskErrorException; /** * Stringifies the name of the storage */ public String toString(); /** * Shutdown the FSDataset */ public void shutdown(); /** * Returns the current offset in the data stream. * @param b * @param stream The stream to the data file and checksum file * @return the position of the file pointer in the data stream * @throws IOException */ public long getChannelPosition(Block b, BlockWriteStreams stream) throws IOException; /** * Sets the file pointer of the data stream and checksum stream to * the specified values. 
* @param b * @param stream The stream for the data file and checksum file * @param dataOffset The position to which the file pointre for the data stream * should be set * @param ckOffset The position to which the file pointre for the checksum stream * should be set * @throws IOException */ public void setChannelPosition(Block b, BlockWriteStreams stream, long dataOffset, long ckOffset) throws IOException; /** * Validate that the contents in the Block matches * the file on disk. Returns true if everything is fine. * @param b The block to be verified. * @throws IOException */ public void validateBlockMetadata(Block b) throws IOException; + + /** + * checks how many valid storage volumes are there in the DataNode + * @return true if more then minimum valid volumes left in the FSDataSet + */ + public boolean hasEnoughResource(); } diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 2da9d70..15a221d 100644 --- a/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -233,741 +233,743 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, this.httpServer.setAttribute("datanode.https.port", datanodeSslPort .getPort()); } this.httpServer.setAttribute("name.node", this); this.httpServer.setAttribute("name.node.address", getNameNodeAddress()); this.httpServer.setAttribute("name.system.image", getFSImage()); this.httpServer.setAttribute("name.conf", conf); this.httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class); this.httpServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class); this.httpServer.addInternalServlet("listPaths", "/listPaths/*", ListPathsServlet.class); this.httpServer.addInternalServlet("data", "/data/*", FileDataServlet.class); this.httpServer.addInternalServlet("checksum", "/fileChecksum/*", FileChecksumServlets.RedirectServlet.class); 
this.httpServer.start(); // The web-server port can be ephemeral... ensure we have the correct info infoPort = this.httpServer.getPort(); this.httpAddress = new InetSocketAddress(infoHost, infoPort); conf.set("dfs.http.address", infoHost + ":" + infoPort); LOG.info("Web-server up at: " + infoHost + ":" + infoPort); } /** * Start NameNode. * <p> * The name-node can be started with one of the following startup options: * <ul> * <li>{@link StartupOption#REGULAR REGULAR} - normal name node startup</li> * <li>{@link StartupOption#FORMAT FORMAT} - format name node</li> * <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster * upgrade and create a snapshot of the current file system state</li> * <li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the * cluster back to the previous state</li> * </ul> * The option is passed via configuration field: * <tt>dfs.namenode.startup</tt> * * The conf will be modified to reflect the actual ports on which * the NameNode is up and running if the user passes the port as * <code>zero</code> in the conf. * * @param conf confirguration * @throws IOException */ public NameNode(Configuration conf) throws IOException { try { initialize(conf); } catch (IOException e) { this.stop(); throw e; } } /** * Wait for service to finish. * (Normally, it runs forever.) */ public void join() { try { this.server.join(); } catch (InterruptedException ie) { } } /** * Stop all NameNode threads and wait for all to finish. 
*/ public void stop() { if (stopRequested) return; stopRequested = true; try { if (httpServer != null) httpServer.stop(); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); } if(namesystem != null) namesystem.close(); if(emptier != null) emptier.interrupt(); if(server != null) server.stop(); if (myMetrics != null) { myMetrics.shutdown(); } if (namesystem != null) { namesystem.shutdown(); } } ///////////////////////////////////////////////////// // NamenodeProtocol ///////////////////////////////////////////////////// /** * return a list of blocks & their locations on <code>datanode</code> whose * total size is <code>size</code> * * @param datanode on which blocks are located * @param size total size of blocks */ public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size) throws IOException { if(size <= 0) { throw new IllegalArgumentException( "Unexpected not positive size: "+size); } return namesystem.getBlocks(datanode, size); } ///////////////////////////////////////////////////// // ClientProtocol ///////////////////////////////////////////////////// /** {@inheritDoc} */ public LocatedBlocks getBlockLocations(String src, long offset, long length) throws IOException { myMetrics.numGetBlockLocations.inc(); return namesystem.getBlockLocations(getClientMachine(), src, offset, length); } private static String getClientMachine() { String clientMachine = Server.getRemoteAddress(); if (clientMachine == null) { clientMachine = ""; } return clientMachine; } /** {@inheritDoc} */ public void create(String src, FsPermission masked, String clientName, boolean overwrite, short replication, long blockSize ) throws IOException { String clientMachine = getClientMachine(); if (stateChangeLog.isDebugEnabled()) { stateChangeLog.debug("*DIR* NameNode.create: file " +src+" for "+clientName+" at "+clientMachine); } if (!checkPathLength(src)) { throw new IOException("create: Pathname too long. 
Limit " + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); } namesystem.startFile(src, new PermissionStatus(UserGroupInformation.getCurrentUGI().getUserName(), null, masked), clientName, clientMachine, overwrite, replication, blockSize); myMetrics.numFilesCreated.inc(); myMetrics.numCreateFileOps.inc(); } /** {@inheritDoc} */ public LocatedBlock append(String src, String clientName) throws IOException { String clientMachine = getClientMachine(); if (stateChangeLog.isDebugEnabled()) { stateChangeLog.debug("*DIR* NameNode.append: file " +src+" for "+clientName+" at "+clientMachine); } LocatedBlock info = namesystem.appendFile(src, clientName, clientMachine); myMetrics.numFilesAppended.inc(); return info; } /** {@inheritDoc} */ public boolean setReplication(String src, short replication ) throws IOException { return namesystem.setReplication(src, replication); } /** {@inheritDoc} */ public void setPermission(String src, FsPermission permissions ) throws IOException { namesystem.setPermission(src, permissions); } /** {@inheritDoc} */ public void setOwner(String src, String username, String groupname ) throws IOException { namesystem.setOwner(src, username, groupname); } /** */ public LocatedBlock addBlock(String src, String clientName) throws IOException { stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " +src+" for "+clientName); LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, clientName); if (locatedBlock != null) myMetrics.numAddBlockOps.inc(); return locatedBlock; } /** * The client needs to give up on the block. 
*/ public void abandonBlock(Block b, String src, String holder ) throws IOException { stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: " +b+" of file "+src); if (!namesystem.abandonBlock(b, src, holder)) { throw new IOException("Cannot abandon block during write to " + src); } } /** {@inheritDoc} */ public boolean complete(String src, String clientName) throws IOException { stateChangeLog.debug("*DIR* NameNode.complete: " + src + " for " + clientName); CompleteFileStatus returnCode = namesystem.completeFile(src, clientName); if (returnCode == CompleteFileStatus.STILL_WAITING) { return false; } else if (returnCode == CompleteFileStatus.COMPLETE_SUCCESS) { return true; } else { throw new IOException("Could not complete write to file " + src + " by " + clientName); } } /** * The client has detected an error on the specified located blocks * and is reporting them to the server. For now, the namenode will * mark the block as corrupt. In the future we might * check the blocks are actually corrupt. 
*/ public void reportBadBlocks(LocatedBlock[] blocks) throws IOException { stateChangeLog.info("*DIR* NameNode.reportBadBlocks"); for (int i = 0; i < blocks.length; i++) { Block blk = blocks[i].getBlock(); DatanodeInfo[] nodes = blocks[i].getLocations(); for (int j = 0; j < nodes.length; j++) { DatanodeInfo dn = nodes[j]; namesystem.markBlockAsCorrupt(blk, dn); } } } /** {@inheritDoc} */ public long nextGenerationStamp(Block block) throws IOException{ return namesystem.nextGenerationStampForBlock(block); } /** {@inheritDoc} */ public void commitBlockSynchronization(Block block, long newgenerationstamp, long newlength, boolean closeFile, boolean deleteblock, DatanodeID[] newtargets ) throws IOException { namesystem.commitBlockSynchronization(block, newgenerationstamp, newlength, closeFile, deleteblock, newtargets); } public long getPreferredBlockSize(String filename) throws IOException { return namesystem.getPreferredBlockSize(filename); } /** */ public boolean rename(String src, String dst) throws IOException { stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst); if (!checkPathLength(dst)) { throw new IOException("rename: Pathname too long. Limit " + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); } boolean ret = namesystem.renameTo(src, dst); if (ret) { myMetrics.numFilesRenamed.inc(); } return ret; } /** */ @Deprecated public boolean delete(String src) throws IOException { return delete(src, true); } /** {@inheritDoc} */ public boolean delete(String src, boolean recursive) throws IOException { if (stateChangeLog.isDebugEnabled()) { stateChangeLog.debug("*DIR* Namenode.delete: src=" + src + ", recursive=" + recursive); } boolean ret = namesystem.delete(src, recursive); if (ret) myMetrics.numDeleteFileOps.inc(); return ret; } /** * Check path length does not exceed maximum. Returns true if * length and depth are okay. Returns false if length is too long * or depth is too great. 
* */ private boolean checkPathLength(String src) { Path srcPath = new Path(src); return (src.length() <= MAX_PATH_LENGTH && srcPath.depth() <= MAX_PATH_DEPTH); } /** {@inheritDoc} */ public boolean mkdirs(String src, FsPermission masked) throws IOException { stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src); if (!checkPathLength(src)) { throw new IOException("mkdirs: Pathname too long. Limit " + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); } return namesystem.mkdirs(src, new PermissionStatus(UserGroupInformation.getCurrentUGI().getUserName(), null, masked)); } /** */ public void renewLease(String clientName) throws IOException { namesystem.renewLease(clientName); } /** */ public FileStatus[] getListing(String src) throws IOException { FileStatus[] files = namesystem.getListing(src); if (files != null) { myMetrics.numGetListingOps.inc(); } return files; } /** * Get the file info for a specific file. * @param src The string representation of the path to the file * @throws IOException if permission to access file is denied by the system * @return object containing information regarding the file * or null if file not found */ public FileStatus getFileInfo(String src) throws IOException { myMetrics.numFileInfoOps.inc(); return namesystem.getFileInfo(src); } /** @inheritDoc */ public long[] getStats() throws IOException { return namesystem.getStats(); } /** */ public DatanodeInfo[] getDatanodeReport(DatanodeReportType type) throws IOException { DatanodeInfo results[] = namesystem.datanodeReport(type); if (results == null ) { throw new IOException("Cannot find datanode report"); } return results; } /** * @inheritDoc */ public boolean setSafeMode(SafeModeAction action) throws IOException { return namesystem.setSafeMode(action); } /** * Is the cluster currently in safe mode? 
*/ public boolean isInSafeMode() { return namesystem.isInSafeMode(); } /** * @inheritDoc */ public void saveNamespace() throws IOException { namesystem.saveNamespace(); } /** * Refresh the list of datanodes that the namenode should allow to * connect. Re-reads conf by creating new Configuration object and * uses the files list in the configuration to update the list. */ public void refreshNodes() throws IOException { namesystem.refreshNodes(new Configuration()); } /** * Returns the size of the current edit log. */ public long getEditLogSize() throws IOException { return namesystem.getEditLogSize(); } /** * Roll the edit log. */ public CheckpointSignature rollEditLog() throws IOException { return namesystem.rollEditLog(); } /** * Roll the image */ public void rollFsImage() throws IOException { namesystem.rollFSImage(); } public void finalizeUpgrade() throws IOException { namesystem.finalizeUpgrade(); } public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action ) throws IOException { return namesystem.distributedUpgradeProgress(action); } /** * Dumps namenode state into specified file */ public void metaSave(String filename) throws IOException { namesystem.metaSave(filename); } /** {@inheritDoc} */ public ContentSummary getContentSummary(String path) throws IOException { return namesystem.getContentSummary(path); } /** {@inheritDoc} */ public void setQuota(String path, long namespaceQuota, long diskspaceQuota) throws IOException { namesystem.setQuota(path, namespaceQuota, diskspaceQuota); } /** {@inheritDoc} */ public void fsync(String src, String clientName) throws IOException { namesystem.fsync(src, clientName); } /** @inheritDoc */ public void setTimes(String src, long mtime, long atime) throws IOException { namesystem.setTimes(src, mtime, atime); } //////////////////////////////////////////////////////////////// // DatanodeProtocol //////////////////////////////////////////////////////////////// /** */ public DatanodeRegistration 
register(DatanodeRegistration nodeReg ) throws IOException { verifyVersion(nodeReg.getVersion()); namesystem.registerDatanode(nodeReg); return nodeReg; } /** * Data node notify the name node that it is alive * Return an array of block-oriented commands for the datanode to execute. * This will be either a transfer or a delete operation. */ public DatanodeCommand[] sendHeartbeat(DatanodeRegistration nodeReg, long capacity, long dfsUsed, long remaining, int xmitsInProgress, int xceiverCount) throws IOException { verifyRequest(nodeReg); return namesystem.handleHeartbeat(nodeReg, capacity, dfsUsed, remaining, xceiverCount, xmitsInProgress); } public DatanodeCommand blockReport(DatanodeRegistration nodeReg, long[] blocks) throws IOException { verifyRequest(nodeReg); BlockListAsLongs blist = new BlockListAsLongs(blocks); stateChangeLog.debug("*BLOCK* NameNode.blockReport: " +"from "+nodeReg.getName()+" "+blist.getNumberOfBlocks() +" blocks"); namesystem.processReport(nodeReg, blist); if (getFSImage().isUpgradeFinalized()) return DatanodeCommand.FINALIZE; return null; } public void blockReceived(DatanodeRegistration nodeReg, Block blocks[], String delHints[]) throws IOException { verifyRequest(nodeReg); stateChangeLog.debug("*BLOCK* NameNode.blockReceived: " +"from "+nodeReg.getName()+" "+blocks.length+" blocks."); for (int i = 0; i < blocks.length; i++) { namesystem.blockReceived(nodeReg, blocks[i], delHints[i]); } } /** */ public void errorReport(DatanodeRegistration nodeReg, int errorCode, String msg) throws IOException { // Log error message from datanode String dnName = (nodeReg == null ? 
"unknown DataNode" : nodeReg.getName()); LOG.info("Error report from " + dnName + ": " + msg); if (errorCode == DatanodeProtocol.NOTIFY) { return; } verifyRequest(nodeReg); if (errorCode == DatanodeProtocol.DISK_ERROR) { + LOG.warn("Volume failed on " + dnName); + } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) { namesystem.removeDatanode(nodeReg); } } public NamespaceInfo versionRequest() throws IOException { return namesystem.getNamespaceInfo(); } public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException { return namesystem.processDistributedUpgradeCommand(comm); } /** * Verify request. * * Verifies correctness of the datanode version, registration ID, and * if the datanode does not need to be shutdown. * * @param nodeReg data node registration * @throws IOException */ public void verifyRequest(DatanodeRegistration nodeReg) throws IOException { verifyVersion(nodeReg.getVersion()); if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) throw new UnregisteredDatanodeException(nodeReg); } /** * Verify version. * * @param version * @throws IOException */ public void verifyVersion(int version) throws IOException { if (version != LAYOUT_VERSION) throw new IncorrectVersionException(version, "data node"); } /** * Returns the name of the fsImage file */ public File getFsImageName() throws IOException { return getFSImage().getFsImageName(); } public FSImage getFSImage() { return namesystem.dir.fsImage; } /** * Returns the name of the fsImage file uploaded by periodic * checkpointing */ public File[] getFsImageNameCheckpoint() throws IOException { return getFSImage().getFsImageNameCheckpoint(); } /** * Returns the address on which the NameNodes is listening to. * @return the address on which the NameNodes is listening to. */ public InetSocketAddress getNameNodeAddress() { return serverAddress; } /** * Returns the address of the NameNodes http server, * which is used to access the name-node web UI. 
* * @return the http address. */ public InetSocketAddress getHttpAddress() { return httpAddress; } NetworkTopology getNetworkTopology() { return this.namesystem.clusterMap; } /** * Verify that configured directories exist, then * Interactively confirm that formatting is desired * for each existing directory and format them. * * @param conf * @param isConfirmationNeeded * @return true if formatting was aborted, false otherwise * @throws IOException */ private static boolean format(Configuration conf, boolean isConfirmationNeeded ) throws IOException { Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs(conf); Collection<File> editDirsToFormat = FSNamesystem.getNamespaceEditsDirs(conf); for(Iterator<File> it = dirsToFormat.iterator(); it.hasNext();) { File curDir = it.next(); if (!curDir.exists()) continue; if (isConfirmationNeeded) { System.err.print("Re-format filesystem in " + curDir +" ? (Y or N) "); if (!(System.in.read() == 'Y')) { System.err.println("Format aborted in "+ curDir); return true; } while(System.in.read() != '\n'); // discard the enter-key } } FSNamesystem nsys = new FSNamesystem(new FSImage(dirsToFormat, editDirsToFormat), conf); nsys.dir.fsImage.format(); return false; } private static boolean finalize(Configuration conf, boolean isConfirmationNeeded ) throws IOException { Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs(conf); Collection<File> editDirsToFormat = FSNamesystem.getNamespaceEditsDirs(conf); FSNamesystem nsys = new FSNamesystem(new FSImage(dirsToFormat, editDirsToFormat), conf); System.err.print( "\"finalize\" will remove the previous state of the files system.\n" + "Recent upgrade will become permanent.\n" + "Rollback option will not be available anymore.\n"); if (isConfirmationNeeded) { System.err.print("Finalize filesystem state ? 
(Y or N) "); if (!(System.in.read() == 'Y')) { System.err.println("Finalize aborted."); return true; } while(System.in.read() != '\n'); // discard the enter-key } nsys.dir.fsImage.finalizeUpgrade(); return false; } @Override public void refreshServiceAcl() throws IOException { if (!serviceAuthEnabled) { throw new AuthorizationException("Service Level Authorization not enabled!"); } SecurityUtil.getPolicy().refresh(); } private static void printUsage() { System.err.println( "Usage: java NameNode [" + StartupOption.FORMAT.getName() + "] | [" + StartupOption.UPGRADE.getName() + "] | [" + StartupOption.ROLLBACK.getName() + "] | [" + StartupOption.FINALIZE.getName() + "] | [" + StartupOption.IMPORT.getName() + "]"); } private static StartupOption parseArguments(String args[]) { int argsLen = (args == null) ? 0 : args.length; StartupOption startOpt = StartupOption.REGULAR; for(int i=0; i < argsLen; i++) { String cmd = args[i]; if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.FORMAT; } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.REGULAR; } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.UPGRADE; } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.ROLLBACK; } else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.FINALIZE; } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.IMPORT; } else return null; } return startOpt; } private static void setStartupOption(Configuration conf, StartupOption opt) { conf.set("dfs.namenode.startup", opt.toString()); } static StartupOption getStartupOption(Configuration conf) { return StartupOption.valueOf(conf.get("dfs.namenode.startup", StartupOption.REGULAR.toString())); } public static NameNode createNameNode(String argv[], Configuration conf) throws IOException { if (conf == null) conf = new 
Configuration(); StartupOption startOpt = parseArguments(argv); if (startOpt == null) { printUsage(); return null; } setStartupOption(conf, startOpt); switch (startOpt) { case FORMAT: boolean aborted = format(conf, true); System.exit(aborted ? 1 : 0); case FINALIZE: aborted = finalize(conf, true); System.exit(aborted ? 1 : 0); default: } NameNode namenode = new NameNode(conf); return namenode; } /** */ public static void main(String argv[]) throws Exception { try { StringUtils.startupShutdownMessage(NameNode.class, argv, LOG); NameNode namenode = createNameNode(argv, null); if (namenode != null) namenode.join(); } catch (Throwable e) { LOG.error(StringUtils.stringifyException(e)); System.exit(-1); } } } diff --git a/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java index 2f92a2e..0af90e6 100644 --- a/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java +++ b/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java @@ -1,156 +1,157 @@ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.hadoop.hdfs.server.protocol; import java.io.*; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.ipc.VersionedProtocol; /********************************************************************** * Protocol that a DFS datanode uses to communicate with the NameNode. * It's used to upload current load information and block reports. * * The only way a NameNode can communicate with a DataNode is by * returning values from these functions. * **********************************************************************/ public interface DatanodeProtocol extends VersionedProtocol { /** * 19: SendHeartbeat returns an array of DatanodeCommand objects * in stead of a DatanodeCommand object. */ public static final long versionID = 19L; // error code final static int NOTIFY = 0; - final static int DISK_ERROR = 1; + final static int DISK_ERROR = 1; // there are still valid volumes on DN final static int INVALID_BLOCK = 2; + final static int FATAL_DISK_ERROR = 3; // no valid volumes left on DN /** * Determines actions that data node should perform * when receiving a datanode command. */ final static int DNA_UNKNOWN = 0; // unknown action final static int DNA_TRANSFER = 1; // transfer blocks to another datanode final static int DNA_INVALIDATE = 2; // invalidate blocks final static int DNA_SHUTDOWN = 3; // shutdown node final static int DNA_REGISTER = 4; // re-register final static int DNA_FINALIZE = 5; // finalize previous upgrade final static int DNA_RECOVERBLOCK = 6; // request a block recovery /** * Register Datanode. 
* * @see org.apache.hadoop.hdfs.server.datanode.DataNode#dnRegistration * @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration) * * @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains * new storageID if the datanode did not have one and * registration ID for further communication. */ public DatanodeRegistration register(DatanodeRegistration registration ) throws IOException; /** * sendHeartbeat() tells the NameNode that the DataNode is still * alive and well. Includes some status info, too. * It also gives the NameNode a chance to return * an array of "DatanodeCommand" objects. * A DatanodeCommand tells the DataNode to invalidate local block(s), * or to copy them to other DataNodes, etc. */ public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration, long capacity, long dfsUsed, long remaining, int xmitsInProgress, int xceiverCount) throws IOException; /** * blockReport() tells the NameNode about all the locally-stored blocks. * The NameNode returns an array of Blocks that have become obsolete * and should be deleted. This function is meant to upload *all* * the locally-stored blocks. It's invoked upon startup and then * infrequently afterwards. * @param registration * @param blocks - the block list as an array of longs. * Each block is represented as 2 longs. * This is done instead of Block[] to reduce memory used by block reports. * * @return - the next command for DN to process. * @throws IOException */ public DatanodeCommand blockReport(DatanodeRegistration registration, long[] blocks) throws IOException; /** * blockReceived() allows the DataNode to tell the NameNode about * recently-received block data, with a hint for pereferred replica * to be deleted when there is any excessive blocks. * For example, whenever client code * writes a new Block here, or another DataNode copies a Block to * this DataNode, it will call blockReceived(). 
*/ public void blockReceived(DatanodeRegistration registration, Block blocks[], String[] delHints) throws IOException; /** * errorReport() tells the NameNode about something that has gone * awry. Useful for debugging. */ public void errorReport(DatanodeRegistration registration, int errorCode, String msg) throws IOException; public NamespaceInfo versionRequest() throws IOException; /** * This is a very general way to send a command to the name-node during * distributed upgrade process. * * The generosity is because the variety of upgrade commands is unpredictable. * The reply from the name-node is also received in the form of an upgrade * command. * * @return a reply in the form of an upgrade command */ UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException; /** * same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])} * } */ public void reportBadBlocks(LocatedBlock[] blocks) throws IOException; /** * @return the next GenerationStamp to be associated with the specified * block. 
*/ public long nextGenerationStamp(Block block) throws IOException; /** * Commit block synchronization in lease recovery */ public void commitBlockSynchronization(Block block, long newgenerationstamp, long newlength, boolean closeFile, boolean deleteblock, DatanodeID[] newtargets ) throws IOException; } diff --git a/src/test/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/src/test/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index 5207cb7..62f3a6d 100644 --- a/src/test/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/src/test/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -146,513 +146,517 @@ public class SimulatedFSDataset implements FSConstants, FSDatasetInterface, Con DataNode.LOG.error("Null oStream on unfinalized block - bug"); throw new IOException("Unexpected error on finalize"); } if (oStream.getLength() != finalSize) { DataNode.LOG.warn("Size passed to finalize (" + finalSize + ")does not match what was written:" + oStream.getLength()); throw new IOException( "Size passed to finalize does not match the amount of data written"); } // We had allocated the expected length when block was created; // adjust if necessary long extraLen = finalSize - theBlock.getNumBytes(); if (extraLen > 0) { if (!storage.alloc(extraLen)) { DataNode.LOG.warn("Lack of free storage on a block alloc"); throw new IOException("Creating block, no free space available"); } } else { storage.free(-extraLen); } theBlock.setNumBytes(finalSize); finalized = true; oStream = null; return; } SimulatedInputStream getMetaIStream() { return new SimulatedInputStream(nullCrcFileData); } synchronized boolean isFinalized() { return finalized; } } static private class SimulatedStorage { private long capacity; // in bytes private long used; // in bytes synchronized long getFree() { return capacity - used; } synchronized long getCapacity() { return capacity; } synchronized long getUsed() { return used; } synchronized boolean 
alloc(long amount) { if (getFree() >= amount) { used += amount; return true; } else { return false; } } synchronized void free(long amount) { used -= amount; } SimulatedStorage(long cap) { capacity = cap; used = 0; } } private HashMap<Block, BInfo> blockMap = null; private SimulatedStorage storage = null; private String storageId; public SimulatedFSDataset(Configuration conf) throws IOException { setConf(conf); } private SimulatedFSDataset() { // real construction when setConf called.. Uggg } public Configuration getConf() { return conf; } public void setConf(Configuration iconf) { conf = iconf; storageId = conf.get("StorageId", "unknownStorageId" + new Random().nextInt()); registerMBean(storageId); storage = new SimulatedStorage( conf.getLong(CONFIG_PROPERTY_CAPACITY, DEFAULT_CAPACITY)); //DataNode.LOG.info("Starting Simulated storage; Capacity = " + getCapacity() + // "Used = " + getDfsUsed() + "Free =" + getRemaining()); blockMap = new HashMap<Block,BInfo>(); } public synchronized void injectBlocks(Block[] injectBlocks) throws IOException { if (injectBlocks != null) { for (Block b: injectBlocks) { // if any blocks in list is bad, reject list if (b == null) { throw new NullPointerException("Null blocks in block list"); } if (isValidBlock(b)) { throw new IOException("Block already exists in block list"); } } HashMap<Block, BInfo> oldBlockMap = blockMap; blockMap = new HashMap<Block,BInfo>(injectBlocks.length + oldBlockMap.size()); blockMap.putAll(oldBlockMap); for (Block b: injectBlocks) { BInfo binfo = new BInfo(b, false); blockMap.put(b, binfo); } } } public synchronized void finalizeBlock(Block b) throws IOException { BInfo binfo = blockMap.get(b); if (binfo == null) { throw new IOException("Finalizing a non existing block " + b); } binfo.finalizeBlock(b.getNumBytes()); } public synchronized void unfinalizeBlock(Block b) throws IOException { if (isBeingWritten(b)) { blockMap.remove(b); } } public synchronized Block[] getBlockReport() { Block[] blockTable = new 
Block[blockMap.size()]; int count = 0; for (BInfo b : blockMap.values()) { if (b.isFinalized()) { blockTable[count++] = b.theBlock; } } if (count != blockTable.length) { blockTable = Arrays.copyOf(blockTable, count); } return blockTable; } public long getCapacity() throws IOException { return storage.getCapacity(); } public long getDfsUsed() throws IOException { return storage.getUsed(); } public long getRemaining() throws IOException { return storage.getFree(); } public synchronized long getLength(Block b) throws IOException { BInfo binfo = blockMap.get(b); if (binfo == null) { throw new IOException("Finalizing a non existing block " + b); } return binfo.getlength(); } /** {@inheritDoc} */ public Block getStoredBlock(long blkid) throws IOException { Block b = new Block(blkid); BInfo binfo = blockMap.get(b); if (binfo == null) { return null; } b.setGenerationStamp(binfo.getGenerationStamp()); b.setNumBytes(binfo.getlength()); return b; } /** {@inheritDoc} */ public void updateBlock(Block oldblock, Block newblock) throws IOException { BInfo binfo = blockMap.get(newblock); if (binfo == null) { throw new IOException("BInfo not found, b=" + newblock); } binfo.updateBlock(newblock); } public synchronized void invalidate(Block[] invalidBlks) throws IOException { boolean error = false; if (invalidBlks == null) { return; } for (Block b: invalidBlks) { if (b == null) { continue; } BInfo binfo = blockMap.get(b); if (binfo == null) { error = true; DataNode.LOG.warn("Invalidate: Missing block"); continue; } storage.free(binfo.getlength()); blockMap.remove(b); } if (error) { throw new IOException("Invalidate: Missing blocks."); } } public synchronized boolean isValidBlock(Block b) { // return (blockMap.containsKey(b)); BInfo binfo = blockMap.get(b); if (binfo == null) { return false; } return binfo.isFinalized(); } /* check if a block is created but not finalized */ private synchronized boolean isBeingWritten(Block b) { BInfo binfo = blockMap.get(b); if (binfo == null) { return 
false; } return !binfo.isFinalized(); } public String toString() { return getStorageInfo(); } public synchronized BlockWriteStreams writeToBlock(Block b, boolean isRecovery) throws IOException { if (isValidBlock(b)) { throw new BlockAlreadyExistsException("Block " + b + " is valid, and cannot be written to."); } if (isBeingWritten(b)) { throw new BlockAlreadyExistsException("Block " + b + " is being written, and cannot be written to."); } BInfo binfo = new BInfo(b, true); blockMap.put(b, binfo); SimulatedOutputStream crcStream = new SimulatedOutputStream(); return new BlockWriteStreams(binfo.oStream, crcStream); } public synchronized InputStream getBlockInputStream(Block b) throws IOException { BInfo binfo = blockMap.get(b); if (binfo == null) { throw new IOException("No such Block " + b ); } //DataNode.LOG.info("Opening block(" + b.blkid + ") of length " + b.len); return binfo.getIStream(); } public synchronized InputStream getBlockInputStream(Block b, long seekOffset) throws IOException { InputStream result = getBlockInputStream(b); result.skip(seekOffset); return result; } /** Not supported */ public BlockInputStreams getTmpInputStreams(Block b, long blkoff, long ckoff ) throws IOException { throw new IOException("Not supported"); } /** No-op */ public void validateBlockMetadata(Block b) { } /** * Returns metaData of block b as an input stream * @param b - the block for which the metadata is desired * @return metaData of block b as an input stream * @throws IOException - block does not exist or problems accessing * the meta file */ private synchronized InputStream getMetaDataInStream(Block b) throws IOException { BInfo binfo = blockMap.get(b); if (binfo == null) { throw new IOException("No such Block " + b ); } if (!binfo.finalized) { throw new IOException("Block " + b + " is being written, its meta cannot be read"); } return binfo.getMetaIStream(); } public synchronized long getMetaDataLength(Block b) throws IOException { BInfo binfo = blockMap.get(b); if 
(binfo == null) { throw new IOException("No such Block " + b ); } if (!binfo.finalized) { throw new IOException("Block " + b + " is being written, its metalength cannot be read"); } return binfo.getMetaIStream().getLength(); } public MetaDataInputStream getMetaDataInputStream(Block b) throws IOException { return new MetaDataInputStream(getMetaDataInStream(b), getMetaDataLength(b)); } public synchronized boolean metaFileExists(Block b) throws IOException { if (!isValidBlock(b)) { throw new IOException("Block " + b + " is valid, and cannot be written to."); } return true; // crc exists for all valid blocks } public void checkDataDir() throws DiskErrorException { // nothing to check for simulated data set } public synchronized long getChannelPosition(Block b, BlockWriteStreams stream) throws IOException { BInfo binfo = blockMap.get(b); if (binfo == null) { throw new IOException("No such Block " + b ); } return binfo.getlength(); } public synchronized void setChannelPosition(Block b, BlockWriteStreams stream, long dataOffset, long ckOffset) throws IOException { BInfo binfo = blockMap.get(b); if (binfo == null) { throw new IOException("No such Block " + b ); } binfo.setlength(dataOffset); } /** * Simulated input and output streams * */ static private class SimulatedInputStream extends java.io.InputStream { byte theRepeatedData = 7; long length; // bytes int currentPos = 0; byte[] data = null; /** * An input stream of size l with repeated bytes * @param l * @param iRepeatedData */ SimulatedInputStream(long l, byte iRepeatedData) { length = l; theRepeatedData = iRepeatedData; } /** * An input stream of of the supplied data * * @param iData */ SimulatedInputStream(byte[] iData) { data = iData; length = data.length; } /** * * @return the lenght of the input stream */ long getLength() { return length; } @Override public int read() throws IOException { if (currentPos >= length) return -1; if (data !=null) { return data[currentPos++]; } else { currentPos++; return 
theRepeatedData; } } @Override public int read(byte[] b) throws IOException { if (b == null) { throw new NullPointerException(); } if (b.length == 0) { return 0; } if (currentPos >= length) { // EOF return -1; } int bytesRead = (int) Math.min(b.length, length-currentPos); if (data != null) { System.arraycopy(data, currentPos, b, 0, bytesRead); } else { // all data is zero for (int i : b) { b[i] = theRepeatedData; } } currentPos += bytesRead; return bytesRead; } } /** * This class implements an output stream that merely throws its data away, but records its * length. * */ static private class SimulatedOutputStream extends OutputStream { long length = 0; /** * constructor for Simulated Output Steram */ SimulatedOutputStream() { } /** * * @return the length of the data created so far. */ long getLength() { return length; } /** */ void setLength(long length) { this.length = length; } @Override public void write(int arg0) throws IOException { length++; } @Override public void write(byte[] b) throws IOException { length += b.length; } @Override public void write(byte[] b, int off, int len) throws IOException { length += len; } } private ObjectName mbeanName; /** * Register the FSDataset MBean using the name * "hadoop:service=DataNode,name=FSDatasetState-<storageid>" * We use storage id for MBean name since a minicluster within a single * Java VM may have multiple Simulated Datanodes. */ void registerMBean(final String storageId) { // We wrap to bypass standard mbean naming convetion. // This wraping can be removed in java 6 as it is more flexible in // package naming for mbeans and their impl. 
StandardMBean bean; try { bean = new StandardMBean(this,FSDatasetMBean.class); mbeanName = MBeanUtil.registerMBean("DataNode", "FSDatasetState-" + storageId, bean); } catch (NotCompliantMBeanException e) { e.printStackTrace(); } DataNode.LOG.info("Registered FSDatasetStatusMBean"); } public void shutdown() { if (mbeanName != null) MBeanUtil.unregisterMBean(mbeanName); } public String getStorageInfo() { return "Simulated FSDataset-" + storageId; } + + public boolean hasEnoughResource() { + return true; + } }