diff --git a/.gitignore b/.gitignore
--- a/.gitignore
+++ b/.gitignore
@@ -20,7 +20,16 @@
 #ignore public assets???
 /public/assets
+/public
+
+/data
 
 #ignore .orig and .swp
 *.orig
 *.swp
+
+#ignore rvm setting file
+.ruby-gemset
+.ruby-version
+
+/config/secrets.yml
diff --git a/Gemfile b/Gemfile
--- a/Gemfile
+++ b/Gemfile
@@ -1,27 +1,34 @@
 source 'https://rubygems.org'
 
-gem 'rails', '~> 3.2'
+#rails
+gem 'rails', '~>4.2.0'
+gem 'activerecord-session_store'
 
-gem 'select2-rails'
 # Bundle edge Rails instead:
 # gem 'rails', :git => 'git://github.com/rails/rails.git'
 
+#---------------- database ---------------------
+#the database
 gem 'mysql2'
+#for testing
+gem 'sqlite3'
+#for dumping database into yaml
+gem 'yaml_db'
 
 # Gems used only for assets and not required
 # in production environments by default.
-group :assets do
-  gem 'sass-rails', '~> 3.2.6'
-  gem 'coffee-rails', '~> 3.2.2'
+gem 'sass-rails'
+gem 'coffee-rails'
 
 # See https://github.com/sstephenson/execjs#readme for more supported runtimes
 # gem 'therubyracer', :platforms => :ruby
-  gem 'uglifier'
-end
+gem 'uglifier'
 
-gem 'prototype-rails'
+gem 'haml'
+gem 'haml-rails'
+# gem 'prototype-rails'
 
 # To use ActiveModel has_secure_password
 # gem 'bcrypt-ruby', '~> 3.0.0'
@@ -44,7 +51,7 @@
 # jquery addition
 gem 'jquery-rails'
-gem 'jquery-ui-sass-rails'
+gem 'jquery-ui-rails'
 gem 'jquery-timepicker-addon-rails'
 gem 'jquery-tablesorter'
 gem 'jquery-countdown-rails'
@@ -52,29 +59,34 @@
 #syntax highlighter
 gem 'rouge'
 
-#add bootstrap
+#bootstrap add-ons
 gem 'bootstrap-sass', '~> 3.2.0'
 gem 'bootstrap-switch-rails'
 gem 'bootstrap-toggle-rails'
 gem 'autoprefixer-rails'
-
-#bootstrap sortable
 gem 'momentjs-rails'
 gem 'rails_bootstrap_sortable'
+gem 'bootstrap-datepicker-rails'
+gem 'bootstrap3-datetimepicker-rails'
+gem 'jquery-datatables-rails'
 
+#----------- user interface -----------------
+#select 2
+gem 'select2-rails'
 #ace editor
 gem 'ace-rails-ap'
+#paginator
+gem 'will_paginate', '~> 3.0.7'
 
-gem 'haml'
-gem 'haml-rails'
 gem 'mail'
 gem 'rdiscount'
-gem 'test-unit'
-gem 'will_paginate', '~> 3.0.7'
 gem 'dynamic_form'
 gem 'in_place_editing'
 gem 'verification', :git => 'https://github.com/sikachu/verification.git'
 
-group :test, :development do
-  gem 'rspec-rails', '~> 2.99.0'
-end
+
+#---------------- testing -----------------------
+gem 'minitest-reporters'
+
+#---------------- for console --------------------
+gem 'fuzzy-string-match'
diff --git a/Gemfile.lock b/Gemfile.lock
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -1,204 +1,247 @@
 GIT
   remote: https://github.com/sikachu/verification.git
-  revision: 76eaf51b13276ecae54bd9cd115832595d2ff56d
+  revision: ff31697b940d7b0e2ec65f08764215c96104e76d
   specs:
     verification (1.0.3)
-      actionpack (>= 3.0.0, < 5.0)
-      activesupport (>= 3.0.0, < 5.0)
+      actionpack (>= 3.0.0, < 5.1)
+      activesupport (>= 3.0.0, < 5.1)
 
 GEM
   remote: https://rubygems.org/
   specs:
-    ace-rails-ap (4.0.2)
-    actionmailer (3.2.22.5)
-      actionpack (= 3.2.22.5)
-      mail (~> 2.5.4)
-    actionpack (3.2.22.5)
-      activemodel (= 3.2.22.5)
-      activesupport (= 3.2.22.5)
-      builder (~> 3.0.0)
+    RubyInline (3.12.4)
+      ZenTest (~> 4.3)
+    ZenTest (4.11.1)
+    ace-rails-ap (4.1.1)
+    actionmailer (4.2.7.1)
+      actionpack (= 4.2.7.1)
+      actionview (= 4.2.7.1)
+      activejob (= 4.2.7.1)
+      mail (~> 2.5, >= 2.5.4)
+      rails-dom-testing (~> 1.0, >= 1.0.5)
+    actionpack (4.2.7.1)
+      actionview (= 4.2.7.1)
+      activesupport (= 4.2.7.1)
+      rack (~> 1.6)
+      rack-test (~> 0.6.2)
+      rails-dom-testing (~> 1.0, >= 1.0.5)
+      rails-html-sanitizer (~> 1.0, >= 1.0.2)
+    actionview (4.2.7.1)
+      activesupport (= 4.2.7.1)
+      builder (~> 3.1)
       erubis (~> 2.7.0)
-      journey (~> 1.0.4)
-      rack (~> 1.4.5)
-      rack-cache (~> 1.2)
-      rack-test (~> 0.6.1)
-      sprockets (~> 2.2.1)
-    activemodel (3.2.22.5)
-      activesupport (= 3.2.22.5)
-      builder (~> 3.0.0)
-    activerecord (3.2.22.5)
-      activemodel (= 3.2.22.5)
-      activesupport (= 3.2.22.5)
-      arel (~> 3.0.2)
-      tzinfo (~> 0.3.29)
-    activeresource (3.2.22.5)
-      activemodel (= 3.2.22.5)
-      activesupport (= 3.2.22.5)
-    activesupport (3.2.22.5)
-      i18n (~> 0.6, >= 0.6.4)
-      multi_json (~> 1.0)
-    arel (3.0.3)
-    autoprefixer-rails (6.0.3)
+      rails-dom-testing (~> 1.0, >= 1.0.5)
+      rails-html-sanitizer (~> 1.0, >= 1.0.2)
+    activejob (4.2.7.1)
+      activesupport (= 4.2.7.1)
+      globalid (>= 0.3.0)
+    activemodel (4.2.7.1)
+      activesupport (= 4.2.7.1)
+      builder (~> 3.1)
+    activerecord (4.2.7.1)
+      activemodel (= 4.2.7.1)
+      activesupport (= 4.2.7.1)
+      arel (~> 6.0)
+    activerecord-session_store (1.0.0)
+      actionpack (>= 4.0, < 5.1)
+      activerecord (>= 4.0, < 5.1)
+      multi_json (~> 1.11, >= 1.11.2)
+      rack (>= 1.5.2, < 3)
+      railties (>= 4.0, < 5.1)
+    activesupport (4.2.7.1)
+      i18n (~> 0.7)
+      json (~> 1.7, >= 1.7.7)
+      minitest (~> 5.1)
+      thread_safe (~> 0.3, >= 0.3.4)
+      tzinfo (~> 1.1)
+    ansi (1.5.0)
+    arel (6.0.4)
+    autoprefixer-rails (6.6.0)
       execjs
-      json
     best_in_place (3.0.3)
       actionpack (>= 3.2)
       railties (>= 3.2)
+    bootstrap-datepicker-rails (1.7.1.1)
+      railties (>= 3.0)
     bootstrap-sass (3.2.0.2)
       sass (~> 3.2)
     bootstrap-switch-rails (3.3.3)
     bootstrap-toggle-rails (2.2.1.0)
-    builder (3.0.4)
-    coffee-rails (3.2.2)
+    bootstrap3-datetimepicker-rails (4.17.47)
+      momentjs-rails (>= 2.8.1)
+    builder (3.2.2)
+    coffee-rails (4.2.1)
       coffee-script (>= 2.2.0)
-      railties (~> 3.2.0)
-    coffee-script (2.3.0)
+      railties (>= 4.0.0, < 5.2.x)
+    coffee-script (2.4.1)
       coffee-script-source
       execjs
-    coffee-script-source (1.9.0)
-    diff-lcs (1.2.5)
+    coffee-script-source (1.12.2)
+    concurrent-ruby (1.0.4)
     dynamic_form (1.1.4)
     erubis (2.7.0)
-    execjs (2.3.0)
-    haml (4.0.6)
+    execjs (2.7.0)
+    fuzzy-string-match (1.0.0)
+      RubyInline (>= 3.8.6)
+    globalid (0.3.7)
+      activesupport (>= 4.1.0)
+    haml (4.0.7)
       tilt
-    haml-rails (0.4)
-      actionpack (>= 3.1, < 4.1)
-      activesupport (>= 3.1, < 4.1)
-      haml (>= 3.1, < 4.1)
-      railties (>= 3.1, < 4.1)
-    hike (1.2.3)
+    haml-rails (0.9.0)
+      actionpack (>= 4.0.1)
+      activesupport (>= 4.0.1)
+      haml (>= 4.0.6, < 5.0)
+      html2haml (>= 1.0.1)
+      railties (>= 4.0.1)
+    html2haml (2.0.0)
+      erubis (~> 2.7.0)
+      haml (~> 4.0.0)
+      nokogiri (~> 1.6.0)
+      ruby_parser (~> 3.5)
     i18n (0.7.0)
     in_place_editing (1.2.0)
-    journey (1.0.4)
     jquery-countdown-rails (2.0.2)
-    jquery-rails (3.1.2)
-      railties (>= 3.0, < 5.0)
+    jquery-datatables-rails (3.4.0)
+      actionpack (>= 3.1)
+      jquery-rails
+      railties (>= 3.1)
+      sass-rails
+    jquery-rails (4.2.1)
+      rails-dom-testing (>= 1, < 3)
+      railties (>= 4.2.0)
       thor (>= 0.14, < 2.0)
-    jquery-tablesorter (1.13.4)
-      railties (>= 3.1, < 5)
+    jquery-tablesorter (1.23.3)
+      railties (>= 3.2, < 6)
     jquery-timepicker-addon-rails (1.4.1)
       railties (>= 3.1)
-    jquery-ui-rails (4.0.3)
-      jquery-rails
-      railties (>= 3.1.0)
-    jquery-ui-sass-rails (4.0.3.0)
-      jquery-rails
-      jquery-ui-rails (= 4.0.3)
-      railties (>= 3.1.0)
-    json (2.0.2)
-    mail (2.5.4)
-      mime-types (~> 1.16)
-      treetop (~> 1.4.8)
-    mime-types (1.25.1)
-    momentjs-rails (2.11.1)
+    jquery-ui-rails (6.0.1)
+      railties (>= 3.2.16)
+    json (1.8.3)
+    loofah (2.0.3)
+      nokogiri (>= 1.5.9)
+    mail (2.6.4)
+      mime-types (>= 1.16, < 4)
+    mime-types (3.1)
+      mime-types-data (~> 3.2015)
+    mime-types-data (3.2016.0521)
+    mini_portile2 (2.1.0)
+    minitest (5.10.1)
+    minitest-reporters (1.1.13)
+      ansi
+      builder
+      minitest (>= 5.0)
+      ruby-progressbar
+    momentjs-rails (2.15.1)
       railties (>= 3.1)
     multi_json (1.12.1)
-    mysql2 (0.3.20)
-    polyglot (0.3.5)
-    power_assert (0.2.2)
-    prototype-rails (3.2.1)
-      rails (~> 3.2)
-    rack (1.4.7)
-    rack-cache (1.6.1)
-      rack (>= 0.4)
-    rack-ssl (1.3.4)
-      rack
+    mysql2 (0.4.5)
+    nokogiri (1.6.8.1)
+      mini_portile2 (~> 2.1.0)
+    rack (1.6.5)
     rack-test (0.6.3)
       rack (>= 1.0)
-    rails (3.2.22.5)
-      actionmailer (= 3.2.22.5)
-      actionpack (= 3.2.22.5)
-      activerecord (= 3.2.22.5)
-      activeresource (= 3.2.22.5)
-      activesupport (= 3.2.22.5)
-      bundler (~> 1.0)
-      railties (= 3.2.22.5)
-    rails_bootstrap_sortable (2.0.0)
-      momentjs-rails (~> 2, >= 2.8.3)
-    railties (3.2.22.5)
-      actionpack (= 3.2.22.5)
-      activesupport (= 3.2.22.5)
-      rack-ssl (~> 1.3.2)
+    rails (4.2.7.1)
+      actionmailer (= 4.2.7.1)
+      actionpack (= 4.2.7.1)
+      actionview (= 4.2.7.1)
+      activejob (= 4.2.7.1)
+      activemodel (= 4.2.7.1)
+      activerecord (= 4.2.7.1)
+      activesupport (= 4.2.7.1)
+      bundler (>= 1.3.0, < 2.0)
+      railties (= 4.2.7.1)
+      sprockets-rails
+    rails-deprecated_sanitizer (1.0.3)
+      activesupport (>= 4.2.0.alpha)
+    rails-dom-testing (1.0.8)
+      activesupport (>= 4.2.0.beta, < 5.0)
+      nokogiri (~> 1.6)
+      rails-deprecated_sanitizer (>= 1.0.1)
+    rails-html-sanitizer (1.0.3)
+      loofah (~> 2.0)
+    rails_bootstrap_sortable (2.0.1)
+      momentjs-rails (>= 2.8.3)
+    railties (4.2.7.1)
+      actionpack (= 4.2.7.1)
+      activesupport (= 4.2.7.1)
       rake (>= 0.8.7)
-      rdoc (~> 3.4)
-      thor (>= 0.14.6, < 2.0)
-    rake (11.2.2)
-    rdiscount (2.1.8)
-    rdoc (3.9.5)
-    rouge (1.8.0)
-    rspec-collection_matchers (1.1.2)
-      rspec-expectations (>= 2.99.0.beta1)
-    rspec-core (2.99.2)
-    rspec-expectations (2.99.2)
-      diff-lcs (>= 1.1.3, < 2.0)
-    rspec-mocks (2.99.3)
-    rspec-rails (2.99.0)
-      actionpack (>= 3.0)
-      activemodel (>= 3.0)
-      activesupport (>= 3.0)
-      railties (>= 3.0)
-      rspec-collection_matchers
-      rspec-core (~> 2.99.0)
-      rspec-expectations (~> 2.99.0)
-      rspec-mocks (~> 2.99.0)
-    sass (3.4.11)
-    sass-rails (3.2.6)
-      railties (~> 3.2.0)
-      sass (>= 3.1.10)
-      tilt (~> 1.3)
-    select2-rails (4.0.1)
+      thor (>= 0.18.1, < 2.0)
+    rake (12.0.0)
+    rdiscount (2.2.0.1)
+    rouge (2.0.7)
+    ruby-progressbar (1.8.1)
+    ruby_parser (3.8.3)
+      sexp_processor (~> 4.1)
+    sass (3.4.23)
+    sass-rails (5.0.6)
+      railties (>= 4.0.0, < 6)
+      sass (~> 3.1)
+      sprockets (>= 2.8, < 4.0)
+      sprockets-rails (>= 2.0, < 4.0)
+      tilt (>= 1.1, < 3)
+    select2-rails (4.0.3)
       thor (~> 0.14)
-    sprockets (2.2.3)
-      hike (~> 1.2)
-      multi_json (~> 1.0)
-      rack (~> 1.0)
-      tilt (~> 1.1, != 1.3.0)
-    test-unit (3.0.9)
-      power_assert
-    thor (0.19.1)
-    tilt (1.4.1)
-    treetop (1.4.15)
-      polyglot
-      polyglot (>= 0.3.1)
-    tzinfo (0.3.51)
-    uglifier (2.7.0)
-      execjs (>= 0.3.0)
-      json (>= 1.8.0)
-    will_paginate (3.0.7)
+    sexp_processor (4.7.0)
+    sprockets (3.7.1)
+      concurrent-ruby (~> 1.0)
+      rack (> 1, < 3)
+    sprockets-rails (3.2.0)
+      actionpack (>= 4.0)
+      activesupport (>= 4.0)
+      sprockets (>= 3.0.0)
+    sqlite3 (1.3.12)
+    thor (0.19.4)
+    thread_safe (0.3.5)
+    tilt (2.0.5)
+    tzinfo (1.2.2)
+      thread_safe (~> 0.1)
+    uglifier (3.0.4)
+      execjs (>= 0.3.0, < 3)
+    will_paginate (3.0.12)
+    yaml_db (0.4.2)
+      rails (>= 3.0, < 5.1)
+      rake (>= 0.8.7)
 
 PLATFORMS
   ruby
 
 DEPENDENCIES
   ace-rails-ap
+  activerecord-session_store
   autoprefixer-rails
   best_in_place (~> 3.0.1)
+  bootstrap-datepicker-rails
   bootstrap-sass (~> 3.2.0)
   bootstrap-switch-rails
   bootstrap-toggle-rails
-  coffee-rails (~> 3.2.2)
+  bootstrap3-datetimepicker-rails
+  coffee-rails
   dynamic_form
+  fuzzy-string-match
   haml
   haml-rails
   in_place_editing
   jquery-countdown-rails
+  jquery-datatables-rails
   jquery-rails
   jquery-tablesorter
   jquery-timepicker-addon-rails
-  jquery-ui-sass-rails
+  jquery-ui-rails
   mail
+  minitest-reporters
   momentjs-rails
   mysql2
-  prototype-rails
-  rails (~> 3.2)
+  rails (~> 4.2.0)
   rails_bootstrap_sortable
   rdiscount
   rouge
-  rspec-rails (~> 2.99.0)
-  sass-rails (~> 3.2.6)
+  sass-rails
   select2-rails
-  test-unit
+  sqlite3
   uglifier
   verification!
   will_paginate (~> 3.0.7)
+  yaml_db
+
+BUNDLED WITH
+   1.15.4
diff --git a/app/assets/javascripts/application.js b/app/assets/javascripts/application.js
--- a/app/assets/javascripts/application.js
+++ b/app/assets/javascripts/application.js
@@ -12,10 +12,14 @@
 //
 //= require jquery
 //= require jquery_ujs
-//= require jquery.ui.all
+//= require dataTables/jquery.dataTables
+//= require dataTables/bootstrap/3/jquery.dataTables.bootstrap
+//= require jquery-ui
 //= require bootstrap-sprockets
 //= require moment
+//= require moment/th
 //= require bootstrap-sortable
+//= require bootstrap-datetimepicker
 //= require select2
 //= require ace-rails-ap
 //= require ace/mode-c_cpp
@@ -28,13 +32,12 @@
 //= require custom
 //= require jquery.countdown
 //-------------- addition from local_jquery -----------
-//= require jquery.ui.datepicker
-//= require jquery.ui.slider
-//= require jquery-ui-timepicker-addon
 //= require jquery-tablesorter
 //= require best_in_place
 //= require best_in_place.jquery-ui
 //= require brython
+//= require bootstrap-datepicker
+//= require bootstrap-datetimepicker
 
 // since this is after blank line, it is not downloaded
 //x= require prototype
diff --git a/app/assets/javascripts/brython.js b/app/assets/javascripts/brython.js
new file mode 100644
--- /dev/null
+++ b/app/assets/javascripts/brython.js
@@ -0,0 +1,10848 @@
+// brython.js brython.info
+// version [3, 3, 0, 'alpha', 0]
+// implementation [3, 2, 7, 'final', 0]
+// version compiled from commented, indented source files at github.com/brython-dev/brython
+var __BRYTHON__=__BRYTHON__ ||{}
+;(function($B){
+var scripts=document.getElementsByTagName('script')
+var this_url=scripts[scripts.length-1].src
+var elts=this_url.split('/')
+elts.pop()
+var $path=$B.brython_path=elts.join('/')+'/'
+var $href=$B.script_path=window.location.href
+var $href_elts=$href.split('/')
+$href_elts.pop()
+var $script_dir=$B.script_dir=$href_elts.join('/')
+$B.$py_module_path={}
+$B.$py_src={}
+$B.path=[$path+'Lib',$path+'libs',$script_dir,$path+'Lib/site-packages']
+$B.bound={}
+$B.type={}
+$B.async_enabled=false
+if($B.async_enabled)$B.block={}
+$B.modules={}
+$B.imported={}
+$B.vars={}
+$B._globals={}
+$B.frames_stack=[]
+$B.builtins={__repr__:function(){return "'"},__str__:function(){return ""},}
+$B.builtins_block={id:'__builtins__',module:'__builtins__'}
+$B.modules['__builtins__']=$B.builtins_block
+$B.bound['__builtins__']={'__BRYTHON__':true,'$eval':true,'$open': true}
+$B.bound['__builtins__']['BaseException']=true
+$B.type['__builtins__']={}
+$B.builtin_funcs={}
+$B.__getattr__=function(attr){return this[attr]}
+$B.__setattr__=function(attr,value){
+if(['debug','stdout','stderr'].indexOf(attr)>-1){$B[attr]=value}
+else{throw $B.builtins.AttributeError('__BRYTHON__ object has no attribute '+attr)}}
+$B.language=window.navigator.userLanguage ||window.navigator.language
+$B.charset=document.characterSet ||document.inputEncoding ||"utf-8"
+$B.max_int=Math.pow(2,53)-1
+$B.min_int=-$B.max_int
+$B.$py_next_hash=Math.pow(2,53)-1
+$B.$py_UUID=0
+$B.lambda_magic=Math.random().toString(36).substr(2,8) +$B.callbacks={} +var has_storage=typeof(Storage)!=="undefined" +if(has_storage){$B.has_local_storage=false +try{ +if(localStorage){$B.local_storage=localStorage +$B.has_local_storage=true}}catch(err){} +$B.has_session_storage=false +try{ +if(sessionStorage){$B.session_storage=sessionStorage +$B.has_session_storage=true}}catch(err){}}else{ +$B.has_local_storage=false +$B.has_session_storage=false} +$B.globals=function(){ +return $B.frames_stack[$B.frames_stack.length-1][3]} +$B.regexIdentifier=/^(?:[\$A-Z_a-z\xAA\xB5\xBA\xC0-\xD6\xD8-\xF6\xF8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377\u037A-\u037D\u037F\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u048A-\u052F\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0-\u08B4\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0AF9\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C39\u0C3D\u0C58-\u0C5A\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D3A\u0D3D\u0D4E\u0D5F-\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F5\u13F8-\u13FD\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u16EE-\u16F8\u1700-\u170C\u170E-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191E\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19B0-\u19C9\u1A00-\u1A16\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2118-\u211D\u2124\u2126\u2128\u212A-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2160-\u2188\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\
u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303C\u3041-\u3096\u309B-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FD5\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B\uA640-\uA66E\uA67F-\uA69D\uA6A0-\uA6EF\uA717-\uA71F\uA722-\uA788\uA78B-\uA7AD\uA7B0-\uA7B7\uA7F7-\uA801\uA803-\uA805\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB\uA8FD\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uA9E0-\uA9E4\uA9E6-\uA9EF\uA9FA-\uA9FE\uAA00-\uAA28\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA7E-\uAAAF\uAAB1\uAAB5\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E\uAB30-\uAB5A\uAB5C-\uAB65\uAB70-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]|\uD800[\uDC00-\uDC0B\uDC0D-\uDC26\uDC28-\uDC3A\uDC3C\uDC3D\uDC3F-\uDC4D\uDC50-\uDC5D\uDC80-\uDCFA\uDD40-\uDD74\uDE80-\uDE9C\uDEA0-\uDED0\uDF00-\uDF1F\uDF30-\uDF4A\uDF50-\uDF75\uDF80-\uDF9D\uDFA0-\uDFC3\uDFC8-\uDFCF\uDFD1-\uDFD5]|\uD801[\uDC00-\uDC9D\uDD00-\uDD27\uDD30-\uDD63\uDE00-\uDF36\uDF40-\uDF55\uDF60-\uDF67]|\uD802[\uDC00-\uDC05\uDC08\uDC0A-\uDC35\uDC37\uDC38\uDC3C\uDC3F-\uDC55\uDC60-\uDC76\uDC80-\uDC9E\uDCE0-\uDCF2\uDCF4\uDCF5\uDD00-\uDD15\uDD20-\uDD39\uDD80-\uDDB7\uDDBE\uDDBF\uDE00\uDE10-\uDE13\uDE15-\uDE17\uDE19-\uDE33\uDE60-\uDE7C\uDE80-\uDE9C\uDEC0-\uDEC7\uDEC9-\uDEE4\uDF00-\uDF35\uDF40-\uDF55\uDF60-\uDF72\uDF80-\uDF91]|\uD803[\uDC00-\uDC48\uDC80-\uDCB2\uDCC0-\uDCF2]|\uD804[\uDC03-\uDC37\uDC83-\uDCAF\uDCD0-\uDCE8\uDD03-\uDD26\uDD50-\uDD72\uDD76\uDD83-\uDDB2\uDDC1-\uDDC4\uDDDA\uDDDC\uDE00-\uDE11\uDE13-\uDE2B\uDE80-\uDE86\uDE88\uDE8A-\uDE8D\uDE8F-\uDE9D\uDE9F-\uDEA8\uDEB0-\uDEDE\uDF05-\uDF0C\uDF0F\uDF10\uDF13-\uDF28\uDF2A-\uDF30\uDF32\uDF33\uDF35-\uDF39\uDF3D\uDF50\uDF5D-\uDF61]|\uD805[\uDC80-\uDCAF\uDCC4\uDCC5\uDCC7\uDD80-\uDDAE\uDDD8-\uDDDB\uDE00-\uDE2F\uDE44\uDE80-\uDEAA\uDF00-\uDF19]|\uD806[\uDCA0-\uDCDF\uDCFF\uDEC0-\uDEF8]|\uD808[\uDC00-\uDF99]|\uD809[\uDC00-\uDC6E\uDC80-\uDD43]|[\uD80C\uD840-\uD868\uD86A-\uD86C\uD86F-\uD872][\uDC00-\uDFFF]|\uD80D[\uDC00-\uDC2E]|\uD811[\uDC00-\uDE46]|\uD81A[\uDC00-\uDE38\uDE40-\uDE5E\uDED0-\uDEED\uDF00-\uDF2F\uDF40-\uDF43\uDF63-\uDF77\uDF7D-\uDF8F]|\uD81B[\uDF00-\uDF44\uDF50\uDF93-\uDF9F]|\uD82C[\uDC00\uDC01]|\uD82F[\uDC00-\uDC6A\uDC70-\uDC7C\uDC80-\uDC88\uDC90-\uDC99]|\uD835[\uDC00-\uDC54\uDC56-\uDC9C\uDC9E\uDC9F\uDCA2\uDCA5\uDCA6\uDCA9-\uDCAC\uDCAE-\uDCB9\uDCBB\uDCBD-\uDCC3\uDCC5-\uDD05\uDD07-\uDD0A\uDD0D-\uDD14\uDD16-\uDD1C\uDD1E-\uDD39\uDD3B-\uDD3E\uDD40-\uDD44\uDD46\uDD4A-\uDD50\uDD52-\uDEA5\uDEA8-\uDEC0\uDEC2-\uDEDA\uDEDC-\uDEFA\uDEFC-\uDF14\uDF16-\uDF34\uDF36-\uDF4E\uDF50-\uDF6E\uDF70-\uDF88\uDF8A-\uDFA8\uDFAA-\uDFC2\uDFC4-\uDFCB]|\uD83A[\uDC00-\uDCC4]|\uD83B[\uDE00-\uDE03\uDE05-\uDE1F\uDE21\uDE22\uDE24\uDE27\uDE29-\uDE32\uDE34-\uDE37\uDE39\uDE3B\uDE42\uDE47\uDE49\uDE4B\uDE4D-\uDE4F\uDE51\uDE52\uDE54\uDE57\uDE59\uDE5B\uDE5D\uDE5F\uDE61\uDE62\uDE64\uDE67-\uDE6A\uDE6C-\uDE72\uDE74-\uDE77\uDE79-\uDE7C\uDE7E\uDE80-\uDE89\uDE8B-\uDE9B\uDEA1-\uDEA3\uDEA5-\uDEA9\uDEAB-\uDEBB]|\uD869[\uDC00-\uDED6\uDF00-\uDFFF]|\uD86D[\uD
C00-\uDF34\uDF40-\uDFFF]|\uD86E[\uDC00-\uDC1D\uDC20-\uDFFF]|\uD873[\uDC00-\uDEA1]|\uD87E[\uDC00-\uDE1D])(?:[\$0-9A-Z_a-z\xAA\xB5\xB7\xBA\xC0-\xD6\xD8-\xF6\xF8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0300-\u0374\u0376\u0377\u037A-\u037D\u037F\u0386-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u0483-\u0487\u048A-\u052F\u0531-\u0556\u0559\u0561-\u0587\u0591-\u05BD\u05BF\u05C1\u05C2\u05C4\u05C5\u05C7\u05D0-\u05EA\u05F0-\u05F2\u0610-\u061A\u0620-\u0669\u066E-\u06D3\u06D5-\u06DC\u06DF-\u06E8\u06EA-\u06FC\u06FF\u0710-\u074A\u074D-\u07B1\u07C0-\u07F5\u07FA\u0800-\u082D\u0840-\u085B\u08A0-\u08B4\u08E3-\u0963\u0966-\u096F\u0971-\u0983\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BC-\u09C4\u09C7\u09C8\u09CB-\u09CE\u09D7\u09DC\u09DD\u09DF-\u09E3\u09E6-\u09F1\u0A01-\u0A03\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A3C\u0A3E-\u0A42\u0A47\u0A48\u0A4B-\u0A4D\u0A51\u0A59-\u0A5C\u0A5E\u0A66-\u0A75\u0A81-\u0A83\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABC-\u0AC5\u0AC7-\u0AC9\u0ACB-\u0ACD\u0AD0\u0AE0-\u0AE3\u0AE6-\u0AEF\u0AF9\u0B01-\u0B03\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3C-\u0B44\u0B47\u0B48\u0B4B-\u0B4D\u0B56\u0B57\u0B5C\u0B5D\u0B5F-\u0B63\u0B66-\u0B6F\u0B71\u0B82\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BBE-\u0BC2\u0BC6-\u0BC8\u0BCA-\u0BCD\u0BD0\u0BD7\u0BE6-\u0BEF\u0C00-\u0C03\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C39\u0C3D-\u0C44\u0C46-\u0C48\u0C4A-\u0C4D\u0C55\u0C56\u0C58-\u0C5A\u0C60-\u0C63\u0C66-\u0C6F\u0C81-\u0C83\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBC-\u0CC4\u0CC6-\u0CC8\u0CCA-\u0CCD\u0CD5\u0CD6\u0CDE\u0CE0-\u0CE3\u0CE6-\u0CEF\u0CF1\u0CF2\u0D01-\u0D03\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D3A\u0D3D-\u0D44\u0D46-\u0D48\u0D4A-\u0D4E\u0D57\u0D5F-\u0D63\u0D66-\u0D6F\u0D7A-\u0D7F\u0D82\u0D83\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0DCA\u0DCF-\u0DD4\u0DD6\u0DD8-\u0DDF\u0DE6-\u0DEF\u0DF2\u0DF3\u0E01-\u0E3A\u0E40-\u0E4E\u0E50-\u0E59\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB9\u0EBB-\u0EBD\u0EC0-\u0EC4\u0EC6\u0EC8-\u0ECD\u0ED0-\u0ED9\u0EDC-\u0EDF\u0F00\u0F18\u0F19\u0F20-\u0F29\u0F35\u0F37\u0F39\u0F3E-\u0F47\u0F49-\u0F6C\u0F71-\u0F84\u0F86-\u0F97\u0F99-\u0FBC\u0FC6\u1000-\u1049\u1050-\u109D\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u135D-\u135F\u1369-\u1371\u1380-\u138F\u13A0-\u13F5\u13F8-\u13FD\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u16EE-\u16F8\u1700-\u170C\u170E-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176C\u176E-\u1770\u1772\u1773\u1780-\u17D3\u17D7\u17DC\u17DD\u17E0-\u17E9\u180B-\u180D\u1810-\u1819\u1820-\u1877\u1880-\u18AA\u18B0-\u18F5\u1900-\u191E\u1920-\u192B\u1930-\u193B\u1946-\u196D\u1970-\u1974\u1980-\u19AB\u19B0-\u19C9\u19D0-\u19DA\u1A00-\u1A1B\u1A20-\u1A5E\u1A60-\u1A7C\u1A7F-\u1A89\u1A90-\u1A99\u1AA7\u1AB0-\u1ABD\u1B00-\u1B4B\u1B50-\u1B59\u1B6B-\u1B73\u1B80-\u1BF3\u1C00-\u1C37\u1C40-\u1C49\u1C4D-\u1C7D\u1CD0-\u1CD2\u1CD4-\u1CF6\u1CF8\u1CF9\u1D00-\u1DF5\u1DFC-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\
u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u200C\u200D\u203F\u2040\u2054\u2071\u207F\u2090-\u209C\u20D0-\u20DC\u20E1\u20E5-\u20F0\u2102\u2107\u210A-\u2113\u2115\u2118-\u211D\u2124\u2126\u2128\u212A-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2160-\u2188\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CF3\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D7F-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u2DE0-\u2DFF\u3005-\u3007\u3021-\u302F\u3031-\u3035\u3038-\u303C\u3041-\u3096\u3099-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FD5\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA62B\uA640-\uA66F\uA674-\uA67D\uA67F-\uA6F1\uA717-\uA71F\uA722-\uA788\uA78B-\uA7AD\uA7B0-\uA7B7\uA7F7-\uA827\uA840-\uA873\uA880-\uA8C4\uA8D0-\uA8D9\uA8E0-\uA8F7\uA8FB\uA8FD\uA900-\uA92D\uA930-\uA953\uA960-\uA97C\uA980-\uA9C0\uA9CF-\uA9D9\uA9E0-\uA9FE\uAA00-\uAA36\uAA40-\uAA4D\uAA50-\uAA59\uAA60-\uAA76\uAA7A-\uAAC2\uAADB-\uAADD\uAAE0-\uAAEF\uAAF2-\uAAF6\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E\uAB30-\uAB5A\uAB5C-\uAB65\uAB70-\uABEA\uABEC\uABED\uABF0-\uABF9\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE00-\uFE0F\uFE20-\uFE2F\uFE33\uFE34\uFE4D-\uFE4F\uFE70-\uFE74\uFE76-\uFEFC\uFF10-\uFF19\uFF21-\uFF3A\uFF3F\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]|\uD800[\uDC00-\uDC0B\uDC0D-\uDC26\uDC28-\uDC3A\uDC3C\uDC3D\uDC3F-\uDC4D\uDC50-\uDC5D\uDC80-\uDCFA\uDD40-\uDD74\uDDFD\uDE80-\uDE9C\uDEA0-\uDED0\uDEE0\uDF00-\uDF1F\uDF30-\uDF4A\uDF50-\uDF7A\uDF80-\uDF9D\uDFA0-\uDFC3\uDFC8-\uDFCF\uDFD1-\uDFD5]|\uD801[\uDC00-\uDC9D\uDCA0-\uDCA9\uDD00-\uDD27\uDD30-\uDD63\uDE00-\uDF36\uDF40-\uDF55\uDF60-\uDF67]|\uD802[\uDC00-\uDC05\uDC08\uDC0A-\uDC35\uDC37\uDC38\uDC3C\uDC3F-\uDC55\uDC60-\uDC76\uDC80-\uDC9E\uDCE0-\uDCF2\uDCF4\uDCF5\uDD00-\uDD15\uDD20-\uDD39\uDD80-\uDDB7\uDDBE\uDDBF\uDE00-\uDE03\uDE05\uDE06\uDE0C-\uDE13\uDE15-\uDE17\uDE19-\uDE33\uDE38-\uDE3A\uDE3F\uDE60-\uDE7C\uDE80-\uDE9C\uDEC0-\uDEC7\uDEC9-\uDEE6\uDF00-\uDF35\uDF40-\uDF55\uDF60-\uDF72\uDF80-\uDF91]|\uD803[\uDC00-\uDC48\uDC80-\uDCB2\uDCC0-\uDCF2]|\uD804[\uDC00-\uDC46\uDC66-\uDC6F\uDC7F-\uDCBA\uDCD0-\uDCE8\uDCF0-\uDCF9\uDD00-\uDD34\uDD36-\uDD3F\uDD50-\uDD73\uDD76\uDD80-\uDDC4\uDDCA-\uDDCC\uDDD0-\uDDDA\uDDDC\uDE00-\uDE11\uDE13-\uDE37\uDE80-\uDE86\uDE88\uDE8A-\uDE8D\uDE8F-\uDE9D\uDE9F-\uDEA8\uDEB0-\uDEEA\uDEF0-\uDEF9\uDF00-\uDF03\uDF05-\uDF0C\uDF0F\uDF10\uDF13-\uDF28\uDF2A-\uDF30\uDF32\uDF33\uDF35-\uDF39\uDF3C-\uDF44\uDF47\uDF48\uDF4B-\uDF4D\uDF50\uDF57\uDF5D-\uDF63\uDF66-\uDF6C\uDF70-\uDF74]|\uD805[\uDC80-\uDCC5\uDCC7\uDCD0-\uDCD9\uDD80-\uDDB5\uDDB8-\uDDC0\uDDD8-\uDDDD\uDE00-\uDE40\uDE44\uDE50-\uDE59\uDE80-\uDEB7\uDEC0-\uDEC9\uDF00-\uDF19\uDF1D-\uDF2B\uDF30-\uDF39]|\uD806[\uDCA0-\uDCE9\uDCFF\uDEC0-\uDEF8]|\uD808[\uDC00-\uDF99]|\uD809[\uDC00-\uDC6E\uDC80-\uDD43]|[\uD80C\uD840-\uD868\uD86A-\uD86C\uD86F-\uD872][\uDC00-\uDFFF]|\uD80D[\uDC00-\uDC2E]|\uD811[\uDC00-\uDE46]|\uD81A[\uDC00-\uDE38\uDE40-\uDE5E\uDE60-\uDE69\uDED0-\uDEED\uDEF0-\uDEF4\uDF00-\uDF36\uDF40-\uDF43\uDF50-\uDF59\uDF63-\uDF77\uDF7D-\uDF8F]|\uD81B[\uDF00-\uDF44\uDF50-\uDF7E\uDF8F-\uDF9F]|\uD82C[\uDC00\uDC01]|\uD82F[\uDC00-\uDC6A\uDC70-\uDC7C\uDC80-\uDC88\uDC90-\uDC99\uDC9D\uDC9E]|\uD834[\uDD65-\uDD69\uDD6D-\uDD72\uDD7B-\uDD82\uDD85-\uDD8B\uDDAA-\uDDAD\uDE42-\uDE44]|\uD83
5[\uDC00-\uDC54\uDC56-\uDC9C\uDC9E\uDC9F\uDCA2\uDCA5\uDCA6\uDCA9-\uDCAC\uDCAE-\uDCB9\uDCBB\uDCBD-\uDCC3\uDCC5-\uDD05\uDD07-\uDD0A\uDD0D-\uDD14\uDD16-\uDD1C\uDD1E-\uDD39\uDD3B-\uDD3E\uDD40-\uDD44\uDD46\uDD4A-\uDD50\uDD52-\uDEA5\uDEA8-\uDEC0\uDEC2-\uDEDA\uDEDC-\uDEFA\uDEFC-\uDF14\uDF16-\uDF34\uDF36-\uDF4E\uDF50-\uDF6E\uDF70-\uDF88\uDF8A-\uDFA8\uDFAA-\uDFC2\uDFC4-\uDFCB\uDFCE-\uDFFF]|\uD836[\uDE00-\uDE36\uDE3B-\uDE6C\uDE75\uDE84\uDE9B-\uDE9F\uDEA1-\uDEAF]|\uD83A[\uDC00-\uDCC4\uDCD0-\uDCD6]|\uD83B[\uDE00-\uDE03\uDE05-\uDE1F\uDE21\uDE22\uDE24\uDE27\uDE29-\uDE32\uDE34-\uDE37\uDE39\uDE3B\uDE42\uDE47\uDE49\uDE4B\uDE4D-\uDE4F\uDE51\uDE52\uDE54\uDE57\uDE59\uDE5B\uDE5D\uDE5F\uDE61\uDE62\uDE64\uDE67-\uDE6A\uDE6C-\uDE72\uDE74-\uDE77\uDE79-\uDE7C\uDE7E\uDE80-\uDE89\uDE8B-\uDE9B\uDEA1-\uDEA3\uDEA5-\uDEA9\uDEAB-\uDEBB]|\uD869[\uDC00-\uDED6\uDF00-\uDFFF]|\uD86D[\uDC00-\uDF34\uDF40-\uDFFF]|\uD86E[\uDC00-\uDC1D\uDC20-\uDFFF]|\uD873[\uDC00-\uDEA1]|\uD87E[\uDC00-\uDE1D]|\uDB40[\uDD00-\uDDEF])*$/ +$B.cased_letters_regexp=/[\u0041-\u005A\u0061-\u007A\u00B5\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u01BA\u01BC-\u01BF\u01C4-\u0293\u0295-\u02AF\u0370-\u0373\u0376-\u0377\u037B-\u037D\u037F\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u048A-\u052F\u0531-\u0556\u0561-\u0587\u10A0-\u10C5\u10C7\u10CD\u13A0-\u13F5\u13F8-\u13FD\u1D00-\u1D2B\u1D6B-\u1D77\u1D79-\u1D9A\u1E00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u212F-\u2134\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183-\u2184\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2C7B\u2C7E-\u2CE4\u2CEB-\u2CEE\u2CF2-\u2CF3\u2D00-\u2D25\u2D27\u2D2D\uA640-\uA66D\uA680-\uA69B\uA722-\uA76F\uA771-\uA787\uA78B-\uA78E\uA790-\uA7AD\uA7B0-\uA7B7\uA7FA\uAB30-\uAB5A\uAB60-\uAB65\uAB70-\uABBF\uFB00-\uFB06\uFB13-\uFB17\uFF21-\uFF3A\uFF41-\uFF5A]/})(__BRYTHON__) +__BRYTHON__.implementation=[3,2,7,'final',0] +__BRYTHON__.__MAGIC__="3.2.7" +__BRYTHON__.version_info=[3,3,0,'alpha',0] +__BRYTHON__.compiled_date="2016-06-21 18:43:25.263849" +__BRYTHON__.builtin_module_names=["posix","sys","errno","time","_ajax","_base64","_browser","_html","_jsre","_multiprocessing","_posixsubprocess","_svg","_sys","builtins","dis","hashlib","javascript","json","long_int","math","modulefinder","random","_abcoll","_codecs","_collections","_csv","_functools","_imp","_io","_random","_socket","_sre","_string","_struct","_sysconfigdata","_testcapi","_thread","_warnings","_weakref"] + +;(function($B){var js,$pos,res,$op +var keys=$B.keys=function(obj){var res=[],pos=0 +for(var attr in obj){res[pos++]=attr} +res.sort() +return res} +var clone=$B.clone=function(obj){var res={} +for(var attr in obj){res[attr]=obj[attr]} +return res} +$B.last=function(table){return table[table.length-1]} +$B.list2obj=function(list,value){var res={},i=list.length +if(value===undefined){value=true} +while(i-->0){res[list[i]]=value} +return res} +var $operators={"//=":"ifloordiv",">>=":"irshift","<<=":"ilshift","**=":"ipow","**":"pow","//":"floordiv","<<":"lshift",">>":"rshift","+=":"iadd","-=":"isub","*=":"imul","/=":"itruediv","%=":"imod","&=":"iand","|=":"ior","^=":"ixor","+":"add","-":"sub","*":"mul","/":"truediv","%":"mod","&":"and","|":"or","~":"invert","^":"xor","<":"lt",">":"gt","<=":"le",">=":"ge","==":"eq","!=":"ne","or":"or","and":"and","in":"in","not": 
"not","is":"is","not_in":"not_in","is_not":"is_not" } +var $augmented_assigns={"//=":"ifloordiv",">>=":"irshift","<<=":"ilshift","**=":"ipow","+=":"iadd","-=":"isub","*=":"imul","/=":"itruediv","%=":"imod","&=":"iand","|=":"ior","^=":"ixor"} +var noassign=$B.list2obj(['True','False','None','__debug__']) +var $op_order=[['or'],['and'],['not'],['in','not_in'],['<','<=','>','>=','!=','==','is','is_not'],['|'],['^'],['&'],['>>','<<'],['+'],['-'],['*','/','//','%'],['unary_neg','unary_inv','unary_pos'],['**'] +] +var $op_weight={} +var $weight=1 +for(var $i=0;$i<$op_order.length;$i++){var _tmp=$op_order[$i] +for(var $j=0;$j<_tmp.length;$j++){$op_weight[_tmp[$j]]=$weight} +$weight++} +var $loop_num=0 +$B.func_magic=Math.random().toString(36).substr(2,8) +function $_SyntaxError(C,msg,indent){ +var ctx_node=C +while(ctx_node.type!=='node'){ctx_node=ctx_node.parent} +var tree_node=ctx_node.node,root=tree_node +while(root.parent!==undefined){root=root.parent} +var module=tree_node.module +var line_num=tree_node.line_num +if(root.line_info){line_num=root.line_info} +if(indent!==undefined){line_num++} +if(indent===undefined){if(Array.isArray(msg)){$B.$SyntaxError(module,msg[0],$pos)} +if(msg==="Triple string end not found"){ +$B.$SyntaxError(module,'invalid syntax : triple string end not found',$pos,line_num)} +$B.$SyntaxError(module,'invalid syntax',$pos,line_num)}else{throw $B.$IndentationError(module,msg,$pos)}} +function $Node(type){this.type=type +this.children=[] +this.yield_atoms=[] +this.add=function(child){ +this.children[this.children.length]=child +child.parent=this +child.module=this.module} +this.insert=function(pos,child){ +this.children.splice(pos,0,child) +child.parent=this +child.module=this.module} +this.toString=function(){return ""} +this.show=function(indent){ +var res='' +if(this.type==='module'){for(var i=0;i0)res +='{' +res +='\n' +for(var i=0;i0){res +=' '.repeat(indent) +res+='}\n'} +return res} +this.to_js=function(indent){ +if(this.js!==undefined)return this.js +this.res=[] +var pos=0 +this.unbound=[] +if(this.type==='module'){for(var i=0;i0)this.res[pos++]='{' +this.res[pos++]='\n' +for(var i=0;i0){this.res[pos++]=' '.repeat(indent) +this.res[pos++]='}\n'}} +this.js=this.res.join('') +return this.js} +this.transform=function(rank){ +if(this.yield_atoms.length>0){ +this.parent.children.splice(rank,1) +var offset=0 +for(var i=0;i0){assigned.push(left) +var ctx=node.C +ctx.tree=[] +var nleft=new $RawJSCtx(ctx,'var $temp'+$loop_num) +nleft.tree=ctx.tree +nassign=new $AssignCtx(nleft) +nassign.tree[1]=right +for(var i=0;i1){left_items=left.tree}else if(left.tree[0].type==='list_or_tuple'||left.tree[0].type==='target_list'){left_items=left.tree[0].tree}else if(left.tree[0].type=='id'){ +var name=left.tree[0].value +if($B._globals && $B._globals[scope.id] +&& $B._globals[scope.id][name]){void(0)}else{left.tree[0].bound=true}} +break +case 'target_list': +case 'list_or_tuple': +left_items=left.tree} +if(left_items===null){return} +var right=this.tree[1] +var right_items=null +if(right.type==='list'||right.type==='tuple'|| +(right.type==='expr' && right.tree.length>1)){right_items=right.tree} +if(right_items!==null){ +if(right_items.length>left_items.length){throw Error('ValueError : too many values to unpack (expected '+left_items.length+')')}else if(right_items.length=0;i--){node.parent.insert(rank,new_nodes[i])} +$loop_num++}else{ +var new_node=new $Node() +new_node.line_num=node.line_num +var js='var $right'+$loop_num+'=getattr' +js +='(iter('+right.to_js()+'),"__next__");' 
+new $NodeJSCtx(new_node,js) +var new_nodes=[new_node],pos=1 +var rlist_node=new $Node() +var $var='$rlist'+$loop_num +js='var '+$var+'=[], $pos=0;' +js +='while(1){try{'+$var+'[$pos++]=$right' +js +=$loop_num+'()}catch(err){break}};' +new $NodeJSCtx(rlist_node,js) +new_nodes[pos++]=rlist_node +var packed=null +for(var i=0;i1 ?' +js +=' "s" : "")+" to unpack")}' +new $NodeJSCtx(check_node,js) +new_nodes[pos++]=check_node +if(packed==null){var check_node=new $Node() +var min_length=left_items.length +js='if($rlist'+$loop_num+'.length>'+min_length+')' +js +='{throw ValueError("too many values to unpack ' +js +='(expected '+left_items.length+')")}' +new $NodeJSCtx(check_node,js) +new_nodes[pos++]=check_node} +var j=0 +for(var i=0;i=0;i--){node.parent.insert(rank,new_nodes[i])} +$loop_num++}} +this.to_js=function(){this.js_processed=true +if(this.parent.type==='call'){ +return '{$nat:"kw",name:'+this.tree[0].to_js()+',value:'+this.tree[1].to_js()+'}'} +var left=this.tree[0] +if(left.type==='expr')left=left.tree[0] +var right=this.tree[1] +if(left.type=='attribute' ||left.type=='sub'){ +var node=$get_node(this),right_js=right.to_js() +var res='',rvar='',$var='$temp'+$loop_num +if(right.type=='expr' && right.tree[0]!==undefined && +right.tree[0].type=='call' && +('eval'==right.tree[0].func.value || +'exec'==right.tree[0].func.value)){res +='var '+$var+'='+right_js+';\n' +rvar=$var}else if(right.type=='expr' && right.tree[0]!==undefined && +right.tree[0].type=='sub'){res +='var '+$var+'='+right_js+';\n' +rvar=$var}else{rvar=right_js} +if(left.type==='attribute'){ +$loop_num++ +left.func='setattr' +res +=left.to_js() +left.func='getattr' +res=res.substr(0,res.length-1) +return res + ','+rvar+');None;'} +if(left.type==='sub'){ +var seq=left.value.to_js(),temp='$temp'+$loop_num,type +if(left.value.type=='id'){type=$get_node(this).locals[left.value.value]} +$loop_num++ +var res='var '+temp+'='+seq+'\n' +if(type!=='list'){res +='if(Array.isArray('+temp+') && !'+temp+'.__class__){'} +if(left.tree.length==1){res +='$B.set_list_key('+temp+','+ +(left.tree[0].to_js()+''||'null')+','+ +right.to_js()+')'}else if(left.tree.length==2){res +='$B.set_list_slice('+temp+','+ +(left.tree[0].to_js()+''||'null')+','+ +(left.tree[1].to_js()+''||'null')+','+ +right.to_js()+')'}else if(left.tree.length==3){res +='$B.set_list_slice_step('+temp+','+ +(left.tree[0].to_js()+''||'null')+','+ +(left.tree[1].to_js()+''||'null')+','+ +(left.tree[2].to_js()+''||'null')+','+ +right.to_js()+')'} +if(type=='list'){return res} +res +='\n}else{' +if(left.tree.length==1){res +='$B.$setitem('+left.value.to_js() +res +=','+left.tree[0].to_js()+','+right_js+')};None;'}else{left.func='setitem' +res +=left.to_js() +res=res.substr(0,res.length-1) +left.func='getitem' +res +=','+right_js+')};None;'} +return res}} +return left.to_js()+'='+right.to_js()}} +function $AttrCtx(C){ +this.type='attribute' +this.value=C.tree[0] +this.parent=C +C.tree.pop() +C.tree[C.tree.length]=this +this.tree=[] +this.func='getattr' +this.toString=function(){return '(attr) '+this.value+'.'+this.name} +this.to_js=function(){this.js_processed=true +return this.func+'('+this.value.to_js()+',"'+this.name+'")'}} +function $AugmentedAssignCtx(C,op){ +this.type='augm_assign' +this.parent=C.parent +C.parent.tree.pop() +C.parent.tree[C.parent.tree.length]=this +this.op=op +this.tree=[C] +var scope=this.scope=$get_scope(this) +if(C.type=='expr' && C.tree[0].type=='id'){var name=C.tree[0].value +if(noassign[name]===true){$_SyntaxError(C,["can't assign to keyword"])}else 
if((scope.ntype=='def'||scope.ntype=='generator')&& +$B.bound[scope.id][name]===undefined){if(scope.globals===undefined ||scope.globals.indexOf(name)==-1){ +C.tree[0].unbound=true}}} +$get_node(this).bound_before=$B.keys($B.bound[scope.id]) +this.module=scope.module +this.toString=function(){return '(augm assign) '+this.tree} +this.transform=function(node,rank){var func='__'+$operators[op]+'__' +var offset=0,parent=node.parent +var line_num=node.line_num,lnum_set=false +parent.children.splice(rank,1) +var left_is_id=(this.tree[0].type=='expr' && +this.tree[0].tree[0].type=='id') +if(left_is_id){ +this.tree[0].tree[0].augm_assign=true +if($B.debug>0){var check_node=$NodeJS('if('+this.tree[0].to_js()+ +'===undefined){throw NameError("name \''+ + this.tree[0].tree[0].value+'\' is not defined")}') +node.parent.insert(rank,check_node) +offset++} +var left_id=this.tree[0].tree[0].value,was_bound=$B.bound[this.scope.id][left_id]!==undefined,left_id_unbound=this.tree[0].tree[0].unbound} +var right_is_int=(this.tree[1].type=='expr' && +this.tree[1].tree[0].type=='int') +var right=right_is_int ? this.tree[1].tree[0].to_js(): '$temp' +if(!right_is_int){ +var new_node=new $Node() +new_node.line_num=line_num +lnum_set=true +new $NodeJSCtx(new_node,'var $temp,$left;') +parent.insert(rank,new_node) +offset++ +var new_node=new $Node() +new_node.id=this.scope.id +var new_ctx=new $NodeCtx(new_node) +var new_expr=new $ExprCtx(new_ctx,'js',false) +var _id=new $RawJSCtx(new_expr,'$temp') +var assign=new $AssignCtx(new_expr) +assign.tree[1]=this.tree[1] +_id.parent=assign +parent.insert(rank+offset,new_node) +offset++} +var prefix='',in_class=false +switch(op){case '+=': +case '-=': +case '*=': +case '/=': +if(left_is_id){var scope=this.scope,local_ns='$local_'+scope.id.replace(/\./g,'_'),global_ns='$local_'+scope.module.replace(/\./g,'_'),prefix +switch(scope.ntype){case 'module': +prefix=global_ns +break +case 'def': +case 'generator': +if(scope.globals && scope.globals.indexOf(C.tree[0].value)>-1){prefix=global_ns}else{prefix='$locals'} +break; +case 'class': +var new_node=new $Node() +if(!lnum_set){new_node.line_num=line_num;lnum_set=true} +new $NodeJSCtx(new_node,'var $left='+C.to_js()) +parent.insert(rank+offset,new_node) +in_class=true +offset++}}} +var left=C.tree[0].to_js() +prefix=prefix && !C.tree[0].unknown_binding && left_id_unbound===undefined +var op1=op.charAt(0) +if(prefix){var left1=in_class ? '$left' : left +var new_node=new $Node() +if(!lnum_set){new_node.line_num=line_num;lnum_set=true} +js=right_is_int ? 'if(' : 'if(typeof $temp.valueOf()=="number" && ' +js +=left1+'.constructor===Number' +js +='&& '+left+op1+right+'>$B.min_int && '+left+op1+right+ +'< $B.max_int){' +js +=right_is_int ? '(' : '(typeof $temp=="number" && ' +js +='typeof '+left1+'=="number") ? ' +js +=left+op+right +js +=' : ('+left1+'.constructor===Number ? ' +js +=left+'=float('+left+op1 +js +=right_is_int ? right : right+'.valueOf()' +js +=') : '+left + op +js +=right_is_int ? 
right : right+'.valueOf()' +js +=')}' +new $NodeJSCtx(new_node,js) +parent.insert(rank+offset,new_node) +offset++} +var aaops={'+=':'add','-=':'sub','*=':'mul'} +if(C.tree[0].type=='sub' && +('+='==op ||'-='==op ||'*='==op)&& +C.tree[0].tree.length==1){var js1='$B.augm_item_'+aaops[op]+'(' +js1 +=C.tree[0].value.to_js() +js1 +=','+C.tree[0].tree[0].to_js()+',' +js1 +=right+');None;' +var new_node=new $Node() +if(!lnum_set){new_node.line_num=line_num;lnum_set=true} +new $NodeJSCtx(new_node,js1) +parent.insert(rank+offset,new_node) +offset++ +return} +var new_node=new $Node() +if(!lnum_set){new_node.line_num=line_num;lnum_set=true} +var js='' +if(prefix){js +='else '} +js +='if(!hasattr('+C.to_js()+',"'+func+'"))' +new $NodeJSCtx(new_node,js) +parent.insert(rank+offset,new_node) +offset ++ +var aa1=new $Node() +aa1.id=this.scope.id +var ctx1=new $NodeCtx(aa1) +var expr1=new $ExprCtx(ctx1,'clone',false) +if(left_id_unbound){new $RawJSCtx(expr1,'$locals["'+left_id+'"]')}else{expr1.tree=C.tree +for(var i=0;i0){if(this.tree[this.tree.length-1].tree.length==0){ +this.tree.pop()}} +var func_js=this.func.to_js() +if(this.func!==undefined){switch(this.func.value){case 'classmethod': +return 'classmethod('+$to_js(this.tree)+')' +case '$$super': +if(this.tree.length==0){ +var scope=$get_scope(this) +if(scope.ntype=='def' ||scope.ntype=='generator'){var def_scope=$get_scope(scope.C.tree[0]) +if(def_scope.ntype=='class'){new $IdCtx(this,def_scope.C.tree[0].name)}}} +if(this.tree.length==1){ +var scope=$get_scope(this) +if(scope.ntype=='def' ||scope.ntype=='generator'){var args=scope.C.tree[0].args +if(args.length>0){new $IdCtx(this,args[0])}}} +break +default: +if(this.func.type=='unary'){ +switch(this.func.op){case '+': +return 'getattr('+$to_js(this.tree)+',"__pos__")()' +case '-': +return 'getattr('+$to_js(this.tree)+',"__neg__")()' +case '~': +return 'getattr('+$to_js(this.tree)+',"__invert__")()'}}} +var _block=false +if($B.async_enabled){var scope=$get_scope(this.func) +if($B.block[scope.id]===undefined){} +else if($B.block[scope.id][this.func.value])_block=true} +var pos_args=[],kw_args=[],star_args=null,dstar_args=null +for(var i=0;i0){args_str +=','} +args_str +='_b_.list('+star_args+'))'} +if(this.func.value=="fghjk"){console.log('fghjk') +var kw_args_str='{'+kw_args.join(', ')+'}' +if(dstar_args){kw_args_str='$B.extend("'+this.func.value+'",'+kw_args_str +kw_args_str +=','+dstar_args+')'}else if(kw_args_str=='{}'){kw_args_str=''} +var res='getattr('+func_js+',"__call__")(['+args_str+']' +if(kw_args_str.length>0){res +=', '+kw_args_str} +return res + ')'} +var kw_args_str='{'+kw_args.join(', ')+'}' +if(dstar_args){kw_args_str='{$nat:"kw",kw:$B.extend("'+this.func.value+'",'+kw_args_str +kw_args_str +=','+dstar_args+')}'}else if(kw_args_str!=='{}'){kw_args_str='{$nat:"kw",kw:'+kw_args_str+'}'}else{kw_args_str=''} +if(star_args && kw_args_str){args_str +='.concat(['+kw_args_str+'])' }else{if(args_str && kw_args_str){args_str +=','+kw_args_str} +else if(!args_str){args_str=kw_args_str}} +if(star_args){ +args_str='.apply(null,'+args_str+')'}else{args_str='('+args_str+')'} +if($B.debug>0){ +var res='getattr('+func_js+',"__call__")' +return res+args_str} +if(this.tree.length>-1){if(this.func.type=='id'){if(this.func.is_builtin){ +if($B.builtin_funcs[this.func.value]!==undefined){return func_js+args_str}}else{var bound_obj=this.func.found +if(bound_obj &&(bound_obj.type=='class' || +bound_obj.type=='def')){return func_js+args_str}} +var res='('+func_js+'.$is_func ? 
' +res +=func_js+' : ' +res +='getattr('+func_js+',"__call__"))'+args_str}else{var res='getattr('+func_js+',"__call__")'+args_str} +return res} +return 'getattr('+func_js+',"__call__")()'}}} +function $ClassCtx(C){ +this.type='class' +this.parent=C +this.tree=[] +C.tree[C.tree.length]=this +this.expect='id' +this.toString=function(){return '(class) '+this.name+' '+this.tree+' args '+this.args} +var scope=this.scope=$get_scope(this) +this.parent.node.parent_block=scope +this.parent.node.bound={} +this.set_name=function(name){this.random=$B.UUID() +this.name=name +this.id=C.node.module+'_'+name+'_'+this.random +$B.bound[this.id]={} +$B.type[this.id]={} +if($B.async_enabled)$B.block[this.id]={} +$B.modules[this.id]=this.parent.node +this.parent.node.id=this.id +var parent_block=scope +while(parent_block.C && parent_block.C.tree[0].type=='class'){parent_block=parent_block.parent} +while(parent_block.C && +'def' !=parent_block.C.tree[0].type && +'generator' !=parent_block.C.tree[0].type){parent_block=parent_block.parent} +this.parent.node.parent_block=parent_block +this.level=this.scope.level +$B.bound[this.scope.id][name]=this +$B.type[this.scope.id][name]='class' +if(scope.is_function){if(scope.C.tree[0].locals.indexOf(name)==-1){scope.C.tree[0].locals.push(name)}}} +this.transform=function(node,rank){ +this.doc_string=$get_docstring(node) +var instance_decl=new $Node() +var local_ns='$locals_'+this.id.replace(/\./g,'_') +var js=';var '+local_ns+'={}' +js +=', $locals = '+local_ns+';' +new $NodeJSCtx(instance_decl,js) +node.insert(0,instance_decl) +var ret_obj=new $Node() +new $NodeJSCtx(ret_obj,'return '+local_ns+';') +node.insert(node.children.length,ret_obj) +var run_func=new $Node() +new $NodeJSCtx(run_func,')();') +node.parent.insert(rank+1,run_func) +var scope=$get_scope(this) +var name_ref=';$locals_'+scope.id.replace(/\./g,'_') +name_ref +='["'+this.name+'"]' +if(this.name=="FF"){ +var js=[name_ref +'=$B.$class_constructor1("'+this.name],pos=1}else{var js=[name_ref +'=$B.$class_constructor("'+this.name],pos=1} +js[pos++]='",$'+this.name+'_'+this.random +if(this.args!==undefined){ +var arg_tree=this.args.tree,args=[],kw=[] +for(var i=0;i'+ +__BRYTHON__.loop_timeout*1000+ +'){throw _b_.RuntimeError("script timeout")}'+ +h4+'return true'+h+'}\n' +res.splice(0,0,test_timeout) +res.push('$test_timeout'+num+'() && ')} +res.push('$locals["$no_break'+this.loop_num+'"] && ')}else if(tok=='else if'){var line_info=$get_node(this).line_num+','+$get_scope(this).id +res.push('($locals.$line_info="'+line_info+'") && ')} +if(this.tree.length==1){res.push($to_js(this.tree)+'))')}else{ +res.push(this.tree[0].to_js()+'))') +if(this.tree[1].tree.length>0){res.push('{'+this.tree[1].to_js()+'}')}} +return res.join('')}} +function $ContinueCtx(C){ +this.type='continue' +this.parent=C +C.tree[C.tree.length]=this +this.toString=function(){return '(continue)'} +this.to_js=function(){this.js_processed=true +return 'continue'}} +function $DebuggerCtx(C){ +this.type='continue' +this.parent=C +C.tree[C.tree.length]=this +this.toString=function(){return '(debugger)'} +this.to_js=function(){this.js_processed=true +return 'debugger'}} +function $DecoratorCtx(C){ +this.type='decorator' +this.parent=C +C.tree[C.tree.length]=this +this.tree=[] +this.toString=function(){return '(decorator) '+this.tree} +this.transform=function(node,rank){var func_rank=rank+1,children=node.parent.children +var decorators=[this.tree] +while(1){if(func_rank>=children.length){$_SyntaxError(C,['decorator expects function'])} +else 
if(children[func_rank].C.type=='node_js'){func_rank++} +else if(children[func_rank].C.tree[0].type==='decorator'){decorators.push(children[func_rank].C.tree[0].tree) +children.splice(func_rank,1)}else{break}} +this.dec_ids=[] +var pos=0 +for(var i=0;i0){defaults[dpos++]='"'+arg.name+'"' +defs1[dpos1++]=arg.name+':'+$to_js(arg.tree) +this.__defaults__.push($to_js(arg.tree))}}else if(arg.type=='func_star_arg'){if(arg.op=='*'){this.star_arg=arg.name} +else if(arg.op=='**'){this.kw_arg=arg.name}} +if(arg.annotation){annotations.push(arg.name+': '+arg.annotation.to_js())}} +var flags=67 +if(this.star_arg){flags |=4} +if(this.kw_arg){flags |=8} +if(this.type=='generator'){flags |=32} +var positional_str=[],positional_obj=[],pos=0 +for(var i=0,_len=this.positional_list.length;i<_len;i++){positional_str[pos]='"'+this.positional_list[i]+'"' +positional_obj[pos++]=this.positional_list[i]+':null'} +positional_str=positional_str.join(',') +positional_obj='{'+positional_obj.join(',')+'}' +var dobj=[],pos=0 +for(var i=0;i0 ||this.positional_list.length>0){ +nodes.push($NodeJS('var $len = arguments.length;')) +var new_node=new $Node() +var js='if($len>0 && arguments[$len-1].$nat)' +new $NodeJSCtx(new_node,js) +nodes.push(new_node) +new_node.add(make_args_nodes[0]) +if(make_args_nodes.length>1){new_node.add(make_args_nodes[1])} +var else_node=new $Node() +new $NodeJSCtx(else_node,'else') +nodes.push(else_node)} +if($B.debug>0){ +var pos_len=this.positional_list.length +js='if(arguments.length!='+pos_len+')' +var wrong_nb_node=new $Node() +new $NodeJSCtx(wrong_nb_node,js) +else_node.add(wrong_nb_node) +if(pos_len>0){ +js='if(arguments.length<'+pos_len+')'+ +'{var $missing='+pos_len+'-arguments.length;'+ +'throw TypeError("'+this.name+'() missing "+$missing+'+ +'" positional argument"+($missing>1 ? "s" : "")+": "'+ +'+new Array('+positional_str+').slice(arguments.length))}' +new_node=new $Node() +new $NodeJSCtx(new_node,js) +wrong_nb_node.add(new_node) +js='else if'}else{js='if'} +js +='(arguments.length>'+pos_len+')' +js +='{throw TypeError("'+this.name+'() takes '+pos_len +js +=' positional argument' +js +=(pos_len>1 ? 
"s" : "") +js +=' but more were given")}' +new_node=new $Node() +new $NodeJSCtx(new_node,js) +wrong_nb_node.add(new_node)} +if(this.positional_list.length>0){if(this.type=='generator'){for(var i=0;i1){nodes.push(make_args_nodes[1])}} +nodes.push($NodeJS('$B.frames_stack[$B.frames_stack.length-1][1] = $locals;')) +for(var i=nodes.length-1;i>=0;i--){node.children.splice(0,0,nodes[i])} +var def_func_node=new $Node() +if(only_positional){var params=Object.keys(this.varnames).join(', ') +new $NodeJSCtx(def_func_node,'return function('+params+')')}else{new $NodeJSCtx(def_func_node,'return function()')} +def_func_node.is_def_func=true +def_func_node.module=this.module +for(var i=0;i0){js='var $defaults = {'+defs1.join(',')+'};'} +new $NodeJSCtx(default_node,js) +node.insert(0,default_node) +node.add(def_func_node) +var ret_node=new $Node() +new $NodeJSCtx(ret_node,')();') +node.parent.insert(rank+1,ret_node) +var offset=2 +if(this.type==='generator' && !this.declared){var code=['var env=[], module=$B.last($B.frames_stack)[2]','for(var i=$B.frames_stack.length-1; i>=0; i--){',' var frame = $B.frames_stack[i]',' if(frame[2]!=module){break}',' env.push([frame[0], frame[1]])','}','env.push([module, $B.last($B.frames_stack)[3]])', +] +for(var i=0;i0){lnum='($locals.$line_info="'+$get_node(this).line_num+','+ +this.scope.id+'") && '} +return 'else if('+lnum+'$B.is_exc('+this.error_name+',['+res.join(',')+']))'}} +function $ExprCtx(C,name,with_commas){ +this.type='expr' +this.name=name +this.with_commas=with_commas +this.expect=',' +this.parent=C +this.tree=[] +C.tree[C.tree.length]=this +this.toString=function(){return '(expr '+with_commas+') '+this.tree} +this.to_js=function(arg){this.js_processed=true +if(this.type==='list')return '['+$to_js(this.tree)+']' +if(this.tree.length===1)return this.tree[0].to_js(arg) +return 'tuple('+$to_js(this.tree)+')'}} +function $ExprNot(C){ +this.type='expr_not' +this.parent=C +this.tree=[] +C.tree[C.tree.length]=this +this.toString=function(){return '(expr_not)'}} +function $FloatCtx(C,value){ +this.type='float' +this.value=value +this.toString=function(){return 'float '+this.value} +this.parent=C +this.tree=[] +C.tree[C.tree.length]=this +this.to_js=function(){this.js_processed=true +return 'float('+this.value+')'}} +function $ForExpr(C){ +this.type='for' +this.parent=C +this.tree=[] +C.tree[C.tree.length]=this +this.loop_num=$loop_num +this.module=$get_scope(this).module +$loop_num++ +this.toString=function(){return '(for) '+this.tree} +this.transform=function(node,rank){var scope=$get_scope(this),mod_name=scope.module,target=this.tree[0],target_is_1_tuple=target.tree.length==1 && target.expect=='id',iterable=this.tree[1],num=this.loop_num,local_ns='$locals_'+scope.id.replace(/\./g,'_'),h='\n'+' '.repeat(node.indent+4) +if(__BRYTHON__.loop_timeout){ +var test_timeout='var $time'+num+' = new Date()'+h+ +'function $test_timeout'+num+'(){if((new Date())-$time'+ +num+'>'+__BRYTHON__.loop_timeout*1000+ +'){throw _b_.RuntimeError("script timeout")}'+h+'return true}'} +var $range=false +if(target.tree.length==1 && +target.expct !='id' && +iterable.type=='expr' && +iterable.tree[0].type=='expr' && +iterable.tree[0].tree[0].type=='call'){var call=iterable.tree[0].tree[0] +if(call.func.type=='id'){var func_name=call.func.value +if(func_name=='range' && call.tree.length<3){$range=call}}} +var new_nodes=[],pos=0 +var children=node.children +var offset=1 +if($range && scope.ntype!='generator'){if(this.has_break){ +new_node=new $Node() +new 
$NodeJSCtx(new_node,local_ns+'["$no_break'+num+'"]=true') +new_nodes[pos++]=new_node} +var range_is_builtin=false +if(!scope.blurred){var _scope=$get_scope(this),found=[],fpos=0 +while(1){if($B.bound[_scope.id]['range']){found[fpos++]=_scope.id} +if(_scope.parent_block){_scope=_scope.parent_block} +else{break}} +range_is_builtin=found.length==1 && found[0]=="__builtins__" +if(found==['__builtins__']){range_is_builtin=true}} +var test_range_node=new $Node() +if(range_is_builtin){new $NodeJSCtx(test_range_node,'if(1)')}else{new $NodeJSCtx(test_range_node,'if('+call.func.to_js()+'===$B.builtins.range)')} +new_nodes[pos++]=test_range_node +var idt=target.to_js() +if($range.tree.length==1){var start=0,stop=$range.tree[0].to_js()}else{var start=$range.tree[0].to_js(),stop=$range.tree[1].to_js()} +var js=idt+'='+start+';'+h+'var $stop_'+num +'=$B.int_or_bool('+ +stop+'),'+h+ +' $next'+num+'= '+idt+','+h+ +' $safe'+num+'= typeof $next'+num+'=="number" && typeof '+ +'$stop_'+num+'=="number";'+h +if(__BRYTHON__.loop_timeout){js +=test_timeout+h+'while($test_timeout'+num+'())'}else{js +='while(true)'} +var for_node=new $Node() +new $NodeJSCtx(for_node,js) +for_node.add($NodeJS('if($safe'+num+' && $next'+num+'>= $stop_'+ +num+'){break}')) +for_node.add($NodeJS('else if(!$safe'+num+ +' && $B.ge($next'+num+', $stop_'+num+ +')){break}')) +for_node.add($NodeJS(idt+' = $next'+num)) +for_node.add($NodeJS('if($safe'+num+'){$next'+num+'+=1'+'}')) +for_node.add($NodeJS('else{$next'+num+'=$B.add($next'+num+',1)}')) +for(var i=0;i=0;i--){node.parent.insert(rank+k,new_nodes[k].children[i])} +node.parent.children[rank].line_num=node.line_num +node.children=[] +return 0} +var else_node=new $Node() +new $NodeJSCtx(else_node,'else') +new_nodes[pos++]=else_node +for(var i=new_nodes.length-1;i>=0;i--){node.parent.insert(rank+1,new_nodes[i])} +this.test_range=true +new_nodes=[],pos=0} +var new_node=new $Node() +new_node.line_num=$get_node(this).line_num +var js='$locals["$next'+num+'"]' +js +='=getattr(iter('+iterable.to_js()+'),"__next__");\n' +new $NodeJSCtx(new_node,js) +new_nodes[pos++]=new_node +if(this.has_break){ +new_node=new $Node() +new $NodeJSCtx(new_node,local_ns+'["$no_break'+num+'"]=true;') +new_nodes[pos++]=new_node} +var while_node=new $Node() +if(__BRYTHON__.loop_timeout){js=test_timeout+h +if(this.has_break){js +='while($test_timeout'+num+'() && '+ +local_ns+'["$no_break'+num+'"])'} +else{js +='while($test_timeout'+num+'())'}}else{if(this.has_break){js='while('+local_ns+'["$no_break'+num+'"])'} +else{js='while(1)'}} +new $NodeJSCtx(while_node,js) +while_node.C.loop_num=num +while_node.C.type='for' +while_node.line_num=node.line_num +if(scope.ntype=='generator'){ +while_node.loop_start=num} +new_nodes[pos++]=while_node +node.parent.children.splice(rank,1) +if(this.test_range){for(var i=new_nodes.length-1;i>=0;i--){else_node.insert(0,new_nodes[i])}}else{for(var i=new_nodes.length-1;i>=0;i--){node.parent.insert(rank,new_nodes[i]) +offset +=new_nodes.length}} +var try_node=new $Node() +new $NodeJSCtx(try_node,'try') +while_node.add(try_node) +var iter_node=new $Node() +iter_node.parent=$get_node(this).parent +iter_node.id=this.module +var C=new $NodeCtx(iter_node) +var target_expr=new $ExprCtx(C,'left',true) +if(target_is_1_tuple){ +var t=new $ListOrTupleCtx(target_expr) +t.real='tuple' +t.tree=target.tree}else{target_expr.tree=target.tree} +var assign=new $AssignCtx(target_expr) +assign.tree[1]=new $JSCode('$locals["$next'+num+'"]()') +try_node.add(iter_node) +var catch_node=new $Node() +var 
js='catch($err){if($B.is_exc($err,[StopIteration]))' +js +='{delete $locals["$next'+num+'"];$B.clear_exc();break;}' +js +='else{throw($err)}}' +new $NodeJSCtx(catch_node,js) +while_node.add(catch_node) +for(var i=0;i0){if(_mod.charAt(0)=='.'){if(package===undefined){if($B.imported[mod]!==undefined){package=$B.imported[mod].__package__}}else{package=$B.imported[package]} +if(package===undefined){return 'throw SystemError("Parent module \'\' not loaded,'+ +' cannot perform relative import")'}else if(package=='None'){console.log('package is None !')}else{packages.push(package)} +_mod=_mod.substr(1)}else{break}} +if(_mod){packages.push(_mod)} +this.module=packages.join('.') +var mod_name=this.module.replace(/\$/g,''),localns='$locals_'+scope.id.replace(/\./g,'_'); +if(this.blocking){res[pos++]='$B.$import("'; +res[pos++]=mod_name+'",["'; +res[pos++]=this.names.join('","')+'"], {'; +var sep=''; +for(var attr in this.aliases){res[pos++]=sep + '"'+attr+'": "'+this.aliases[attr]+'"'; +sep=',';} +res[pos++]='}, {}, true);'; +if(this.names[0]=='*'){res[pos++]='\n'+head+'for(var $attr in $B.imported["'+mod_name+ +'"]){if($attr.charAt(0)!=="_"){'+ +'$locals[$attr]=$B.imported["'+mod_name+'"][$attr]}};'}else{for(var i=0;i-1){found.push(scope)} +else if(scope.C && +scope.C.tree[0].type=='def' && +scope.C.tree[0].env.indexOf(val)>-1){found.push(scope)}}else{if($B.bound[scope.id][val]){found.push(scope)}}}else{ +if($B.bound[scope.id]===undefined){console.log('no bound',scope.id)} +if($B.bound[scope.id][val]){found.push(scope)}} +if(scope.parent_block){scope=scope.parent_block} +else{break}} +this.found=found +if(this.nonlocal && found[0]===innermost){found.shift()} +if(val=='fghj'){console.log('found for',val,found)} +if(found.length>0){ +if(!this.bound && found[0].C && found[0]===innermost +&& val.charAt(0)!='$'){var locs=$get_node(this).locals ||{},nonlocs=innermost.nonlocals +if(locs[val]===undefined && +((innermost.type!='def' ||innermost.type!='generator')&& +innermost.C.tree[0].args.indexOf(val)==-1)&& +(nonlocs===undefined ||nonlocs[val]===undefined)){this.result='$B.$local_search("'+val+'")' +return this.result}} +if(found.length>1 && found[0].C){if(found[0].C.tree[0].type=='class' && !this.bound){var ns0='$locals_'+found[0].id.replace(/\./g,'_'),ns1='$locals_'+found[1].id.replace(/\./g,'_'),res +if(bound_before){if(bound_before.indexOf(val)>-1){this.found=$B.bound[found[0].id][val] +res=ns0}else{this.found=$B.bound[found[1].id][val] +res=ns1} +this.result=res+'["'+val+'"]' +return this.result}else{this.found=false +var res=ns0 + '["'+val+'"]!==undefined ? 
' +res +=ns0 + '["'+val+'"] : ' +this.result=res + ns1 + '["'+val+'"]' +return this.result}}} +var scope=found[0] +this.found=$B.bound[scope.id][val] +var scope_ns='$locals_'+scope.id.replace(/\./g,'_') +if(scope.C===undefined){ +if(scope.id=='__builtins__'){if(gs.blurred){ +val='('+global_ns+'["'+val+'"] || '+val+')'}else{ +if(val!=='__builtins__'){val='$B.builtins.'+val} +this.is_builtin=true}}else if(scope.id==scope.module){ +if(this.bound ||this.augm_assign){ +val=scope_ns+'["'+val+'"]'}else{if(scope===innermost && this.env[val]===undefined){var locs=$get_node(this).locals ||{} +if(locs[val]===undefined){ +if(found.length>1 && found[1].id=='__builtins__'){this.is_builtin=true +this.result='$B.builtins.'+val+$to_js(this.tree,'') +return this.result}} +this.result='$B.$search("'+val+'")' +return this.result}else{if(scope.level<=2){ +val=scope_ns+'["'+val+'"]'}else{ +val='$B.$check_def("'+val+'",'+scope_ns+'["'+val+'"])'}}}}else{val=scope_ns+'["'+val+'"]'}}else if(scope===innermost){if($B._globals[scope.id]&& $B._globals[scope.id][val]){val=global_ns+'["'+val+'"]'}else if(!this.bound && !this.augm_assign){if(scope.level<=3){ +val='$locals["'+val+'"]'}else{ +val='$B.$check_def_local("'+val+'",$locals["'+val+'"])'}}else{val='$locals["'+val+'"]'}}else if(!this.bound && !this.augm_assign){ +if(scope.ntype=='generator'){ +var up=0, +sc=innermost +while(sc!==scope){up++;sc=sc.parent_block} +var scope_name="$B.frames_stack[$B.frames_stack.length-1-"+up+"][1]" +val='$B.$check_def_free("'+val+'",'+scope_name+'["'+val+'"])'}else{val='$B.$check_def_free("'+val+'",'+scope_ns+'["'+val+'"])'}}else{val=scope_ns+'["'+val+'"]'} +this.result=val+$to_js(this.tree,'') +return this.result}else{ +this.unknown_binding=true +this.result='$B.$search("'+val+'")' +return this.result}}} +function $ImaginaryCtx(C,value){ +this.type='imaginary' +this.value=value +this.toString=function(){return 'imaginary '+this.value} +this.parent=C +this.tree=[] +C.tree[C.tree.length]=this +this.to_js=function(){this.js_processed=true +return 'complex(0,'+this.value+')'}} +function $ImportCtx(C){ +this.type='import' +this.parent=C +this.tree=[] +C.tree[C.tree.length]=this +this.expect='id' +this.toString=function(){return 'import '+this.tree} +this.bind_names=function(){ +var scope=$get_scope(this) +for(var i=0;i1){bound=parts[0]}}else{bound=this.tree[i].alias} +$B.bound[scope.id][bound]={level: scope.level} +$B.type[scope.id][bound]='module'}} +this.to_js=function(){this.js_processed=true +var scope=$get_scope(this) +var mod=scope.module +var res=[],pos=0 +for(var i=0;i$B.min_int && v<$B.max_int){return v} +else{return '$B.LongInt("'+value[1]+'", '+value[0]+')'}}} +function $JSCode(js){this.js=js +this.toString=function(){return this.js} +this.to_js=function(){this.js_processed=true +return this.js}} +function $KwArgCtx(C){ +this.type='kwarg' +this.parent=C.parent +this.tree=[C.tree[0]] +C.parent.tree.pop() +C.parent.tree.push(this) +var value=this.tree[0].value +var ctx=C.parent.parent +if(ctx.kwargs===undefined){ctx.kwargs=[value]} +else if(ctx.kwargs.indexOf(value)===-1){ctx.kwargs.push(value)} +else{$_SyntaxError(C,['keyword argument repeated'])} +var scope=$get_scope(this) +this.toString=function(){return 'kwarg '+this.tree[0]+'='+this.tree[1]} +this.to_js=function(){this.js_processed=true +var key=this.tree[0].value +if(key.substr(0,2)=='$$'){key=key.substr(2)} +var res='{$nat:"kw",name:"'+key+'",' +return res + 'value:'+$to_js(this.tree.slice(1,this.tree.length))+'}'}} +function $LambdaCtx(C){ +this.type='lambda' 
+this.parent=C +C.tree[C.tree.length]=this +this.tree=[] +this.args_start=$pos+6 +this.vars=[] +this.locals=[] +this.toString=function(){return '(lambda) '+this.args_start+' '+this.body_start} +this.to_js=function(){this.js_processed=true +var module=$get_module(this) +var src=$B.$py_src[module.id] +var qesc=new RegExp('"',"g"), +args=src.substring(this.args_start,this.body_start),body=src.substring(this.body_start+1,this.body_end) +body=body.replace(/\n/g,' ') +var scope=$get_scope(this) +var rand=$B.UUID(),func_name='lambda_'+$B.lambda_magic+'_'+rand,py='def '+func_name+'('+args+'):\n' +py +=' return '+body +var lambda_name='lambda'+rand,module_name=module.id.replace(/\./g,'_'),scope_id=scope.id.replace(/\./g,'_') +var js=$B.py2js(py,module_name,lambda_name,scope_id).to_js() +js='(function(){\n'+js+'\nreturn $locals.'+func_name+'\n})()' +delete $B.modules[lambda_name] +$B.clear_ns(lambda_name) +return js}} +function $ListOrTupleCtx(C,real){ +this.type='list_or_tuple' +this.start=$pos +this.real=real +this.expect='id' +this.closed=false +this.parent=C +this.tree=[] +C.tree[C.tree.length]=this +this.toString=function(){switch(this.real){case 'list': +return '(list) ['+this.tree+']' +case 'list_comp': +case 'gen_expr': +return '('+this.real+') ['+this.intervals+'-'+this.tree+']' +default: +return '(tuple) ('+this.tree+')'}} +this.is_comp=function(){switch(this.real){case 'list_comp': +case 'gen_expr': +case 'dict_or_set_comp': +return true} +return false} +this.get_src=function(){ +var scope=$get_scope(this) +var ident=scope.id +while($B.$py_src[ident]===undefined && $B.modules[ident].parent_block){ident=$B.modules[ident].parent_block.id} +if($B.$py_src[ident]===undefined){ +return $B.$py_src[scope.module]} +return $B.$py_src[ident]} +this.ids=function(){ +var _ids={} +for(var i=0;i1){var new_node=new $Node() +var ctx=new $NodeCtx(new_node) +ctx.tree=[this.tree[1]] +new_node.indent=node.indent+4 +this.tree.pop() +node.add(new_node)} +if(node.children.length==0){this.js=$to_js(this.tree)+';'}else{this.js=$to_js(this.tree)} +return this.js}} +function $NodeJS(js){var node=new $Node() +new $NodeJSCtx(node,js) +return node} +function $NodeJSCtx(node,js){ +this.node=node +node.C=this +this.type='node_js' +this.tree=[js] +this.toString=function(){return 'js '+js} +this.to_js=function(){this.js_processed=true +return js}} +function $NonlocalCtx(C){ +this.type='global' +this.parent=C +this.tree=[] +this.names={} +C.tree[C.tree.length]=this +this.expect='id' +this.scope=$get_scope(this) +this.scope.nonlocals=this.scope.nonlocals ||{} +if(this.scope.C===undefined){$_SyntaxError(C,["nonlocal declaration not allowed at module level"])} +this.toString=function(){return 'global '+this.tree} +this.add=function(name){if($B.bound[this.scope.id][name]=='arg'){$_SyntaxError(C,["name '"+name+"' is parameter and nonlocal"])} +this.names[name]=[false,$pos] +this.scope.nonlocals[name]=true} +this.transform=function(node,rank){var pscope=this.scope.parent_block +if(pscope.C===undefined){$_SyntaxError(C,["no binding for nonlocal '"+ +$B.last(Object.keys(this.names))+"' found"])}else{while(pscope!==undefined && pscope.C!==undefined){for(var name in this.names){if($B.bound[pscope.id][name]!==undefined){this.names[name]=[true]}} +pscope=pscope.parent_block} +for(var name in this.names){if(!this.names[name][0]){console.log('nonlocal error, C '+C) +$pos=this.names[name][1] +$_SyntaxError(C,["no binding for nonlocal '"+name+"' found"])}}}} +this.to_js=function(){this.js_processed=true +return ''}} +function $NotCtx(C){ 
+this.type='not' +this.parent=C +this.tree=[] +C.tree[C.tree.length]=this +this.toString=function(){return 'not ('+this.tree+')'} +this.to_js=function(){this.js_processed=true +return '!bool('+$to_js(this.tree)+')'}} +function $OpCtx(C,op){ +this.type='op' +this.op=op +this.parent=C.parent +this.tree=[C] +this.scope=$get_scope(this) +if(C.type=="expr"){if(['int','float','str'].indexOf(C.tree[0].type)>-1){this.left_type=C.tree[0].type}else if(C.tree[0].type=="id"){var binding=$B.bound[this.scope.id][C.tree[0].value] +if(binding){this.left_type=binding.type}}} +C.parent.tree.pop() +C.parent.tree.push(this) +this.toString=function(){return '(op '+this.op+') ['+this.tree+']'} +this.to_js=function(){this.js_processed=true +var comps={'==':'eq','!=':'ne','>=':'ge','<=':'le','<':'lt','>':'gt'} +if(comps[this.op]!==undefined){var method=comps[this.op] +if(this.tree[0].type=='expr' && this.tree[1].type=='expr'){var t0=this.tree[0].tree[0],t1=this.tree[1].tree[0] +switch(t1.type){case 'int': +switch(t0.type){case 'int': +return t0.to_js()+this.op+t1.to_js() +case 'str': +return '$B.$TypeError("unorderable types: int() < str()")' +case 'id': +var res='typeof '+t0.to_js()+'=="number" ? ' +res +=t0.to_js()+this.op+t1.to_js()+' : ' +res +='getattr('+this.tree[0].to_js() +res +=',"__'+method+'__")('+this.tree[1].to_js()+')' +return res} +break; +case 'str': +switch(t0.type){case 'str': +return t0.to_js()+this.op+t1.to_js() +case 'int': +return '$B.$TypeError("unorderable types: str() < int()")' +case 'id': +var res='typeof '+t0.to_js()+'=="string" ? ' +res +=t0.to_js()+this.op+t1.to_js()+' : ' +res +='getattr('+this.tree[0].to_js() +res +=',"__'+method+'__")('+this.tree[1].to_js()+')' +return res} +break; +case 'id': +if(t0.type=='id'){var res='typeof '+t0.to_js()+'!="object" && ' +res +='typeof '+t0.to_js()+'==typeof '+t1.to_js() +res +=' ? 
'+t0.to_js()+this.op+t1.to_js()+' : ' +res +='getattr('+this.tree[0].to_js() +res +=',"__'+method+'__")('+this.tree[1].to_js()+')' +return res} +break;}}} +switch(this.op){case 'and': +var res='$B.$test_expr($B.$test_item('+this.tree[0].to_js()+')&&' +return res + '$B.$test_item('+this.tree[1].to_js()+'))' +case 'or': +var res='$B.$test_expr($B.$test_item('+this.tree[0].to_js()+')||' +return res + '$B.$test_item('+this.tree[1].to_js()+'))' +case 'in': +return '$B.$is_member('+$to_js(this.tree)+')' +case 'not_in': +return '!$B.$is_member('+$to_js(this.tree)+')' +case 'unary_neg': +case 'unary_pos': +case 'unary_inv': +var op,method +if(this.op=='unary_neg'){op='-';method='__neg__'} +else if(this.op=='unary_pos'){op='+';method='__pos__'} +else{op='~';method='__invert__'} +if(this.tree[1].type=="expr"){var x=this.tree[1].tree[0] +switch(x.type){case 'int': +var v=parseInt(x.value[1],x.value[0]) +if(v>$B.min_int && v<$B.max_int){return op+v} +return 'getattr('+x.to_js()+', "'+method+'")()' +case 'float': +return 'float('+op+x.value+')' +case 'imaginary': +return 'complex(0,'+op+x.value+')'}} +return 'getattr('+this.tree[1].to_js()+',"'+method+'")()' +case 'is': +return this.tree[0].to_js()+ '===' + this.tree[1].to_js() +case 'is_not': +return this.tree[0].to_js()+ '!==' + this.tree[1].to_js() +case '*': +case '+': +case '-': +var op=this.op,vars=[],has_float_lit=false,scope=$get_scope(this) +function is_simple(elt){if(elt.type=='expr' && elt.tree[0].type=='int'){return true} +else if(elt.type=='expr' && elt.tree[0].type=='float'){has_float_lit=true +return true}else if(elt.type=='expr' && elt.tree[0].type=='list_or_tuple' +&& elt.tree[0].real=='tuple' +&& elt.tree[0].tree.length==1 +&& elt.tree[0].tree[0].type=='expr'){return is_simple(elt.tree[0].tree[0].tree[0])}else if(elt.type=='expr' && elt.tree[0].type=='id'){var _var=elt.tree[0].to_js() +if(vars.indexOf(_var)==-1){vars.push(_var)} +return true}else if(elt.type=='op' &&['*','+','-'].indexOf(elt.op)>-1){for(var i=0;i-1){t=v.type}else if(v.type=='id' && ns[v.value]){t=ns[v.value].type} +return t} +var e0=this.tree[0],e1=this.tree[1] +if(is_simple(this)){var v0=this.tree[0].tree[0] +var v1=this.tree[1].tree[0] +if(vars.length==0 && !has_float_lit){ +return this.simple_js()}else if(vars.length==0){ +return 'new Number('+this.simple_js()+')'}else{ +var ns=$B.bound[scope.id],t0=get_type(ns,v0),t1=get_type(ns,v1) +if((t0=='float' && t1=='float')|| +(this.op=='+' && t0=='str' && t1=='str')){this.result_type=t0 +return v0.to_js()+this.op+v1.to_js()}else if(['int','float'].indexOf(t0)>-1 && +['int','float'].indexOf(t1)>-1){if(t0=='int' && t1=='int'){this.result_type='int'} +else{this.result_type='float'} +switch(this.op){case '+': +return '$B.add('+v0.to_js()+','+v1.to_js()+')' +case '-': +return '$B.sub('+v0.to_js()+','+v1.to_js()+')' +case '*': +return '$B.mul('+v0.to_js()+','+v1.to_js()+')'}} +var tests=[],tests1=[],pos=0 +for(var i=0;i0 +&& child.tree[0].type=='packed'){$_SyntaxError(C,["two starred expressions in assignment"])}}} +this.parent=C +this.tree=[] +C.tree[C.tree.length]=this +this.toString=function(){return '(packed) '+this.tree} +this.to_js=function(){this.js_processed=true +return $to_js(this.tree)}} +function $PassCtx(C){ +this.type='pass' +this.parent=C +this.tree=[] +C.tree[C.tree.length]=this +this.toString=function(){return '(pass)'} +this.to_js=function(){this.js_processed=true +return 'void(0)'}} +function $RaiseCtx(C){ +this.type='raise' +this.parent=C +this.tree=[] +C.tree[C.tree.length]=this 
+this.toString=function(){return ' (raise) '+this.tree} +this.to_js=function(){this.js_processed=true +var res='' +if(this.tree.length===0)return '$B.$raise()' +var exc=this.tree[0],exc_js=exc.to_js() +if(exc.type==='id' || +(exc.type==='expr' && exc.tree[0].type==='id')){res='if(isinstance('+exc_js+',type)){throw '+exc_js+'()}' +return res + 'else{throw '+exc_js+'}'} +while(this.tree.length>1)this.tree.pop() +return res+'throw '+$to_js(this.tree)}} +function $RawJSCtx(C,js){this.type="raw_js" +C.tree[C.tree.length]=this +this.parent=C +this.toString=function(){return '(js) '+js} +this.to_js=function(){this.js_processed=true +return js}} +function $ReturnCtx(C){ +this.type='return' +this.parent=C +this.tree=[] +C.tree[C.tree.length]=this +var node=$get_node(this) +while(node.parent){if(node.parent.C && node.parent.C.tree[0].type=='for'){node.parent.C.tree[0].has_return=true +break} +node=node.parent} +this.toString=function(){return 'return '+this.tree} +this.to_js=function(){this.js_processed=true +if(this.tree.length==1 && this.tree[0].type=='abstract_expr'){ +this.tree.pop() +new $IdCtx(new $ExprCtx(this,'rvalue',false),'None')} +var scope=$get_scope(this) +if(scope.ntype=='generator'){return 'return [$B.generator_return(' + $to_js(this.tree)+')]'} +var node=$get_node(this),pnode,flag,leave_frame=true,in_try=false +while(node && leave_frame){if(node.is_try){in_try=true +pnode=node.parent,flag=false +for(var i=0;i0){var elt=pctx.tree[0] +if(elt.type=='for' || +elt.type=='asyncfor' || +(elt.type=='condition' && elt.token=='while')){elt.has_break=true +elt.else_node=$get_node(this) +this.loop_num=elt.loop_num}}} +this.toString=function(){return this.token} +this.to_js=function(){this.js_processed=true +if(this.token=='finally')return this.token +if(this.loop_num!==undefined){var scope=$get_scope(this) +var res='if($locals_'+scope.id.replace(/\./g,'_') +return res +'["$no_break'+this.loop_num+'"])'} +return this.token}} +function $StarArgCtx(C){ +this.type='star_arg' +this.parent=C +this.tree=[] +C.tree[C.tree.length]=this +this.toString=function(){return '(star arg) '+this.tree} +this.to_js=function(){this.js_processed=true +return '{$nat:"ptuple",arg:'+$to_js(this.tree)+'}'}} +function $StringCtx(C,value){ +this.type='str' +this.parent=C +this.tree=[value] +this.raw=false +C.tree[C.tree.length]=this +this.toString=function(){return 'string '+(this.tree||'')} +this.to_js=function(){this.js_processed=true +var res='',type=null +for(var i=0;i0 && ctx.tree[0].alias!==null +&& ctx.tree[0].alias!==undefined){ +var new_node=new $Node() +var alias=ctx.tree[0].alias +var js='$locals["'+alias+'"]' +js +='=$B.exception($err'+$loop_num+')' +new $NodeJSCtx(new_node,js) +node.parent.children[pos].insert(0,new_node)} +catch_node.insert(catch_node.children.length,node.parent.children[pos]) +if(ctx.tree.length===0){if(has_default){$_SyntaxError(C,'more than one except: line')} +has_default=true} +node.parent.children.splice(pos,1)}else if(ctx.type==='single_kw' && ctx.token==='finally'){has_finally=true +pos++}else if(ctx.type==='single_kw' && ctx.token==='else'){if(has_else){$_SyntaxError(C,"more than one 'else'")} +if(has_finally){$_SyntaxError(C,"'else' after 'finally'")} +has_else=true +var else_body=node.parent.children[pos] +node.parent.children.splice(pos,1)}else{break}} +if(!has_default){ +var new_node=new $Node(),ctx=new $NodeCtx(new_node) +catch_node.insert(catch_node.children.length,new_node) +new $SingleKwCtx(ctx,'else') +new_node.add($NodeJS('throw $err'+$loop_num))} +if(has_else){var 
else_node=new $Node() +else_node.module=scope.module +new $NodeJSCtx(else_node,'if(!$failed'+$loop_num+')') +for(var i=0;i1){ +var suite=node.children,item=this.tree.pop(),new_node=new $Node(),ctx=new $NodeCtx(new_node),with_ctx=new $WithCtx(ctx) +item.parent=with_ctx +with_ctx.tree=[item] +for(var i=0;i1){var nw=new $Node() +var ctx=new $NodeCtx(nw) +nw.parent=node +nw.module=node.module +nw.indent=node.indent+4 +var wc=new $WithCtx(ctx) +wc.tree=this.tree.slice(1) +for(var i=0;i=0;i--){ids[i].alias=alias[i].value +this.tree.splice(0,0,ids[i])}} +var block=node.children +node.children=[] +var try_node=new $Node() +try_node.is_try=true +new $NodeJSCtx(try_node,'try') +node.add(try_node) +if(this.tree[0].alias){var alias=this.tree[0].alias +var js='$locals'+'["'+alias+'"] = $value'+num +var value_node=new $Node() +new $NodeJSCtx(value_node,js) +try_node.add(value_node)} +for(var i=0;i=$B.bound[scope_id][name].level){$B.bound[scope_id][name].level=level}}else{$B.bound[scope_id][name]={level: level}}} +function $previous(C){var previous=C.node.parent.children[C.node.parent.children.length-2] +if(!previous ||!previous.C){$_SyntaxError(C,'keyword not following correct keyword')} +return previous.C.tree[0]} +function $get_docstring(node){var doc_string='' +if(node.children.length>0){var firstchild=node.children[0] +if(firstchild.C.tree && firstchild.C.tree[0].type=='expr'){if(firstchild.C.tree[0].tree[0].type=='str') +doc_string=firstchild.C.tree[0].tree[0].to_js()}} +return doc_string} +function $get_scope(C){ +var ctx_node=C.parent +while(ctx_node.type!=='node'){ctx_node=ctx_node.parent} +var tree_node=ctx_node.node,scope=null,level=1 +while(tree_node.parent && tree_node.parent.type!=='module'){var ntype=tree_node.parent.C.tree[0].type +switch(ntype){case 'def': +case 'class': +case 'generator': +var scope=tree_node.parent +scope.ntype=ntype +scope.is_function=ntype!='class' +scope.level=level +return scope} +tree_node=tree_node.parent +level++} +var scope=tree_node.parent ||tree_node +scope.ntype="module" +scope.level=level +return scope} +function $get_module(C){ +var ctx_node=C.parent +while(ctx_node.type!=='node'){ctx_node=ctx_node.parent} +var tree_node=ctx_node.node +var scope=null +while(tree_node.parent.type!=='module'){tree_node=tree_node.parent} +var scope=tree_node.parent +scope.ntype="module" +return scope} +function $get_node(C){var ctx=C +while(ctx.parent){ctx=ctx.parent} +return ctx.node} +function $get_blocks(name,scope){var res=[] +while(true){if($B.bound[scope.id][name]!==undefined){res.push(scope.id)} +if(scope.parent_block){if(scope.parent_block.id=='__builtins__'){if(scope.blurred){return false}}}else{break} +scope=scope.parent_block} +return res} +function $set_type(scope,expr,value){ +if(expr.type=='expr'){expr=expr.tree[0]} +while(value.type=='expr' && value.tree.length==1){value=value.tree[0]} +if(value.type=='list_or_tuple' && value.real=='tuple' && +value.tree.length==1){return $set_type(scope.id,expr,value.tree[0])} +if($B.type[scope.id]===undefined){return} +if(expr.type=="id"){switch(value.type){case 'int': +case 'str': +$B.type[scope.id][expr.value]=value.type +return +case 'list_or_tuple': +case 'dict_or_set': +$B.type[scope.id][expr.value]=value.real +return +case 'id': +$B.type[scope.id][expr.value]=$B.type[scope.id][value.value] +return +case 'call': +var func_name=value.func.value +if($B.bound.__builtins__[func_name]!==undefined){var blocks=$get_blocks(func_name,scope) +if(blocks.length==1 && blocks[0]=='__builtins__'){switch(func_name){case 'int': +case 
'list': +case 'str': +$B.type[scope.id][expr.value]=func_name +return}}} +break +default: +break}} +$B.type[scope.id][expr.value]=false} +function $ws(n){return ' '.repeat(n)} +function $to_js_map(tree_element){if(tree_element.to_js !==undefined)return tree_element.to_js() +throw Error('no to_js() for '+tree_element)} +function $to_js(tree,sep){if(sep===undefined){sep=','} +return tree.map($to_js_map).join(sep)} +var $expr_starters=['id','imaginary','int','float','str','bytes','[','(','{','not','lambda'] +function $arbo(ctx){while(ctx.parent!=undefined){ctx=ctx.parent} +return ctx} +function $transition(C,token){ +switch(C.type){case 'abstract_expr': +switch(token){case 'id': +case 'imaginary': +case 'int': +case 'float': +case 'str': +case 'bytes': +case '[': +case '(': +case '{': +case '.': +case 'not': +case 'lambda': +case 'yield': +C.parent.tree.pop() +var commas=C.with_commas +C=C.parent} +switch(token){case 'id': +return new $IdCtx(new $ExprCtx(C,'id',commas),arguments[2]) +case 'str': +return new $StringCtx(new $ExprCtx(C,'str',commas),arguments[2]) +case 'bytes': +return new $StringCtx(new $ExprCtx(C,'bytes',commas),arguments[2]) +case 'int': +return new $IntCtx(new $ExprCtx(C,'int',commas),arguments[2]) +case 'float': +return new $FloatCtx(new $ExprCtx(C,'float',commas),arguments[2]) +case 'imaginary': +return new $ImaginaryCtx(new $ExprCtx(C,'imaginary',commas),arguments[2]) +case '(': +return new $ListOrTupleCtx(new $ExprCtx(C,'tuple',commas),'tuple') +case '[': +return new $ListOrTupleCtx(new $ExprCtx(C,'list',commas),'list') +case '{': +return new $DictOrSetCtx(new $ExprCtx(C,'dict_or_set',commas)) +case '.': +return new $EllipsisCtx(new $ExprCtx(C,'ellipsis',commas)) +case 'not': +if(C.type==='op'&&C.op==='is'){ +C.op='is_not' +return C} +return new $NotCtx(new $ExprCtx(C,'not',commas)) +case 'lambda': +return new $LambdaCtx(new $ExprCtx(C,'lambda',commas)) +case 'op': +var tg=arguments[2] +switch(tg){case '*': +C.parent.tree.pop() +var commas=C.with_commas +C=C.parent +return new $PackedCtx(new $ExprCtx(C,'expr',commas)) +case '-': +case '~': +case '+': +C.parent.tree.pop() +var left=new $UnaryCtx(C.parent,tg) +if(tg=='-'){var op_expr=new $OpCtx(left,'unary_neg')} +else if(tg=='+'){var op_expr=new $OpCtx(left,'unary_pos')} +else{var op_expr=new $OpCtx(left,'unary_inv')} +return new $AbstractExprCtx(op_expr,false) +case 'not': +C.parent.tree.pop() +var commas=C.with_commas +C=C.parent +return new $NotCtx(new $ExprCtx(C,'not',commas))} +$_SyntaxError(C,'token '+token+' after '+C) +case '=': +$_SyntaxError(C,token) +case 'yield': +return new $AbstractExprCtx(new $YieldCtx(C),true) +case ':': +return $transition(C.parent,token,arguments[2]) +case ')': +case ',': +switch(C.parent.type){case 'list_or_tuple': +case 'call_arg': +case 'op': +case 'yield': +break +default: +$_SyntaxError(C,token)}} +return $transition(C.parent,token,arguments[2]) +case 'annotation': +return $transition(C.parent,token) +case 'assert': +if(token==='eol')return $transition(C.parent,token) +$_SyntaxError(C,token) +case 'assign': +if(token==='eol'){if(C.tree[1].type=='abstract_expr'){$_SyntaxError(C,'token '+token+' after '+C)} +C.guess_type() +return $transition(C.parent,'eol')} +$_SyntaxError(C,'token '+token+' after '+C) +case 'attribute': +if(token==='id'){var name=arguments[2] +if(noassign[name]===true){$_SyntaxError(C,["cannot assign to "+name])} +C.name=name +return C.parent} +$_SyntaxError(C,token) +case 'augm_assign': +if(token==='eol'){if(C.tree[1].type=='abstract_expr'){$_SyntaxError(C,'token 
'+token+' after '+C)} +return $transition(C.parent,'eol')} +$_SyntaxError(C,'token '+token+' after '+C) +case 'break': +if(token==='eol')return $transition(C.parent,'eol') +$_SyntaxError(C,token) +case 'call': +switch(token){case ',': +if(C.expect=='id'){$_SyntaxError(C,token)} +return C +case 'id': +case 'imaginary': +case 'int': +case 'float': +case 'str': +case 'bytes': +case '[': +case '(': +case '{': +case '.': +case 'not': +case 'lambda': +if(C.has_dstar)$_SyntaxError(C,token) +C.expect=',' +return $transition(new $CallArgCtx(C),token,arguments[2]) +case ')': +C.end=$pos +return C.parent +case 'op': +C.expect=',' +switch(arguments[2]){case '-': +case '~': +case '+': +C.expect=',' +return $transition(new $CallArgCtx(C),token,arguments[2]) +case '*': +C.has_star=true; +return new $StarArgCtx(C) +case '**': +C.has_dstar=true +return new $DoubleStarArgCtx(C)} +throw Error('SyntaxError')} +return $transition(C.parent,token,arguments[2]) +case 'call_arg': +switch(token){case 'id': +case 'imaginary': +case 'int': +case 'float': +case 'str': +case 'bytes': +case '[': +case '(': +case '{': +case '.': +case 'not': +case 'lambda': +if(C.expect==='id'){C.expect=',' +var expr=new $AbstractExprCtx(C,false) +return $transition(expr,token,arguments[2])} +break +case '=': +if(C.expect===','){return new $ExprCtx(new $KwArgCtx(C),'kw_value',false)} +break +case 'for': +var lst=new $ListOrTupleCtx(C,'gen_expr') +lst.vars=C.vars +lst.locals=C.locals +lst.intervals=[C.start] +C.tree.pop() +lst.expression=C.tree +C.tree=[lst] +lst.tree=[] +var comp=new $ComprehensionCtx(lst) +return new $TargetListCtx(new $CompForCtx(comp)) +case 'op': +if(C.expect==='id'){var op=arguments[2] +C.expect=',' +switch(op){case '+': +case '-': +case '~': +return $transition(new $AbstractExprCtx(C,false),token,op) +case '*': +return new $StarArgCtx(C) +case '**': +return new $DoubleStarArgCtx(C)}} +$_SyntaxError(C,'token '+token+' after '+C) +case ')': +if(C.parent.kwargs && +$B.last(C.parent.tree).tree[0]&& +['kwarg','star_arg','double_star_arg'].indexOf($B.last(C.parent.tree).tree[0].type)==-1){$_SyntaxError(C,['non-keyword arg after keyword arg'])} +if(C.tree.length>0){var son=C.tree[C.tree.length-1] +if(son.type==='list_or_tuple'&&son.real==='gen_expr'){son.intervals.push($pos)}} +return $transition(C.parent,token) +case ':': +if(C.expect===',' && C.parent.parent.type==='lambda'){return $transition(C.parent.parent,token)} +break +case ',': +if(C.expect===','){if(C.parent.kwargs && +['kwarg','star_arg','double_star_arg'].indexOf($B.last(C.parent.tree).tree[0].type)==-1){console.log('err2') +$_SyntaxError(C,['non-keyword arg after keyword arg'])} +return $transition(C.parent,token,arguments[2])} +console.log('C '+C+'token '+token+' expect '+C.expect)} +$_SyntaxError(C,'token '+token+' after '+C) +case 'class': +switch(token){case 'id': +if(C.expect==='id'){C.set_name(arguments[2]) +C.expect='(:' +return C} +break +case '(': +return new $CallCtx(C) +case ':': +return $BodyCtx(C)} +$_SyntaxError(C,'token '+token+' after '+C) +case 'comp_if': +return $transition(C.parent,token,arguments[2]) +case 'comp_for': +if(token==='in' && C.expect==='in'){C.expect=null +return new $AbstractExprCtx(new $CompIterableCtx(C),true)} +if(C.expect===null){ +return $transition(C.parent,token,arguments[2])} +$_SyntaxError(C,'token '+token+' after '+C) +case 'comp_iterable': +return $transition(C.parent,token,arguments[2]) +case 'comprehension': +switch(token){case 'if': +return new $AbstractExprCtx(new $CompIfCtx(C),false) +case 'for': +return new 
$TargetListCtx(new $CompForCtx(C))} +return $transition(C.parent,token,arguments[2]) +case 'condition': +if(token===':')return $BodyCtx(C) +$_SyntaxError(C,'token '+token+' after '+C) +case 'continue': +if(token=='eol')return C.parent +$_SyntaxError(C,'token '+token+' after '+C) +case 'ctx_manager_alias': +switch(token){case ',': +case ':': +return $transition(C.parent,token,arguments[2])} +$_SyntaxError(C,'token '+token+' after '+C) +case 'decorator': +if(token==='id' && C.tree.length===0){return $transition(new $AbstractExprCtx(C,false),token,arguments[2])} +if(token==='eol'){return $transition(C.parent,token)} +$_SyntaxError(C,'token '+token+' after '+C) +case 'def': +switch(token){case 'id': +if(C.name){$_SyntaxError(C,'token '+token+' after '+C)} +C.set_name(arguments[2]) +return C +case '(': +if(C.name===null){$_SyntaxError(C,'token '+token+' after '+C)} +C.has_args=true; +return new $FuncArgs(C) +case 'annotation': +return new $AbstractExprCtx(new $AnnotationCtx(C),true) +case ':': +if(C.has_args)return $BodyCtx(C)} +$_SyntaxError(C,'token '+token+' after '+C) +case 'del': +if(token==='eol')return $transition(C.parent,token) +$_SyntaxError(C,'token '+token+' after '+C) +case 'dict_or_set': +if(C.closed){switch(token){case '[': +return new $SubCtx(C.parent) +case '(': +return new $CallArgCtx(new $CallCtx(C)) +case 'op': +return new $AbstractExprCtx(new $OpCtx(C,arguments[2]),false)} +return $transition(C.parent,token,arguments[2])}else{if(C.expect===','){switch(token){case '}': +switch(C.real){case 'dict_or_set': +if(C.tree.length !==1)break +C.real='set' +case 'set': +case 'set_comp': +case 'dict_comp': +C.items=C.tree +C.tree=[] +C.closed=true +return C +case 'dict': +if(C.tree.length%2===0){C.items=C.tree +C.tree=[] +C.closed=true +return C}} +$_SyntaxError(C,'token '+token+' after '+C) +case ',': +if(C.real==='dict_or_set'){C.real='set'} +if(C.real==='dict' && C.tree.length%2){$_SyntaxError(C,'token '+token+' after '+C)} +C.expect='id' +return C +case ':': +if(C.real==='dict_or_set'){C.real='dict'} +if(C.real==='dict'){C.expect=',' +return new $AbstractExprCtx(C,false)}else{$_SyntaxError(C,'token '+token+' after '+C)} +case 'for': +if(C.real==='dict_or_set'){C.real='set_comp'} +else{C.real='dict_comp'} +var lst=new $ListOrTupleCtx(C,'dict_or_set_comp') +lst.intervals=[C.start+1] +lst.vars=C.vars +C.tree.pop() +lst.expression=C.tree +C.tree=[lst] +lst.tree=[] +var comp=new $ComprehensionCtx(lst) +return new $TargetListCtx(new $CompForCtx(comp))} +$_SyntaxError(C,'token '+token+' after '+C)}else if(C.expect==='id'){switch(token){case '}': +if(C.tree.length==0){ +C.items=[] +C.real='dict'}else{ +C.items=C.tree} +C.tree=[] +C.closed=true +return C +case 'id': +case 'imaginary': +case 'int': +case 'float': +case 'str': +case 'bytes': +case '[': +case '(': +case '{': +case '.': +case 'not': +case 'lambda': +C.expect=',' +var expr=new $AbstractExprCtx(C,false) +return $transition(expr,token,arguments[2]) +case 'op': +switch(arguments[2]){case '+': +return C +case '-': +case '~': +C.expect=',' +var left=new $UnaryCtx(C,arguments[2]) +if(arguments[2]=='-'){var op_expr=new $OpCtx(left,'unary_neg')} +else if(arguments[2]=='+'){var op_expr=new $OpCtx(left,'unary_pos')} +else{var op_expr=new $OpCtx(left,'unary_inv')} +return new $AbstractExprCtx(op_expr,false)} +$_SyntaxError(C,'token '+token+' after '+C)} +$_SyntaxError(C,'token '+token+' after '+C)} +return $transition(C.parent,token,arguments[2])} +break +case 'double_star_arg': +switch(token){case 'id': +case 'imaginary': +case 'int': 
+case 'float': +case 'str': +case 'bytes': +case '[': +case '(': +case '{': +case '.': +case 'not': +case 'lambda': +return $transition(new $AbstractExprCtx(C,false),token,arguments[2]) +case ',': +return C.parent +case ')': +return $transition(C.parent,token) +case ':': +if(C.parent.parent.type==='lambda'){return $transition(C.parent.parent,token)}} +$_SyntaxError(C,'token '+token+' after '+C) +case 'ellipsis': +if(token=='.'){C.nbdots++;return C} +else{if(C.nbdots!=3){$pos--;$_SyntaxError(C,'token '+token+' after '+C)}else{return $transition(C.parent,token,arguments[2])}} +case 'except': +switch(token){case 'id': +case 'imaginary': +case 'int': +case 'float': +case 'str': +case 'bytes': +case '[': +case '(': +case '{': +case 'not': +case 'lamdba': +if(C.expect==='id'){C.expect='as' +return $transition(new $AbstractExprCtx(C,false),token,arguments[2])} +case 'as': +if(C.expect==='as' && C.has_alias===undefined){C.expect='alias' +C.has_alias=true +return C} +case 'id': +if(C.expect==='alias'){C.expect=':' +C.set_alias(arguments[2]) +return C} +break +case ':': +var _ce=C.expect +if(_ce=='id' ||_ce=='as' ||_ce==':'){return $BodyCtx(C)} +break +case '(': +if(C.expect==='id' && C.tree.length===0){C.parenth=true +return C} +break +case ')': +if(C.expect==',' ||C.expect=='as'){C.expect='as' +return C} +case ',': +if(C.parenth!==undefined && C.has_alias===undefined && +(C.expect=='as' ||C.expect==',')){C.expect='id' +return C}} +$_SyntaxError(C,'token '+token+' after '+C.expect) +case 'expr': +switch(token){case 'id': +case 'imaginary': +case 'int': +case 'float': +case 'str': +case 'bytes': +case 'lamdba': +$_SyntaxError(C,'token '+token+' after '+C) +break +case '[': +case '(': +case '{': +case '.': +case 'not': +if(C.expect==='expr'){C.expect=',' +return $transition(new $AbstractExprCtx(C,false),token,arguments[2])}} +switch(token){case 'not': +if(C.expect===',')return new $ExprNot(C) +break +case 'in': +if(C.parent.type=='target_list'){ +return $transition(C.parent,token)} +if(C.expect===',')return $transition(C,'op','in') +break +case ',': +if(C.expect===','){if(C.with_commas){ +C.parent.tree.pop() +var tuple=new $ListOrTupleCtx(C.parent,'tuple') +tuple.implicit=true +tuple.has_comma=true +tuple.tree=[C] +C.parent=tuple +return tuple}} +return $transition(C.parent,token) +case '.': +return new $AttrCtx(C) +case '[': +return new $AbstractExprCtx(new $SubCtx(C),true) +case '(': +return new $CallCtx(C) +case 'op': +var op_parent=C.parent,op=arguments[2] +if(op_parent.type=='ternary' && op_parent.in_else){var new_op=new $OpCtx(C,op) +return new $AbstractExprCtx(new_op,false)} +var op1=C.parent,repl=null +while(1){if(op1.type==='expr'){op1=op1.parent} +else if(op1.type==='op' +&&$op_weight[op1.op]>=$op_weight[op] +&& !(op1.op=='**' && op=='**') +){repl=op1;op1=op1.parent}else if(op1.type=="not" && $op_weight['not']>$op_weight[op]){repl=op1;op1=op1.parent}else{break}} +if(repl===null){while(1){if(C.parent!==op1){C=C.parent +op_parent=C.parent}else{break}} +C.parent.tree.pop() +var expr=new $ExprCtx(op_parent,'operand',C.with_commas) +expr.expect=',' +C.parent=expr +var new_op=new $OpCtx(C,op) +return new $AbstractExprCtx(new_op,false)}else{ +if(op==='and' ||op==='or'){while(repl.parent.type==='not'|| +(repl.parent.type==='expr'&&repl.parent.parent.type==='not')){ +repl=repl.parent +op_parent=repl.parent}}} +if(repl.type==='op'){var _flag=false +switch(repl.op){case '<': +case '<=': +case '==': +case '!=': +case 'is': +case '>=': +case '>': +_flag=true} +if(_flag){switch(op){case '<': +case '<=': 
+case '==': +case '!=': +case 'is': +case '>=': +case '>': +var c2=repl.tree[1] +var c2_clone=new Object() +for(var attr in c2){c2_clone[attr]=c2[attr]} +while(repl.parent && repl.parent.type=='op'){if($op_weight[repl.parent.op]<$op_weight[repl.op]){repl=repl.parent}else{break}} +repl.parent.tree.pop() +var and_expr=new $OpCtx(repl,'and') +c2_clone.parent=and_expr +and_expr.tree.push('xxx') +var new_op=new $OpCtx(c2_clone,op) +return new $AbstractExprCtx(new_op,false)}}} +repl.parent.tree.pop() +var expr=new $ExprCtx(repl.parent,'operand',false) +expr.tree=[op1] +repl.parent=expr +var new_op=new $OpCtx(repl,op) +return new $AbstractExprCtx(new_op,false) +case 'augm_assign': +if(C.expect===','){return new $AbstractExprCtx(new $AugmentedAssignCtx(C,arguments[2]),true)} +break +case '=': +if(C.expect===','){if(C.parent.type==="call_arg"){return new $AbstractExprCtx(new $KwArgCtx(C),true)}else if(C.parent.type=="annotation"){return $transition(C.parent.parent,token,arguments[2])} +while(C.parent!==undefined){C=C.parent +if(C.type=='condition'){$_SyntaxError(C,'token '+token+' after '+C)}} +C=C.tree[0] +return new $AbstractExprCtx(new $AssignCtx(C),true)} +break +case 'if': +if(C.parent.type!=='comp_iterable'){ +var ctx=C +while(ctx.parent && ctx.parent.type=='op'){ctx=ctx.parent +if(ctx.type=='expr' && ctx.parent && ctx.parent.type=='op'){ctx=ctx.parent}} +return new $AbstractExprCtx(new $TernaryCtx(ctx),false)}} +return $transition(C.parent,token) +case 'expr_not': +if(token=='in'){ +C.parent.tree.pop() +return new $AbstractExprCtx(new $OpCtx(C.parent,'not_in'),false)} +$_SyntaxError(C,'token '+token+' after '+C) +case 'for': +switch(token){case 'in': +return new $AbstractExprCtx(new $ExprCtx(C,'target list',true),false) +case ':': +return $BodyCtx(C)} +$_SyntaxError(C,'token '+token+' after '+C) +case 'from': +switch(token){case 'id': +if(C.expect=='id'){C.add_name(arguments[2]) +C.expect=',' +return C} +if(C.expect==='alias'){C.aliases[C.names[C.names.length-1]]=arguments[2] +C.expect=',' +return C} +case '.': +if(C.expect=='module'){if(token=='id'){C.module +=arguments[2]} +else{C.module +='.'} +return C} +case 'import': +C.blocking=token=='import' +if(C.expect=='module'){C.expect='id' +return C} +case 'op': +if(arguments[2]=='*' && C.expect=='id' +&& C.names.length==0){if($get_scope(C).ntype!=='module'){$_SyntaxError(C,["import * only allowed at module level"])} +C.add_name('*') +C.expect='eol' +return C} +case ',': +if(C.expect==','){C.expect='id' +return C} +case 'eol': +switch(C.expect){case ',': +case 'eol': +C.bind_names() +return $transition(C.parent,token) +default: +$_SyntaxError(C,['trailing comma not allowed without surrounding parentheses'])} +case 'as': +if(C.expect==',' ||C.expect=='eol'){C.expect='alias' +return C} +case '(': +if(C.expect=='id'){C.expect='id' +return C} +case ')': +if(C.expect==',' ||C.expect=='id'){C.expect='eol' +return C}} +$_SyntaxError(C,'token '+token+' after '+C) +case 'func_arg_id': +switch(token){case '=': +if(C.expect==='='){C.parent.has_default=true +var def_ctx=C.parent.parent +if(C.parent.has_star_arg){def_ctx.default_list.push(def_ctx.after_star.pop())}else{def_ctx.default_list.push(def_ctx.positional_list.pop())} +return new $AbstractExprCtx(C,false)} +break +case ',': +case ')': +if(C.parent.has_default && C.tree.length==0 && +C.parent.has_star_arg===undefined){console.log('parent '+C.parent,C.parent) +$pos -=C.name.length +$_SyntaxError(C,['non-default argument follows default argument'])}else{return $transition(C.parent,token)} +case ':': 
+return new $AbstractExprCtx(new $AnnotationCtx(C),false)} +$_SyntaxError(C,'token '+token+' after '+C) +case 'func_args': +switch(token){case 'id': +if(C.expect==='id'){C.expect=',' +if(C.names.indexOf(arguments[2])>-1){$_SyntaxError(C,['duplicate argument '+arguments[2]+' in function definition'])}} +return new $FuncArgIdCtx(C,arguments[2]) +case ',': +if(C.has_kw_arg)$_SyntaxError(C,'duplicate kw arg') +if(C.expect===','){C.expect='id' +return C} +$_SyntaxError(C,'token '+token+' after '+C) +case ')': +return C.parent +case 'op': +var op=arguments[2] +C.expect=',' +if(op=='*'){if(C.has_star_arg){$_SyntaxError(C,'duplicate star arg')} +return new $FuncStarArgCtx(C,'*')} +if(op=='**')return new $FuncStarArgCtx(C,'**') +$_SyntaxError(C,'token '+op+' after '+C)} +$_SyntaxError(C,'token '+token+' after '+C) +case 'func_star_arg': +switch(token){case 'id': +if(C.name===undefined){if(C.parent.names.indexOf(arguments[2])>-1){$_SyntaxError(C,['duplicate argument '+arguments[2]+' in function definition'])}} +C.set_name(arguments[2]) +C.parent.names.push(arguments[2]) +return C +case ',': +case ')': +if(C.name===undefined){ +C.set_name('$dummy') +C.parent.names.push('$dummy')} +return $transition(C.parent,token) +case ':': +if(C.name===undefined){$_SyntaxError(C,'annotation on an unnamed parameter')} +return new $AbstractExprCtx(new $AnnotationCtx(C),false)} +$_SyntaxError(C,'token '+token+' after '+C) +case 'global': +switch(token){case 'id': +if(C.expect==='id'){new $IdCtx(C,arguments[2]) +C.add(arguments[2]) +C.expect=',' +return C} +break +case ',': +if(C.expect===','){C.expect='id' +return C} +break +case 'eol': +if(C.expect===','){return $transition(C.parent,token)} +break} +$_SyntaxError(C,'token '+token+' after '+C) +case 'id': +switch(token){case '=': +if(C.parent.type==='expr' && +C.parent.parent !==undefined && +C.parent.parent.type==='call_arg'){return new $AbstractExprCtx(new $KwArgCtx(C.parent),false)} +return $transition(C.parent,token,arguments[2]) +case 'op': +return $transition(C.parent,token,arguments[2]) +case 'id': +case 'str': +case 'int': +case 'float': +case 'imaginary': +$_SyntaxError(C,'token '+token+' after '+C)} +return $transition(C.parent,token,arguments[2]) +case 'import': +switch(token){case 'id': +if(C.expect==='id'){new $ImportedModuleCtx(C,arguments[2]) +C.expect=',' +return C} +if(C.expect==='qual'){C.expect=',' +C.tree[C.tree.length-1].name +='.'+arguments[2] +C.tree[C.tree.length-1].alias +='.'+arguments[2] +return C} +if(C.expect==='alias'){C.expect=',' +C.tree[C.tree.length-1].alias=arguments[2] +return C} +break +case '.': +if(C.expect===','){C.expect='qual' +return C} +break +case ',': +if(C.expect===','){C.expect='id' +return C} +break +case 'as': +if(C.expect===','){C.expect='alias' +return C} +break +case 'eol': +if(C.expect===','){C.bind_names() +return $transition(C.parent,token)} +break} +$_SyntaxError(C,'token '+token+' after '+C) +case 'imaginary': +case 'int': +case 'float': +switch(token){case 'id': +case 'imaginary': +case 'int': +case 'float': +case 'str': +case 'bytes': +case '[': +case '(': +case '{': +case 'not': +case 'lamdba': +$_SyntaxError(C,'token '+token+' after '+C)} +return $transition(C.parent,token,arguments[2]) +case 'kwarg': +if(token===',')return new $CallArgCtx(C.parent.parent) +return $transition(C.parent,token) +case 'lambda': +if(token===':' && C.args===undefined){C.args=C.tree +C.tree=[] +C.body_start=$pos +return new $AbstractExprCtx(C,false)} +if(C.args!==undefined){ +C.body_end=$pos +return $transition(C.parent,token)} 
+if(C.args===undefined){return $transition(new $CallCtx(C),token,arguments[2])} +$_SyntaxError(C,'token '+token+' after '+C) +case 'list_or_tuple': +if(C.closed){if(token==='[')return new $SubCtx(C.parent) +if(token==='(')return new $CallCtx(C) +return $transition(C.parent,token,arguments[2])}else{if(C.expect===','){switch(C.real){case 'tuple': +case 'gen_expr': +if(token===')'){C.closed=true +if(C.real==='gen_expr'){C.intervals.push($pos)} +return C.parent} +break +case 'list': +case 'list_comp': +if(token===']'){C.closed=true +if(C.real==='list_comp'){C.intervals.push($pos)} +return C} +break +case 'dict_or_set_comp': +if(token==='}'){C.intervals.push($pos) +return $transition(C.parent,token)} +break} +switch(token){case ',': +if(C.real==='tuple'){C.has_comma=true} +C.expect='id' +return C +case 'for': +if(C.real==='list'){C.real='list_comp'} +else{C.real='gen_expr'} +C.intervals=[C.start+1] +C.expression=C.tree +C.tree=[] +var comp=new $ComprehensionCtx(C) +return new $TargetListCtx(new $CompForCtx(comp))} +return $transition(C.parent,token,arguments[2])}else if(C.expect==='id'){switch(C.real){case 'tuple': +if(token===')'){C.closed=true +return C.parent} +if(token=='eol' && C.implicit===true){C.closed=true +return $transition(C.parent,token)} +break +case 'gen_expr': +if(token===')'){C.closed=true +return $transition(C.parent,token)} +break +case 'list': +if(token===']'){C.closed=true +return C} +break} +switch(token){case '=': +if(C.real=='tuple' && C.implicit===true){C.closed=true +C.parent.tree.pop() +var expr=new $ExprCtx(C.parent,'tuple',false) +expr.tree=[C] +C.parent=expr +return $transition(C.parent,token)} +break +case ')': +break +case ']': +if(C.real=='tuple' && C.implicit===true){ +return $transition(C.parent,token,arguments[2])}else{break} +case ',': +$_SyntaxError(C,'unexpected comma inside list') +default: +C.expect=',' +var expr=new $AbstractExprCtx(C,false) +return $transition(expr,token,arguments[2])}}else{return $transition(C.parent,token,arguments[2])}} +case 'list_comp': +switch(token){case ']': +return C.parent +case 'in': +return new $ExprCtx(C,'iterable',true) +case 'if': +return new $ExprCtx(C,'condition',true)} +$_SyntaxError(C,'token '+token+' after '+C) +case 'node': +switch(token){case 'id': +case 'imaginary': +case 'int': +case 'float': +case 'str': +case 'bytes': +case '[': +case '(': +case '{': +case 'not': +case 'lamdba': +case '.': +var expr=new $AbstractExprCtx(C,true) +return $transition(expr,token,arguments[2]) +case 'op': +switch(arguments[2]){case '*': +case '+': +case '-': +case '~': +var expr=new $AbstractExprCtx(C,true) +return $transition(expr,token,arguments[2])} +break +case 'class': +return new $ClassCtx(C) +case 'continue': +return new $ContinueCtx(C) +case '__debugger__': +return new $DebuggerCtx(C) +case 'break': +return new $BreakCtx(C) +case 'def': +return new $DefCtx(C) +case 'for': +return new $TargetListCtx(new $ForExpr(C)) +case 'if': +case 'while': +return new $AbstractExprCtx(new $ConditionCtx(C,token),false) +case 'elif': +var previous=$previous(C) +if(['condition'].indexOf(previous.type)==-1 || +previous.token=='while'){$_SyntaxError(C,'elif after '+previous.type)} +return new $AbstractExprCtx(new $ConditionCtx(C,token),false) +case 'else': +var previous=$previous(C) +if(['condition','except','for'].indexOf(previous.type)==-1){$_SyntaxError(C,'else after '+previous.type)} +return new $SingleKwCtx(C,token) +case 'finally': +var previous=$previous(C) +if(['try','except'].indexOf(previous.type)==-1 && +(previous.type!='single_kw' 
||previous.token!='else')){$_SyntaxError(C,'finally after '+previous.type)} +return new $SingleKwCtx(C,token) +case 'try': +return new $TryCtx(C) +case 'except': +var previous=$previous(C) +if(['try','except'].indexOf(previous.type)==-1){$_SyntaxError(C,'except after '+previous.type)} +return new $ExceptCtx(C) +case 'assert': +return new $AbstractExprCtx(new $AssertCtx(C),'assert',true) +case 'from': +return new $FromCtx(C) +case 'import': +return new $ImportCtx(C) +case 'global': +return new $GlobalCtx(C) +case 'nonlocal': +return new $NonlocalCtx(C) +case 'lambda': +return new $LambdaCtx(C) +case 'pass': +return new $PassCtx(C) +case 'raise': +return new $RaiseCtx(C) +case 'return': +return new $AbstractExprCtx(new $ReturnCtx(C),true) +case 'with': +return new $AbstractExprCtx(new $WithCtx(C),false) +case 'yield': +return new $AbstractExprCtx(new $YieldCtx(C),true) +case 'del': +return new $AbstractExprCtx(new $DelCtx(C),true) +case '@': +return new $DecoratorCtx(C) +case 'eol': +if(C.tree.length===0){ +C.node.parent.children.pop() +return C.node.parent.C} +return C} +$_SyntaxError(C,'token '+token+' after '+C) +case 'not': +switch(token){case 'in': +C.parent.parent.tree.pop() +return new $ExprCtx(new $OpCtx(C.parent,'not_in'),'op',false) +case 'id': +case 'imaginary': +case 'int': +case 'float': +case 'str': +case 'bytes': +case '[': +case '(': +case '{': +case '.': +case 'not': +case 'lamdba': +var expr=new $AbstractExprCtx(C,false) +return $transition(expr,token,arguments[2]) +case 'op': +var a=arguments[2] +if('+'==a ||'-'==a ||'~'==a){var expr=new $AbstractExprCtx(C,false) +return $transition(expr,token,arguments[2])}} +return $transition(C.parent,token) +case 'op': +if(C.op===undefined){$_SyntaxError(C,['C op undefined '+C])} +if(C.op.substr(0,5)=='unary'){if(C.parent.type=='assign' ||C.parent.type=='return'){ +C.parent.tree.pop() +var t=new $ListOrTupleCtx(C.parent,'tuple') +t.tree.push(C) +C.parent=t +return t}} +switch(token){case 'id': +case 'imaginary': +case 'int': +case 'float': +case 'str': +case 'bytes': +case '[': +case '(': +case '{': +case '.': +case 'not': +case 'lamdba': +return $transition(new $AbstractExprCtx(C,false),token,arguments[2]) +case 'op': +switch(arguments[2]){case '+': +case '-': +case '~': +return new $UnaryCtx(C,arguments[2])} +default: +if(C.tree[C.tree.length-1].type=='abstract_expr'){$_SyntaxError(C,'token '+token+' after '+C)}} +var t0=C.tree[0],t1=C.tree[1] +if(t0.tree && t1.tree){t0=t0.tree[0] +t1=t1.tree[0]} +return $transition(C.parent,token) +case 'packed': +if(token==='id'){new $IdCtx(C,arguments[2]) +C.parent.expect=',' +return C.parent} +$_SyntaxError(C,'token '+token+' after '+C) +case 'pass': +if(token==='eol')return C.parent +$_SyntaxError(C,'token '+token+' after '+C) +case 'raise': +switch(token){case 'id': +if(C.tree.length===0){return new $IdCtx(new $ExprCtx(C,'exc',false),arguments[2])} +break +case 'from': +if(C.tree.length>0){return new $AbstractExprCtx(C,false)} +break +case 'eol': +return $transition(C.parent,token)} +$_SyntaxError(C,'token '+token+' after '+C) +case 'return': +var no_args=C.tree[0].type=='abstract_expr' +if(!no_args){var scope=$get_scope(C) +if(scope.ntype=='generator'){$_SyntaxError(C,["'return' with argument inside generator"])} +scope.has_return_with_arguments=true} +return $transition(C.parent,token) +case 'single_kw': +if(token===':')return $BodyCtx(C) +$_SyntaxError(C,'token '+token+' after '+C) +case 'star_arg': +switch(token){case 'id': +if(C.parent.type=="target_list"){C.tree.push(arguments[2]) 
+C.parent.expect=',' +console.log('return parent',C.parent) +return C.parent} +return $transition(new $AbstractExprCtx(C,false),token,arguments[2]) +case 'imaginary': +case 'int': +case 'float': +case 'str': +case 'bytes': +case '[': +case '(': +case '{': +case 'not': +case 'lamdba': +return $transition(new $AbstractExprCtx(C,false),token,arguments[2]) +case ',': +return $transition(C.parent,token) +case ')': +return $transition(C.parent,token) +case ':': +if(C.parent.parent.type==='lambda'){return $transition(C.parent.parent,token)}} +$_SyntaxError(C,'token '+token+' after '+C) +case 'str': +switch(token){case '[': +return new $AbstractExprCtx(new $SubCtx(C.parent),false) +case '(': +C.parent.tree[0]=C +return new $CallCtx(C.parent) +case 'str': +C.tree.push(arguments[2]) +return C} +return $transition(C.parent,token,arguments[2]) +case 'sub': +switch(token){case 'id': +case 'imaginary': +case 'int': +case 'float': +case 'str': +case 'bytes': +case '[': +case '(': +case '{': +case '.': +case 'not': +case 'lamdba': +var expr=new $AbstractExprCtx(C,false) +return $transition(expr,token,arguments[2]) +case ']': +return C.parent +case ':': +if(C.tree.length==0){new $AbstractExprCtx(C,false)} +return new $AbstractExprCtx(C,false)} +$_SyntaxError(C,'token '+token+' after '+C) +case 'target_list': +switch(token){case 'id': +if(C.expect==='id'){C.expect=',' +return new $IdCtx(new $ExprCtx(C,'target',false),arguments[2])} +case 'op': +if(C.expect=='id' && arguments[2]=='*'){ +return new $PackedCtx(C)} +case '(': +case '[': +if(C.expect==='id'){C.expect=',' +return new $TargetListCtx(C)} +case ')': +case ']': +if(C.expect===',')return C.parent +case ',': +if(C.expect==','){C.expect='id' +return C}} +if(C.expect===','){return $transition(C.parent,token,arguments[2])}else if(token=='in'){ +return $transition(C.parent,token,arguments[2])} +$_SyntaxError(C,'token '+token+' after '+C) +case 'ternary': +if(token==='else'){C.in_else=true +return new $AbstractExprCtx(C,false)} +return $transition(C.parent,token,arguments[2]) +case 'try': +if(token===':')return $BodyCtx(C) +$_SyntaxError(C,'token '+token+' after '+C) +case 'unary': +switch(token){case 'int': +case 'float': +case 'imaginary': +console.log(token,arguments[2],'after',C) +var expr=C.parent +C.parent.parent.tree.pop() +var value=arguments[2] +if(C.op==='-'){value="-"+value} +else if(C.op==='~'){value=~value} +return $transition(C.parent.parent,token,value) +case 'id': +C.parent.parent.tree.pop() +var expr=new $ExprCtx(C.parent.parent,'call',false) +var expr1=new $ExprCtx(expr,'id',false) +new $IdCtx(expr1,arguments[2]) +if(true){ +var repl=new $AttrCtx(expr) +if(C.op==='+'){repl.name='__pos__'} +else if(C.op==='-'){repl.name='__neg__'} +else{repl.name='__invert__'} +var call=new $CallCtx(expr) +return expr1} +return C.parent +case 'op': +if('+'==arguments[2]||'-'==arguments[2]){var op=arguments[2] +if(C.op===op){C.op='+'}else{C.op='-'} +return C}} +return $transition(C.parent,token,arguments[2]) +case 'with': +switch(token){case 'id': +if(C.expect==='id'){C.expect='as' +return $transition(new $AbstractExprCtx(C,false),token,arguments[2])} +if(C.expect==='alias'){if(C.parenth!==undefined){C.expect=','} +else{C.expect=':'} +C.set_alias(arguments[2]) +return C} +break +case 'as': +return new $AbstractExprCtx(new $AliasCtx(C)) +case ':': +switch(C.expect){case 'id': +case 'as': +case ':': +return $BodyCtx(C)} +break +case '(': +if(C.expect==='id' && C.tree.length===0){C.parenth=true +return C}else 
if(C.expect=='alias'){console.log('C',C,'token',token) +C.expect=':' +return new $TargetListCtx(C,false)} +break +case ')': +if(C.expect==',' ||C.expect=='as'){C.expect=':' +return C} +break +case ',': +if(C.parenth!==undefined && C.has_alias===undefined && +(C.expect==',' ||C.expect=='as')){C.expect='id' +return C}else if(C.expect=='as'){C.expect='id' +return C}else if(C.expect==':'){C.expect='id' +return C} +break} +$_SyntaxError(C,'token '+token+' after '+C.expect) +case 'yield': +if(token=='from'){ +if(C.tree[0].type!='abstract_expr'){ +$_SyntaxError(C,"'from' must follow 'yield'")} +C.from=true +C.tree=[] +return new $AbstractExprCtx(C,true)} +return $transition(C.parent,token)}} +$B.forbidden=['case','catch','constructor','Date','delete','default','enum','eval','extends','Error','history','function','location','Math','new','null','Number','RegExp','super','this','throw','var','toString'] +var s_escaped='abfnrtvxuU"'+"'"+'\\',is_escaped={} +for(var i=0;i0)indent+=8-indent%8}else{break}} +var _s=src.charAt(pos) +if(_s=='\n'){pos++;lnum++;indent=null;continue} +else if(_s==='#'){ +var offset=src.substr(pos).search(/\n/) +if(offset===-1){break} +pos+=offset+1;lnum++;indent=null;continue} +new_node.indent=indent +new_node.line_num=lnum +new_node.module=module +if(indent>current.indent){ +if(C!==null){if($indented.indexOf(C.tree[0].type)==-1){$pos=pos +$_SyntaxError(C,'unexpected indent',pos)}} +current.add(new_node)}else if(indent<=current.indent && +$indented.indexOf(C.tree[0].type)>-1 && +C.tree.length<2){$pos=pos +$_SyntaxError(C,'expected an indented block',pos)}else{ +while(indent!==current.indent){current=current.parent +if(current===undefined ||indent>current.indent){$pos=pos +$_SyntaxError(C,'unexpected indent',pos)}} +current.parent.add(new_node)} +current=new_node +C=new $NodeCtx(new_node) +continue} +if(car=="#"){var end=src.substr(pos+1).search('\n') +if(end==-1){end=src.length-1} +pos +=end+1;continue} +if(car=='"' ||car=="'"){var raw=C.type=='str' && C.raw,bytes=false ,end=null; +if(string_modifier){switch(string_modifier){case 'r': +raw=true +break +case 'u': +break +case 'b': +bytes=true +break +case 'rb': +case 'br': +bytes=true;raw=true +break} +string_modifier=false} +if(src.substr(pos,3)==car+car+car){_type="triple_string";end=pos+3} +else{_type="string";end=pos+1} +var escaped=false +var zone=car +var found=false +while(end-1){$pos=pos-name.length +if(unsupported.indexOf(name)>-1){$_SyntaxError(C,"Unsupported Python keyword '"+name+"'")} +C=$transition(C,name)}else if($operators[name]!==undefined +&& $B.forbidden.indexOf(name)==-1){ +if(name=='is'){ +var re=/^\s+not\s+/ +var res=re.exec(src.substr(pos)) +if(res!==null){pos +=res[0].length +$pos=pos-name.length +C=$transition(C,'op','is_not')}else{$pos=pos-name.length +C=$transition(C,'op',name)}}else if(name=='not'){ +var re=/^\s+in\s+/ +var res=re.exec(src.substr(pos)) +if(res!==null){pos +=res[0].length +$pos=pos-name.length +C=$transition(C,'op','not_in')}else{$pos=pos-name.length +C=$transition(C,name)}}else{$pos=pos-name.length +C=$transition(C,'op',name)}}else if((src.charAt(pos)=='"'||src.charAt(pos)=="'") +&&['r','b','u','rb','br'].indexOf(name.toLowerCase())!==-1){string_modifier=name.toLowerCase() +name="" +continue}else{ +if($B.forbidden.indexOf(name)>-1){name='$$'+name} +$pos=pos-name.length +C=$transition(C,'id',name)} +name="" +continue}} +switch(car){case ' ': +case '\t': +pos++ +break +case '.': +if(pos-1){j++} +C=$transition(C,'float','0'+src.substr(pos,j-pos)) +pos=j +break} +$pos=pos 
+C=$transition(C,'.') +pos++ +break +case '0': +var res=hex_pattern.exec(src.substr(pos)) +if(res){C=$transition(C,'int',[16,res[1]]) +pos +=res[0].length +break} +var res=octal_pattern.exec(src.substr(pos)) +if(res){C=$transition(C,'int',[8,res[1]]) +pos +=res[0].length +break} +var res=binary_pattern.exec(src.substr(pos)) +if(res){C=$transition(C,'int',[2,res[1]]) +pos +=res[0].length +break} +if(src.charAt(pos+1).search(/\d/)>-1){ +if(parseInt(src.substr(pos))===0){res=int_pattern.exec(src.substr(pos)) +$pos=pos +C=$transition(C,'int',[10,res[0]]) +pos +=res[0].length +break}else{$_SyntaxError(C,('invalid literal starting with 0'))}} +case '0': +case '1': +case '2': +case '3': +case '4': +case '5': +case '6': +case '7': +case '8': +case '9': +var res=float_pattern1.exec(src.substr(pos)) +if(res){$pos=pos +if(res[2]!==undefined){C=$transition(C,'imaginary',res[0].substr(0,res[0].length-1))}else{C=$transition(C,'float',res[0])}}else{res=float_pattern2.exec(src.substr(pos)) +if(res){$pos=pos +if(res[2]!==undefined){C=$transition(C,'imaginary',res[0].substr(0,res[0].length-1))}else{C=$transition(C,'float',res[0])}}else{res=int_pattern.exec(src.substr(pos)) +$pos=pos +if(res[1]!==undefined){C=$transition(C,'imaginary',res[0].substr(0,res[0].length-1))}else{C=$transition(C,'int',[10,res[0]])}}} +pos +=res[0].length +break +case '\n': +lnum++ +if(br_stack.length>0){ +pos++;}else{ +if(current.C.tree.length>0){$pos=pos +C=$transition(C,'eol') +indent=null +new_node=new $Node()}else{new_node.line_num=lnum} +pos++} +break +case '(': +case '[': +case '{': +br_stack +=car +br_pos[br_stack.length-1]=[C,pos] +$pos=pos +C=$transition(C,car) +pos++ +break +case ')': +case ']': +case '}': +if(br_stack==""){$_SyntaxError(C,"Unexpected closing bracket")}else if(br_close[car]!=br_stack.charAt(br_stack.length-1)){$_SyntaxError(C,"Unbalanced bracket")}else{ +br_stack=br_stack.substr(0,br_stack.length-1) +$pos=pos +C=$transition(C,car) +pos++} +break +case '=': +if(src.charAt(pos+1)!="="){$pos=pos +C=$transition(C,'=') +pos++;}else{ +$pos=pos +C=$transition(C,'op','==') +pos+=2} +break +case ',': +case ':': +$pos=pos +C=$transition(C,car) +pos++ +break +case ';': +$transition(C,'eol') +if(current.C.tree.length===0){ +$pos=pos +$_SyntaxError(C,'invalid syntax')} +var pos1=pos+1 +var ends_line=false +while(pos1': +case '<': +case '-': +case '+': +case '*': +case '/': +case '^': +case '=': +case '|': +case '~': +case '!': +if(car=='-' && src.charAt(pos+1)=='>'){C=$transition(C,'annotation') +pos +=2 +continue} +var op_match="" +for(var op_sign in $operators){if(op_sign==src.substr(pos,op_sign.length) +&& op_sign.length>op_match.length){op_match=op_sign}} +$pos=pos +if(op_match.length>0){if(op_match in $augmented_assigns){C=$transition(C,'augm_assign',op_match)}else{C=$transition(C,'op',op_match)} +pos +=op_match.length}else{$_SyntaxError(C,'invalid character: '+car)} +break +case '\\': +if(src.charAt(pos+1)=='\n'){lnum++ +pos+=2 +break} +case '@': +$pos=pos +C=$transition(C,car) +pos++ +break +default: +$pos=pos;$_SyntaxError(C,'unknown token ['+car+']')}} +if(br_stack.length!=0){var br_err=br_pos[0] +$pos=br_err[1] +$_SyntaxError(br_err[0],["Unbalanced bracket "+br_stack.charAt(br_stack.length-1)])} +if(C!==null && $indented.indexOf(C.tree[0].type)>-1){$pos=pos-1 +$_SyntaxError(C,'expected an indented block',pos)} +return root} +$B.py2js=function(src,module,locals_id,parent_block_id,line_info){ +var t0=new Date().getTime() +src=src.replace(/\r\n/gm,'\n') +if(src.charAt(src.length-1)!="\n"){src+='\n'} +var 
locals_is_module=Array.isArray(locals_id) +if(locals_is_module){locals_id=locals_id[0]} +var internal=locals_id.charAt(0)=='$' +var local_ns='$locals_'+locals_id.replace(/\./g,'_') +var global_ns='$locals_'+module.replace(/\./g,'_') +$B.bound[module]=$B.bound[module]||{} +$B.bound[module]['__doc__']=true +$B.bound[module]['__name__']=true +$B.bound[module]['__file__']=true +$B.type[module]=$B.type[module]||{} +$B.type[locals_id]=$B.type[locals_id]||{} +$B.$py_src[locals_id]=$B.$py_src[locals_id]||src +var root=$tokenize(src,module,locals_id,parent_block_id,line_info) +root.transform() +var js=['var $B = __BRYTHON__;\n'],pos=1 +js[pos++]='eval(__BRYTHON__.InjectBuiltins());\n\n' +js[pos]='var ' +if(locals_is_module){js[pos]+=local_ns+'=$locals_'+module+', '}else if(!internal){js[pos]+=local_ns+'=$B.imported["'+locals_id+'"] || {}, '} +js[pos]+='$locals='+local_ns+';' +var offset=0 +root.insert(0,$NodeJS(js.join(''))) +offset++ +var ds_node=new $Node() +new $NodeJSCtx(ds_node,local_ns+'["__doc__"]='+(root.doc_string||'None')+';') +root.insert(offset++,ds_node) +var name_node=new $Node() +var lib_module=module +new $NodeJSCtx(name_node,local_ns+'["__name__"]='+local_ns+'["__name__"] || "'+locals_id+'";') +root.insert(offset++,name_node) +var file_node=new $Node() +new $NodeJSCtx(file_node,local_ns+'["__file__"]="'+$B.$py_module_path[module]+'";None;\n') +root.insert(offset++,file_node) +var enter_frame_pos=offset +root.insert(offset++,$NodeJS('$B.enter_frame(["'+locals_id+'", '+local_ns+','+ +'"'+module+'", '+global_ns+']);\n')) +var try_node=new $Node(),children=root.children.slice(enter_frame_pos+1,root.children.length),ctx=new $NodeCtx(try_node) +root.insert(enter_frame_pos+1,try_node) +new $TryCtx(ctx) +if(children.length==0){children=[$NodeJS('')]} +for(var i=0;i0){$add_line_num(root,null,module)} +if($B.debug>=2){var t1=new Date().getTime() +console.log('module '+module+' translated in '+(t1 - t0)+' ms')} +return root} +function load_scripts(scripts,run_script,onerror){ +if(run_script===undefined){run_script=$B._run_script;} +function callback(ev,script){var ok=false,skip=false; +if(ev !==null){req=ev.target +if(req.readyState==4){if(req.status==200){ok=true; +script={name:req.module_name,url:req.responseURL,src:req.responseText};}} +else{ +skip=true;}} +else{ +ok=true;} +if(skip){return;} +if(ok){try{ +run_script(script)} +catch(e){if(onerror===undefined){throw e;} +else{onerror(e);}} +if(scripts.length>0){load_scripts(scripts)}}else{try{ +throw Error("cannot load script "+ +req.module_name+' at '+req.responseURL+ +': error '+req.status)} +catch(e){if(onerror===undefined){throw e;} +else{onerror(e);}}}} +var noajax=true +while(scripts.length>0 && noajax){var script=scripts.shift() +if(script['src']===undefined){ +noajax=false; +var req=new XMLHttpRequest() +req.onreadystatechange=callback +req.module_name=script.name +req.open('GET',script.url,true) +req.send()}else{ +callback(null,script) +load_scripts(scripts)}}} +$B._load_scripts=load_scripts; +function run_script(script){ +$B.$py_module_path[script.name]=script.url +try{ +var $root=$B.py2js(script.src,script.name,script.name,'__builtins__') +var $js=$root.to_js() +if($B.debug>1){console.log($js)} +eval($js) +$B.imported[script.name]=$locals}catch($err){if($B.debug>1){console.log($err) +for(var attr in $err){console.log(attr+' : ',$err[attr])}} +if($err.$py_error===undefined){console.log('Javascript error',$err) +$err=_b_.RuntimeError($err+'')} +var name=$err.__name__ +var $trace=_b_.getattr($err,'info')+'\n'+name+': ' 
+if(name=='SyntaxError' ||name=='IndentationError'){$trace +=$err.args[0]}else{$trace +=$err.args} +try{_b_.getattr($B.stderr,'write')($trace)}catch(print_exc_err){console.log($trace)} +throw $err}finally{$B.clear_ns(script.name)}} +$B._run_script=run_script; +function brython(options){var _b_=$B.builtins +if($B.meta_path===undefined){$B.meta_path=[]} +$B.$options={} +if(options===undefined)options={'debug':0} +if(typeof options==='number')options={'debug':options} +if(options.debug===undefined){options.debug=0 } +$B.debug=options.debug +_b_.__debug__=$B.debug>0 +if(options.static_stdlib_import===undefined){options.static_stdlib_import=true} +$B.static_stdlib_import=options.static_stdlib_import +if(options.open !==undefined){_b_.open=options.open; +console.log("DeprecationWarning: \'open\' option of \'brython\' function will be deprecated in future versions of Brython.");} +$B.$options=options +var meta_path=[] +var path_hooks=[] +if($B.use_VFS){meta_path.push($B.$meta_path[0]) +path_hooks.push($B.$path_hooks[0])} +if(options.static_stdlib_import!==false){ +meta_path.push($B.$meta_path[1]) +if($B.path.length>3){$B.path.shift() +$B.path.shift()}} +meta_path.push($B.$meta_path[2]) +$B.meta_path=meta_path +path_hooks.push($B.$path_hooks[1]) +$B.path_hooks=path_hooks +if(options.ipy_id!==undefined){var $elts=[]; +for(var $i=0;$i1)console.log($js) +if($B.async_enabled){$js=$B.execution_object.source_conversion($js) +eval($js)}else{ +eval($js)}}catch($err){if($B.debug>1){console.log($err) +for(var attr in $err){console.log(attr+' : ',$err[attr])}} +if($err.$py_error===undefined){console.log('Javascript error',$err) +$err=_b_.RuntimeError($err+'')} +var $trace=_b_.getattr($err,'info')+'\n'+$err.__name__+ +': ' +$err.args +try{_b_.getattr($B.stderr,'write')($trace)}catch(print_exc_err){console.log($trace)} +throw $err}}else{ +var defined_ids={} +for(var i=0;i<$elts.length;i++){var elt=$elts[i] +if(elt.id){if(defined_ids[elt.id]){throw Error("Brython error : Found 2 scripts with the same id '"+ +elt.id+"'")}else{defined_ids[elt.id]=true}}} +var scripts=[] +for(var $i=0;$i<$elts.length;$i++){var $elt=$elts[$i] +if($elt.type=="text/python"||$elt.type==="text/python3"){if($elt.id){module_name=$elt.id} +else{if(first_script){module_name='__main__';first_script=false} +else{module_name='__main__'+$B.UUID()} +while(defined_ids[module_name]!==undefined){module_name='__main__'+$B.UUID()}} +$B.scripts.push(module_name) +var $src=null +if($elt.src){ +scripts.push({name:module_name,url:$elt.src})}else{ +var $src=($elt.innerHTML ||$elt.textContent) +$B.$py_module_path[module_name]=$href +scripts.push({name: module_name,src: $src,url: $href})}}}} +if(options.ipy_id===undefined){$B._load_scripts(scripts)}} +$B.$operators=$operators +$B.$Node=$Node +$B.$NodeJSCtx=$NodeJSCtx +$B.brython=brython})(__BRYTHON__) +var brython=__BRYTHON__.brython + +__BRYTHON__.$__new__=function(factory){return function(cls){ +var res=factory.apply(null,[]) +res.__class__=cls.$dict +var init_func=null +try{init_func=__BRYTHON__.builtins.getattr(res,'__init__')} +catch(err){} +if(init_func!==null){var args=[],pos=0 +for(var i=1,_len_i=arguments.length;i < _len_i;i++){args[pos++]=arguments[i]} +init_func.apply(null,args) +res.__initialized__=true} +return res}} +__BRYTHON__.builtins.object=(function($B){var _b_=$B.builtins +var $ObjectDict={ +__name__:'object',$native:true} +var reverse_func={'__lt__':'__gt__','__gt__':'__lt__','__le__': '__ge__','__ge__': '__le__'} +var $ObjectNI=function(name,op){return function(self,other){var 
klass=$B.get_class(other),other_comp=_b_.getattr(klass,reverse_func[name]) +if(other_comp.__func__===$ObjectDict[reverse_func[name]]){throw _b_.TypeError('unorderable types: object() '+op+ +' '+ _b_.str($B.get_class(other).__name__)+'()')}else{return other_comp(other,self)}}} +var opnames=['add','sub','mul','truediv','floordiv','mod','pow','lshift','rshift','and','xor','or'] +var opsigns=['+','-','*','/','//','%','**','<<','>>','&','^','|'] +$ObjectDict.__delattr__=function(self,attr){_b_.getattr(self,attr) +delete self[attr]; +return _b_.None} +$ObjectDict.__dir__=function(self){var objects=[self],pos=1 +var mro=$B.get_class(self).__mro__ +for(var i=0,_len_i=mro.length;i < _len_i;i++){objects[pos++]=mro[i]} +var res=[],pos=0 +for(var i=0,_len_i=objects.length;i < _len_i;i++){for(var attr in objects[i]){ +if(attr.charAt(0)=='$' && attr.charAt(1)!='$'){ +continue} +if(!isNaN(parseInt(attr.charAt(0)))){ +continue} +if(attr=='__mro__'){continue} +res[pos++]=attr}} +res=_b_.list(_b_.set(res)) +_b_.list.$dict.sort(res) +return res} +$ObjectDict.__eq__=function(self,other){ +var _class=$B.get_class(self) +if(_class.$native ||_class.__name__=='function'){var _class1=$B.get_class(other) +if(!_class1.$native && _class1.__name__ !='function'){return _b_.getattr(other,'__eq__')(self)}} +return self===other} +$ObjectDict.__format__=function(){var $=$B.args('__format__',2,{self:null,spec:null},['self','spec'],arguments,{},null,null) +if($.spec!==''){throw _b_.TypeError("non-empty format string passed to object.__format__")} +return _b_.getattr($.self,'__repr__')()} +$ObjectDict.__ge__=$ObjectNI('__ge__','>=') +$ObjectDict.__getattribute__=function(obj,attr){var klass=$B.get_class(obj) +if(attr==='__class__'){return klass.$factory} +var res=obj[attr],args=[] +if(res===undefined){ +var mro=klass.__mro__ +for(var i=0,_len_i=mro.length;i < _len_i;i++){if(mro[i].$methods){var method=mro[i].$methods[attr] +if(method!==undefined){return method(obj)}} +var v=mro[i][attr] +if(v!==undefined){res=v +break}else if(attr=='__str__' && mro[i]['__repr__']!==undefined){ +res=mro[i]['repr'] +break}}}else{if(res.__set__===undefined){ +return res}} +if(res!==undefined){if(res.__class__===_b_.property.$dict){return res.__get__(res,obj,klass)} +var __get__=_b_.getattr(res,'__get__',null) +if(__get__!==null){try{return __get__.apply(null,[obj,klass])} +catch(err){console.log('error in get.apply',err) +console.log(__get__+'') +throw err}} +if(typeof res=='object'){if(__get__ &&(typeof __get__=='function')){get_func=function(x,y){return __get__.apply(x,[y,klass])}}} +if(__get__===null &&(typeof res=='function')){__get__=function(x){return x}} +if(__get__!==null){ +res.__name__=attr +if(attr=='__new__'){res.$type='staticmethod'} +var res1=__get__.apply(null,[res,obj,klass]) +if(typeof res1=='function'){ +if(res1.__class__===$B.$factory)return res +else if(res1.__class__===$B.$MethodDict){return res} +return $B.make_method(attr,klass,res,res1)(obj)}else{ +return res1}} +return res}else{ +var _ga=obj['__getattr__'] +if(_ga===undefined){var mro=klass.__mro__ +if(mro===undefined){console.log('in getattr mro undefined for '+obj)} +for(var i=0,_len_i=mro.length;i < _len_i;i++){var v=mro[i]['__getattr__'] +if(v!==undefined){_ga=v +break}}} +if(_ga!==undefined){try{return _ga(obj,attr)} +catch(err){}} +if(attr.substr(0,2)=='__' && attr.substr(attr.length-2)=='__'){var attr1=attr.substr(2,attr.length-4) +var rank=opnames.indexOf(attr1) +if(rank > -1){var rop='__r'+opnames[rank]+'__' +var func=function(){try{ 
+if($B.get_class(arguments[0])===klass){throw Error('')} +return _b_.getattr(arguments[0],rop)(obj)}catch(err){var msg="unsupported operand types for "+ +opsigns[rank]+": '"+ klass.__name__+"' and '"+ +$B.get_class(arguments[0]).__name__+"'" +throw _b_.TypeError(msg)}} +func.$infos={__name__ : klass.__name__+'.'+attr} +return func}}}} +$ObjectDict.__gt__=$ObjectNI('__gt__','>') +$ObjectDict.__hash__=function(self){$B.$py_next_hash--; +return $B.$py_next_hash;} +$ObjectDict.__init__=function(){return _b_.None} +$ObjectDict.__le__=$ObjectNI('__le__','<=') +$ObjectDict.__lt__=$ObjectNI('__lt__','<') +$ObjectDict.__mro__=[$ObjectDict] +$ObjectDict.__new__=function(cls){if(cls===undefined){throw _b_.TypeError('object.__new__(): not enough arguments')} +return{__class__ : cls.$dict}} +$ObjectDict.__ne__=function(self,other){return !_b_.getattr(self,'__eq__')(other)} +$ObjectDict.__or__=function(self,other){if(_b_.bool(self))return self +return other} +$ObjectDict.__repr__=function(self){if(self===object)return "" +if(self.__class__===$B.$factory)return "" +if(self.__class__.__module__!==undefined){return "<"+self.__class__.__module__+"."+self.__class__.__name__+" object>"}else{return "<"+self.__class__.__name__+" object>"}} +$ObjectDict.__setattr__=function(self,attr,val){if(val===undefined){ +throw _b_.TypeError("can't set attributes of built-in/extension type 'object'")}else if(self.__class__===$ObjectDict){ +if($ObjectDict[attr]===undefined){throw _b_.AttributeError("'object' object has no attribute '"+attr+"'")}else{throw _b_.AttributeError("'object' object attribute '"+attr+"' is read-only")}} +self[attr]=val +return _b_.None} +$ObjectDict.__setattr__.__str__=function(){return 'method object.setattr'} +$ObjectDict.__str__=$ObjectDict.__repr__ +$ObjectDict.__subclasshook__=function(){return _b_.NotImplemented} +function object(){return{__class__:$ObjectDict}} +object.$dict=$ObjectDict +$ObjectDict.$factory=object +object.__repr__=object.__str__=function(){return ""} +$B.make_class=function(class_obj){ +function A(){var res={__class__:A.$dict} +if(class_obj.init){class_obj.init.apply(null,[res].concat(Array.prototype.slice.call(arguments)))} +return res} +A.__class__=$B.$factory +A.$dict={$factory: A,__class__: $B.type,__name__: class_obj.name} +A.$dict.__mro__=[A.$dict,object.$dict] +return A} +return object})(__BRYTHON__) +;(function($B){var _b_=$B.builtins +$B.$class_constructor=function(class_name,class_obj,parents,parents_names,kwargs){var cl_dict=_b_.dict(),bases=null +for(var attr in class_obj){ +cl_dict.$string_dict[attr]=class_obj[attr]} +if(parents!==undefined){for(var i=0;i0)non_empty[pos++]=seqs[i]} +if(non_empty.length==0)break +for(var i=0;i-1){not_head[pos++]=s}} +if(not_head.length>0){candidate=null} +else{break}} +if(candidate===null){throw _b_.TypeError("inconsistent hierarchy, no C3 MRO is possible")} +mro[mpos++]=candidate +for(var i=0;i' }}(attr) +break; +case 'classmethod': +args=[klass.$factory] +__self__=klass +__repr__=__str__=function(){var x='' +return x} +break; +case 'staticmethod': +args=[] +__repr__=__str__=function(attr){return function(){return ''}}(attr) +break;} +var method=(function(initial_args){return function(){ +var local_args=initial_args.slice() +var pos=local_args.length +for(var i=0;i < arguments.length;i++){local_args[pos++]=arguments[i]} +return res.apply(null,local_args)}})(args) +method.__class__=$B.$FunctionDict +method.__eq__=function(other){return other.__func__===__func__} +for(var attr in res){method[attr]=res[attr]} 
+method.__func__=__func__ +method.__repr__=__repr__ +method.__self__=__self__ +method.__str__=__str__ +method.__code__={'__class__': $B.CodeDict} +method.__doc__=res.__doc__ ||'' +method.im_class=klass +return method}}} +function $instance_creator(klass){ +if(klass.$instanciable!==undefined){console.log('klass',klass.__name__,'not instanciable') +return function(){throw _b_.TypeError("Can't instantiate abstract "+ +"class interface with abstract methods")}} +var new_func=null +try{new_func=_b_.getattr(klass,'__new__')} +catch(err){} +var init_func=null +try{init_func=_b_.getattr(klass,'__init__')} +catch(err){} +var simple=false +if(klass.__bases__.length==0){simple=true} +else if(klass.__bases__.length==1){switch(klass.__bases__[0]){case _b_.object: +case _b_.type: +simple=true +break +default: +simple=false +break}} +if(simple && klass.__new__==undefined && init_func!==null){ +return function(){var obj={__class__:klass} +init_func.apply(null,[obj].concat(Array.prototype.slice.call(arguments))) +return obj}} +return function(){var obj +var _args=Array.prototype.slice.call(arguments) +if(simple && klass.__new__==undefined){obj={__class__:klass}}else{if(new_func!==null){obj=new_func.apply(null,[klass.$factory].concat(_args))}} +if(!obj.__initialized__){if(init_func!==null){init_func.apply(null,[obj].concat(_args))}} +return obj}} +function member_descriptor(klass,attr){return{__class__:member_descriptor.$dict,klass: klass,attr: attr}} +member_descriptor.__class__=$B.$factory +member_descriptor.$dict={__class__: $B.$type,__name__: 'member_descriptor',$factory: member_descriptor,__str__: function(self){return ""}} +member_descriptor.$dict.__mro__=[member_descriptor.$dict ,_b_.object.$dict] +function $MethodFactory(){} +$MethodFactory.__class__=$B.$factory +$B.$MethodDict={__class__:$B.$type,__name__:'method',$factory:$MethodFactory} +$B.$MethodDict.__eq__=function(self,other){return self.$infos !==undefined && +other.$infos !==undefined && +self.$infos.__func__===other.$infos.__func__ && +self.$infos.__self__===other.$infos.__self__} +$B.$MethodDict.__ne__=function(self,other){return !$B.$MethodDict.__eq__(self,other)} +$B.$MethodDict.__getattribute__=function(self,attr){ +var infos=self.$infos.__func__.$infos +if(infos && infos[attr]){if(attr=='__code__'){var res={__class__:$B.$CodeDict} +for(var attr in infos.__code__){res[attr]=infos.__code__[attr]} +return res}else{return infos[attr]}}else{return _b_.object.$dict.__getattribute__(self,attr)}} +$B.$MethodDict.__mro__=[$B.$MethodDict,_b_.object.$dict] +$B.$MethodDict.__repr__=$B.$MethodDict.__str__=function(self){var res=''} +$MethodFactory.$dict=$B.$MethodDict +$B.$InstanceMethodDict={__class__:$B.$type,__name__:'instancemethod',__mro__:[_b_.object.$dict],$factory:$MethodFactory}})(__BRYTHON__) +;(function($B){var _b_=$B.builtins +$B.args=function($fname,argcount,slots,var_names,$args,$dobj,extra_pos_args,extra_kw_args){ +var has_kw_args=false,nb_pos=$args.length,$ns +if(nb_pos>0 && $args[nb_pos-1].$nat){has_kw_args=true +nb_pos-- +var kw_args=$args[nb_pos].kw} +if(extra_pos_args){slots[extra_pos_args]=[]; +slots[extra_pos_args].__class__=_b_.tuple.$dict} +if(extra_kw_args){ +slots[extra_kw_args]={__class__:_b_.dict.$dict,$numeric_dict :{},$object_dict :{},$string_dict :{},$str_hash:{},length: 0}} +if(nb_pos>argcount){ +if(extra_pos_args===null){ +msg=$fname+"() takes "+argcount+' positional argument'+ +(argcount> 1 ? 
'' : 's')+ ' but more were given' +throw _b_.TypeError(msg)}else{ +for(var i=argcount;i0){if(missing.length==1){throw _b_.TypeError($fname+" missing 1 positional argument: "+missing[0])}else{var msg=$fname+" missing "+missing.length+" positional arguments: " +msg +=missing.join(' and ') +throw _b_.TypeError(msg)}} +return slots} +$B.get_class=function(obj){ +if(obj===null){return $B.$NoneDict} +var klass=obj.__class__ +if(klass===undefined){switch(typeof obj){case 'number': +if(obj % 1===0){ +obj.__class__=_b_.int.$dict +return _b_.int.$dict} +obj.__class__=_b_.float.$dict +return _b_.float.$dict +case 'string': +obj.__class__=_b_.str.$dict +return _b_.str.$dict +case 'boolean': +obj.__class__=$B.$BoolDict +return $B.$BoolDict +case 'function': +obj.__class__=$B.$FunctionDict +return $B.$FunctionDict +case 'object': +if(Array.isArray(obj)){if(Object.getPrototypeOf(obj)===Array.prototype){obj.__class__=_b_.list.$dict +return _b_.list.$dict}}else if(obj.constructor===Number)return _b_.float.$dict +break}} +return klass} +$B.$mkdict=function(glob,loc){var res={} +for(var arg in glob)res[arg]=glob[arg] +for(var arg in loc)res[arg]=loc[arg] +return res} +function clear(ns){ +delete $B.vars[ns],$B.bound[ns],$B.modules[ns],$B.imported[ns]} +$B.$list_comp=function(items){ +var ix=$B.UUID() +var py="x"+ix+"=[]\n",indent=0 +for(var i=1,len=items.length;i < len;i++){py +=' '.repeat(indent) +var item=items[i] +item=item.replace(/\s*$/,'').replace(/\s+/g,' ') +py +=item+':\n' +indent +=4} +py +=' '.repeat(indent) +py +='x'+ix+'.append('+items[0]+')\n' +return[py,ix]} +$B.$dict_comp=function(env){ +var $ix=$B.UUID() +var $res='res'+$ix +var $py=$res+"={}\n" +var indent=0 +for(var $i=2,_len_$i=arguments.length;$i < _len_$i;$i++){$py+=' '.repeat(indent) +$py +=arguments[$i]+':\n' +indent +=4} +$py+=' '.repeat(indent) +$py +=$res+'.update({'+arguments[1].join('\n')+'})' +for(var i=0;i=self.value.length){throw _b_.StopIteration('')} +return self.value[self.$counter]} +$GenExprDict.$factory={__class__:$B.$factory,$dict:$GenExprDict} +var $res2={value:$res1,__class__:$GenExprDict,$counter:-1} +$res2.toString=function(){return 'ge object'} +delete $B.modules[genexpr_name] +$B.clear_ns(genexpr_name) +return $res2} +$B.$gen_expr1=function(module_name,parent_block_id,items,line_num){ +var $ix=$B.UUID() +var py='def ge'+$ix+'():\n' +var indent=1 +for(var i=1,len=items.length;i < len;i++){py +=' '.repeat(indent) +var item=items[i].replace(/\s+$/,'').replace(/\n/g,' ') +py +=item+':\n' +indent +=4} +py+=' '.repeat(indent) +py +='yield '+items[0] +var genexpr_name='ge'+$ix +var root=$B.py2js(py,module_name,genexpr_name,parent_block_id,line_num) +var js=root.to_js() +var lines=js.split('\n') +var header='for(var i=0;i<$B.frames_stack.length;i++){\n'+ +' var frame = $B.frames_stack[i];\n'+ +' eval("var $locals_"+frame[2].replace(/\\./g,"_")+" = frame[3]")\n'+ +'}\n' +lines.splice(2,0,header) +js=lines.join('\n') +js +='\nreturn $locals_'+genexpr_name+'["'+genexpr_name+'"]();\n' +js='(function(){'+js+'})()\n' +return js} +$B.clear_ns=function(name){ +var keys=[],len=name.length +for(var key in __BRYTHON__.modules){if(key.substr(0,len)==name && key!==name){keys.push(key)}} +for(var i=0;i=0;i--){var frame=$B.frames_stack[i] +res=$B.frames_stack[i][1][name] +if(res!==undefined){return res} +res=$B.frames_stack[i][3][name] +if(res!==undefined){return res}} +throw _b_.NameError("free variable '"+name+ +"' referenced before assignment in enclosing scope")} +$B.$JS2Py=function(src){if(typeof src==='number'){if(src%1===0)return 
src +return _b_.float(src)} +if(src===null||src===undefined)return _b_.None +var klass=$B.get_class(src) +if(klass!==undefined){if(klass===_b_.list.$dict){for(var i=0,_len_i=src.length;i< _len_i;i++)src[i]=$B.$JS2Py(src[i])}else if(klass===$B.JSObject.$dict){src=src.js}else{return src}} +if(typeof src=="object"){if($B.$isNode(src))return $B.DOMNode(src) +if($B.$isEvent(src))return $B.$DOMEvent(src) +if((Array.isArray(src)&&Object.getPrototypeOf(src)===Array.prototype)|| +$B.$isNodeList(src)){var res=[],pos=0 +for(var i=0,_len_i=src.length;i<_len_i;i++)res[pos++]=$B.$JS2Py(src[i]) +return res}} +return $B.JSObject(src)} +$B.list_key=function(obj,key){key=$B.$GetInt(key) +if(key<0){key +=obj.length} +var res=obj[key] +if(res===undefined){throw _b_.IndexError("list index out of range")} +return res} +$B.list_slice=function(obj,start,stop){if(start===null){start=0} +else{start=$B.$GetInt(start) +if(start<0){start=Math.max(0,start+obj.length)}} +if(stop===null){return obj.slice(start)} +stop=$B.$GetInt(stop) +if(stop<0){stop=Math.max(0,stop+obj.length)} +return obj.slice(start,stop)} +$B.list_slice_step=function(obj,start,stop,step){if(step===null||step==1){return $B.list_slice(obj,start,stop)} +if(step==0){throw _b_.ValueError("slice step cannot be zero")} +step=$B.$GetInt(step) +if(start===null){start=step >=0 ? 0 : obj.length-1} +else{start=$B.$GetInt(start) +if(start<0){start=Math.min(0,start+obj.length)}} +if(stop===null){stop=step >=0 ? obj.length : -1} +else{stop=$B.$GetInt(stop) +if(stop<0){stop=Math.max(0,stop+obj.length)}} +var res=[],len=obj.length +if(step>0){for(var i=start;istop;i+=step){res.push(obj[i])}} +return res} +function index_error(obj){var type=typeof obj=='string' ? 'string' : 'list' +throw _b_.IndexError(type+" index out of range")} +$B.$getitem=function(obj,item){if(typeof item=='number'){if(Array.isArray(obj)||typeof obj=='string'){item=item >=0 ? item : obj.length+item +if(obj[item]!==undefined){return obj[item]} +else{index_error(obj)}}} +try{item=$B.$GetInt(item)}catch(err){} +if((Array.isArray(obj)||typeof obj=='string') +&& typeof item=='number'){item=item >=0 ? item : obj.length+item +if(obj[item]!==undefined){return obj[item]} +else{index_error(obj)}} +return _b_.getattr(obj,'__getitem__')(item)} +$B.set_list_key=function(obj,key,value){try{key=$B.$GetInt(key)} +catch(err){if(_b_.isinstance(key,_b_.slice)){var s=_b_.slice.$dict.$conv_for_seq(key,obj.length) +return $B.set_list_slice_step(obj,s.start,s.stop,s.step,value)}} +if(key<0){key+=obj.length} +if(obj[key]===undefined){console.log(obj,key) +throw _b_.IndexError('list assignment index out of range')} +obj[key]=value} +$B.set_list_slice=function(obj,start,stop,value){if(start===null){start=0} +else{start=$B.$GetInt(start) +if(start<0){start=Math.max(0,start+obj.length)}} +if(stop===null){stop=obj.length} +stop=$B.$GetInt(stop) +if(stop<0){stop=Math.max(0,stop+obj.length)} +var res=_b_.list(value) +obj.splice.apply(obj,[start,stop-start].concat(res))} +$B.set_list_slice_step=function(obj,start,stop,step,value){if(step===null||step==1){return $B.set_list_slice(obj,start,stop,value)} +if(step==0){throw _b_.ValueError("slice step cannot be zero")} +step=$B.$GetInt(step) +if(start===null){start=step>0 ? 0 : obj.length-1} +else{start=$B.$GetInt(start) +if(start<0){start=Math.min(0,start+obj.length)}} +if(stop===null){stop=step>0 ? 
obj.length : -1} +else{stop=$B.$GetInt(stop) +if(stop<0){stop=Math.max(0,stop+obj.length)}} +var repl=_b_.list(value),j=0,test,nb=0 +if(step>0){test=function(i){return istop}} +for(var i=start;test(i);i+=step){nb++} +if(nb!=repl.length){throw _b_.ValueError('attempt to assign sequence of size '+ +repl.length+' to extended slice of size '+nb)} +for(var i=start;test(i);i+=step){obj[i]=repl[j] +j++}} +$B.$setitem=function(obj,item,value){if(Array.isArray(obj)&& typeof item=='number' && !_b_.isinstance(obj,_b_.tuple)){if(item<0){item+=obj.length} +if(obj[item]===undefined){throw _b_.IndexError("list assignment index out of range")} +obj[item]=value +return}else if(obj.__class__===_b_.dict.$dict){obj.__class__.__setitem__(obj,item,value) +return} +_b_.getattr(obj,'__setitem__')(item,value)} +$B.augm_item_add=function(obj,item,incr){if(Array.isArray(obj)&& typeof item=="number" && +obj[item]!==undefined){obj[item]+=incr +return} +var ga=_b_.getattr +try{var augm_func=ga(ga(obj,'__getitem__')(item),'__iadd__') +console.log('has augmfunc')}catch(err){ga(obj,'__setitem__')(item,ga(ga(obj,'__getitem__')(item),'__add__')(incr)) +return} +augm_func(value)} +var augm_item_src=''+$B.augm_item_add +var augm_ops=[['-=','sub'],['*=','mul']] +for(var i=0,_len_i=augm_ops.length;i < _len_i;i++){var augm_code=augm_item_src.replace(/add/g,augm_ops[i][1]) +augm_code=augm_code.replace(/\+=/g,augm_ops[i][0]) +eval('$B.augm_item_'+augm_ops[i][1]+'='+augm_code)} +$B.extend=function(fname,arg,mapping){var it=_b_.iter(mapping),getter=_b_.getattr(mapping,'__getitem__') +while(true){try{var key=_b_.next(it) +if(typeof key!=='string'){throw _b_.TypeError(fname+"() keywords must be strings")} +if(arg[key]!==undefined){throw _b_.TypeError( +fname+"() got multiple values for argument '"+key+"'")} +arg[key]=getter(key)}catch(err){if(_b_.isinstance(err,[_b_.StopIteration])){break} +throw err}} +return arg} +$B.extend_list=function(){ +var res=Array.prototype.slice.call(arguments,0,arguments.length-1),last=$B.last(arguments) +var it=_b_.iter(last) +while(true){try{res.push(_b_.next(it))}catch(err){if(_b_.isinstance(err,[_b_.StopIteration])){break} +throw err}} +return res} +$B.$test_item=function(expr){ +$B.$test_result=expr +return _b_.bool(expr)} +$B.$test_expr=function(){ +return $B.$test_result} +$B.$is_member=function(item,_set){ +var f,_iter +try{f=_b_.getattr(_set,"__contains__")} +catch(err){} +if(f)return f(item) +try{_iter=_b_.iter(_set)} +catch(err){} +if(_iter){while(1){try{var elt=_b_.next(_iter) +if(_b_.getattr(elt,"__eq__")(item))return true}catch(err){if(err.__name__=="StopIteration")return false +throw err}}} +try{f=_b_.getattr(_set,"__getitem__")} +catch(err){throw _b_.TypeError("'"+$B.get_class(_set).__name__+"' object is not iterable")} +if(f){var i=-1 +while(1){i++ +try{var elt=f(i) +if(_b_.getattr(elt,"__eq__")(item))return true}catch(err){if(err.__name__=='IndexError')return false +throw err}}}} +var $io={__class__:$B.$type,__name__:'io'} +$io.__mro__=[$io,_b_.object.$dict] +$B.stderr={__class__:$io,write:function(data){console.log(data)},flush:function(){}} +$B.stderr_buff='' +$B.stdout={__class__:$io,write: function(data){console.log(data)},flush:function(){}} +$B.stdin={__class__: $io,__original__:true,closed: false,len:1,pos:0,read: function(){return '';},readline: function(){return '';}} +$B.jsobject2pyobject=function(obj){switch(obj){case null: +return _b_.None +case true: +return _b_.True +case false: +return _b_.False} +if(typeof obj==='object' && !Array.isArray(obj)&& 
+obj.__class__===undefined){ +var res=_b_.dict() +for(var attr in obj){res.$string_dict[attr]=$B.jsobject2pyobject(obj[attr])} +return res} +if(_b_.isinstance(obj,_b_.list)){var res=[],pos=0 +for(var i=0,_len_i=obj.length;i < _len_i;i++){res[pos++]=$B.jsobject2pyobject(obj[i])} +return res} +if(obj.__class__!==undefined){if(obj.__class__===_b_.list){for(var i=0,_len_i=obj.length;i < _len_i;i++){obj[i]=$B.jsobject2pyobject(obj[i])} +return obj} +return obj} +if(obj._type_==='iter'){ +return _b_.iter(obj.data)} +return $B.JSObject(obj)} +$B.pyobject2jsobject=function(obj){ +switch(obj){case _b_.None: +return null +case _b_.True: +return true +case _b_.False: +return false} +if(_b_.isinstance(obj,[_b_.int,_b_.float,_b_.str]))return obj +if(_b_.isinstance(obj,[_b_.list,_b_.tuple])){var res=[],pos=0 +for(var i=0,_len_i=obj.length;i < _len_i;i++){res[pos++]=$B.pyobject2jsobject(obj[i])} +return res} +if(_b_.isinstance(obj,_b_.dict)){var res={} +var items=_b_.list(_b_.dict.$dict.items(obj)) +for(var i=0,_len_i=items.length;i < _len_i;i++){res[$B.pyobject2jsobject(items[i][0])]=$B.pyobject2jsobject(items[i][1])} +return res} +if(_b_.hasattr(obj,'__iter__')){ +var _a=[],pos=0 +while(1){try{ +_a[pos++]=$B.pyobject2jsobject(_b_.next(obj))}catch(err){if(err.__name__ !=="StopIteration")throw err +break}} +return{'_type_': 'iter',data: _a}} +if(_b_.hasattr(obj,'__getstate__')){return _b_.getattr(obj,'__getstate__')()} +if(_b_.hasattr(obj,'__dict__')){return $B.pyobject2jsobject(_b_.getattr(obj,'__dict__'))} +throw _b_.TypeError(_b_.str(obj)+' is not JSON serializable')} +$B.set_line=function(line_num,module_name){$B.line_info=line_num+','+module_name +return _b_.None} +$B.$iterator=function(items,klass){var res={__class__:klass,__iter__:function(){return res},__len__:function(){return items.length},__next__:function(){res.counter++ +if(res.counter"},counter:-1} +res.__str__=res.toString=res.__repr__ +return res} +$B.$iterator_class=function(name){var res={__class__:$B.$type,__name__:name,} +res.__mro__=[res,_b_.object.$dict] +function as_array(s){var _a=[],pos=0 +var _it=_b_.iter(s) +while(1){try{ +_a[pos++]=_b_.next(_it)}catch(err){if(err.__name__=='StopIteration'){break}}} +return _a} +function as_list(s){return _b_.list(as_array(s))} +function as_set(s){return _b_.set(as_array(s))} +res.__eq__=function(self,other){if(_b_.isinstance(other,[_b_.tuple,_b_.set,_b_.list])){return _b_.getattr(as_list(self),'__eq__')(other)} +if(_b_.hasattr(other,'__iter__')){return _b_.getattr(as_list(self),'__eq__')(as_list(other))} +_b_.NotImplementedError("__eq__ not implemented yet for list and " + _b_.type(other))} +var _ops=['eq','ne'] +var _f=res.__eq__+'' +for(var i=0;i < _ops.length;i++){var _op='__'+_ops[i]+'__' +eval('res.'+_op+'='+_f.replace(new RegExp('__eq__','g'),_op))} +res.__or__=function(self,other){if(_b_.isinstance(other,[_b_.tuple,_b_.set,_b_.list])){return _b_.getattr(as_set(self),'__or__')(other)} +if(_b_.hasattr(other,'__iter__')){return _b_.getattr(as_set(self),'__or__')(as_set(other))} +_b_.NotImplementedError("__or__ not implemented yet for set and " + _b_.type(other))} +var _ops=['sub','and','xor','gt','ge','lt','le'] +var _f=res.__or__+'' +for(var i=0;i < _ops.length;i++){var _op='__'+_ops[i]+'__' +eval('res.'+_op+'='+_f.replace(new RegExp('__or__','g'),_op))} +res.$factory={__class__:$B.$factory,$dict:res} +return res} +$B.$CodeDict={__class__:$B.$type,__name__:'code'} +$B.$CodeDict.__mro__=[$B.$CodeDict,_b_.object.$dict] +function _code(){} +_code.__class__=$B.$factory 
+_code.$dict=$B.$CodeDict +$B.$CodeDict.$factory=_code +function $err(op,klass,other){var msg="unsupported operand type(s) for "+op +msg +=": '"+klass.__name__+"' and '"+$B.get_class(other).__name__+"'" +throw _b_.TypeError(msg)} +var ropnames=['add','sub','mul','truediv','floordiv','mod','pow','lshift','rshift','and','xor','or'] +var ropsigns=['+','-','*','/','//','%','**','<<','>>','&','^','|'] +$B.make_rmethods=function(klass){for(var j=0,_len_j=ropnames.length;j < _len_j;j++){if(klass['__'+ropnames[j]+'__']===undefined){ +klass['__'+ropnames[j]+'__']=(function(name,sign){return function(self,other){try{return _b_.getattr(other,'__r'+name+'__')(self)} +catch(err){$err(sign,klass,other)}}})(ropnames[j],ropsigns[j])}}} +$B.set_func_names=function(klass){var name=klass.__name__ +for(var attr in klass){if(typeof klass[attr]=='function'){klass[attr].$infos={__name__ : name+'.'+attr}}}} +$B.UUID=function(){return $B.$py_UUID++} +$B.InjectBuiltins=function(){var _str=["var _b_=$B.builtins"],pos=1 +for(var $b in $B.builtins)_str[pos++]='var ' + $b +'=_b_["'+$b+'"]' +return _str.join(';')} +$B.$GetInt=function(value){ +if(typeof value=="number"||value.constructor===Number){return value} +else if(typeof value==="boolean"){return value ? 1 : 0} +else if(_b_.isinstance(value,_b_.int)){return value} +else if(_b_.isinstance(value,_b_.float)){return value.valueOf()} +if(value.__class__!==$B.$factory){try{var v=_b_.getattr(value,'__int__')();return v}catch(e){} +try{var v=_b_.getattr(value,'__index__')();return v}catch(e){}} +throw _b_.TypeError("'"+$B.get_class(value).__name__+ +"' object cannot be interpreted as an integer")} +$B.PyNumber_Index=function(item){switch(typeof item){case "boolean": +return item ? 1 : 0 +case "number": +return item +case "object": +if(item.__class__===$B.LongInt.$dict){return item} +var method=_b_.getattr(item,'__index__',null) +if(method!==null){return $B.int_or_bool(_b_.getattr(method,'__call__')())} +default: +throw _b_.TypeError("'"+$B.get_class(item).__name__+ +"' object cannot be interpreted as an integer")}} +$B.int_or_bool=function(v){switch(typeof v){case "boolean": +return v ? 
1 : 0 +case "number": +return v +case "object": +if(v.__class__===$B.LongInt.$dict){return v} +else{throw _b_.TypeError("'"+$B.get_class(v).__name__+ +"' object cannot be interpreted as an integer")} +default: +throw _b_.TypeError("'"+$B.get_class(v).__name__+ +"' object cannot be interpreted as an integer")}} +$B.int_value=function(v){ +try{return $B.int_or_bool(v)} +catch(err){if(_b_.isinstance(v,_b_.complex)&& v.imag==0){return $B.int_or_bool(v.real)}else if(isinstance(v,_b_.float)&& v==Math.floor(v)){return Math.floor(v)}else{throw _b_.TypeError("'"+$B.get_class(v).__name__+ +"' object cannot be interpreted as an integer")}}} +$B.enter_frame=function(frame){ +$B.frames_stack.push(frame)} +$B.leave_frame=function(arg){ +if($B.frames_stack.length==0){console.log('empty stack');return} +$B.frames_stack.pop()} +var min_int=Math.pow(-2,53),max_int=Math.pow(2,53)-1 +$B.is_safe_int=function(){for(var i=0;imax_int){return false}} +return true} +$B.add=function(x,y){var z=x+y +if(x>min_int && xmin_int && ymin_int && zmin_int && xmin_int && ymin_int && zmin_int && xmin_int && ymin_int && xmin_int && ymin_int && zmin_int && xmin_int && ymin_int && zmin_int && xmin_int && ymin_int && z=y} +else if(typeof x=='number' && typeof y!='number'){return !y.pos} +else if(typeof x !='number' && typeof y=='number'){return x.pos===true} +else{return $B.LongInt.$dict.__ge__(x,y)}} +$B.gt=function(x,y){if(typeof x=='number' && typeof y=='number'){return x>y} +else if(typeof x=='number' && typeof y!='number'){return !y.pos} +else if(typeof x !='number' && typeof y=='number'){return x.pos===true} +else{return $B.LongInt.$dict.__gt__(x,y)}} +window.is_none=function(o){return o===undefined ||o==_b_.None;} +window.is_none=function(o){return o===undefined ||o==_b_.None;}})(__BRYTHON__) +if(!Array.indexOf){Array.prototype.indexOf=function(obj){for(var i=0,_len_i=this.length;i < _len_i;i++)if(this[i]==obj)return i +return -1}} +if(!String.prototype.repeat){String.prototype.repeat=function(count){if(count < 1)return ''; +var result='',pattern=this.valueOf() +while(count > 1){if(count & 1)result +=pattern +count >>=1,pattern +=pattern} +return result + pattern;}} + +;(function($B){eval($B.InjectBuiltins()) +_b_.__debug__=false +var $ObjectDict=_b_.object.$dict +$B.$comps={'>':'gt','>=':'ge','<':'lt','<=':'le'} +$B.$inv_comps={'>': 'le','>=': 'lt','<': 'ge','<=': 'gt'} +function abs(obj){if(isinstance(obj,_b_.int))return _b_.int(Math.abs(obj)); +if(isinstance(obj,_b_.float))return _b_.float(Math.abs(obj)); +if(hasattr(obj,'__abs__')){return getattr(obj,'__abs__')()}; +throw _b_.TypeError("Bad operand type for abs(): '"+$B.get_class(obj)+"'")} +function all(obj){var iterable=iter(obj) +while(1){try{var elt=next(iterable) +if(!bool(elt))return false}catch(err){return true}}} +function any(obj){var iterable=iter(obj) +while(1){try{var elt=next(iterable) +if(bool(elt))return true}catch(err){return false}}} +function ascii(obj){var res=repr(obj),res1='',cp +for(var i=0;i=0)return prefix + value.toString(base); +return '-' + prefix +(-value).toString(base);} +function bin(obj){if(isinstance(obj,_b_.int)){return $builtin_base_convert_helper(obj,2)} +return getattr(obj,'__index__')()} +function bool(obj){ +if(obj===null ||obj===undefined )return false +switch(typeof obj){case 'boolean': +return obj +case 'number': +case 'string': +if(obj)return true +return false +default: +try{return getattr(obj,'__bool__')()} +catch(err){try{return getattr(obj,'__len__')()>0} +catch(err){return true}}}} +function callable(obj){return 
hasattr(obj,'__call__')} +function chr(i){if(i < 0 ||i > 1114111)_b_.ValueError('Outside valid range') +return String.fromCharCode(i)} +function classmethod(func){func.$type='classmethod' +return func} +classmethod.__class__=$B.$factory +classmethod.$dict={__class__:$B.$type,__name__:'classmethod',$factory: classmethod} +classmethod.$dict.__mro__=[classmethod.$dict,$ObjectDict] +$B.$CodeObjectDict={__class__:$B.$type,__name__:'code',__repr__:function(self){return ''},} +$B.$CodeObjectDict.__str__=$B.$CodeObjectDict.__repr__ +$B.$CodeObjectDict.__mro__=[$B.$CodeObjectDict,$ObjectDict] +function compile(source,filename,mode){var $=$B.args('compile',6,{source:null,filename:null,mode:null,flags:null,dont_inherit:null,optimize:null},['source','filename','mode','flags','dont_inherit','optimize'],arguments,{flags:0,dont_inherit:false,optimize:-1},null,null) +var module_name='exec_' + $B.UUID() +var local_name=module_name; +var root=$B.py2js(source,module_name,[module_name],local_name) +$.__class__=$B.$CodeObjectDict +return $} +compile.__class__=$B.factory +$B.$CodeObjectDict.$factory=compile +compile.$dict=$B.$CodeObjectDict +var __debug__=$B.debug>0 +function delattr(obj,attr){ +var klass=$B.get_class(obj) +var res=obj[attr] +if(res===undefined){var mro=klass.__mro__ +for(var i=0;i"},__str__:function(){return ""},counter:_start-1} +for(var attr in res){if(typeof res[attr]==='function' && attr!=="__class__"){res[attr].__str__=(function(x){return function(){return ""}})(attr)}} +return res} +enumerate.__class__=$B.$factory +enumerate.$dict=$EnumerateDict +$EnumerateDict.$factory=enumerate +function $eval(src,_globals,_locals){var current_frame=$B.frames_stack[$B.frames_stack.length-1] +if(current_frame!==undefined){var current_locals_id=current_frame[0].replace(/\./,'_'),current_globals_id=current_frame[2].replace(/\./,'_')} +var is_exec=arguments[3]=='exec',leave=false +if(src.__class__===$B.$CodeObjectDict){src=src.source} +var globals_id='$exec_'+$B.UUID(),locals_id,parent_block_id +if(_locals===_globals ||_locals===undefined){locals_id=globals_id}else{locals_id='$exec_'+$B.UUID()} +eval('var $locals_'+globals_id+' = {}\nvar $locals_'+locals_id+' = {}') +if(_globals===undefined){var gobj=current_frame[3],ex='' +for(var attr in current_frame[3]){ex=='$locals_'+globals_id+'["'+attr+ +'"] = gobj["'+attr+'"]';} +parent_block_id=current_globals_id +ex +='var $locals_'+current_globals_id+'=gobj;' +eval(ex)}else{$B.bound[globals_id]={} +var items=_b_.dict.$dict.items(_globals),item +while(1){try{var item=next(items) +eval('$locals_'+globals_id+'["'+item[0]+'"] = item[1]') +$B.bound[globals_id][item[0]]=true}catch(err){break}} +parent_block_id='__builtins__'} +if(_locals===undefined){if(_globals!==undefined){eval('var $locals_'+locals_id+' = $locals_'+globals_id)}else{var lobj=current_frame[1],ex='' +for(var attr in current_frame[1]){ex +='$locals_'+locals_id+'["'+attr+ +'"] = current_frame[1]["'+attr+'"];'} +eval(ex)}}else{var items=_b_.dict.$dict.items(_locals),item +while(1){try{var item=next(items) +eval('$locals_'+locals_id+'["'+item[0]+'"] = item[1]')}catch(err){break}}} +var root=$B.py2js(src,globals_id,locals_id,parent_block_id),leave_frame=true +try{ +if(!is_exec){var try_node=root.children[root.children.length-2],instr=$B.last(try_node.children) +var type=instr.C.tree[0].type +if(!('expr'==type ||'list_or_tuple'==type ||'op'==type)){leave_frame=false +throw _b_.SyntaxError("eval() argument must be an expression",'',1,1,src)}else{ +var children=try_node.children 
+root.children.splice(root.children.length-2,2) +for(var i=0;i"},$FilterDict.__mro__=[$FilterDict,$ObjectDict] +function filter(){if(arguments.length!=2){throw _b_.TypeError( +"filter expected 2 arguments, got "+arguments.length)} +var func=arguments[0],iterable=iter(arguments[1]) +if(func===_b_.None)func=bool +var __next__=function(){while(true){var _item=next(iterable) +if(func(_item)){return _item}}} +return{ +__class__: $FilterDict,__next__: __next__}} +function format(value,format_spec){if(hasattr(value,'__format__'))return getattr(value,'__format__')(format_spec) +throw _b_.NotImplementedError("__format__ is not implemented for object '" + _b_.str(value)+ "'")} +function attr_error(attr,cname){var msg="bad operand type for unary #: '"+cname+"'" +switch(attr){case '__neg__': +throw _b_.TypeError(msg.replace('#','-')) +case '__pos__': +throw _b_.TypeError(msg.replace('#','+')) +case '__invert__': +throw _b_.TypeError(msg.replace('#','~')) +case '__call__': +throw _b_.TypeError("'"+cname+"'"+' object is not callable') +default: +throw _b_.AttributeError("'"+cname+"' object has no attribute '"+attr+"'")}} +function getattr(obj,attr,_default){var klass=obj.__class__ +if(klass===undefined){ +if(typeof obj=='string'){klass=_b_.str.$dict} +else if(typeof obj=='number'){klass=obj % 1==0 ? _b_.int.$dict : _b_.float.$dict} +else{klass=$B.get_class(obj)}} +if(klass===undefined){ +if(obj[attr]!==undefined)return $B.$JS2Py(obj[attr]) +if(_default!==undefined)return _default +throw _b_.AttributeError('object has no attribute '+attr)} +switch(attr){case '__call__': +if(typeof obj=='function'){if(obj.$blocking){console.log('calling blocking function '+obj.__name__)} +return obj}else if(klass===$B.JSObject.$dict && typeof obj.js=='function'){return function(){var res=obj.js.apply(null,arguments) +if(res===undefined){return None} +return $B.JSObject(res)}} +break +case '__class__': +return klass.$factory +case '__dict__': +return $B.obj_dict(obj) +case '__doc__': +for(var i=0;i'} +return method} +return klass[attr]} +var is_class=klass.is_class,mro,attr_func +if(is_class){attr_func=$B.$type.__getattribute__ +if(obj.$dict===undefined){console.log('obj '+obj+' $dict undefined')} +obj=obj.$dict}else{var mro=klass.__mro__ +if(mro===undefined){console.log('in getattr '+attr+' mro undefined for '+obj+' dir '+dir(obj)+' class '+obj.__class__) +for(var _attr in obj){console.log('obj attr '+_attr+' : '+obj[_attr])} +console.log('obj class '+dir(klass)+' str '+klass)} +for(var i=0;i-1){return true}} +var hook=getattr(classinfo,'__subclasscheck__',null) +if(hook!==null){return hook(klass)} +return false} +var iterator_class=$B.make_class({name:'iterator',init:function(self,getitem,len){self.getitem=getitem +self.len=len +self.counter=-1}}) +iterator_class.$dict.__next__=function(self){self.counter++ +if(self.len!==null && self.counter==self.len){throw _b_.StopIteration('')} +try{return self.getitem(self.counter)} +catch(err){throw _b_.StopIteration('')}} +function iter(obj){try{var _iter=getattr(obj,'__iter__')} +catch(err){var gi=getattr(obj,'__getitem__',null),ln=getattr(obj,'__len__',null) +if(gi!==null){if(ln!==null){var len=getattr(ln,'__call__')() +return iterator_class(gi,len)}else{return iterator_class(gi,null)}} +throw _b_.TypeError("'"+$B.get_class(obj).__name__+"' object is not iterable")} +var res=_iter() +try{getattr(res,'__next__')} +catch(err){if(isinstance(err,_b_.AttributeError)){throw _b_.TypeError( +"iter() returned non-iterator of type '"+ +$B.get_class(res).__name__+"'")}} +return res} 
+function len(obj){try{return getattr(obj,'__len__')()} +catch(err){throw _b_.TypeError("object of type '"+$B.get_class(obj).__name__+ +"' has no len()")}} +function locals(){ +var locals_obj=$B.last($B.frames_stack)[1] +return $B.obj_dict(locals_obj)} +var $MapDict={__class__:$B.$type,__name__:'map'} +$MapDict.__mro__=[$MapDict,$ObjectDict] +$MapDict.__iter__=function(self){return self} +function map(){var func=getattr(arguments[0],'__call__') +var iter_args=[],pos=0 +for(var i=1;i"},__str__:function(){return ""},__next__: __next__} +return obj} +function $extreme(args,op){ +var $op_name='min' +if(op==='__gt__')$op_name="max" +if(args.length==0){throw _b_.TypeError($op_name+" expected 1 arguments, got 0")} +var last_arg=args[args.length-1] +var nb_args=args.length +var has_kw_args=false +var has_default=false +var func=false +if(last_arg.$nat=='kw'){nb_args-- +last_arg=last_arg.kw +for(var attr in last_arg){switch(attr){case 'key': +var func=last_arg[attr] +has_key=true +break +case '$$default': +var default_value=last_arg[attr] +has_default=true +break +default: +throw _b_.TypeError("'"+attr+"' is an invalid keyword argument for this function") +break}}} +if(!func){func=function(x){return x}} +if(nb_args==0){throw _b_.TypeError($op_name+" expected 1 arguments, got 0")}else if(nb_args==1){ +var $iter=iter(args[0]),res=null +while(true){try{var x=next($iter) +if(res===null ||bool(getattr(func(x),op)(func(res)))){res=x}}catch(err){if(err.__name__=="StopIteration"){if(res===null){if(has_default){return default_value} +else{throw _b_.ValueError($op_name+"() arg is an empty sequence")}}else{return res}} +throw err}}}else{if(has_default){throw _b_.TypeError("Cannot specify a default for "+$op_name+"() with multiple positional arguments")} +var res=null +for(var i=0;i'}} +p.__get__=function(self,obj,objtype){if(obj===undefined)return self +if(self.fget===undefined)throw _b_.AttributeError("unreadable attribute") +return getattr(self.fget,'__call__')(obj)} +if(fset!==undefined){p.__set__=function(self,obj,value){if(self.fset===undefined)throw _b_.AttributeError("can't set attribute") +getattr(self.fset,'__call__')(obj,value)}} +p.__delete__=fdel; +p.getter=function(fget){return property(fget,p.fset,p.fdel,p.__doc__)} +p.setter=function(fset){return property(p.fget,fset,p.fdel,p.__doc__)} +p.deleter=function(fdel){return property(p.fget,p.fset,fdel,p.__doc__)} +return p} +property.__class__=$B.$factory +property.$dict=$PropertyDict +$PropertyDict.$factory=property +function repr(obj){if(obj.__class__===$B.$factory){ +var func=$B.$type.__getattribute__(obj.$dict.__class__,'__repr__') +return func(obj)} +var func=getattr(obj,'__repr__') +if(func!==undefined){return func()} +throw _b_.AttributeError("object has no attribute __repr__")} +var $ReversedDict={__class__:$B.$type,__name__:'reversed'} +$ReversedDict.__mro__=[$ReversedDict,$ObjectDict] +$ReversedDict.__iter__=function(self){return self} +$ReversedDict.__next__=function(self){self.$counter-- +if(self.$counter<0)throw _b_.StopIteration('') +return self.getter(self.$counter)} +function reversed(seq){ +try{return getattr(seq,'__reversed__')()} +catch(err){if(err.__name__!='AttributeError'){throw err}} +try{var res={__class__:$ReversedDict,$counter : getattr(seq,'__len__')(),getter:getattr(seq,'__getitem__')} +return res}catch(err){throw _b_.TypeError("argument to reversed() must be a sequence")}} +reversed.__class__=$B.$factory +reversed.$dict=$ReversedDict +$ReversedDict.$factory=reversed +function 
round(arg,n){if(!isinstance(arg,[_b_.int,_b_.float])){throw _b_.TypeError("type "+arg.__class__+" doesn't define __round__ method")} +if(isinstance(arg,_b_.float)&&(arg.value===Infinity ||arg.value===-Infinity)){throw _b_.OverflowError("cannot convert float infinity to integer")} +if(n===undefined)return _b_.int(Math.round(arg)) +if(!isinstance(n,_b_.int)){throw _b_.TypeError( +"'"+n.__class__+"' object cannot be interpreted as an integer")} +var mult=Math.pow(10,n) +return _b_.int.$dict.__truediv__(Number(Math.round(arg.valueOf()*mult)),mult)} +function setattr(obj,attr,value){if(!(typeof attr=='string')){throw _b_.TypeError("setattr(): attribute name must be string")} +switch(attr){case 'alert': +case 'case': +case 'catch': +case 'constructor': +case 'Date': +case 'delete': +case 'default': +case 'document': +case 'Error': +case 'history': +case 'function': +case 'location': +case 'Math': +case 'new': +case 'Number': +case 'RegExp': +case 'this': +case 'throw': +case 'var': +case 'super': +case 'window': +attr='$$'+attr +break +case '__class__': +obj.__class__=value.$dict;return None +break} +if(obj.__class__===$B.$factory){ +if(obj.$dict.$methods && typeof value=='function' +&& value.__class__!==$B.$factory){ +obj.$dict.$methods[attr]=$B.make_method(attr,obj.$dict,value,value) +return None}else{obj.$dict[attr]=value;return None}} +var res=obj[attr],klass=obj.__class__ ||$B.get_class(obj) +if(res===undefined && klass){var mro=klass.__mro__,_len=mro.length +for(var i=0;i<_len;i++){res=mro[i][attr] +if(res!==undefined)break}} +if(res!==undefined){ +if(res.__set__!==undefined){res.__set__(res,obj,value);return None} +var __set__=getattr(res,'__set__',null) +if(__set__ &&(typeof __set__=='function')){__set__.apply(res,[obj,value]);return None}} +if(klass && klass.$slots && klass.$slots[attr]===undefined){throw _b_.AttributeError("'"+klass.__name__+"' object has no attribute'"+ +attr+"'")} +var _setattr=false +if(klass!==undefined){for(var i=0,_len=klass.__mro__.length;i<_len;i++){_setattr=klass.__mro__[i].__setattr__ +if(_setattr){break}}} +if(!_setattr){obj[attr]=value}else{_setattr(obj,attr,value)} +return None} +function sorted(){var $=$B.args('sorted',1,{iterable:null},['iterable'],arguments,{},null,'kw') +var _list=_b_.list(iter($.iterable)),args=[_list] +for(var i=1;i'} +return res+'>'} +function $$super(_type1,_type2){return{__class__:$SuperDict,__thisclass__:_type1,__self_class__:(_type2 ||None)}} +$$super.$dict=$SuperDict +$$super.__class__=$B.$factory +$SuperDict.$factory=$$super +$$super.$is_func=true +var $Reader={__class__:$B.$type,__name__:'reader'} +$Reader.__enter__=function(self){return self} +$Reader.__exit__=function(self){return false} +$Reader.__iter__=function(self){return iter(self.$lines)} +$Reader.__len__=function(self){return self.lines.length} +$Reader.__mro__=[$Reader,$ObjectDict] +$Reader.close=function(self){self.closed=true} +$Reader.read=function(self,nb){if(self.closed===true)throw _b_.ValueError('I/O operation on closed file') +if(nb===undefined)return self.$content +self.$counter+=nb +if(self.$bin){var res=self.$content.source.slice(self.$counter-nb,self.$counter) +return _b_.bytes(res)} +return self.$content.substr(self.$counter-nb,nb)} +$Reader.readable=function(self){return true} +$Reader.readline=function(self,limit){ +self.$lc=self.$lc===undefined ? -1 : self.$lc +if(self.closed===true)throw _b_.ValueError('I/O operation on closed file') +if(self.$lc==self.$lines.length-1){return self.$bin ? 
_b_.bytes(): ''} +self.$lc++ +var res=self.$lines[self.$lc] +self.$counter +=(self.$bin ? res.source.length : res.length) +return res} +$Reader.readlines=function(self,hint){if(self.closed===true)throw _b_.ValueError('I/O operation on closed file') +self.$lc=self.$lc===undefined ? -1 : self.$lc +return self.$lines.slice(self.$lc+1)} +$Reader.seek=function(self,offset,whence){if(self.closed===True)throw _b_.ValueError('I/O operation on closed file') +if(whence===undefined)whence=0 +if(whence===0){self.$counter=offset} +else if(whence===1){self.$counter +=offset} +else if(whence===2){self.$counter=self.$content.length+offset}} +$Reader.seekable=function(self){return true} +$Reader.tell=function(self){return self.$counter} +$Reader.writable=function(self){return false} +var $BufferedReader={__class__:$B.$type,__name__:'_io.BufferedReader'} +$BufferedReader.__mro__=[$BufferedReader,$Reader,$ObjectDict] +var $TextIOWrapper={__class__:$B.$type,__name__:'_io.TextIOWrapper'} +$TextIOWrapper.__mro__=[$TextIOWrapper,$Reader,$ObjectDict] +function $url_open(){ +var $ns=$B.args('open',3,{file:null,mode:null,encoding:null},['file','mode','encoding'],arguments,{mode:'r',encoding:'utf-8'},'args','kw') +for(var attr in $ns){eval('var '+attr+'=$ns["'+attr+'"]')} +if(args.length>0)var mode=args[0] +if(args.length>1)var encoding=args[1] +var is_binary=mode.search('b')>-1 +if(isinstance(file,$B.JSObject))return new $OpenFile(file.js,mode,encoding) +if(isinstance(file,_b_.str)){ +if(window.XMLHttpRequest){ +var req=new XMLHttpRequest();}else{ +var req=new ActiveXObject("Microsoft.XMLHTTP");} +req.onreadystatechange=function(){var status=req.status +if(status===404){$res=_b_.IOError('File '+file+' not found')}else if(status!==200){$res=_b_.IOError('Could not open file '+file+' : status '+status)}else{$res=req.responseText +if(is_binary){$res=_b_.str.$dict.encode($res,'utf-8')}}} +var fake_qs='?foo='+$B.UUID() +req.open('GET',file+fake_qs,false) +if(is_binary){req.overrideMimeType('text/plain; charset=utf-8');} +req.send() +if($res.constructor===Error)throw $res +if(is_binary){var lf=_b_.bytes('\n','ascii'),lines=_b_.bytes.$dict.split($res,lf) +for(var i=0;i"}})($func)}} +var $NoneDict={__class__:$B.$type,__name__:'NoneType'} +$NoneDict.__mro__=[$NoneDict,$ObjectDict] +$NoneDict.__setattr__=function(self,attr){return no_set_attr($NoneDict,attr)} +var None={__bool__ : function(){return False},__class__ : $NoneDict,__hash__ : function(){return 0},__repr__ : function(){return 'None'},__str__ : function(){return 'None'},toString : function(){return 'None'}} +$NoneDict.$factory=function(){return None} +$NoneDict.$factory.__class__=$B.$factory +$NoneDict.$factory.$dict=$NoneDict +for(var $op in $B.$comps){ +var key=$B.$comps[$op] +switch(key){case 'ge': +case 'gt': +case 'le': +case 'lt': +$NoneDict['__'+key+'__']=(function(op){return function(other){throw _b_.TypeError("unorderable types: NoneType() "+op+" "+ +$B.get_class(other).__name__+"()")}})($op)}} +for(var $func in None){if(typeof None[$func]==='function'){None[$func].__str__=(function(f){return function(){return ""}})($func)}} +var $FunctionCodeDict={__class__:$B.$type,__name__:'function code'} +$FunctionCodeDict.__mro__=[$FunctionCodeDict,$ObjectDict] +$FunctionCodeDict.$factory={__class__:$B.$factory,$dict:$FunctionCodeDict} +var $FunctionGlobalsDict={__class:$B.$type,__name__:'function globals'} +$FunctionGlobalsDict.__mro__=[$FunctionGlobalsDict,$ObjectDict] +$FunctionGlobalsDict.$factory={__class__:$B.$factory,$dict:$FunctionGlobalsDict} +var 
$FunctionDict=$B.$FunctionDict={__class__:$B.$type,__code__:{__class__:$FunctionCodeDict,__name__:'function code'},__globals__:{__class__:$FunctionGlobalsDict,__name__:'function globals'},__name__:'function'} +$FunctionDict.__getattribute__=function(self,attr){ +if(self.$infos && self.$infos[attr]!==undefined){if(attr=='__code__'){var res={__class__:$B.$CodeDict} +for(var attr in self.$infos.__code__){res[attr]=self.$infos.__code__[attr]} +return res}else if(attr=='__annotations__'){ +return $B.obj_dict(self.$infos[attr])}else{return self.$infos[attr]}}else{return _b_.object.$dict.__getattribute__(self,attr)}} +$FunctionDict.__repr__=$FunctionDict.__str__=function(self){return ''} +$FunctionDict.__mro__=[$FunctionDict,$ObjectDict] +$FunctionDict.__setattr__=function(self,attr,value){if(self.$infos[attr]!==undefined){self.$infos[attr]=value} +else{self[attr]=value}} +var $Function=function(){} +$Function.__class__=$B.$factory +$FunctionDict.$factory=$Function +$Function.$dict=$FunctionDict +_b_.__BRYTHON__=__BRYTHON__ +var builtin_funcs=['abs','all','any','ascii','bin','bool','bytearray','bytes','callable','chr','classmethod','compile','complex','delattr','dict','dir','divmod','enumerate','eval','exec','exit','filter','float','format','frozenset','getattr','globals','hasattr','hash','help','hex','id','input','int','isinstance','issubclass','iter','len','list','locals','map','max','memoryview','min','next','object','oct','open','ord','pow','print','property','quit','range','repr','reversed','round','set','setattr','slice','sorted','staticmethod','str','sum','$$super','tuple','type','vars','zip'] +for(var i=0;i'}})(orig_name)} +_b_[name].__module__='builtins' +_b_[name].__name__=name +_b_[name].__defaults__=_b_[name].__defaults__ ||[] +_b_[name].__kwdefaults__=_b_[name].__kwdefaults__ ||{} +_b_[name].__annotations__=_b_[name].__annotations__ ||{}} +_b_[name].__doc__=_b_[name].__doc__ ||''} +catch(err){}} +_b_['$$eval']=$eval +_b_['open']=$url_open +_b_['print']=$print +_b_['$$super']=$$super})(__BRYTHON__) +;(function($B){eval($B.InjectBuiltins()) +$B.$raise=function(){ +var es=$B.current_exception +if(es!==undefined)throw es +throw _b_.RuntimeError('No active exception to reraise')} +$B.$syntax_err_line=function(exc,module,pos,line_num){ +var pos2line={} +var lnum=1 +var src=$B.$py_src[module] +if(src===undefined){console.log('no src for',module)} +var line_pos={1:0} +for(var i=0,_len_i=src.length;i < _len_i;i++){pos2line[i]=lnum +if(src.charAt(i)=='\n'){line_pos[++lnum]=i}} +if(line_num===undefined){line_num=pos2line[pos]} +exc.$line_info=line_num+','+module +var lines=src.split('\n') +var line=lines[line_num-1] +var lpos=pos-line_pos[line_num] +var len=line.length +line=line.replace(/^\s*/,'') +lpos-=len-line.length +exc.args=_b_.tuple([$B.$getitem(exc.args,0),module,line_num,lpos,line])} +$B.$SyntaxError=function(module,msg,pos,line_num){var exc=_b_.SyntaxError(msg) +$B.$syntax_err_line(exc,module,pos,line_num) +throw exc} +$B.$IndentationError=function(module,msg,pos){var exc=_b_.IndentationError(msg) +$B.$syntax_err_line(exc,module,pos) +throw exc} +var $TracebackDict={__class__:$B.$type,__name__:'traceback'} +$TracebackDict.__getattribute__=function(self,attr){if(self.stack.length==0){alert('no stack',attr)} +var last_frame=$B.last(self.stack) +if(last_frame==undefined){alert('last frame undef ');console.log(self.stack,Object.keys(self.stack))} +var line_info=last_frame[1].$line_info +switch(attr){case 'tb_frame': +return frame(self.stack) +case 'tb_lineno': 
+if(line_info===undefined){return -1} +else{return parseInt(line_info.split(',')[0])} +case 'tb_lasti': +if(line_info===undefined){return ''} +else{var info=line_info.split(',') +var src=$B.$py_src[info[1]] +if(src!==undefined){return src.split('\n')[parseInt(info[0]-1)].trim()}else{return ''}} +case 'tb_next': +if(self.stack.length==1){return None} +else{return traceback(self.stack.slice(0,self.stack.length-1))} +default: +return $TracebackDict[attr]}} +$TracebackDict.__mro__=[$TracebackDict,_b_.object.$dict] +$TracebackDict.__str__=function(self){return ''} +function traceback(stack){return{__class__ : $TracebackDict,stack : stack}} +traceback.__class__=$B.$factory +traceback.$dict=$TracebackDict +$TracebackDict.$factory=traceback +var $FrameDict={__class__:$B.$type,__name__:'frame'} +$FrameDict.__getattr__=function(self,attr){ +if(attr=='f_back'){if(self.$pos>0){return frame(self.$stack,self.$pos-1)}}} +$FrameDict.__mro__=[$FrameDict,_b_.object.$dict] +function to_dict(obj){var res=_b_.dict() +var setitem=_b_.dict.$dict.__setitem__ +for(var attr in obj){if(attr.charAt(0)=='$'){continue} +setitem(res,attr,obj[attr])} +return res} +function frame(stack,pos){var mod_name=stack[2] +var fs=stack +var res={__class__:$FrameDict,f_builtins :{}, +$stack: stack,} +if(pos===undefined){pos=fs.length-1} +res.$pos=pos +if(fs.length){var _frame=fs[pos] +var locals_id=_frame[0] +try{res.f_locals=$B.obj_dict(_frame[1])}catch(err){console.log('err '+err) +throw err} +res.f_globals=$B.obj_dict(_frame[3]) +if($B.debug>0){if(_frame[1].$line_info===undefined){res.f_lineno=-1} +else{res.f_lineno=parseInt(_frame[1].$line_info.split(',')[0])}}else{res.f_lineno=-1} +res.f_code={__class__:$B.$CodeDict,co_code:None, +co_name: locals_id, +co_filename: _frame[3].__name__ } +if(res.f_code.co_filename===undefined){console.log(_frame[0],_frame[1],_frame[2],_frame[3]);alert('no cofilename')}} +return res} +frame.__class__=$B.$factory +frame.$dict=$FrameDict +$FrameDict.$factory=frame +$B._frame=frame +var $BaseExceptionDict={__class__:$B.$type,__bases__ :[_b_.object],__module__:'builtins',__name__:'BaseException',args:[]} +$BaseExceptionDict.__init__=function(self){var args=arguments[1]===undefined ?[]:[arguments[1]] +self.args=_b_.tuple(args)} +$BaseExceptionDict.__repr__=function(self){return self.__class__.__name__+repr(self.args)} +$BaseExceptionDict.__str__=function(self){return _b_.str(self.args[0])} +$BaseExceptionDict.__mro__=[$BaseExceptionDict,_b_.object.$dict] +$BaseExceptionDict.__new__=function(cls){var err=_b_.BaseException() +err.__name__=cls.$dict.__name__ +err.__class__=cls.$dict +return err} +$BaseExceptionDict.__getattr__=function(self,attr){if(attr=='info'){var name=self.__class__.__name__ +if(name=='SyntaxError' ||name=='IndentationError'){return 'File "'+self.args[1]+'", line '+self.args[2]+'\n '+ +self.args[4]} +var info='Traceback (most recent call last):' +if(self.$js_exc!==undefined){for(var attr in self.$js_exc){if(attr==='message')continue +try{info +='\n '+attr+' : '+self.$js_exc[attr]} +catch(_err){}} +info+='\n'} +for(var i=0;i0 && js_exc.info===undefined){var _frame=$B.last($B.frames_stack) +if(_frame===undefined){_frame=$B.pmframe} +if(_frame && _frame[1].$line_info!==undefined){var line_info=_frame[1].$line_info.split(',') +var mod_name=line_info[1] +var module=$B.modules[mod_name] +if(module){if(module.caller!==undefined){ +var mod_name=line_info[1]} +var lib_module=mod_name +var line_num=parseInt(line_info[0]) +if($B.$py_src[mod_name]===undefined){console.log('pas de py_src pour 
'+mod_name) +console.log(js_exc)} +var lines=$B.$py_src[mod_name].split('\n'),msg=js_exc.message.toString() +msg +="\n module '"+lib_module+"' line "+line_num +msg +='\n'+lines[line_num-1] +js_exc.msg=msg +js_exc.info_in_msg=true}}else{console.log('error ',js_exc)}} +var exc=Error() +exc.__name__='Internal Javascript error: '+(js_exc.__name__ ||js_exc.name) +exc.__class__=_b_.Exception.$dict +exc.$js_exc=js_exc +if(js_exc.name=='ReferenceError'){exc.__name__='NameError' +exc.__class__=_b_.NameError.$dict +js_exc.message=js_exc.message.replace('$$','')}else if(js_exc.name=="InternalError"){exc.__name__='RuntimeError' +exc.__class__=_b_.RuntimeError.$dict} +exc.$message=js_exc.msg ||'<'+js_exc+'>' +exc.args=_b_.tuple([exc.$message]) +exc.info='' +exc.$py_error=true +exc.$stack=$B.frames_stack.slice()}else{var exc=js_exc} +$B.current_exception=exc +return exc} +$B.is_exc=function(exc,exc_list){ +if(exc.__class__===undefined)exc=$B.exception(exc) +var exc_class=exc.__class__.$factory +for(var i=0;ilen){return r.stop} +return $B.add(r.start,$B.mul(r.step,i))} +$RangeDict.__getitem__=function(self,rank){if(_b_.isinstance(rank,_b_.slice)){var norm=_b_.slice.$dict.$conv_for_seq(rank,$RangeDict.__len__(self)),substep=$B.mul(self.step,norm.step),substart=compute_item(self,norm.start),substop=compute_item(self,norm.stop) +return range(substart,substop,substep)} +if(typeof rank !="number"){rank=$B.$GetInt(rank)} +if($B.gt(0,rank)){rank=$B.add(rank,$RangeDict.__len__(self))} +var res=$B.add(self.start,$B.mul(rank,self.step)) +if(($B.gt(self.step,0)&&($B.ge(res,self.stop)||$B.gt(self.start,res)))|| +($B.gt(0,self.step)&&($B.ge(self.stop,res)||$B.gt(res,self.start)))){throw _b_.IndexError('range object index out of range')} +return res } +$RangeDict.__hash__=function(self){var len=$RangeDict.__len__(self) +if(len==0){return _b_.hash(_b_.tuple([0,None,None]))} +if(len==1){return _b_.hash(_b_.tuple([1,self.start,None]))} +return _b_.hash(_b_.tuple([len,self.start,self.step]))} +$RangeIterator=function(obj){return{__class__:$RangeIterator.$dict,obj: obj}} +$RangeIterator.__class__=$B.$factory +$RangeIterator.$dict={__class__: $B.$type,__name__: 'range_iterator',$factory: $RangeIterator,__iter__: function(self){return self},__next__: function(self){return _b_.next(self.obj)}} +$RangeIterator.$dict.__mro__=[$RangeIterator.$dict,_b_.object.$dict] +$RangeDict.__iter__=function(self){var res={__class__ : $RangeDict,start:self.start,stop:self.stop,step:self.step} +if(self.$safe){res.$counter=self.start-self.step}else{res.$counter=$B.sub(self.start,self.step)} +return $RangeIterator(res)} +$RangeDict.__len__=function(self){var len +if($B.gt(self.step,0)){if($B.ge(self.start,self.stop)){return 0} +var n=$B.sub(self.stop,$B.add(1,self.start)),q=$B.floordiv(n,self.step) +len=$B.add(1,q)}else{if($B.ge(self.stop,self.start)){return 0} +var n=$B.sub(self.start,$B.add(1,self.stop)),q=$B.floordiv(n,$B.mul(-1,self.step)) +len=$B.add(1,q)} +if($B.maxsize===undefined){$B.maxsize=$B.LongInt.$dict.__pow__($B.LongInt(2),63) +$B.maxsize=$B.LongInt.$dict.__sub__($B.maxsize,1)} +return len} +$RangeDict.__next__=function(self){if(self.$safe){self.$counter +=self.step +if((self.step>0 && self.$counter >=self.stop) +||(self.step<0 && self.$counter <=self.stop)){throw _b_.StopIteration('')}}else{self.$counter=$B.add(self.$counter,self.step) +if(($B.gt(self.step,0)&& $B.ge(self.$counter,self.stop)) +||($B.gt(0,self.step)&& $B.ge(self.stop,self.$counter))){throw _b_.StopIteration('')}} +return self.$counter} 
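+// Range arithmetic above goes through $B.add/$B.sub/$B.ge so that bounds which are not plain JS numbers (e.g. LongInt values) are still handled; when start, stop and step are all plain numbers the $safe flag lets __next__ use ordinary += and comparisons instead.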
+$RangeDict.__mro__=[$RangeDict,_b_.object.$dict] +$RangeDict.__reversed__=function(self){var n=$B.sub($RangeDict.__len__(self),1) +return range($B.add(self.start,$B.mul(n,self.step)),$B.sub(self.start,self.step),$B.mul(-1,self.step))} +$RangeDict.__repr__=$RangeDict.__str__=function(self){var res='range('+_b_.str(self.start)+', '+_b_.str(self.stop) +if(self.step!=1)res +=', '+_b_.str(self.step) +return res+')'} +$RangeDict.__setattr__=function(self,attr,value){throw _b_.AttributeError('readonly attribute')} +$RangeDict.start=function(self){return self.start} +$RangeDict.step=function(self){return self.step},$RangeDict.stop=function(self){return self.stop} +$RangeDict.count=function(self,ob){if(_b_.isinstance(ob,[_b_.int,_b_.float,_b_.bool])){return _b_.int($RangeDict.__contains__(self,ob))}else{var comp=_b_.getattr(ob,'__eq__'),it=$RangeDict.__iter__(self) +_next=$RangeIterator.$dict.__next__,nb=0 +while(true){try{if(comp(_next(it))){nb++}}catch(err){if(_b_.isinstance(err,_b_.StopIteration)){return nb} +throw err}}}} +$RangeDict.index=function(self,other){var $=$B.args('index',2,{self:null,other:null},['self','other'],arguments,{},null,null),self=$.self,other=$.other +try{other=$B.int_or_bool(other)}catch(err){var comp=_b_.getattr(other,'__eq__'),it=$RangeDict.__iter__(self),_next=$RangeIterator.$dict.__next__,nb=0 +while(true){try{if(comp(_next(it))){return nb} +nb++}catch(err){if(_b_.isinstance(err,_b_.StopIteration)){throw _b_.ValueError(_b_.str(other)+' not in range')} +throw err}}} +var sub=$B.sub(other,self.start),fl=$B.floordiv(sub,self.step),res=$B.mul(self.step,fl) +if($B.eq(res,sub)){if(($B.gt(self.stop,self.start)&& $B.ge(other,self.start) +&& $B.gt(self.stop,other))|| +($B.ge(self.start,self.stop)&& $B.ge(self.start,other) +&& $B.gt(other,self.stop))){return fl}else{throw _b_.ValueError(_b_.str(other)+' not in range')}}else{throw _b_.ValueError(_b_.str(other)+' not in range')}} +function range(){var $=$B.args('range',3,{start:null,stop:null,step:null},['start','stop','step'],arguments,{stop:null,step:null},null,null),start=$.start,stop=$.stop,step=$.step,safe +if(stop===null && step===null){stop=$B.PyNumber_Index(start) +safe=typeof stop==="number" +return{__class__:$RangeDict,start: 0,stop: stop,step: 1,$is_range: true,$safe: safe}} +if(step===null){step=1} +start=$B.PyNumber_Index(start) +stop=$B.PyNumber_Index(stop) +step=$B.PyNumber_Index(step) +if(step==0){throw _b_.ValueError("range() arg 3 must not be zero")} +safe=(typeof start=='number' && typeof stop=='number' && +typeof step=='number') +return{__class__: $RangeDict,start: start,stop: stop,step: step,$is_range: true,$safe: safe}} +range.__class__=$B.$factory +range.$dict=$RangeDict +$RangeDict.$factory=range +range.$is_func=true +var $SliceDict={__class__:$B.$type, __name__:'slice', $native:true, descriptors:{start:true,step:true,stop:true}} +$SliceDict.__mro__=[$SliceDict,_b_.object.$dict] +$SliceDict.__repr__=$SliceDict.__str__=function(self){return 'slice('+_b_.str(self.start)+','+ +_b_.str(self.stop)+','+_b_.str(self.step)+')'} +$SliceDict.__setattr__=function(self,attr,value){throw _b_.AttributeError('readonly attribute')} +$SliceDict.$conv=function(self,len){ +return{start: self.start===_b_.None ? 0 : self.start,stop: self.stop===_b_.None ? len : self.stop,step: self.step===_b_.None ? 1 : self.step}} +$SliceDict.$conv_for_seq=function(self,len){ +var step=self.step===None ? 
1 : $B.PyNumber_Index(self.step),step_is_neg=$B.gt(0,step),len_1=$B.sub(len,1) +if(step==0){throw Error('ValueError : slice step cannot be zero');} +var start,end; +if(self.start===None){start=step_is_neg ? len_1 : 0;}else{ +start=$B.PyNumber_Index(self.start); +if($B.gt(0,start))start=$B.add(start,len); +if($B.gt(0,start))start=step<0 ? -1 : 0 +if($B.ge(start,len))start=step<0 ? len_1 : len;} +if(self.stop===None){stop=step_is_neg ? -1 : len;}else{ +stop=$B.PyNumber_Index(self.stop); +if($B.gt(0,stop))stop +=len +if($B.gt(0,stop))stop=step<0 ? -1 : 0 +if($B.ge(stop,len))stop=step_is_neg ? len_1 : len;} +return{start: start,stop: stop,step: step}} +$SliceDict.start=function(self){return self.start} +$SliceDict.step=function(self){return self.step} +$SliceDict.stop=function(self){return self.stop} +$SliceDict.indices=function(self,length){var len=$B.$GetInt(length) +if(len < 0)_b_.ValueError('length should not be negative') +if(self.step > 0){var _len=_b_.min(len,self.stop) +return _b_.tuple([self.start,_len,self.step])}else if(self.step==_b_.None){var _len=_b_.min(len,self.stop) +var _start=self.start +if(_start==_b_.None)_start=0 +return _b_.tuple([_start,_len,1])} +_b_.NotImplementedError("Error! negative step indices not implemented yet")} +function slice(){var $=$B.args('slice',3,{start:null,stop:null,step:null},['start','stop','step'],arguments,{stop:null,step:null},null,null),start,stop,step +if($.stop===null && $.step===null){start=_b_.None +stop=$.start +step=_b_.None}else{start=$.start +stop=$.stop +step=$.step===null ? _b_.None : $.step} +var res={__class__ : $SliceDict,start:start,stop:stop,step:step} +return res} +slice.__class__=$B.$factory +slice.$dict=$SliceDict +$SliceDict.$factory=slice +slice.$is_func=true +_b_.range=range +_b_.slice=slice})(__BRYTHON__) +;(function($B){var _b_=$B.builtins +var $ObjectDict=_b_.object.$dict +var isinstance=_b_.isinstance,getattr=_b_.getattr,None=_b_.None +var from_unicode={},to_unicode={} +var $BytearrayDict={__class__:$B.$type,__name__:'bytearray'} +var mutable_methods=['__delitem__','clear','copy','count','index','pop','remove','reverse','sort'] +for(var i=0,_len_i=mutable_methods.length;i < _len_i;i++){var method=mutable_methods[i] +$BytearrayDict[method]=(function(m){return function(self){var args=[self.source],pos=1 +for(var i=1,_len_i=arguments.length;i < _len_i;i++)args[pos++]=arguments[i] +return _b_.list.$dict[m].apply(null,args)}})(method)} +var $bytearray_iterator=$B.$iterator_class('bytearray_iterator') +$BytearrayDict.__iter__=function(self){return $B.$iterator(self.source,$bytearray_iterator)} +$BytearrayDict.__mro__=[$BytearrayDict,$ObjectDict] +$BytearrayDict.__repr__=$BytearrayDict.__str__=function(self){return 'bytearray('+$BytesDict.__repr__(self)+")"} +$BytearrayDict.__setitem__=function(self,arg,value){if(isinstance(arg,_b_.int)){if(!isinstance(value,_b_.int)){throw _b_.TypeError('an integer is required')}else if(value>255){throw _b_.ValueError("byte must be in range(0, 256)")} +var pos=arg +if(arg<0)pos=self.source.length+pos +if(pos>=0 && pos=0;i--){if(!isinstance($temp[i],_b_.int)){throw _b_.TypeError('an integer is required')}else if($temp[i]>255){throw ValueError("byte must be in range(0, 256)")} +self.source.splice(start,0,$temp[i])}}else{throw _b_.TypeError("can only assign an iterable")}}else{ +throw _b_.TypeError('list indices must be integer, not '+$B.get_class(arg).__name__)}} +$BytearrayDict.append=function(self,b){if(arguments.length!=2){throw _b_.TypeError( +"append takes exactly one argument 
("+(arguments.length-1)+" given)")} +if(!isinstance(b,_b_.int))throw _b_.TypeError("an integer is required") +if(b>255)throw ValueError("byte must be in range(0, 256)") +self.source[self.source.length]=b} +$BytearrayDict.insert=function(self,pos,b){if(arguments.length!=3){throw _b_.TypeError( +"insert takes exactly 2 arguments ("+(arguments.length-1)+" given)")} +if(!isinstance(b,_b_.int))throw _b_.TypeError("an integer is required") +if(b>255)throw ValueError("byte must be in range(0, 256)") +_b_.list.$dict.insert(self.source,pos,b)} +function bytearray(source,encoding,errors){var _bytes=bytes(source,encoding,errors) +var obj={__class__:$BytearrayDict} +$BytearrayDict.__init__(obj,source,encoding,errors) +return obj} +bytearray.__class__=$B.$factory +bytearray.$dict=$BytearrayDict +$BytearrayDict.$factory=bytearray +bytearray.__code__={} +bytearray.__code__.co_argcount=1 +bytearray.__code__.co_consts=[] +bytearray.__code__.co_varnames=['i'] +var $BytesDict={__class__ : $B.$type,__name__ : 'bytes'} +$BytesDict.__add__=function(self,other){if(!isinstance(other,bytes)){throw _b_.TypeError("can't concat bytes to " + _b_.str(other))} +self.source=self.source.concat(other.source) +return self} +var $bytes_iterator=$B.$iterator_class('bytes_iterator') +$BytesDict.__iter__=function(self){return $B.$iterator(self.source,$bytes_iterator)} +$BytesDict.__eq__=function(self,other){return getattr(self.source,'__eq__')(other.source)} +$BytesDict.__ge__=function(self,other){return _b_.list.$dict.__ge__(self.source,other.source)} +$BytesDict.__getitem__=function(self,arg){var i +if(isinstance(arg,_b_.int)){var pos=arg +if(arg<0)pos=self.source.length+pos +if(pos>=0 && pos0){var start=arg.start===None ? 0 : arg.start +var stop=arg.stop===None ? getattr(self.source,'__len__')(): arg.stop}else{var start=arg.start===None ? +getattr(self.source,'__len__')()-1 : arg.start +var stop=arg.stop===None ? 0 : arg.stop} +if(start<0)start=self.source.length+start +if(stop<0)stop=self.source.length+stop +var res=[],i=null,pos=0 +if(step>0){if(stop<=start)return '' +for(i=start;i=start)return '' +for(i=start;i>=stop;i+=step)res[pos++]=self.source[i]} +return bytes(res)}else if(isinstance(arg,bool)){return self.source.__getitem__(_b_.int(arg))}} +$BytesDict.__gt__=function(self,other){return _b_.list.$dict.__gt__(self.source,other.source)} +$BytesDict.__hash__=function(self){if(self===undefined){return $BytesDict.__hashvalue__ ||$B.$py_next_hash-- } +var hash=1; +for(var i=0,_len_i=self.length;i < _len_i;i++){hash=(101*hash + self.source[i])& 0xFFFFFFFF} +return hash} +$BytesDict.__init__=function(self,source,encoding,errors){var int_list=[],pos=0 +if(source===undefined){}else if(isinstance(source,_b_.int)){var i=source +while(i--)int_list[pos++]=0}else{if(isinstance(source,_b_.str)){if(encoding===undefined) +throw _b_.TypeError("string argument without an encoding") +int_list=encode(source,encoding)}else{ +int_list=_b_.list(source)}} +self.source=int_list +self.encoding=encoding +self.errors=errors} +$BytesDict.__le__=function(self,other){return _b_.list.$dict.__le__(self.source,other.source)} +$BytesDict.__len__=function(self){return self.source.length} +$BytesDict.__lt__=function(self,other){return _b_.list.$dict.__lt__(self.source,other.source)} +$BytesDict.__mro__=[$BytesDict,$ObjectDict] +$BytesDict.__mul__=function(){var $=$B.args('__mul__',2,{self:null,other:null},['self','other'],arguments,{},null,null),other=$B.PyNumber_Index($.other),res=bytes() +for(var i=0;i=128){var hx=s.toString(16) +hx=(hx.length==1 ? 
'0' : '')+ hx +res +='\\x'+hx}else{res +=String.fromCharCode(s)}} +return res+"'"} +$BytesDict.__reduce_ex__=function(self){return $BytesDict.__repr__(self)} +$BytesDict.decode=function(self,encoding,errors){if(encoding===undefined)encoding='utf-8' +if(errors===undefined)errors='strict' +switch(errors){case 'strict': +case 'ignore': +case 'replace': +case 'surrogateescape': +case 'xmlcharrefreplace': +case 'backslashreplace': +return decode(self.source,encoding,errors) +default:}} +$BytesDict.join=function(){var $ns=$B.args('join',2,{self:null,iterable:null},['self','iterable'],arguments,{}),self=$ns['self'],iterable=$ns['iterable'] +var next_func=_b_.getattr(_b_.iter(iterable),'__next__'),res=bytes(),empty=true +while(true){try{var item=next_func() +if(empty){empty=false} +else{res=$BytesDict.__add__(res,self)} +res=$BytesDict.__add__(res,item)}catch(err){if(isinstance(err,_b_.StopIteration)){break} +throw err}} +return res} +$BytesDict.maketrans=function(from,to){var _t=[] +for(var i=0;i < 256;i++)_t[i]=i +for(var i=0,_len_i=from.source.length;i < _len_i;i++){var _ndx=from.source[i] +_t[_ndx]=to.source[i]} +return bytes(_t)} +$BytesDict.split=function(){var $=$B.args('split',2,{self:null,sep:null},['self','sep'],arguments,{},null,null),res=[],start=0,stop=0 +var seps=$.sep.source,len=seps.length,src=$.self.source,blen=src.length +while(stopstart)){res.push(bytes(src.slice(start,stop)))} +return res} +function _strip(self,cars,lr){if(cars===undefined){cars=[],pos=0 +var ws='\r\n \t' +for(var i=0,_len_i=ws.length;i < _len_i;i++)cars[pos++]=ws.charCodeAt(i)}else if(isinstance(cars,bytes)){cars=cars.source}else{throw _b_.TypeError("Type str doesn't support the buffer API")} +if(lr=='l'){for(var i=0,_len_i=self.source.length;i < _len_i;i++){if(cars.indexOf(self.source[i])==-1)break} +return bytes(self.source.slice(i))} +for(var i=self.source.length-1;i>=0;i--){if(cars.indexOf(self.source[i])==-1)break} +return bytes(self.source.slice(0,i+1))} +$BytesDict.lstrip=function(self,cars){return _strip(self,cars,'l')} +$BytesDict.rstrip=function(self,cars){return _strip(self,cars,'r')} +$BytesDict.startswith=function(){var $=$B.args('startswith',2,{self: null,start: null},['self','start'],arguments,{},null,null) +if(_b_.isinstance($.start,bytes)){var res=true +for(var i=0;i<$.start.source.length && res;i++){res=$.self.source[i]==$.start.source[i]} +return res}else if(_b_.isinstance($.start,_b_.tuple)){var items=[] +for(var i=0;i<$.start.length;i++){if(_b_.isinstance($.start[i],bytes)){items=items.concat($.start[i].source)}else{throw _b_.TypeError("startswith first arg must be bytes or "+ +"a tuple of bytes, not "+$B.get_class($.start).__name__)}} +var start=bytes(items) +return $BytesDict.startswith($.self,start)}else{throw _b_.TypeError("startswith first arg must be bytes or a tuple of bytes, not "+ +$B.get_class($.start).__name__)}} +$BytesDict.strip=function(self,cars){var res=$BytesDict.lstrip(self,cars) +return $BytesDict.rstrip(res,cars)} +$BytesDict.translate=function(self,table,_delete){if(_delete===undefined){_delete=[]} +else if(isinstance(_delete,bytes)){_delete=_delete.source} +else{throw _b_.TypeError("Type "+$B.get_class(_delete).__name+" doesn't support the buffer API")} +var res=[],pos=0 +if(isinstance(table,bytes)&& table.source.length==256){for(var i=0,_len_i=self.source.length;i < _len_i;i++){if(_delete.indexOf(self.source[i])>-1)continue +res[pos++]=table.source[self.source[i]]}} +return bytes(res)} +$BytesDict.upper=function(self){var _res=[],pos=0 +for(var 
i=0,_len_i=self.source.length;i < _len_i;i++)_res[pos++]=self.source[i].toUpperCase() +return bytes(_res)} +function $UnicodeEncodeError(encoding,code_point,position){throw _b_.UnicodeEncodeError("'"+encoding+ +"' codec can't encode character "+_b_.hex(code_point)+ +" in position "+position)} +function $UnicodeDecodeError(encoding,position){throw _b_.UnicodeDecodeError("'"+encoding+ +"' codec can't decode bytes in position "+position)} +function _hex(int){return int.toString(16)} +function _int(hex){return parseInt(hex,16)} +function normalise(encoding){var enc=encoding.toLowerCase() +if(enc.substr(0,7)=='windows'){enc='cp'+enc.substr(7)} +enc=enc.replace('-','') +enc=enc.replace('-','_') +return enc} +function load_decoder(enc){ +if(to_unicode[enc]===undefined){load_encoder(enc) +to_unicode[enc]={} +for(var attr in from_unicode[enc]){to_unicode[enc][from_unicode[enc][attr]]=attr}}} +function load_encoder(enc){ +if(from_unicode[enc]===undefined){var mod=_b_.__import__('encodings.'+enc),table=mod[enc].decoding_table +from_unicode[enc]={} +for(var i=0;i-1){ +return pyobj.elt}else if([_b_.list.$dict,_b_.tuple.$dict].indexOf(klass)>-1){ +var res=[] +for(var i=0,_len_i=pyobj.length;i < _len_i;i++){res.push(pyobj2jsobj(pyobj[i]))} +return res}else if(klass===_b_.dict.$dict){ +var jsobj={} +var items=_b_.list(_b_.dict.$dict.items(pyobj)) +for(var j=0,_len_j=items.length;j < _len_j;j++){if(typeof items[j][1]=='function'){ +items[j][1].bind(jsobj)} +jsobj[items[j][0]]=pyobj2jsobj(items[j][1])} +return jsobj}else if(klass===$B.builtins.float.$dict){ +return pyobj.valueOf()}else if(klass===$B.$FunctionDict){ +return function(){try{var args=[] +for(var i=0;i'} +res.__str__=function(){return ''} +res.prototype=js_attr.prototype +return{__class__:$JSObjectDict,js:res,js_func:js_attr}}else{if(Array.isArray(self.js[attr])){return self.js[attr]} +return $B.$JS2Py(self.js[attr])}}else if(self.js===window && attr==='$$location'){ +return $Location()} +var res +var mro=self.__class__.__mro__ +for(var i=0,_len_i=mro.length;i < _len_i;i++){var v=mro[i][attr] +if(v!==undefined){res=v +break}} +if(res!==undefined){if(typeof res==='function'){ +return function(){var args=[self] +for(var i=0,_len_i=arguments.length;i < _len_i;i++){arg=arguments[i] +if(arg &&(arg.__class__===$JSObjectDict ||arg.__class__===$JSConstructorDict)){args.push(arg.js)}else{args.push(arg)}} +return res.apply(self,args)}} +return $B.$JS2Py(res)}else{ +throw _b_.AttributeError("no attribute "+attr+' for '+self.js)}} +$JSObjectDict.__getitem__=function(self,rank){if(typeof self.js.length=='number' && +typeof self.js.item=='function'){var rank_to_int=_b_.int(rank) +if(rank_to_int<0){rank_to_int+=self.js.length} +var res=self.js.item(rank_to_int) +if(res===undefined){throw _b_.KeyError(rank)} +return res} +try{return getattr(self.js,'__getitem__')(rank)} +catch(err){if(self.js[rank]!==undefined){return JSObject(self.js[rank])} +throw _b_.KeyError(rank)}} +var $JSObject_iterator=$B.$iterator_class('JS object iterator') +$JSObjectDict.__iter__=function(self){var items=[] +if(window.Symbol && self.js[Symbol.iterator]!==undefined){ +if(self.js.length!==undefined && self.js.item!==undefined){for(var i=0;i"} +$JSObjectDict.__setattr__=function(self,attr,value){if(isinstance(value,JSObject)){self.js[attr]=value.js} +else{self.js[attr]=value +if(typeof value=='function'){self.js[attr]=function(){var args=[] +for(var i=0,len=arguments.length;i'} +$B.$ModuleDict.__mro__=[$B.$ModuleDict,_b_.object.$dict] 
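+// __setattr__ below special-cases the __builtins__ module: attributes set on it are written into $B.builtins rather than onto the module object itself.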
+$B.$ModuleDict.__setattr__=function(self,attr,value){if(self.__name__=='__builtins__'){ +$B.builtins[attr]=value}else{self[attr]=value}} +function module(name,doc,package){return{__class__:$B.$ModuleDict,__name__:name,__doc__:doc||_b_.None,__package__:package||_b_.None}} +module.__class__=$B.$factory +module.$dict=$B.$ModuleDict +$B.$ModuleDict.$factory=module +var loader=function(){} +var Loader={__class__:$B.$type,__name__ : 'Loader'} +Loader.__mro__=[Loader,_b_.object.$dict] +Loader.$factory=loader +loader.$dict=Loader +loader.__class__=$B.$factory +function parent_package(mod_name){var parts=mod_name.split('.'); +parts.pop(); +return parts.join('.');} +function $importer(){ +var $xmlhttp=new XMLHttpRequest(); +var fake_qs; +switch($B.$options.cache){case 'version': +fake_qs="?v="+$B.version_info[2] +break; +case 'browser': +fake_qs='' +break; +default: +fake_qs="?v="+(new Date().getTime())} +var timer=setTimeout(function(){$xmlhttp.abort() +throw _b_.ImportError("No module named '"+module+"'")},5000) +return[$xmlhttp,fake_qs,timer]} +function $download_module(module,url,package,blocking){var imp=$importer(),$xmlhttp=imp[0],fake_qs=imp[1],timer=imp[2],res=null,mod_name=module.__name__,no_block=Array.isArray(blocking)||blocking===false +if(no_block){console.log('download non blocking',mod_name) +$xmlhttp.open('GET',url+fake_qs,true)}else{$xmlhttp.open('GET',url+fake_qs,false)} +if($B.$CORS){$xmlhttp.onload=function(){if($xmlhttp.status==200 ||$xmlhttp.status==0){res=$xmlhttp.responseText}else{ +res=_b_.FileNotFoundError("No module named '"+mod_name+"'")}} +$xmlhttp.onerror=function(){res=_b_.FileNotFoundError("No module named '"+mod_name+"'")}}else{ +$xmlhttp.onreadystatechange=function(){if($xmlhttp.readyState==4){window.clearTimeout(timer) +if($xmlhttp.status==200 ||$xmlhttp.status==0){res=$xmlhttp.responseText +module.$last_modified=$xmlhttp.getResponseHeader('Last-Modified') +if(no_block){var ext=url.substr(url.length-2) +if(ext=='py'){try{import_py1(module,mod_name,url,package,res)} +catch(err){console.log(err);throw err}}else if(ext=='js'){try{run_js(res,url,module)} +catch(err){console.log(err);throw err}} +console.log('non blocking ok',mod_name) +blocking[1]() +return}}else{ +console.log('Error '+$xmlhttp.status+ +' means that Python module '+mod_name+ +' was not found at url '+url) +res=_b_.FileNotFoundError("No module named '"+mod_name+"'")}}}} +if('overrideMimeType' in $xmlhttp){$xmlhttp.overrideMimeType("text/plain")} +$xmlhttp.send() +if(!no_block){ +if(res==null)throw _b_.FileNotFoundError("No module named '"+mod_name+"' (res is null)") +if(res.constructor===Error){throw res} +return res}} +$B.$download_module=$download_module +function import_js(module,path,blocking){try{var module_contents=$download_module(module,path,undefined,blocking) +if(Array.isArray(blocking)){return}}catch(err){return null} +run_js(module_contents,path,module) +return true} +function run_js(module_contents,path,module){ +try{eval(module_contents);}catch(err){console.log(err) +throw err} +try{$module} +catch(err){console.log('no $module') +throw _b_.ImportError("name '$module' is not defined in module")} +if(module !==undefined){ +for(var attr in $module){module[attr]=$module[attr];} +$module=module;} +else{ +$module.__class__=$B.$ModuleDict +$module.__name__=module.name +$module.__repr__=$module.__str__=function(){if($B.builtin_module_names.indexOf(module.name)> -1){return ""} +return ""} +$module.toString=function(){return ""} +if(module.name !='builtins'){ +$module.__file__=path}} 
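+// Store the freshly executed $module in Brython's module cache ($B.imported) so later imports of the same name reuse it.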
+$B.imported[module.__name__]=$module +return true} +function show_ns(){var kk=Object.keys(window) +for(var i=0,_len_i=kk.length;i < _len_i;i++){console.log(kk[i]) +if(kk[i].charAt(0)=='$'){console.log(eval(kk[i]))}} +console.log('---')} +function import_py1(module,mod_name,path,package,module_contents){console.log('importpy1',mod_name) +$B.imported[mod_name].$is_package=module.$is_package +$B.imported[mod_name].$last_modified=module.$last_modified +if(path.substr(path.length-12)=='/__init__.py'){ +$B.imported[mod_name].__package__=mod_name +$B.imported[mod_name].__path__=path +$B.imported[mod_name].$is_package=module.$is_package=true}else if(package){$B.imported[mod_name].__package__=package}else{var mod_elts=mod_name.split('.') +mod_elts.pop() +$B.imported[mod_name].__package__=mod_elts.join('.')} +$B.imported[mod_name].__file__=path +return run_py(module_contents,path,module)} +function import_py(module,path,package,blocking){ +var mod_name=module.__name__,module_contents=$download_module(module,path,package,blocking) +if(Array.isArray(blocking)){return} +$B.imported[mod_name].$is_package=module.$is_package +$B.imported[mod_name].$last_modified=module.$last_modified +if(path.substr(path.length-12)=='/__init__.py'){ +$B.imported[mod_name].__package__=mod_name +$B.imported[mod_name].__path__=path +$B.imported[mod_name].$is_package=module.$is_package=true}else if(package){$B.imported[mod_name].__package__=package}else{var mod_elts=mod_name.split('.') +mod_elts.pop() +$B.imported[mod_name].__package__=mod_elts.join('.')} +$B.imported[mod_name].__file__=path +return run_py(module_contents,path,module)} +$B.run_py=run_py=function(module_contents,path,module,compiled){if(!compiled){var $Node=$B.$Node,$NodeJSCtx=$B.$NodeJSCtx +$B.$py_module_path[module.__name__]=path +var root=$B.py2js(module_contents,module.__name__,module.__name__,'__builtins__') +var body=root.children +root.children=[] +var mod_node=new $Node('expression') +new $NodeJSCtx(mod_node,'var $module=(function()') +root.insert(0,mod_node) +for(var i=0,_len_i=body.length;i < _len_i;i++){mod_node.add(body[i])} +var ret_node=new $Node('expression') +new $NodeJSCtx(ret_node,'return $locals_'+module.__name__.replace(/\./g,'_')) +mod_node.add(ret_node) +var ex_node=new $Node('expression') +new $NodeJSCtx(ex_node,')(__BRYTHON__)') +root.add(ex_node)} +try{var js=(compiled)? 
module_contents : root.to_js() +if($B.$options.debug==10){console.log('code for module '+module.__name__) +console.log(js)} +eval(js)}catch(err){ +throw err} +try{ +var mod=eval('$module') +for(var attr in mod){module[attr]=mod[attr];} +module.__initializing__=false +$B.imported[module.__name__]=module +return true}catch(err){console.log(''+err+' '+' for module '+module.name) +for(var attr in err)console.log(attr+' '+err[attr]) +if($B.debug>0){console.log('line info '+__BRYTHON__.line_info)} +throw err}finally{}} +function new_spec(fields){ +fields.__class__=$B.$ModuleDict +return fields;} +function finder_VFS(){return{__class__:finder_VFS.$dict}} +finder_VFS.__class__=$B.$factory +finder_VFS.$dict={$factory: finder_VFS,__class__: $B.$type,__name__: 'VFSFinder',create_module : function(cls,spec){ +return _b_.None;},exec_module : function(cls,module){var stored=module.__spec__.loader_state.stored; +delete module.__spec__['loader_state']; +var ext=stored[0],module_contents=stored[1]; +module.$is_package=stored[2]; +var path=$B.brython_path+'Lib/'+module.__name__ +if(module.$is_package){path +='/__init__.py'} +module.__file__=path +if(ext=='.js'){run_js(module_contents,module.__path__,module)} +else{run_py(module_contents,module.__path__,module,ext=='.pyc.js')} +if($B.debug>1){console.log('import '+module.__name__+' from VFS')}},find_module: function(cls,name,path){return{__class__:Loader,load_module:function(name,path){var spec=cls.$dict.find_spec(cls,name,path) +var mod=module(name) +$B.imported[name]=mod +mod.__spec__=spec +cls.$dict.exec_module(cls,mod)}}},find_spec : function(cls,fullname,path,prev_module){if(!$B.use_VFS){return _b_.None;} +var stored=$B.VFS[fullname]; +if(stored===undefined){return _b_.None;} +var is_package=stored[2],is_builtin=$B.builtin_module_names.indexOf(fullname)> -1; +return new_spec({name : fullname,loader: cls, +origin : is_builtin? 'built-in' : 'py_VFS', +submodule_search_locations: is_package?[]: _b_.None,loader_state:{stored: stored}, +cached: _b_.None,parent: is_package? 
fullname : parent_package(fullname),has_location: _b_.False});}} +finder_VFS.$dict.__mro__=[finder_VFS.$dict,_b_.object.$dict] +finder_VFS.$dict.create_module.$type='classmethod' +finder_VFS.$dict.exec_module.$type='classmethod' +finder_VFS.$dict.find_module.$type='classmethod' +finder_VFS.$dict.find_spec.$type='classmethod' +function finder_stdlib_static(){return{__class__:finder_stdlib_static.$dict}} +finder_stdlib_static.__class__=$B.$factory +finder_stdlib_static.$dict={$factory : finder_stdlib_static,__class__ : $B.$type,__name__ : 'StdlibStatic',create_module : function(cls,spec){ +return _b_.None;},exec_module : function(cls,module,blocking){var metadata=module.__spec__.loader_state; +module.$is_package=metadata.is_package; +if(metadata.ext=='py'){import_py(module,metadata.path,module.__package__,blocking);} +else{ +import_js(module,metadata.path,blocking);} +delete module.__spec__['loader_state'];},find_module: function(cls,name,path){var spec=cls.$dict.find_spec(cls,name,path) +if(spec===_b_.None){return _b_.None} +return{__class__:Loader,load_module:function(name,path){var mod=module(name) +$B.imported[name]=mod +mod.__spec__=spec +mod.__package__=spec.parent +cls.$dict.exec_module(cls,mod,spec.blocking)}}},find_spec: function(cls,fullname,path,prev_module){if($B.stdlib){var address=$B.stdlib[fullname]; +if(address===undefined){var elts=fullname.split('.') +if(elts.length>1){var mod_name=elts.pop() +var package=$B.stdlib[elts.join('.')] +if(package && package[1]){address=['py']}}} +if(address !==undefined){var ext=address[0],is_pkg=address[1]!==undefined,path=$B.brython_path +((ext=='py')? 'Lib/' : 'libs/')+ +fullname.replace(/\./g,'/'),metadata={ext: ext,is_package: is_pkg,path: path +(is_pkg? '/__init__.py' : +((ext=='py')? '.py' : '.js')),address: address} +var res=new_spec( +{name : fullname,loader: cls, +origin : metadata.path,submodule_search_locations: is_pkg?[path]: _b_.None,loader_state: metadata, +cached: _b_.None,parent: is_pkg? 
fullname : +parent_package(fullname),has_location: _b_.True}); +return res}} +return _b_.None;}} +finder_stdlib_static.$dict.__mro__=[finder_stdlib_static.$dict,_b_.object.$dict] +finder_stdlib_static.$dict.create_module.$type='classmethod' +finder_stdlib_static.$dict.exec_module.$type='classmethod' +finder_stdlib_static.$dict.find_module.$type='classmethod' +finder_stdlib_static.$dict.find_spec.$type='classmethod' +function finder_path(){return{__class__:finder_path.$dict}} +finder_path.__class__=$B.$factory +finder_path.$dict={$factory: finder_path,__class__: $B.$type,__name__: 'ImporterPath',create_module : function(cls,spec){ +return _b_.None;},exec_module : function(cls,module){var _spec=_b_.getattr(module,'__spec__'),code=_spec.loader_state.code; +module.$is_package=_spec.loader_state.is_package,delete _spec.loader_state['code']; +var src_type=_spec.loader_state.type +if(src_type=='py' ||src_type=='pyc.js'){run_py(code,_spec.origin,module,src_type=='pyc.js');} +else if(_spec.loader_state.type=='js'){run_js(code,_spec.origin,module)}},find_module: function(cls,name,path){return finder_path.$dict.find_spec(cls,name,path)},find_spec : function(cls,fullname,path,prev_module){if(is_none(path)){ +path=$B.path} +for(var i=0,li=path.length;i'},self.path)} +catch(e){self.vfs=undefined; +throw new _b_.ImportError(e.$message ||e.message);} +eval(code); +try{ +self.vfs=$vfs;} +catch(e){throw new _b_.ImportError('Expecting $vfs var in VFS file');} +$B.path_importer_cache[self.path + '/']=self;},find_spec: function(self,fullname,module){if(self.vfs===undefined){try{vfs_hook.$dict.load_vfs(self)} +catch(e){console.log("Could not load VFS while importing '" + fullname + "'"); +return _b_.None;}} +var stored=self.vfs[fullname]; +if(stored===undefined){return _b_.None;} +var is_package=stored[2]; +return new_spec({name : fullname,loader: finder_VFS, +origin : self.path + '#' + fullname, +submodule_search_locations: is_package?[self.path]: +_b_.None,loader_state:{stored: stored}, +cached: _b_.None,parent: is_package? 
fullname : parent_package(fullname),has_location: _b_.True});},invalidate_caches: function(self){self.vfs=undefined;}} +vfs_hook.$dict.__mro__=[vfs_hook.$dict,_b_.object.$dict] +function url_hook(path_entry,hint){return{__class__: url_hook.$dict,path_entry:path_entry,hint:hint }} +url_hook.__class__=$B.$factory +url_hook.$dict={$factory: url_hook,__class__: $B.$type,__name__ : 'UrlPathFinder',__repr__: function(self){return ''},find_spec : function(self,fullname,module){var loader_data={},notfound=true,hint=self.hint,base_path=self.path_entry + fullname.match(/[^.]+$/g)[0],modpaths=[]; +var tryall=hint===undefined; +if(tryall ||hint=='js'){ +modpaths=[[base_path + '.js','js',false]];} +if(tryall ||hint=='pyc.js'){ +modpaths=modpaths.concat([[base_path + '.pyc.js','pyc.js',false],[base_path + '/__init__.pyc.js','pyc.js',true]]);} +if(tryall ||hint=='py'){ +modpaths=modpaths.concat([[base_path + '.py','py',false],[base_path + '/__init__.py','py',true]]);} +for(var j=0;notfound && j < modpaths.length;++j){try{var file_info=modpaths[j]; +loader_data.code=$download_module({__name__:fullname},file_info[0]); +notfound=false; +loader_data.type=file_info[1]; +loader_data.is_package=file_info[2]; +if(hint===undefined){self.hint=file_info[1]; +$B.path_importer_cache[self.path_entry]=self;} +if(loader_data.is_package){ +$B.path_importer_cache[base_path + '/']= +url_hook(base_path + '/',self.hint);} +loader_data.path=file_info[0];}catch(err){}} +if(!notfound){return new_spec({name : fullname,loader: finder_path,origin : loader_data.path, +submodule_search_locations: loader_data.is_package?[base_path]: +_b_.None,loader_state: loader_data, +cached: _b_.None,parent: loader_data.is_package? fullname : +parent_package(fullname),has_location: _b_.True});} +return _b_.None;},invalidate_caches : function(self){}} +url_hook.$dict.__mro__=[url_hook.$dict,_b_.object.$dict] +$B.$path_hooks=[vfs_hook,url_hook]; +$B.path_importer_cache={}; +var _sys_paths=[[$B.script_dir + '/','py'],[$B.brython_path + 'Lib/','py'],[$B.brython_path + 'Lib/site-packages/','py'],[$B.brython_path + 'libs/','js']]; +for(i=0;i < _sys_paths.length;++i){var _path=_sys_paths[i],_type=_path[1]; +_path=_path[0]; +$B.path_importer_cache[_path]=url_hook(_path,_type);} +delete _path; +delete _type; +delete _sys_paths; +$B.is_none=function(o){return o===undefined ||o==_b_.None;} +$B.$__import__=function(mod_name,globals,locals,fromlist,level,blocking){ +var modobj=$B.imported[mod_name],parsed_name=mod_name.split('.'); +if(modobj==_b_.None){ +throw _b_.ImportError(mod_name)} +if(modobj===undefined){ +if(is_none(fromlist)){fromlist=[];} +for(var i=0,modsep='',_mod_name='',len=parsed_name.length - 1,__path__=_b_.None;i <=len;++i){var _parent_name=_mod_name; +_mod_name +=modsep + parsed_name[i]; +modsep='.'; +var modobj=$B.imported[_mod_name]; +if(modobj==_b_.None){ +throw _b_.ImportError(_mod_name)} +else if(modobj===undefined){try{$B.import_hooks(_mod_name,__path__,undefined,blocking)} +catch(err){delete $B.imported[_mod_name] +throw err} +if(is_none($B.imported[_mod_name])){throw _b_.ImportError(_mod_name)} +else{ +if(_parent_name){_b_.setattr($B.imported[_parent_name],parsed_name[i],$B.imported[_mod_name]);}}} +if(i < len){try{__path__=_b_.getattr($B.imported[_mod_name],'__path__')} +catch(e){ +if(i==len-1 && $B.imported[_mod_name][parsed_name[len]]&& +$B.imported[_mod_name][parsed_name[len]].__class__===$B.$ModuleDict){return $B.imported[_mod_name][parsed_name[len]]} +throw _b_.ImportError(_mod_name)}}}} +else if(Array.isArray(blocking)){var 
frames=$B.frames_stack +for(var i=0;i"} +$B.imported['_importlib']=$B.modules['_importlib']=_importlib_module})(__BRYTHON__) +;(function($B){eval($B.InjectBuiltins()) +var $ObjectDict=_b_.object.$dict +function $err(op,other){var msg="unsupported operand type(s) for "+op +msg +=": 'float' and '"+$.get_class(other).__name__+"'" +throw _b_.TypeError(msg)} +var $FloatDict={__class__:$B.$type,__dir__:$ObjectDict.__dir__,__name__:'float',$native:true} +$FloatDict.as_integer_ratio=function(self){if(self.valueOf()==Number.POSITIVE_INFINITY || +self.valueOf()==Number.NEGATIVE_INFINITY){throw _b_.OverflowError("Cannot pass infinity to float.as_integer_ratio.")} +if(!Number.isFinite(self.valueOf())){throw _b_.ValueError("Cannot pass NaN to float.as_integer_ratio.")} +var tmp=_b_.$frexp(self.valueOf()) +var fp=tmp[0] +var exponent=tmp[1] +for(var i=0;i < 300;i++){if(fp==Math.floor(fp)){break}else{ +fp *=2 +exponent--}} +numerator=float(fp) +py_exponent=abs(exponent) +denominator=1 +py_exponent=_b_.getattr(int(denominator),"__lshift__")(py_exponent) +if(exponent > 0){numerator=numerator * py_exponent}else{ +denominator=py_exponent} +return _b_.tuple([_b_.int(numerator),_b_.int(denominator)])} +$FloatDict.__bool__=function(self){return _b_.bool(self.valueOf())} +$FloatDict.__class__=$B.$type +$FloatDict.__eq__=function(self,other){if(isNaN(self)&& isNaN(other)){return true} +if(isinstance(other,_b_.int))return self==other +if(isinstance(other,float)){ +return self.valueOf()==other.valueOf()} +if(isinstance(other,_b_.complex)){if(other.imag !=0)return false +return self==other.real} +if(_b_.hasattr(other,'__eq__')){return _b_.getattr(other,'__eq__')(self.value)} +return self.value===other} +$FloatDict.__floordiv__=function(self,other){if(isinstance(other,[_b_.int,float])){if(other.valueOf()==0)throw ZeroDivisionError('division by zero') +return float(Math.floor(self/other))} +if(hasattr(other,'__rfloordiv__')){return getattr(other,'__rfloordiv__')(self)} +$err('//',other)} +$FloatDict.fromhex=function(arg){ +if(!isinstance(arg,_b_.str)){throw _b_.ValueError('argument must be a string')} +var value=arg.trim() +switch(value.toLowerCase()){case '+inf': +case 'inf': +case '+infinity': +case 'infinity': +return $FloatClass(Infinity) +case '-inf': +case '-infinity': +return $FloatClass(-Infinity) +case '+nan': +case 'nan': +return $FloatClass(Number.NaN) +case '-nan': +return $FloatClass(-Number.NaN) +case '': +throw _b_.ValueError('count not convert string to float')} +var _m=/^(\d*\.?\d*)$/.exec(value) +if(_m !==null)return $FloatClass(parseFloat(_m[1])) +var _m=/^(\+|-)?(0x)?([0-9A-F]+\.?)?(\.[0-9A-F]+)?(p(\+|-)?\d+)?$/i.exec(value) +if(_m==null)throw _b_.ValueError('invalid hexadecimal floating-point string') +var _sign=_m[1] +var _int=parseInt(_m[3]||'0',16) +var _fraction=_m[4]||'.0' +var _exponent=_m[5]||'p0' +if(_sign=='-'){_sign=-1}else{_sign=1} +var _sum=_int +for(var i=1,_len_i=_fraction.length;i < _len_i;i++){_sum+=parseInt(_fraction.charAt(i),16)/Math.pow(16,i)} +return new Number(_sign * _sum * Math.pow(2,parseInt(_exponent.substring(1))))} +$FloatDict.__getformat__=function(arg){if(arg=='double' ||arg=='float')return 'IEEE, little-endian' +throw _b_.ValueError("__getformat__() argument 1 must be 'double' or 'float'")} +function preformat(self,fmt){if(fmt.empty){return _b_.str(self)} +if(fmt.type && 'eEfFgGn%'.indexOf(fmt.type)==-1){throw _b_.ValueError("Unknown format code '"+fmt.type+ +"' for object of type 'float'")} +if(isNaN(self)){if(fmt.type=='f'||fmt.type=='g'){return 'nan'} +else{return 
'NAN'}} +if(self==Number.POSITIVE_INFINITY){if(fmt.type=='f'||fmt.type=='g'){return 'inf'} +else{return 'INF'}} +if(fmt.precision===undefined && fmt.type !==undefined){fmt.precision=6} +if(fmt.type=='%'){self *=100} +if(fmt.type=='e'){var res=self.toExponential(fmt.precision),exp=parseInt(res.substr(res.search('e')+1)) +if(Math.abs(exp)<10){res=res.substr(0,res.length-1)+'0'+ +res.charAt(res.length-1)} +return res } +if(fmt.precision!==undefined){ +var prec=fmt.precision +if(prec==0){return Math.round(self)+''} +if(prec && 'fF%'.indexOf(fmt.type)>-1){var pos_pt=Math.abs(self).toString().search(/\./) +if(pos_pt>-1){prec+=pos_pt}else{prec=Math.abs(self).toString().length}} +var res=self.toPrecision(prec),pt_pos=res.indexOf('.') +if(fmt.type!==undefined && +(fmt.type=='%' ||fmt.type.toLowerCase()=='f')){if(pt_pos==-1){res +='.'+'0'.repeat(fmt.precision)} +else{missing=fmt.precision-res.length+pt_pos+1 +if(missing>0)res +='0'.repeat(missing)}}else{var res1=self.toExponential(fmt.precision-1),exp=parseInt(res1.substr(res1.search('e')+1)) +if(exp<-4 ||exp>=fmt.precision-1){res=res1 +if(Math.abs(exp)<10){res=res.substr(0,res.length-1)+'0'+ +res.charAt(res.length-1)}}}}else{var res=_b_.str(self)} +if(fmt.type===undefined||'gGn'.indexOf(fmt.type)!=-1){ +while(res.charAt(res.length-1)=='0'){res=res.substr(0,res.length-1)} +if(res.charAt(res.length-1)=='.'){if(fmt.type===undefined){res +='0'} +else{res=res.substr(0,res.length-1)}}} +if(fmt.sign!==undefined){if((fmt.sign==' ' ||fmt.sign=='+')&& self>0){res=fmt.sign+res}} +if(fmt.type=='%'){res+='%'} +return res} +$FloatDict.__format__=function(self,format_spec){var fmt=new $B.parse_format_spec(format_spec) +fmt.align=fmt.align ||'>' +var raw=preformat(self,fmt).split('.'),_int=raw[0] +if(fmt.comma){var len=_int.length,nb=Math.ceil(_int.length/3),chunks=[] +for(var i=0;i0?float(x):float(-x)} +_b_.$frexp=function(x){var x1=x +if(isinstance(x,float))x1=x.valueOf() +if(isNaN(x1)||_b_.$isinf(x1)){return[x1,-1]} +if(x1==0)return[0,0] +var sign=1,ex=0,man=x1 +if(man < 0.){sign=-sign +man=-man} +while(man < 0.5){man *=2.0 +ex--} +while(man >=1.0){man *=0.5 +ex++} +man *=sign +return[man ,ex]} +_b_.$ldexp=function(x,i){if(_b_.$isninf(x))return float('-inf') +if(_b_.$isinf(x))return float('inf') +var y=x +if(isinstance(x,float))y=x.valueOf() +if(y==0)return y +var j=i +if(isinstance(i,float))j=i.valueOf() +return y * Math.pow(2,j)} +$FloatDict.hex=function(self){ +var DBL_MANT_DIG=53 +var TOHEX_NBITS=DBL_MANT_DIG + 3 -(DBL_MANT_DIG+2)%4; +switch(self.valueOf()){case Infinity: +case -Infinity: +case Number.NaN: +case -Number.NaN: +return self +case -0: +return '-0x0.0p0' +case 0: +return '0x0.0p0'} +var _a=_b_.$frexp(_b_.$fabs(self.valueOf())) +var _m=_a[0],_e=_a[1] +var _shift=1 - Math.max(-1021 - _e,0) +_m=_b_.$ldexp(_m,_shift) +_e -=_shift +var _int2hex='0123456789ABCDEF'.split('') +var _s=_int2hex[Math.floor(_m)] +_s+='.' 
+_m -=Math.floor(_m) +for(var i=0;i <(TOHEX_NBITS-1)/4;i++){_m*=16.0 +_s+=_int2hex[Math.floor(_m)] +_m-=Math.floor(_m)} +var _esign='+' +if(_e < 0){_esign='-' +_e=-_e} +if(self.value < 0)return "-0x" + _s + 'p' + _esign + _e; +return "0x" + _s + 'p' + _esign + _e;} +$FloatDict.__init__=function(self,value){self=new Number(value)} +$FloatDict.__int__=function(self){return parseInt(self)} +$FloatDict.is_integer=function(self){return _b_.int(self)==self} +$FloatDict.__mod__=function(self,other){ +if(other==0){throw ZeroDivisionError('float modulo')} +if(isinstance(other,_b_.int))return new Number((self%other+other)%other) +if(isinstance(other,float)){ +var q=Math.floor(self/other),r=self-other*q +return new Number(r)} +if(isinstance(other,_b_.bool)){var bool_value=0; +if(other.valueOf())bool_value=1; +return new Number((self%bool_value+bool_value)%bool_value)} +if(hasattr(other,'__rmod__'))return getattr(other,'__rmod__')(self) +$err('%',other)} +$FloatDict.__mro__=[$FloatDict,$ObjectDict] +$FloatDict.__mul__=function(self,other){if(isinstance(other,_b_.int)){if(other.__class__==$B.LongInt.$dict){return new Number(self*parseFloat(other.value))} +return new Number(self*other)} +if(isinstance(other,float))return new Number(self*other) +if(isinstance(other,_b_.bool)){var bool_value=0; +if(other.valueOf())bool_value=1; +return new Number(self*bool_value)} +if(isinstance(other,_b_.complex)){return _b_.complex(float(self*other.real),float(self*other.imag))} +if(hasattr(other,'__rmul__'))return getattr(other,'__rmul__')(self) +$err('*',other)} +$FloatDict.__ne__=function(self,other){return !$FloatDict.__eq__(self,other)} +$FloatDict.__neg__=function(self,other){return float(-self)} +$FloatDict.__pos__=function(self){return self} +$FloatDict.__pow__=function(self,other){var other_int=isinstance(other,_b_.int) +if(other_int ||isinstance(other,float)){if(self==1){return self} +if(other==0){return new Number(1)} +if(self==-1 && +(!isFinite(other)||other.__class__===$B.LongInt.$dict ||!$B.is_safe_int(other)) +&& !isNaN(other)){return new Number(1)} +else if(self==0 && isFinite(other)&& other<0){throw _b_.ZeroDivisionError("0.0 cannot be raised to a negative power")}else if(self==Number.NEGATIVE_INFINITY && !isNaN(other)){if(other<0 && other%2==1){return new Number(-0.0)}else if(other<0){return new Number(0)} +else if(other>0 && other%2==1){return Number.NEGATIVE_INFINITY}else{return Number.POSITIVE_INFINITY}}else if(self==Number.POSITIVE_INFINITY && !isNaN(other)){return other>0 ? self : new Number(0)} +if(other==Number.NEGATIVE_INFINITY && !isNaN(self)){return Math.abs(self)<1 ? Number.POSITIVE_INFINITY : new Number(0)}else if(other==Number.POSITIVE_INFINITY && !isNaN(self)){return Math.abs(self)<1 ? 
new Number(0): Number.POSITIVE_INFINITY} +if(self<0 && !_b_.getattr(other,'__eq__')(_b_.int(other))){ +return _b_.complex.$dict.__pow__(_b_.complex(self,0),other)} +return float(Math.pow(self,other))}else if(isinstance(other,_b_.complex)){var img=other.imag,preal=Math.pow(self,other.real),ln=Math.log(self) +return _b_.complex(preal*Math.cos(ln),preal*Math.sin(ln))} +if(hasattr(other,'__rpow__'))return getattr(other,'__rpow__')(self) +$err("** or pow()",other)} +$FloatDict.__repr__=$FloatDict.__str__=function(self){if(self===float)return "" +if(self.valueOf()==Infinity)return 'inf' +if(self.valueOf()==-Infinity)return '-inf' +if(isNaN(self.valueOf()))return 'nan' +var res=self.valueOf()+'' +if(res.indexOf('.')==-1)res+='.0' +return _b_.str(res)} +$FloatDict.__setattr__=function(self,attr,value){if(self.constructor===Number){if($FloatDict[attr]===undefined){throw _b_.AttributeError("'float' object has no attribute '"+attr+"'")}else{throw _b_.AttributeError("'float' object attribute '"+attr+"' is read-only")}} +self[attr]=value +return $N} +$FloatDict.__truediv__=function(self,other){if(isinstance(other,[_b_.int,float])){if(other.valueOf()==0)throw ZeroDivisionError('division by zero') +return float(self/other)} +if(isinstance(other,_b_.complex)){var cmod=other.real*other.real+other.imag*other.imag +if(cmod==0)throw ZeroDivisionError('division by zero') +return _b_.complex(float(self*other.real/cmod),float(-self*other.imag/cmod))} +if(hasattr(other,'__rtruediv__'))return getattr(other,'__rtruediv__')(self) +$err('/',other)} +var $op_func=function(self,other){if(isinstance(other,_b_.int)){if(typeof other=='boolean'){return other ? self-1 : self}else if(other.__class__===$B.LongInt.$dict){return float(self-parseInt(other.value))}else{return float(self-other)}} +if(isinstance(other,float))return float(self-other) +if(isinstance(other,_b_.bool)){var bool_value=0; +if(other.valueOf())bool_value=1; +return float(self-bool_value)} +if(isinstance(other,_b_.complex)){return _b_.complex(self - other.real,-other.imag)} +if(hasattr(other,'__rsub__'))return getattr(other,'__rsub__')(self) +$err('-',other)} +$op_func +='' +var $ops={'+':'add','-':'sub'} +for(var $op in $ops){var $opf=$op_func.replace(/-/gm,$op) +$opf=$opf.replace(/__rsub__/gm,'__r'+$ops[$op]+'__') +eval('$FloatDict.__'+$ops[$op]+'__ = '+$opf)} +var $comp_func=function(self,other){if(isinstance(other,_b_.int)){if(other.__class__===$B.LongInt.$dict){return self > parseInt(other.value)} +return self > other.valueOf()} +if(isinstance(other,float))return self > other +if(isinstance(other,_b_.bool)){return self.valueOf()> _b_.bool.$dict.__hash__(other)} +if(hasattr(other,'__int__')||hasattr(other,'__index__')){return $IntDict.__gt__(self,$B.$GetInt(other))} +var inv_op=getattr(other,'__le__',null) +if(inv_op !==null){return inv_op(self)} +var inv_op=getattr(other,'__le__',null) +if(inv_op !==null){return inv_op(self)} +throw _b_.TypeError( +"unorderable types: "+self.__class__.__name__+'() > '+$B.get_class(other).__name__+"()")} +$comp_func +='' +for(var $op in $B.$comps){eval("$FloatDict.__"+$B.$comps[$op]+'__ = '+ +$comp_func.replace(/>/gm,$op). +replace(/__gt__/gm,'__'+$B.$comps[$op]+'__'). 
+replace(/__le__/,'__'+$B.$inv_comps[$op]+'__'))} +$B.make_rmethods($FloatDict) +var $notimplemented=function(self,other){throw _b_.TypeError( +"unsupported operand types for OPERATOR: '"+self.__class__.__name__+ +"' and '"+$B.get_class(other).__name__+"'")} +$notimplemented +='' +for(var $op in $B.$operators){ +switch($op){case '+=': +case '-=': +case '*=': +case '/=': +case '%=': +break +default: +var $opfunc='__'+$B.$operators[$op]+'__' +if($FloatDict[$opfunc]===undefined){eval('$FloatDict.'+$opfunc+"="+$notimplemented.replace(/OPERATOR/gm,$op))}}} +function $FloatClass(value){return new Number(value)} +function to_digits(s){ +var arabic_digits='\u0660\u0661\u0662\u0663\u0664\u0665\u0666\u0667\u0668\u0669',res='' +for(var i=0;i-1){res +=x} +else{res +=s[i]}} +return res} +var float=function(value){switch(value){case undefined: +return $FloatClass(0.0) +case Number.MAX_VALUE: +return $FloatClass(Infinity) +case -Number.MAX_VALUE: +return $FloatClass(-Infinity)} +if(typeof value=="number")return $FloatClass(value) +if(isinstance(value,float)){return value} +if(isinstance(value,_b_.bytes)){var s=getattr(value,'decode')('latin-1') +return float(getattr(value,'decode')('latin-1'))} +if(hasattr(value,'__float__')){return $FloatClass(getattr(value,'__float__')())} +if(typeof value=='string'){value=value.trim() +switch(value.toLowerCase()){case '+inf': +case 'inf': +case '+infinity': +case 'infinity': +return Number.POSITIVE_INFINITY +case '-inf': +case '-infinity': +return Number.NEGATIVE_INFINITY +case '+nan': +case 'nan': +return Number.NaN +case '-nan': +return -Number.NaN +case '': +throw _b_.ValueError('count not convert string to float') +default: +value=to_digits(value) +if(isFinite(value))return $FloatClass(eval(value)) +else{ +_b_.str.$dict.encode(value,'latin-1') +throw _b_.ValueError("Could not convert to float(): '"+_b_.str(value)+"'")}}} +throw _b_.TypeError("float() argument must be a string or a number, not '"+ +$B.get_class(value).__name__+"'")} +float.__class__=$B.$factory +float.$dict=$FloatDict +$FloatDict.$factory=float +$FloatDict.__new__=$B.$__new__(float) +$B.$FloatClass=$FloatClass +_b_.float=float})(__BRYTHON__) +;(function($B){eval($B.InjectBuiltins()) +var $ObjectDict=_b_.object.$dict,$N=_b_.None +function $err(op,other){var msg="unsupported operand type(s) for "+op +msg +=": 'int' and '"+$B.get_class(other).__name__+"'" +throw _b_.TypeError(msg)} +var $IntDict={__class__:$B.$type,__name__:'int',__dir__:$ObjectDict.__dir__,toString:function(){return '$IntDict'},$native:true,descriptors:{'numerator':true,'denominator':true,'imag':true,'real':true}} +$IntDict.from_bytes=function(){var $=$B.args("from_bytes",3,{bytes:null,byteorder:null,signed:null},['bytes','byteorder','signed'],arguments,{signed:False},null,null) +var x=$.bytes,byteorder=$.byteorder,signed=$.signed +var _bytes,_len +if(isinstance(x,[_b_.list,_b_.tuple])){_bytes=x +_len=len(x)}else if(isinstance(x,[_b_.bytes,_b_.bytearray])){_bytes=x.source +_len=x.source.length}else{ +_b_.TypeError("Error! " + _b_.type(x)+ " is not supported in int.from_bytes. 
fix me!")} +switch(byteorder){case 'big': +var num=_bytes[_len - 1]; +var _mult=256 +for(var i=(_len - 2);i >=0;i--){ +num=$B.add($B.mul(_mult,_bytes[i]),num) +_mult=$B.mul(_mult,256)} +if(!signed)return num +if(_bytes[0]< 128)return num +return $B.sub(num,_mult) +case 'little': +var num=_bytes[0] +if(num >=128)num=num - 256 +var _mult=256 +for(var i=1;i < _len;i++){num=$B.add($B.mul(_mult,_bytes[i]),num) +_mult=$B.mul(_mult,256)} +if(!signed)return num +if(_bytes[_len - 1]< 128)return num +return $B.sub(num,_mult)} +throw _b_.ValueError("byteorder must be either 'little' or 'big'");} +$IntDict.to_bytes=function(length,byteorder,star){ +throw _b_.NotImplementedError("int.to_bytes is not implemented yet")} +$IntDict.__abs__=function(self){return abs(self)} +$IntDict.__bool__=function(self){return new Boolean(self.valueOf())} +$IntDict.__ceil__=function(self){return Math.ceil(self)} +$IntDict.__class__=$B.$type +$IntDict.__divmod__=function(self,other){return divmod(self,other)} +$IntDict.__eq__=function(self,other){ +if(other===undefined)return self===int +if(isinstance(other,int))return self.valueOf()==other.valueOf() +if(isinstance(other,_b_.float))return self.valueOf()==other.valueOf() +if(isinstance(other,_b_.complex)){if(other.imag !=0)return False +return self.valueOf()==other.real} +if(hasattr(other,'__eq__'))return getattr(other,'__eq__')(self) +return self.valueOf()===other} +function preformat(self,fmt){if(fmt.empty){return _b_.str(self)} +if(fmt.type && 'bcdoxXn'.indexOf(fmt.type)==-1){throw _b_.ValueError("Unknown format code '"+fmt.type+ +"' for object of type 'int'")} +switch(fmt.type){case undefined: +case 'd': +return self.toString() +case 'b': +return(fmt.alternate ? '0b' : '')+ self.toString(2) +case 'c': +return _b_.chr(self) +case 'o': +return(fmt.alternate ? '0o' : '')+ self.toString(8) +case 'x': +return(fmt.alternate ? '0x' : '')+ self.toString(16) +case 'X': +return(fmt.alternate ? 
'0X' : '')+ self.toString(16).toUpperCase() +case 'n': +return self } +return res} +$IntDict.__format__=function(self,format_spec){var fmt=new $B.parse_format_spec(format_spec) +if(fmt.type && 'eEfFgG%'.indexOf(fmt.type)!=-1){ +return _b_.float.$dict.__format__(self,format_spec)} +fmt.align=fmt.align ||'>' +var res=preformat(self,fmt) +if(fmt.comma){var len=res.length,nb=Math.ceil(res.length/3),chunks=[] +for(var i=0;i$B.min_int && res<$B.max_int){return res} +else{return int($B.LongInt.$dict.__mul__($B.LongInt(self),$B.LongInt(other)))}} +if(isinstance(other,_b_.float)){return new Number(self*other)} +if(isinstance(other,_b_.bool)){if(other.valueOf())return self +return int(0)} +if(isinstance(other,_b_.complex)){return _b_.complex($IntDict.__mul__(self,other.real),$IntDict.__mul__(self,other.imag))} +if(isinstance(other,[_b_.list,_b_.tuple])){var res=[] +var $temp=other.slice(0,other.length) +for(var i=0;i$B.min_int && res<$B.max_int){return res} +else{return int($B.LongInt.$dict.__pow__($B.LongInt(self),$B.LongInt(other)))}} +if(isinstance(other,_b_.float)){if(self>=0){return new Number(Math.pow(self,other.valueOf()))} +else{ +return _b_.complex.$dict.__pow__(_b_.complex(self,0),other)}}else if(isinstance(other,_b_.complex)){var img=other.imag,preal=Math.pow(self,other.real),ln=Math.log(self) +return _b_.complex(preal*Math.cos(ln),preal*Math.sin(ln))} +if(hasattr(other,'__rpow__'))return getattr(other,'__rpow__')(self) +$err("**",other)} +$IntDict.__repr__=function(self){if(self===int)return "" +return self.toString()} +$IntDict.__rshift__=function(self,other){if(isinstance(other,int)){return int($B.LongInt.$dict.__rshift__($B.LongInt(self),$B.LongInt(other)))} +var rrshift=getattr(other,'__rrshift__',null) +if(rrshift!==null){return rrshift(self)} +$err('>>',other)} +$IntDict.__setattr__=function(self,attr,value){if(typeof self=="number"){if($IntDict[attr]===undefined){throw _b_.AttributeError("'int' object has no attribute '"+attr+"'")}else{throw _b_.AttributeError("'int' object attribute '"+attr+"' is read-only")}} +self[attr]=value +return $N} +$IntDict.__str__=$IntDict.__repr__ +$IntDict.__truediv__=function(self,other){if(isinstance(other,int)){if(other==0)throw ZeroDivisionError('division by zero') +if(other.__class__==$B.LongInt.$dict){return new Number(self/parseInt(other.value))} +return new Number(self/other)} +if(isinstance(other,_b_.float)){if(!other.valueOf())throw ZeroDivisionError('division by zero') +return new Number(self/other)} +if(isinstance(other,_b_.complex)){var cmod=other.real*other.real+other.imag*other.imag +if(cmod==0)throw ZeroDivisionError('division by zero') +return _b_.complex(self*other.real/cmod,-self*other.imag/cmod)} +if(hasattr(other,'__rtruediv__'))return getattr(other,'__rtruediv__')(self) +$err("/",other)} +$IntDict.bit_length=function(self){s=bin(self) +s=getattr(s,'lstrip')('-0b') +return s.length } +$IntDict.numerator=function(self){return self} +$IntDict.denominator=function(self){return int(1)} +$IntDict.imag=function(self){return int(0)} +$IntDict.real=function(self){return self} +$B.max_int32=(1<<30)* 2 - 1 +$B.min_int32=- $B.max_int32 +var $op_func=function(self,other){if(isinstance(other,int)){if(other.__class__===$B.LongInt.$dict){return $B.LongInt.$dict.__sub__($B.LongInt(self),$B.LongInt(other))} +if(self > $B.max_int32 ||self < $B.min_int32 || +other > $B.max_int32 ||other < $B.min_int32){return $B.LongInt.$dict.__sub__($B.LongInt(self),$B.LongInt(other))} +return self-other} +if(isinstance(other,_b_.bool))return self-other 
+if(hasattr(other,'__rsub__'))return getattr(other,'__rsub__')(self) +$err("-",other)} +$op_func +='' +var $ops={'&':'and','|':'or','^':'xor'} +for(var $op in $ops){var opf=$op_func.replace(/-/gm,$op) +opf=opf.replace(new RegExp('sub','gm'),$ops[$op]) +eval('$IntDict.__'+$ops[$op]+'__ = '+opf)} +var $op_func=function(self,other){if(isinstance(other,int)){if(typeof other=='number'){var res=self.valueOf()-other.valueOf() +if(res>=$B.min_int && res<=$B.max_int){return res} +else{return $B.LongInt.$dict.__sub__($B.LongInt(self),$B.LongInt(other))}}else if(typeof other=="boolean"){return other ? self-1 : self}else{return $B.LongInt.$dict.__sub__($B.LongInt(self),$B.LongInt(other))}} +if(isinstance(other,_b_.float)){return new Number(self-other)} +if(isinstance(other,_b_.complex)){return _b_.complex(self-other.real,-other.imag)} +if(isinstance(other,_b_.bool)){var bool_value=0; +if(other.valueOf())bool_value=1; +return self-bool_value} +if(isinstance(other,_b_.complex)){return _b_.complex(self.valueOf()- other.real,other.imag)} +if(hasattr(other,'__rsub__'))return getattr(other,'__rsub__')(self) +throw $err('-',other)} +$op_func +='' +var $ops={'+':'add','-':'sub'} +for(var $op in $ops){var opf=$op_func.replace(/-/gm,$op) +opf=opf.replace(new RegExp('sub','gm'),$ops[$op]) +eval('$IntDict.__'+$ops[$op]+'__ = '+opf)} +var $comp_func=function(self,other){if(other.__class__===$B.LongInt.$dict)return $B.LongInt.$dict.__gt__($B.LongInt(self),other) +if(isinstance(other,int))return self.valueOf()> other.valueOf() +if(isinstance(other,_b_.float))return self.valueOf()> other.valueOf() +if(isinstance(other,_b_.bool)){return self.valueOf()> _b_.bool.$dict.__hash__(other)} +if(hasattr(other,'__int__')||hasattr(other,'__index__')){return $IntDict.__gt__(self,$B.$GetInt(other))} +var inv_op=getattr(other,'__le__',null) +if(inv_op !==null){return inv_op(self)} +throw _b_.TypeError( +"unorderable types: int() > "+$B.get_class(other).__name__+"()")} +$comp_func +='' +for(var $op in $B.$comps){eval("$IntDict.__"+$B.$comps[$op]+'__ = '+ +$comp_func.replace(/>/gm,$op). +replace(/__gt__/gm,'__'+$B.$comps[$op]+'__'). +replace(/__le__/,'__'+$B.$inv_comps[$op]+'__'))} +$B.make_rmethods($IntDict) +var $valid_digits=function(base){var digits='' +if(base===0)return '0' +if(base < 10){for(var i=0;i < base;i++)digits+=String.fromCharCode(i+48) +return digits} +var digits='0123456789' +for(var i=10;i < base;i++)digits+=String.fromCharCode(i+55) +return digits} +var int=function(value,base){ +if(value===undefined){return 0} +if(typeof value=='number' && +(base===undefined ||base==10)){return parseInt(value)} +if(base!==undefined){if(!isinstance(value,[_b_.str,_b_.bytes,_b_.bytearray])){throw TypeError("int() can't convert non-string with explicit base")}} +if(isinstance(value,_b_.complex)){throw TypeError("can't convert complex to int")} +var $ns=$B.args('int',2,{x:null,base:null},['x','base'],arguments,{'base':10},'null','null') +var value=$ns['x'] +var base=$ns['base'] +if(isinstance(value,_b_.float)&& base===10){if(value<$B.min_int ||value>$B.max_int){return $B.LongInt.$dict.$from_float(value)} +else{return value>0 ? 
Math.floor(value): Math.ceil(value)}} +if(!(base >=2 && base <=36)){ +if(base !=0)throw _b_.ValueError("invalid base")} +if(typeof value=='number'){if(base==10){if(value < $B.min_int ||value > $B.max_int)return $B.LongInt(value) +return value}else if(value.toString().search('e')>-1){ +throw _b_.OverflowError("can't convert to base "+base)}else{var res=parseInt(value,base) +if(res < $B.min_int ||res > $B.max_int)return $B.LongInt(value,base) +return res}} +if(value===true)return Number(1) +if(value===false)return Number(0) +if(value.__class__===$B.LongInt.$dict){var z=parseInt(value.value) +if(z>$B.min_int && z<$B.max_int){return z} +else{return value}} +base=$B.$GetInt(base) +if(isinstance(value,_b_.str))value=value.valueOf() +if(typeof value=="string"){var _value=value.trim() +if(_value.length==2 && base==0 &&(_value=='0b' ||_value=='0o' ||_value=='0x')){throw _b_.ValueError('invalid value')} +if(_value.length >2){var _pre=_value.substr(0,2).toUpperCase() +if(base==0){if(_pre=='0B')base=2 +if(_pre=='0O')base=8 +if(_pre=='0X')base=16} +if(_pre=='0B' ||_pre=='0O' ||_pre=='0X'){_value=_value.substr(2)}} +var _digits=$valid_digits(base) +var _re=new RegExp('^[+-]?['+_digits+']+$','i') +if(!_re.test(_value)){throw _b_.ValueError( +"invalid literal for int() with base "+base +": '"+_b_.str(value)+"'")} +if(base <=10 && !isFinite(value)){throw _b_.ValueError( +"invalid literal for int() with base "+base +": '"+_b_.str(value)+"'")} +var res=parseInt(_value,base) +if(res < $B.min_int ||res > $B.max_int)return $B.LongInt(_value,base) +return res} +if(isinstance(value,[_b_.bytes,_b_.bytearray])){var _digits=$valid_digits(base) +for(var i=0;i=0;i--){iself-- +if(iself<0){sv=0}else{sv=parseInt(v1.charAt(iself))} +x=(carry+sv+parseInt(v2.charAt(i))).toString() +if(x.length==2){res=x.charAt(1)+res;carry=parseInt(x.charAt(0))} +else{res=x+res;carry=0}} +while(iself>0){iself-- +x=(carry+parseInt(v1.charAt(iself))).toString() +if(x.length==2){res=x.charAt(1)+res;carry=parseInt(x.charAt(0))} +else{res=x+res;carry=0}} +if(carry){res=carry+res} +return{__class__:$LongIntDict,value:res,pos:true}} +function check_shift(shift){ +if(!isinstance(shift,LongInt)){throw TypeError("shift must be int, not '"+ +$B.get_class(shift).__name__+"'")} +if(!shift.pos){throw ValueError("negative shift count")}} +function clone(obj){ +var obj1={} +for(var attr in obj){obj1[attr]=obj[attr]} +return obj1} +function comp_pos(v1,v2){ +if(v1.length>v2.length){return 1} +else if(v1.lengthv2){return 1} +else if(v1=0;i--){i1-- +sv=parseInt(v1.charAt(i1)) +x=(sv-carry-parseInt(v2.charAt(i))) +if(x<0){res=(10+x)+res;carry=1} +else{res=x+res;carry=0}} +while(i1>0){i1-- +x=(parseInt(v1.charAt(i1))-carry) +if(x<0){res=(10+x)+res;carry=1} +else{res=x+res;carry=0}} +while(res.charAt(0)=='0' && res.length>1){res=res.substr(1)} +return{__class__:$LongIntDict,value:res,pos:true}} +$LongIntDict.$from_float=function(value){var s=Math.abs(value).toString(),v=s +if(s.search('e')>-1){var t=/-?(\d)(\.\d+)?e([+-])(\d*)/.exec(s),n1=t[1],n2=t[2],pos=t[3],exp=t[4] +if(pos=='+'){if(n2===undefined){v=n1+'0'.repeat(exp-1)}else{v=n1+n2+'0'.repeat(exp-1-n2.length)}}} +return{__class__:$LongIntDict,value: v,pos: value >=0}} +$LongIntDict.__abs__=function(self){return{__class__:$LongIntDict,value: self.value,pos:true}} +$LongIntDict.__add__=function(self,other){if(isinstance(other,_b_.float)){return _b_.float(parseInt(self.value)+other.value)} +if(typeof other=='number')other=LongInt(_b_.str(other)) +var res +if(self.pos&&other.pos){ +return 
add_pos(self.value,other.value)}else if(!self.pos&&!other.pos){ +res=add_pos(self.value,other.value) +res.pos=false +return intOrLong(res)}else if(self.pos && !other.pos){ +switch(comp_pos(self.value,other.value)){case 1: +res=sub_pos(self.value,other.value) +break +case 0: +res={__class__:$LongIntDict,value:0,pos:true} +break +case -1: +res=sub_pos(other.value,self.value) +res.pos=false +break} +return intOrLong(res)}else{ +switch(comp_pos(self.value,other.value)){case 1: +res=sub_pos(self.value,other.value) +res.pos=false +break +case 0: +res={__class__:$LongIntDict,value:0,pos:true} +break +case -1: +res=sub_pos(other.value,self.value) +break} +return intOrLong(res)}} +$LongIntDict.__and__=function(self,other){if(typeof other=='number')other=LongInt(_b_.str(other)) +var v1=$LongIntDict.__index__(self) +var v2=$LongIntDict.__index__(other) +if(v1.lengthother.value.length){return self.pos} +else if(self.value.length=other.value : self.value <=other.value}} +$LongIntDict.__gt__=function(self,other){return !$LongIntDict.__le__(self,other)} +$LongIntDict.__index__=function(self){ +var res='',pos=self.value.length,temp=self.value,d +while(true){d=divmod_pos(temp,'2') +res=d[1].value + res +temp=d[0].value +if(temp=='0'){break}} +return intOrLong(res)} +$LongIntDict.__invert__=function(self){return $LongIntDict.__sub__(LongInt('-1'),self)} +$LongIntDict.__le__=function(self,other){if(typeof other=='number')other=LongInt(_b_.str(other)) +if(self.pos !==other.pos){return !self.pos} +if(self.value.length>other.value.length){return !self.pos} +else if(self.value.length=other.value}} +$LongIntDict.__lt__=function(self,other){return !$LongIntDict.__ge__(self,other)} +$LongIntDict.__lshift__=function(self,shift){var is_long=shift.__class__==$LongIntDict +if(is_long){var shift_value=parseInt(shift.value) +if(shift_value<0){throw _b_.ValueError('negative shift count')} +if(shift_value < $B.max_int){shift_safe=true;shift=shift_value}} +if(shift_safe){if(shift_value==0){return self}}else{shift=LongInt(shift) +if(shift.value=='0'){return self}} +var res=self.value +while(true){var x,carry=0,res1='' +for(var i=res.length-1;i>=0;i--){x=(carry+parseInt(res.charAt(i))*2).toString() +if(x.length==2){res1=x.charAt(1)+res1;carry=parseInt(x.charAt(0))} +else{res1=x+res1;carry=0}} +if(carry){res1=carry+res1} +res=res1 +if(shift_safe){shift-- +if(shift==0){break}}else{shift=sub_pos(shift.value,'1') +if(shift.value=='0'){break}}} +return intOrLong({__class__:$LongIntDict,value:res,pos:self.pos})} +$LongIntDict.__mod__=function(self,other){return intOrLong($LongIntDict.__divmod__(self,other)[1])} +$LongIntDict.__mro__=[$LongIntDict,_b_.int.$dict,_b_.object.$dict] +$LongIntDict.__mul__=function(self,other){switch(self){case Number.NEGATIVE_INFINITY: +case Number.POSITIVE_INFINITY: +var eq=_b_.getattr(other,'__eq__') +if(eq(0)){return NaN} +else if(_b_.getattr(other,'__gt__')(0)){return self} +else{return -self}} +if(isinstance(other,_b_.float)){return _b_.float(parseInt(self.value)*other)} +if(typeof other=='number')other=LongInt(_b_.str(other)) +var res=mul_pos(self.value,other.value) +if(self.pos==other.pos){return intOrLong(res)} +res.pos=false +return intOrLong(res)} +$LongIntDict.__neg__=function(obj){return{__class__:$LongIntDict,value:obj.value,pos:!obj.pos}} +$LongIntDict.__or__=function(self,other){other=LongInt(other) +var v1=$LongIntDict.__index__(self) +var v2=$LongIntDict.__index__(other) +if(v1.length0){var dm=divmod_pos(v,base.toString()) +res=parseInt(dm[1].value).toString(base)+res +v=dm[0].value 
+if(v==0){break}} +return res} +function digits(base){ +var is_digits={} +for(var i=0;i10){ +for(var i=0;iMIN_SAFE_INTEGER && v2){throw _b_.TypeError("LongInt takes at most 2 arguments ("+ +arguments.length+" given)")} +if(base===undefined){base=10} +else if(!isinstance(base,int)){throw TypeError("'"+$B.get_class(base).__name__+"' object cannot be interpreted as an integer")} +if(base<0 ||base==1 ||base>36){throw ValueError("LongInt() base must be >= 2 and <= 36")} +if(isinstance(value,_b_.float)){if(value===Number.POSITIVE_INFINITY ||value===Number.NEGATIVE_INFINITY){return value} +if(value>=0){value=new Number(Math.round(value.value))} +else{value=new Number(Math.ceil(value.value))}}else if(isinstance(value,_b_.bool)){if(value.valueOf())return int(1) +return int(0)} +if(typeof value=='number'){if(isSafeInteger(value)){value=value.toString()} +else if(value.constructor==Number){console.log('big number',value);value=value.toString()} +else{console.log('wrong value',value);throw ValueError("argument of long_int is not a safe integer")}}else if(value.__class__===$LongIntDict){return value} +else if(isinstance(value,_b_.bool)){value=_b_.bool.$dict.__int__(value)+''} +else if(typeof value!='string'){throw ValueError("argument of long_int must be a string, not "+ +$B.get_class(value).__name__)} +var has_prefix=false,pos=true,start=0 +while(value.charAt(0)==' ' && value.length){value=value.substr(1)} +while(value.charAt(value.length-1)==' ' && value.length){value=value.substr(0,value.length-1)} +if(value.charAt(0)=='+'){has_prefix=true} +else if(value.charAt(0)=='-'){has_prefix=true;pos=false} +if(has_prefix){ +if(value.length==1){ +throw ValueError('LongInt argument is not a valid number: "'+value+'"')}else{value=value.substr(1)}} +while(start=0)return '('+self.real+'+'+self.imag+'j)' +return '('+self.real+'-'+(-self.imag)+'j)'} +$ComplexDict.__sqrt__=function(self){if(self.imag==0)return complex(Math.sqrt(self.real)) +var r=self.real,i=self.imag +var _sqrt=Math.sqrt(r*r+i*i) +var _a=Math.sqrt((r + sqrt)/2) +var _b=Number.sign(i)* Math.sqrt((-r + sqrt)/2) +return complex(_a,_b)} +$ComplexDict.__truediv__=function(self,other){if(isinstance(other,complex)){if(other.real==0 && other.imag==0){throw ZeroDivisionError('division by zero')} +var _num=self.real*other.real + self.imag*other.imag +var _div=other.real*other.real + other.imag*other.imag +var _num2=self.imag*other.real - self.real*other.imag +return complex(_num/_div,_num2/_div)} +if(isinstance(other,_b_.int)){if(!other.valueOf())throw ZeroDivisionError('division by zero') +return $ComplexDict.__truediv__(self,complex(other.valueOf()))} +if(isinstance(other,_b_.float)){if(!other.value)throw ZeroDivisionError('division by zero') +return $ComplexDict.__truediv__(self,complex(other.value))} +$UnsupportedOpType("//","complex",other.__class__)} +var $op_func=function(self,other){throw _b_.TypeError("TypeError: unsupported operand type(s) for -: 'complex' and '" + +$B.get_class(other).__name__+"'")} +$op_func +='' +var $ops={'&':'and','|':'ior','<<':'lshift','>>':'rshift','^':'xor'} +for(var $op in $ops){eval('$ComplexDict.__'+$ops[$op]+'__ = '+$op_func.replace(/-/gm,$op))} +$ComplexDict.__ior__=$ComplexDict.__or__ +var $op_func=function(self,other){if(isinstance(other,complex))return complex(self.real-other.real,self.imag-other.imag) +if(isinstance(other,_b_.int))return complex($B.sub(self.real,other.valueOf()),self.imag) +if(isinstance(other,_b_.float))return complex(self.real - other.value,self.imag) +if(isinstance(other,_b_.bool)){var 
bool_value=0; +if(other.valueOf())bool_value=1; +return complex(self.real - bool_value,self.imag)} +throw _b_.TypeError("unsupported operand type(s) for -: "+self.__repr__()+ +" and '"+$B.get_class(other).__name__+"'")} +$ComplexDict.__sub__=$op_func +$op_func +='' +$op_func=$op_func.replace(/-/gm,'+').replace(/sub/gm,'add') +eval('$ComplexDict.__add__ = '+$op_func) +var $comp_func=function(self,other){throw _b_.TypeError("TypeError: unorderable types: complex() > " + +$B.get_class(other).__name__ + "()")} +$comp_func +='' +for(var $op in $B.$comps){eval("$ComplexDict.__"+$B.$comps[$op]+'__ = '+$comp_func.replace(/>/gm,$op))} +$B.make_rmethods($ComplexDict) +$ComplexDict.real=function(self){return new Number(self.real)} +$ComplexDict.imag=function(self){return new Number(self.imag)} +var complex_re=/^(\d*\.?\d*)([\+\-]?)(\d*\.?\d*)(j?)$/ +var complex=function(real,imag){if(typeof real=='string'){if(imag!==undefined){throw _b_.TypeError("complex() can't take second arg if first is a string")} +var parts=complex_re.exec(real) +if(parts===null){throw _b_.ValueError("complex() arg is a malformed string")}else if(parts[1]=='.' ||parts[3]=='.'){throw _b_.ValueError("complex() arg is a malformed string")}else if(parts[4]=='j'){if(parts[2]==''){real=0;imag=parseFloat(parts[1])}else{real=parseFloat(parts[1]) +imag=parts[3]=='' ? 1 : parseFloat(parts[3]) +imag=parts[2]=='-' ? -imag : imag}}else{real=parseFloat(parts[1]) +imag=0}} +var res={__class__:$ComplexDict,real:real ||0,imag:imag ||0} +res.__repr__=res.__str__=function(){if(real==0)return imag + 'j' +return '('+real+'+'+imag+'j)'} +return res} +complex.$dict=$ComplexDict +complex.__class__=$B.$factory +$ComplexDict.$factory=complex +$B.set_func_names($ComplexDict) +_b_.complex=complex})(__BRYTHON__) +;(function($B){eval($B.InjectBuiltins()) +var $ObjectDict=_b_.object.$dict,$N=_b_.None +function $list(){ +var args=[],pos=0 +for(var i=0,_len_i=arguments.length;i < _len_i;i++){args[pos++]=arguments[i]} +return new $ListDict(args)} +var $ListDict={__class__:$B.$type,__name__:'list',$native:true,__dir__:$ObjectDict.__dir__} +$ListDict.__add__=function(self,other){if($B.get_class(self)!==$B.get_class(other)){throw TypeError('can only concatenate list (not "'+ +$B.get_class(other).__name__+'") to list')} +var res=self.valueOf().concat(other.valueOf()) +if(isinstance(self,tuple))res=tuple(res) +return res} +$ListDict.__contains__=function(self,item){var $=$B.args('__contains__',2,{self:null,item:null},['self','item'],arguments,{},null,null),self=$.self,item=$.item +var _eq=getattr(item,'__eq__') +var i=self.length +while(i--){if(_eq(self[i]))return true} +return false} +$ListDict.__delitem__=function(self,arg){if(isinstance(arg,_b_.int)){var pos=arg +if(arg<0)pos=self.length+pos +if(pos>=0 && pos0 ? 0 : self.length} +var stop=arg.stop +if(stop===None){stop=step >0 ? 
self.length : 0} +if(start<0)start=self.length+start +if(stop<0)stop=self.length+stop +var res=[],i=null,pos=0 +if(step>0){if(stop>start){for(var i=start;istop;i+=step){if(self[i]!==undefined){res[pos++]=i}} +res.reverse()}} +var i=res.length +while(i--){ +self.splice(res[i],1)} +return $N} +if(hasattr(arg,'__int__')||hasattr(arg,'__index__')){$ListDict.__delitem__(self,_b_.int(arg)) +return $N} +throw _b_.TypeError('list indices must be integer, not '+_b_.str(arg.__class__))} +$ListDict.__eq__=function(self,other){if(isinstance(other,$B.get_class(self).$factory)){if(other.length==self.length){var i=self.length +while(i--){if(!getattr(self[i],'__eq__')(other[i]))return false} +return true}} +return false} +$ListDict.__getitem__=function(self,arg){var $=$B.args('__getitem__',2,{self:null,key:null},['self','key'],arguments,{},null,null),self=$.self,key=$.key +var klass=$B.get_class(self).$factory +if(isinstance(key,_b_.int)){var items=self.valueOf() +var pos=key +if(key<0)pos=items.length+pos +if(pos>=0 && pos 0){if(stop <=start)return res; +for(var i=start;i start)return res; +for(var i=start;i>stop;i+=step){res[pos++]=items[i]} +return klass(res);}} +if(hasattr(key,'__int__')||hasattr(key,'__index__')){return $ListDict.__getitem__(self,_b_.int(key))} +throw _b_.TypeError('list indices must be integer, not '+ +$B.get_class(key).__name__)} +$ListDict.__ge__=function(self,other){if(!isinstance(other,[list,_b_.tuple])){throw _b_.TypeError("unorderable types: list() >= "+ +$B.get_class(other).__name__+'()')} +var i=0 +while(i=other.length)return true +if(getattr(self[i],'__eq__')(other[i])){i++} +else return(getattr(self[i],"__ge__")(other[i]))} +return other.length==self.length} +$ListDict.__gt__=function(self,other){if(!isinstance(other,[list,_b_.tuple])){throw _b_.TypeError("unorderable types: list() > "+ +$B.get_class(other).__name__+'()')} +var i=0 +while(i=other.length)return true +if(getattr(self[i],'__eq__')(other[i])){i++} +else return(getattr(self[i],'__gt__')(other[i]))} +return false} +$ListDict.__iadd__=function(){var $=$B.args('__iadd__',2,{self:null,x:null},['self','x'],arguments,{},null,null) +var x=list(iter($.x)) +for(var i=0;i < x.length;i++){$.self.push(x[i])} +return $.self} +$ListDict.__imul__=function(){var $=$B.args('__imul__',2,{self:null,x:null},['self','x'],arguments,{},null,null) +var x=$B.$GetInt($.x),len=$.self.length,pos=len +if(x==0){$ListDict.clear($.self);return $.self} +for(var i=1;i < x;i++){for(j=0;j" +var _r=[] +for(var i=0;i=0 && posbegin){var pivot=begin+Math.floor(Math.random()*(end-begin)),len=array.length +pivot=$partition(arg,array,begin,end,pivot); +$qsort(arg,array,begin,pivot); +$qsort(arg,array,pivot+1,end);}} +function $elts_class(self){ +if(self.length==0){return null} +var cl=$B.get_class(self[0]),i=self.length +while(i--){ +if($B.get_class(self[i])!==cl)return false} +return cl} +$ListDict.sort=function(self){var $=$B.args('sort',1,{self:null},['self'],arguments,{},null,'kw') +var func=null +var reverse=false +var kw_args=$.kw,keys=_b_.list(_b_.dict.$dict.keys(kw_args)) +for(var i=0;i0){var args=[arguments[0].$t] +var pos=1 +for(var i=1,_len_i=arguments.length;i < _len_i;i++){args[pos++]=arguments[i]}} +return $ListDict[attr].apply(null,args)}})($attr)}} +$ListSubclassDict.__mro__=[$ListSubclassDict,$ObjectDict] +$B.$ListSubclassFactory={__class__:$B.$factory,$dict:$ListSubclassDict} +function $tuple(arg){return arg} +var $TupleDict={__class__:$B.$type,__name__:'tuple',$native:true} +$TupleDict.__iter__=function(self){return 
$B.$iterator(self,$tuple_iterator)} +var $tuple_iterator=$B.$iterator_class('tuple_iterator') +function tuple(){var obj=list.apply(null,arguments) +obj.__class__=$TupleDict +return obj} +tuple.__class__=$B.$factory +tuple.$dict=$TupleDict +tuple.$is_func=true +$TupleDict.$factory=tuple +$TupleDict.__new__=$B.$__new__(tuple) +tuple.__module__='builtins' +for(var attr in $ListDict){switch(attr){case '__delitem__': +case '__setitem__': +case 'append': +case 'extend': +case 'insert': +case 'remove': +case 'pop': +case 'reverse': +case 'sort': +break +default: +if($TupleDict[attr]===undefined){if(typeof $ListDict[attr]=='function'){$TupleDict[attr]=(function(x){return function(){return $ListDict[x].apply(null,arguments)}})(attr)}else{$TupleDict[attr]=$ListDict[attr]}}}} +$TupleDict.__delitem__=function(){throw _b_.TypeError("'tuple' object doesn't support item deletion")} +$TupleDict.__setitem__=function(){throw _b_.TypeError("'tuple' object does not support item assignment")} +$TupleDict.__eq__=function(self,other){ +if(other===undefined)return self===tuple +return $ListDict.__eq__(self,other)} +$TupleDict.__hash__=function(self){ +var x=0x345678 +for(var i=0,_len_i=self.length;i < _len_i;i++){var y=_b_.hash(self[i]); +x=(1000003 * x)^ y & 0xFFFFFFFF;} +return x} +$TupleDict.__mro__=[$TupleDict,$ObjectDict] +$TupleDict.__name__='tuple' +$B.set_func_names($TupleDict) +_b_.list=list +_b_.tuple=tuple +_b_.object.$dict.__bases__=tuple()})(__BRYTHON__) +;(function($B){eval($B.InjectBuiltins()) +var $ObjectDict=object.$dict +var $StringDict={__class__:$B.$type,__dir__:$ObjectDict.__dir__,__name__:'str',$native:true} +function normalize_start_end($){if($.start===null||$.start===_b_.None){$.start=0} +else if($.start<0){$.start +=$.self.length;$.start=Math.max(0,$.start)} +if($.end===null||$.end===_b_.None){$.end=$.self.length} +else if($.end<0){$.end +=$.self.length;$.end=Math.max(0,$.end)} +if(!isinstance($.start,_b_.int)||!isinstance($.end,_b_.int)){throw _b_.TypeError( +"slice indices must be integers or None or have an __index__ method")}} +function reverse(s){ +return s.split('').reverse().join('')} +function check_str(obj){if(!_b_.isinstance(obj,str)){throw _b_.TypeError("can't convert '"+ +$B.get_class(obj).__name__+"' object to str implicitely")}} +$StringDict.__add__=function(self,other){if(!(typeof other==="string")){try{return getattr(other,'__radd__')(self)} +catch(err){throw _b_.TypeError( +"Can't convert "+$B.get_class(other).__name__+" to str implicitely")}} +return self+other} +$StringDict.__contains__=function(self,item){if(!(typeof item==="string")){throw _b_.TypeError( +"'in ' requires string as left operand, not "+item.__class__)} +var nbcar=item.length +if(nbcar==0)return true +if(self.length==0)return nbcar==0 +for(var i=0,_len_i=self.length;i < _len_i;i++){if(self.substr(i,nbcar)==item)return true} +return false} +$StringDict.__delitem__=function(){throw _b_.TypeError("'str' object doesn't support item deletion")} +$StringDict.__dir__=$ObjectDict.__dir__ +$StringDict.__eq__=function(self,other){if(other===undefined){ +return self===str} +if(_b_.isinstance(other,_b_.str)){return other.valueOf()==self.valueOf()} +return other===self.valueOf()} +function preformat(self,fmt){if(fmt.empty){return _b_.str(self)} +if(fmt.type && fmt.type!='s'){throw _b_.ValueError("Unknown format code '"+fmt.type+ +"' for object of type 'str'")} +return self} +$StringDict.__format__=function(self,format_spec){var fmt=new $B.parse_format_spec(format_spec) +fmt.align=fmt.align ||'<' +return 
$B.format_width(preformat(self,fmt),fmt)} +$StringDict.__getitem__=function(self,arg){if(isinstance(arg,_b_.int)){var pos=arg +if(arg<0)pos+=self.length +if(pos>=0 && pos0){if(stop<=start)return '' +for(var i=start;i=start)return '' +for(var i=start;i>stop;i+=step)res +=self.charAt(i)} +return res} +if(isinstance(arg,bool))return self.__getitem__(_b_.int(arg)) +throw _b_.TypeError('string indices must be integers')} +$StringDict.__hash__=function(self){if(self===undefined){return $StringDict.__hashvalue__ ||$B.$py_next_hash-- } +var hash=1; +for(var i=0,_len_i=self.length;i < _len_i;i++){hash=(101*hash + self.charCodeAt(i))& 0xFFFFFFFF} +return hash} +$StringDict.__init__=function(self,arg){self.valueOf=function(){return arg} +self.toString=function(){return arg} +return _b_.None} +var $str_iterator=$B.$iterator_class('str_iterator') +$StringDict.__iter__=function(self){var items=self.split('') +return $B.$iterator(items,$str_iterator)} +$StringDict.__len__=function(self){return self.length} +var kwarg_key=new RegExp('([^\\)]*)\\)') +var NotANumber=function(){this.name='NotANumber'} +var number_check=function(s){if(!isinstance(s,[_b_.int,_b_.float])){throw new NotANumber()}} +var get_char_array=function(size,char){if(size <=0) +return '' +return new Array(size + 1).join(char)} +var format_padding=function(s,flags,minus_one){var padding=flags.padding +if(!padding){ +return s} +s=s.toString() +padding=parseInt(padding,10) +if(minus_one){ +padding -=1} +if(!flags.left){return get_char_array(padding - s.length,flags.pad_char)+ s}else{ +return s + get_char_array(padding - s.length,flags.pad_char)}} +var format_int_precision=function(val,flags){var precision=flags.precision +if(!precision){return val.toString()} +precision=parseInt(precision,10) +var s +if(val.__class__===$B.LongInt.$dict){s=$B.LongInt.$dict.to_base(val,10)}else{ +s=val.toString()} +var sign=s[0] +if(s[0]==='-'){return '-' + get_char_array(precision - s.length + 1,'0')+ s.slice(1)} +return get_char_array(precision - s.length,'0')+ s} +var format_float_precision=function(val,upper,flags,modifier){var precision=flags.precision +if(isFinite(val)){val=modifier(val,precision,flags,upper) +return val} +if(val===Infinity){val='inf'}else if(val===-Infinity){val='-inf'}else{ +val='nan'} +if(upper){return val.toUpperCase()} +return val} +var format_sign=function(val,flags){if(flags.sign){if(val >=0){return "+"}}else if(flags.space){if(val >=0){return " "}} +return ""} +var str_format=function(val,flags){ +flags.pad_char=" " +return format_padding(str(val),flags)} +var num_format=function(val,flags){number_check(val) +if(val.__class__===$B.LongInt.$dict){val=$B.LongInt.$dict.to_base(val,10)}else{ +val=parseInt(val)} +var s=format_int_precision(val,flags) +if(flags.pad_char==='0'){if(val < 0){s=s.substring(1) +return '-' + format_padding(s,flags,true)} +var sign=format_sign(val,flags) +if(sign !==''){return sign + format_padding(s,flags,true)}} +return format_padding(format_sign(val,flags)+ s,flags)} +var repr_format=function(val,flags){flags.pad_char=" " +return format_padding(repr(val),flags)} +var ascii_format=function(val,flags){flags.pad_char=" " +return format_padding(ascii(val),flags)} +var _float_helper=function(val,flags){number_check(val) +if(!flags.precision){if(!flags.decimal_point){flags.precision=6}else{ +flags.precision=0}}else{ +flags.precision=parseInt(flags.precision,10) +validate_precision(flags.precision)} +return parseFloat(val)} +var trailing_zeros=/(.*?)(0+)([eE].*)/ +var leading_zeros=/\.(0*)/ +var trailing_dot=/\.$/ 
+var validate_precision=function(precision){ +if(precision > 20){precision=20 }} +var floating_point_format=function(val,upper,flags){val=_float_helper(val,flags) +var v=val.toString() +var v_len=v.length +var dot_idx=v.indexOf('.') +if(dot_idx < 0){dot_idx=v_len} +if(val < 1 && val > -1){var zeros=leading_zeros.exec(v) +var numzeros +if(zeros){numzeros=zeros[1].length}else{ +numzeros=0} +if(numzeros >=4){val=format_sign(val,flags)+ format_float_precision(val,upper,flags,_floating_g_exp_helper) +if(!flags.alternate){var trl=trailing_zeros.exec(val) +if(trl){val=trl[1].replace(trailing_dot,'')+ trl[3]}}else{ +if(flags.precision <=1){val=val[0]+ '.' + val.substring(1)}} +return format_padding(val,flags)} +flags.precision +=numzeros +return format_padding(format_sign(val,flags)+ format_float_precision(val,upper,flags,function(val,precision){val=val.toFixed(min(precision,v_len - dot_idx)+ numzeros)}),flags)} +if(dot_idx > flags.precision){val=format_sign(val,flags)+ format_float_precision(val,upper,flags,_floating_g_exp_helper) +if(!flags.alternate){var trl=trailing_zeros.exec(val) +if(trl){val=trl[1].replace(trailing_dot,'')+ trl[3]}}else{ +if(flags.precision <=1){val=val[0]+ '.' + val.substring(1)}} +return format_padding(val,flags)} +return format_padding(format_sign(val,flags)+ format_float_precision(val,upper,flags,function(val,precision){if(!flags.decimal_point){precision=min(v_len - 1,6)}else if(precision > v_len){if(!flags.alternate){precision=v_len}} +if(precision < dot_idx){precision=dot_idx} +return val.toFixed(precision - dot_idx)}),flags)} +var _floating_g_exp_helper=function(val,precision,flags,upper){if(precision){--precision} +val=val.toExponential(precision) +var e_idx=val.lastIndexOf('e') +if(e_idx > val.length - 4){val=val.substring(0,e_idx + 2)+ '0' + val.substring(e_idx + 2)} +if(upper){return val.toUpperCase()} +return val} +var floating_point_decimal_format=function(val,upper,flags){val=_float_helper(val,flags) +return format_padding(format_sign(val,flags)+ format_float_precision(val,upper,flags,function(val,precision,flags){val=val.toFixed(precision) +if(precision===0 && flags.alternate){val +='.'} +return val}),flags)} +var _floating_exp_helper=function(val,precision,flags,upper){val=val.toExponential(precision) +var e_idx=val.lastIndexOf('e') +if(e_idx > val.length - 4){val=val.substring(0,e_idx + 2)+ '0' + val.substring(e_idx + 2)} +if(upper){return val.toUpperCase()} +return val} +var floating_point_exponential_format=function(val,upper,flags){val=_float_helper(val,flags) +return format_padding(format_sign(val,flags)+ format_float_precision(val,upper,flags,_floating_exp_helper),flags)} +var signed_hex_format=function(val,upper,flags){var ret +number_check(val) +if(val.__class__===$B.LongInt.$dict){ret=$B.LongInt.$dict.to_base(val,16)}else{ +ret=parseInt(val) +ret=ret.toString(16)} +ret=format_int_precision(ret,flags) +if(upper){ret=ret.toUpperCase()} +if(flags.pad_char==='0'){if(val < 0){ret=ret.substring(1) +ret='-' + format_padding(ret,flags,true)} +var sign=format_sign(val,flags) +if(sign !==''){ret=sign + format_padding(ret,flags,true)}} +if(flags.alternate){if(ret.charAt(0)==='-'){if(upper){ret="-0X" + ret.slice(1)}else{ +ret="-0x" + ret.slice(1)}}else{ +if(upper){ret="0X" + ret}else{ +ret="0x" + ret}}} +return format_padding(format_sign(val,flags)+ ret,flags)} +var octal_format=function(val,flags){number_check(val) +var ret +if(val.__class__===$B.LongInt.$dict){ret=$B.LongInt.$dict.to_base(8)}else{ +ret=parseInt(val) +ret=ret.toString(8)} 
+ret=format_int_precision(ret,flags) +if(flags.pad_char==='0'){if(val < 0){ret=ret.substring(1) +ret='-' + format_padding(ret,flags,true)} +var sign=format_sign(val,flags) +if(sign !==''){ret=sign + format_padding(ret,flags,true)}} +if(flags.alternate){if(ret.charAt(0)==='-'){ret="-0o" + ret.slice(1)}else{ +ret="0o" + ret}} +return format_padding(ret,flags)} +var single_char_format=function(val,flags){if(isinstance(val,str)&& val.length==1)return val +try{ +val=_b_.int(val)}catch(err){throw _b_.TypeError('%c requires int or char')} +return format_padding(chr(val),flags)} +var num_flag=function(c,flags){if(c==='0' && !flags.padding && !flags.decimal_point && !flags.left){flags.pad_char='0' +return} +if(!flags.decimal_point){flags.padding=(flags.padding ||"")+ c}else{ +flags.precision=(flags.precision ||"")+ c}} +var decimal_point_flag=function(val,flags){if(flags.decimal_point){ +throw new UnsupportedChar()} +flags.decimal_point=true} +var neg_flag=function(val,flags){flags.pad_char=' ' +flags.left=true} +var space_flag=function(val,flags){flags.space=true} +var sign_flag=function(val,flags){flags.sign=true} +var alternate_flag=function(val,flags){flags.alternate=true} +var char_mapping={'s': str_format,'d': num_format,'i': num_format,'u': num_format,'o': octal_format,'r': repr_format,'a': ascii_format,'g': function(val,flags){return floating_point_format(val,false,flags)},'G': function(val,flags){return floating_point_format(val,true,flags)},'f': function(val,flags){return floating_point_decimal_format(val,false,flags)},'F': function(val,flags){return floating_point_decimal_format(val,true,flags)},'e': function(val,flags){return floating_point_exponential_format(val,false,flags)},'E': function(val,flags){return floating_point_exponential_format(val,true,flags)},'x': function(val,flags){return signed_hex_format(val,false,flags)},'X': function(val,flags){return signed_hex_format(val,true,flags)},'c': single_char_format,'0': function(val,flags){return num_flag('0',flags)},'1': function(val,flags){return num_flag('1',flags)},'2': function(val,flags){return num_flag('2',flags)},'3': function(val,flags){return num_flag('3',flags)},'4': function(val,flags){return num_flag('4',flags)},'5': function(val,flags){return num_flag('5',flags)},'6': function(val,flags){return num_flag('6',flags)},'7': function(val,flags){return num_flag('7',flags)},'8': function(val,flags){return num_flag('8',flags)},'9': function(val,flags){return num_flag('9',flags)},'-': neg_flag,' ': space_flag,'+': sign_flag,'.': decimal_point_flag,'#': alternate_flag} +var UnsupportedChar=function(){this.name="UnsupportedChar"} +$StringDict.__mod__=function(self,args){var length=self.length,pos=0 |0,argpos=null,getitem +if(_b_.isinstance(args,_b_.tuple)){argpos=0 |0}else{getitem=_b_.getattr(args,'__getitem__',null)} +var ret='' +var $get_kwarg_string=function(s){ +++pos +var rslt=kwarg_key.exec(s.substring(newpos)) +if(!rslt){throw _b_.ValueError("incomplete format key")} +var key=rslt[1] +newpos +=rslt[0].length +try{ +var self=getitem(key)}catch(err){if(err.name==="KeyError"){throw err} +throw _b_.TypeError("format requires a mapping")} +return get_string_value(s,self)} +var $get_arg_string=function(s){ +var self +if(argpos===null){ +self=args}else{ +self=args[argpos++] +if(self===undefined){throw _b_.TypeError("not enough arguments for format string")}} +return get_string_value(s,self)} +var get_string_value=function(s,self){ +var flags={'pad_char': ' '} +do{ +var func=char_mapping[s[newpos]] +try{ +if(func===undefined){throw new 
UnsupportedChar()}else{ +var ret=func(self,flags) +if(ret !==undefined){return ret} +++newpos}}catch(err){if(err.name==="UnsupportedChar"){invalid_char=s[newpos] +if(invalid_char===undefined){throw _b_.ValueError("incomplete format")} +throw _b_.ValueError("unsupported format character '" + invalid_char + +"' (0x" + invalid_char.charCodeAt(0).toString(16)+ ") at index " + newpos)}else if(err.name==="NotANumber"){var try_char=s[newpos] +var cls=self.__class__ +if(!cls){if(typeof(self)==='string'){cls='str'}else{ +cls=typeof(self)}}else{ +cls=cls.__name__} +throw _b_.TypeError("%" + try_char + " format: a number is required, not " + cls)}else{ +throw err}}}while(true)} +var nbph=0 +do{ +var newpos=self.indexOf('%',pos) +if(newpos < 0){ret +=self.substring(pos) +break} +ret +=self.substring(pos,newpos) +++newpos +if(newpos < length){if(self[newpos]==='%'){ret +='%'}else{ +nbph++ +if(self[newpos]==='('){++newpos +ret +=$get_kwarg_string(self)}else{ +ret +=$get_arg_string(self)}}}else{ +throw _b_.ValueError("incomplete format")} +pos=newpos + 1}while(pos < length) +if(argpos!==null){if(args.length>argpos){throw _b_.TypeError('not enough arguments for format string')}else if(args.length" +return self.toString()} +$StringDict.toString=function(){return 'string!'} +var $comp_func=function(self,other){if(typeof other !=="string"){throw _b_.TypeError( +"unorderable types: 'str' > "+$B.get_class(other).__name__+"()")} +return self > other} +$comp_func +='' +var $comps={'>':'gt','>=':'ge','<':'lt','<=':'le'} +for(var $op in $comps){eval("$StringDict.__"+$comps[$op]+'__ = '+$comp_func.replace(/>/gm,$op))} +$B.make_rmethods($StringDict) +var $notimplemented=function(self,other){throw NotImplementedError("OPERATOR not implemented for class str")} +$StringDict.capitalize=function(self){if(self.length==0)return '' +return self.charAt(0).toUpperCase()+self.substr(1).toLowerCase()} +$StringDict.casefold=function(self){throw _b_.NotImplementedError("function casefold not implemented yet");} +$StringDict.center=function(self,width,fillchar){var $=$B.args("center",3,{self:null,width:null,fillchar:null},['self','width','fillchar'],arguments,{fillchar:' '},null,null) +if($.width<=self.length)return self +var pad=parseInt(($.width-self.length)/2) +var res=$.fillchar.repeat(pad) +res +=self + res +if(res.length<$.width){res +=$.fillchar} +return res} +$StringDict.count=function(){var $=$B.args('count',4,{self:null,sub:null,start:null,stop:null},['self','sub','start','stop'],arguments,{start:null,stop:null},null,null) +if(!(typeof $.sub==="string")){throw _b_.TypeError( +"Can't convert '"+$B.get_class($.sub).__name__+"' object to str implicitly")} +var substr=$.self +if($.start!==null){var _slice +if($.stop!==null){_slice=_b_.slice($.start,$.stop)} +else{_slice=_b_.slice($.start,$.self.length)} +substr=$StringDict.__getitem__.apply(null,[$.self].concat(_slice))}else{if($.self.length+$.sub.length==0){return 1}} +if($.sub.length==0){if($.start==$.self.length){return 1} +else if(substr.length==0){return 0} +return substr.length+1} +var n=0,pos=0 +while(pos=0){n++;pos+=$.sub.length}else break;} +return n} +$StringDict.encode=function(self,encoding){if(encoding===undefined)encoding='utf-8' +if(encoding=='rot13' ||encoding=='rot_13'){ +var res='' +for(var i=0,_len=self.length;i<_len ;i++){var char=self.charAt(i) +if(('a'<=char && char<='m')||('A'<=char && char<='M')){res +=String.fromCharCode(String.charCodeAt(char)+13)}else if(('m' 0){res +=' ';col++} +break +case '\r': +case '\n': +res +=car +col=0 +break +default: +res 
+=car +col++ +break} +pos++} +return res} +$StringDict.find=function(){ +var $=$B.args("$StringDict.find",4,{self:null,sub:null,start:null,end:null},['self','sub','start','end'],arguments,{start:0,end:null},null,null) +check_str($.sub) +normalize_start_end($) +if(!isinstance($.start,_b_.int)||!isinstance($.end,_b_.int)){throw _b_.TypeError( +"slice indices must be integers or None or have an __index__ method")} +var s=$.self.substring($.start,$.end) +if($.sub.length==0 && $.start==$.self.length){return $.self.length} +if(s.length+$.sub.length==0){return -1} +var last_search=s.length-$.sub.length +for(var i=0;i<=last_search;i++){if(s.substr(i,$.sub.length)==$.sub){return $.start+i}} +return -1} +function parse_format(fmt_string){ +var elts=fmt_string.split(':'),name,conv,spec,name_ext=[] +if(elts.length==1){ +name=fmt_string}else{ +name=elts[0] +spec=elts.splice(1).join(':')} +var elts=name.split('!') +if(elts.length>1){name=elts[0] +conv=elts[1] +if(conv.length!==1 ||'ras'.search(conv)==-1){throw _b_.ValueError('wrong conversion flag '+conv)}} +if(name!==undefined){ +function name_repl(match){name_ext.push(match) +return ''} +var name_ext_re=/\.[_a-zA-Z][_a-zA-Z0-9]*|\[[_a-zA-Z][_a-zA-Z0-9]*\]|\[[0-9]+\]/g +name=name.replace(name_ext_re,name_repl)} +return{name: name,name_ext: name_ext,conv: conv,spec: spec||''}} +$StringDict.format=function(self){var $=$B.args('format',1,{self:null},['self'],arguments,{},'args','kw') +var pos=0,_len=self.length,car,text='',parts=[],rank=0,defaults={} +while(pos<_len){car=self.charAt(pos) +if(car=='{' && self.charAt(pos+1)=='{'){ +text +='{' +pos+=2}else if(car=='}' && self.charAt(pos+1)=='}'){ +text +='}' +pos+=2}else if(car=='{'){ +parts.push(text) +var end=pos+1,nb=1 +while(end<_len){if(self.charAt(end)=='{'){nb++;end++} +else if(self.charAt(end)=='}'){nb--;end++ +if(nb==0){ +var fmt_string=self.substring(pos+1,end-1) +var fmt_obj=parse_format(fmt_string) +if(!fmt_obj.name){fmt_obj.name=rank+'' +rank++} +if(fmt_obj.spec!==undefined){ +function replace_nested(name,key){if(/\d+/.exec(key)){ +return _b_.tuple.$dict.__getitem__($.args,parseInt(key))}else{ +return _b_.dict.$dict.__getitem__($.kw,key)}} +fmt_obj.spec=fmt_obj.spec.replace(/\{(.+?)\}/g,replace_nested)} +parts.push(fmt_obj) +text='' +break}}else{end++}} +if(nb>0){throw ValueError("wrong format "+self)} +pos=end}else{text +=car;pos++}} +if(text){parts.push(text)} +var res='',fmt +for(var i=0;i-1){ +var pos=parseInt(fmt.name),value=_b_.tuple.$dict.__getitem__($.args,pos)}else{ +var value=_b_.dict.$dict.__getitem__($.kw,fmt.name)} +for(var j=0;j-1){key=parseInt(key)} +value=_b_.getattr(value,'__getitem__')(key)}} +if(fmt.conv=='a'){value=_b_.ascii(value)} +else if(fmt.conv=='r'){value=_b_.repr(value)} +else if(fmt.conv=='s'){value=_b_.str(value)} +res +=_b_.getattr(value,'__format__')(fmt.spec)} +return res} +$StringDict.format_map=function(self){throw NotImplementedError("function format_map not implemented yet");} +$StringDict.index=function(self){ +var res=$StringDict.find.apply(null,arguments) +if(res===-1)throw _b_.ValueError("substring not found") +return res} +$StringDict.isalnum=function(){var $=$B.args('isalnum',1,{self:null},['self'],arguments,{},null,null) +return /^[a-z0-9]+$/i.test($.self)} +$StringDict.isalpha=function(self){var $=$B.args('isalpha',1,{self:null},['self'],arguments,{},null,null) +return /^[a-z]+$/i.test($.self)} +$StringDict.isdecimal=function(){var $=$B.args('isdecimal',1,{self:null},['self'],arguments,{},null,null) +return /^[0-9]+$/.test($.self)} 
+$StringDict.isdigit=function(){var $=$B.args('isdigit',1,{self:null},['self'],arguments,{},null,null) +return /^[0-9]+$/.test($.self)} +$StringDict.isidentifier=function(){var $=$B.args('isidentifier',1,{self:null},['self'],arguments,{},null,null) +if($.self.search(/\$/)>-1){return false} +var last=$.self.charAt($.self.length-1) +if(' \n;'.search(last)>-1){return false} +var dummy={} +try{eval("dummy."+$.self);return true} +catch(err){return false}} +$StringDict.islower=function(){var $=$B.args('islower',1,{self:null},['self'],arguments,{},null,null) +return($B.cased_letters_regexp.exec($.self)!==null)&& +$.self==$.self.toLowerCase()&& $.self.search(/^\s*$/)==-1} +$StringDict.isnumeric=function(){var $=$B.args('isnumeric',1,{self:null},['self'],arguments,{},null,null) +return /^[0-9]+$/.test($.self)} +$StringDict.isprintable=function(){var $=$B.args('isprintable',1,{self:null},['self'],arguments,{},null,null) +return !/[^ -~]/.test($.self)} +$StringDict.isspace=function(){var $=$B.args('isspace',1,{self:null},['self'],arguments,{},null,null) +return /^\s+$/i.test($.self)} +$StringDict.istitle=function(){var $=$B.args('istitle',1,{self:null},['self'],arguments,{},null,null) +if($.self.search(/^\s*$/)>-1){return false} +function get_case(char){if(char.toLowerCase()==char.toUpperCase()){return false} +else if(char==char.toLowerCase()){return 'lower'} +else{return 'upper'}} +var pos=0,char,previous=false +while(pos<$.self.length){char=$.self.charAt(pos) +if(previous===undefined){previous=get_case(char)} +else{_case=get_case(char) +if(_case=='upper' && previous){return false} +else if(_case=='lower' && !previous){return false} +previous=_case} +pos++} +return true} +$StringDict.isupper=function(){var $=$B.args('isupper',1,{self:null},['self'],arguments,{},null,null) +return($B.cased_letters_regexp.exec($.self)!==null)&& +$.self==$.self.toUpperCase()&& $.self.search(/^\s*$/)==-1} +$StringDict.join=function(){var $=$B.args('join',2,{self:null,iterable:null},['self','iterable'],arguments,{},null,null) +var iterable=_b_.iter($.iterable) +var res=[],count=0 +while(1){try{var obj2=_b_.next(iterable) +if(!isinstance(obj2,str)){throw _b_.TypeError( +"sequence item "+count+": expected str instance, "+$B.get_class(obj2).__name__+" found")} +res.push(obj2)}catch(err){if(_b_.isinstance(err,_b_.StopIteration)){break} +else{throw err}}} +return res.join($.self)} +$StringDict.ljust=function(self){var $=$B.args('ljust',3,{self:null,width:null,fillchar:null},['self','width','fillchar'],arguments,{fillchar:' '},null,null) +if($.width <=self.length)return self +return self + $.fillchar.repeat($.width - self.length)} +$StringDict.lower=function(){var $=$B.args('lower',1,{self:null},['self'],arguments,{},null,null) +return $.self.toLowerCase()} +$StringDict.lstrip=function(self,x){var $=$B.args('lstrip',2,{self:null,chars:null},['self','chars'],arguments,{chars:_b_.None},null,null) +if($.chars===_b_.None){return $.self.replace(/^\s+/,'')} +return $.self.replace(new RegExp("^["+$.chars+"]*"),"")} +$StringDict.maketrans=function(){var $=$B.args('maketrans',3,{x:null,y:null,z:null},['x','y','z'],arguments,{y:null,z:null},null,null) +var _t=_b_.dict() +for(var i=0;i < 256;i++)_t.$numeric_dict[i]=i +if($.y===null && $.z===null){ +if(!_b_.isinstance($.x,_b_.dict)){throw _b_.TypeError('maketrans only argument must be a dict')} +var items=_b_.list(_b_.dict.$dict.items($.x)) +for(var i=0,len=items.length;i-1 && elts.length>=count){var rest=elts.slice(count).join('') +return 
_new+elts.slice(0,count).join(_new)+rest}else{return _new+elts.join(_new)+_new}}else{var elts=$StringDict.split(self,old,count)} +var res=self,pos=-1 +if(old.length==0){var res=_new +for(var i=0;i 0){pos=res.indexOf(old,pos); +if(pos < 0) +break; +res=res.substr(0,pos)+ _new + res.substr(pos + old.length); +pos=pos + _new.length; +count--;} +return res;} +$StringDict.rfind=function(self){ +var $=$B.args("rfind",4,{self:null,sub:null,start:null,end:null},['self','sub','start','end'],arguments,{start:0,end:null},null,null) +normalize_start_end($) +check_str($.sub) +if($.sub.length==0){if($.start>$.self.length){return -1} +else{return $.self.length}} +var sublen=$.sub.length +for(var i=$.end-sublen;i>=$.start;i--){if($.self.substr(i,sublen)==$.sub){return i}} +return -1} +$StringDict.rindex=function(){ +var res=$StringDict.rfind.apply(null,arguments) +if(res==-1){throw _b_.ValueError("substring not found")} +return res} +$StringDict.rjust=function(self){var $=$B.args("rjust",3,{self:null,width:null,fillchar:null},['self','width','fillchar'],arguments,{fillchar:' '},null,null) +if($.width <=self.length)return self +return $.fillchar.repeat($.width - self.length)+ self} +$StringDict.rpartition=function(self,sep){var $=$B.args('rpartition',2,{self:null,sep:null},['self','sep'],arguments,{},null,null) +check_str($.sep) +var self=reverse($.self),sep=reverse($.sep) +var items=$StringDict.partition(self,sep).reverse() +for(var i=0;i-1){pos++} +if(pos===self.length-1){return[self]} +var name='' +while(1){if(self.charAt(pos).search(/\s/)===-1){if(name===''){name=self.charAt(pos)} +else{name+=self.charAt(pos)}}else{if(name!==''){res.push(name) +if(maxsplit!==-1&&res.length===maxsplit+1){res.pop() +res.push(name+self.substr(pos)) +return res} +name=''}} +pos++ +if(pos>self.length-1){if(name){res.push(name)} +break}} +return res}else{var res=[],s='',pos=0,seplen=sep.length +if(maxsplit==0){return[self]} +while(pos-1 && res.length>=maxsplit){res.push(self.substr(pos)) +return res} +s=''}else{s +=self.charAt(pos) +pos++}} +res.push(s) +return res}} +$StringDict.splitlines=function(self){var $=$B.args('splitlines',2,{self:null,keepends:null},['self','keepends'],arguments,{keepends:false},null,null) +if(!_b_.isinstance($.keepends,[_b_.bool,_b_.int])){throw _b_.TypeError('integer argument expected, got '+ +$B.get_class($.keepends).__name)} +var keepends=_b_.int($.keepends) +if(keepends){var res=[],start=pos,pos=0,x,self=$.self +while(pos1){console.log(err)} +console.log('Warning - no method __str__ or __repr__, default to toString',arg) +return arg.toString()}}} +str.__class__=$B.$factory +str.$dict=$StringDict +$StringDict.$factory=str +$StringDict.__new__=function(cls){if(cls===undefined){throw _b_.TypeError('str.__new__(): not enough arguments')} +return{__class__:cls.$dict}} +$B.set_func_names($StringDict) +var $StringSubclassDict={__class__:$B.$type,__name__:'str'} +for(var $attr in $StringDict){if(typeof $StringDict[$attr]=='function'){$StringSubclassDict[$attr]=(function(attr){return function(){var args=[],pos=0 +if(arguments.length>0){var args=[arguments[0].valueOf()],pos=1 +for(var i=1,_len_i=arguments.length;i < _len_i;i++){args[pos++]=arguments[i]}} +return $StringDict[attr].apply(null,args)}})($attr)}} +$StringSubclassDict.__mro__=[$StringSubclassDict,$ObjectDict] +$B.$StringSubclassFactory={__class__:$B.$factory,$dict:$StringSubclassDict} +_b_.str=str +$B.parse_format_spec=function(spec){if(spec==''){this.empty=true} +else{var 
pos=0,aligns='<>=^',digits='0123456789',types='bcdeEfFgGnosxX%',align_pos=aligns.indexOf(spec.charAt(0)) +if(align_pos!=-1){if(spec.charAt(1)&& aligns.indexOf(spec.charAt(1))!=-1){ +this.fill=spec.charAt(0) +this.align=spec.charAt(1) +pos=2}else{ +this.align=aligns[align_pos]; +this.fill=' '; +pos++}}else{align_pos=aligns.indexOf(spec.charAt(1)) +if(spec.charAt(1)&& align_pos!=-1){ +this.align=aligns[align_pos] +this.fill=spec.charAt(0) +pos=2}} +var car=spec.charAt(pos) +if(car=='+'||car=='-'||car==' '){this.sign=car; +pos++; +car=spec.charAt(pos);} +if(car=='#'){this.alternate=true;pos++;car=spec.charAt(pos)} +if(car=='0'){ +this.fill='0' +this.align='=' +pos++;car=spec.charAt(pos)} +while(car && digits.indexOf(car)>-1){if(this.width===undefined){this.width=car} +else{this.width+=car} +pos++;car=spec.charAt(pos)} +if(this.width!==undefined){this.width=parseInt(this.width)} +if(car==','){this.comma=true;pos++;car=spec.charAt(pos)} +if(car=='.'){if(digits.indexOf(spec.charAt(pos+1))==-1){throw _b_.ValueError("Missing precision in format spec")} +this.precision=spec.charAt(pos+1) +pos+=2;car=spec.charAt(pos) +while(car && digits.indexOf(car)>-1){this.precision+=car;pos++;car=spec.charAt(pos)} +this.precision=parseInt(this.precision)} +if(car && types.indexOf(car)>-1){this.type=car;pos++;car=spec.charAt(pos)} +if(pos!==spec.length){ +throw _b_.ValueError("Invalid format specifier")}} +this.toString=function(){return(this.fill===undefined ? '' : _b_.str(this.fill))+ +(this.align||'')+ +(this.sign||'')+ +(this.alternate ? '#' : '')+ +(this.sign_aware ? '0' : '')+ +(this.width ||'')+ +(this.comma ? ',' : '')+ +(this.precision ? '.'+this.precision : '')+ +(this.type ||'')}} +$B.format_width=function(s,fmt){if(fmt.width && s.length': +return fill.repeat(missing)+s +case '=': +if('+-'.indexOf(s.charAt(0))>-1){return s.charAt(0)+fill.repeat(missing)+s.substr(1)}else{return fill.repeat(missing)+s } +case '^': +left=parseInt(missing/2) +return fill.repeat(left)+s+fill.repeat(missing-left)}} +return s}})(__BRYTHON__) +;(function($B){eval($B.InjectBuiltins()) +var $ObjectDict=_b_.object.$dict,str_hash=_b_.str.$dict.__hash__,$N=_b_.None +function $DictClass($keys,$values){this.iter=null +this.__class__=$DictDict +$DictDict.clear(this) +var setitem=$DictDict.__setitem__ +var i=$keys.length +while(i--)setitem($keys[i],$values[i])} +var $DictDict={__class__:$B.$type,__name__ : 'dict',$native:true,__dir__:$ObjectDict.__dir__} +var $key_iterator=function(d){this.d=d +this.current=0 +this.iter=new $item_generator(d)} +$key_iterator.prototype.length=function(){return this.iter.length } +$key_iterator.prototype.next=function(){return this.iter.next()[0]} +var $value_iterator=function(d){this.d=d +this.current=0 +this.iter=new $item_generator(d)} +$value_iterator.prototype.length=function(){return this.iter.length } +$value_iterator.prototype.next=function(){return this.iter.next()[1]} +var $item_generator=function(d){this.i=0 +if(d.$jsobj){this.items=[] +for(var attr in d.$jsobj){if(attr.charAt(0)!='$'){this.items.push([attr,d.$jsobj[attr]])}} +this.length=this.items.length; +return} +var items=[] +var pos=0 +for(var k in d.$numeric_dict){items[pos++]=[parseFloat(k),d.$numeric_dict[k]]} +for(var k in d.$string_dict){items[pos++]=[k,d.$string_dict[k]]} +for(var k in d.$object_dict){items[pos++]=d.$object_dict[k]} +this.items=items +this.length=items.length} +$item_generator.prototype.next=function(){if(this.i < this.items.length){return this.items[this.i++]} +throw _b_.StopIteration("StopIteration")} 
+$item_generator.prototype.as_list=function(){return this.items} +var $item_iterator=function(d){this.d=d +this.current=0 +this.iter=new $item_generator(d)} +$item_iterator.prototype.length=function(){return this.iter.items.length } +$item_iterator.prototype.next=function(){return _b_.tuple(this.iter.next())} +var $copy_dict=function(left,right){var _l=new $item_generator(right).as_list() +var si=$DictDict.__setitem__ +var i=_l.length +while(i--)si(left,_l[i][0],_l[i][1])} +$iterator_wrapper=function(items,klass){var res={__class__:klass,__iter__:function(){items.iter.i=0;return res},__len__:function(){return items.length()},__next__:function(){return items.next()},__repr__:function(){return klass.__name__+'('+ new $item_generator(items).as_list().join(',')+ ')'},} +res.__str__=res.toString=res.__repr__ +return res} +$DictDict.__bool__=function(self){var $=$B.args('__bool__',1,{self:null},['self'],arguments,{},null,null) +return $DictDict.__len__(self)> 0} +$DictDict.__contains__=function(){var $=$B.args('__contains__',2,{self:null,item:null},['self','item'],arguments,{},null,null),self=$.self,item=$.item +if(self.$jsobj)return self.$jsobj[item]!==undefined +switch(typeof item){case 'string': +return self.$string_dict[item]!==undefined +case 'number': +return self.$numeric_dict[item]!==undefined} +var _key=hash(item) +if(self.$str_hash[_key]!==undefined && +_b_.getattr(item,'__eq__')(self.$str_hash[_key])){return true} +if(self.$numeric_dict[_key]!==undefined && +_b_.getattr(item,'__eq__')(_key)){return true} +if(self.$object_dict[_key]!==undefined){ +var _eq=getattr(item,'__eq__') +if(_eq(self.$object_dict[_key][0])){return true}} +return false} +$DictDict.__delitem__=function(){var $=$B.args('__eq__',2,{self:null,arg:null},['self','arg'],arguments,{},null,null),self=$.self,arg=$.arg +if(self.$jsobj){if(self.$jsobj[arg]===undefined){throw KeyError(arg)} +delete self.$jsobj[arg] +return $N} +switch(typeof arg){case 'string': +if(self.$string_dict[arg]===undefined)throw KeyError(_b_.str(arg)) +delete self.$string_dict[arg] +delete self.$str_hash[str_hash(arg)] +return $N +case 'number': +if(self.$numeric_dict[arg]===undefined)throw KeyError(_b_.str(arg)) +delete self.$numeric_dict[arg] +return $N} +var _key=hash(arg) +if(self.$object_dict[_key]!==undefined){delete self.$object_dict[_key]} +if(self.$jsobj)delete self.$jsobj[arg] +return $N} +$DictDict.__eq__=function(){var $=$B.args('__eq__',2,{self:null,other:null},['self','other'],arguments,{},null,null),self=$.self,other=$.other +if(!isinstance(other,dict))return false +if($DictDict.__len__(self)!=$DictDict.__len__(other)){return false} +if((self.$numeric_dict.length!=other.$numeric_dict.length)|| +(self.$string_dict.length!=other.$string_dict.length)|| +(self.$object_dict.length!=other.$object_dict.length)){return false} +for(var k in self.$numeric_dict){if(!_b_.getattr(other.$numeric_dict[k],'__eq__')(self.$numeric_dict[k])){return false}} +for(var k in self.$string_dict){if(!_b_.getattr(other.$string_dict[k],'__eq__')(self.$string_dict[k])){return false}} +for(var k in self.$object_dict){if(!_b_.getattr(other.$object_dict[k][1],'__eq__')(self.$object_dict[k][1])){return false}} +return true} +$DictDict.__getitem__=function(){var $=$B.args('__getitem__',2,{self:null,arg:null},['self','arg'],arguments,{},null,null),self=$.self,arg=$.arg +if(self.$jsobj){if(self.$jsobj[arg]===undefined){return None} +return self.$jsobj[arg]} +switch(typeof arg){case 'string': +if(self.$string_dict[arg]!==undefined)return self.$string_dict[arg] +break 
+case 'number': +if(self.$numeric_dict[arg]!==undefined)return self.$numeric_dict[arg]} +var _key=_b_.getattr(arg,'__hash__')(),_eq=_b_.getattr(arg,'__eq__') +var sk=self.$str_hash[_key] +if(sk!==undefined && _eq(sk)){return self.$string_dict[sk]} +if(self.$numeric_dict[_key]!==undefined && _eq(_key)){return self.$numeric_dict[_key]} +var obj_ref=self.$object_dict[_key] +if(obj_ref!==undefined){ +_eq(self.$object_dict[_key][0]) +return self.$object_dict[_key][1]} +if(self.__class__!==$DictDict){try{var missing_method=getattr(self.__class__.$factory,'__missing__') +return missing_method(self,arg)}catch(err){}} +throw KeyError(_b_.str(arg))} +$DictDict.__hash__=function(self){if(self===undefined){return $DictDict.__hashvalue__ ||$B.$py_next_hash-- } +throw _b_.TypeError("unhashable type: 'dict'");} +$DictDict.__init__=function(self){var args=[],pos=0 +for(var i=1;i0)si(self,obj[i-1][0],obj[i-1][1]) +return $N}else if(isinstance(obj,dict)){$copy_dict(self,obj) +return $N} +if(obj.__class__===$B.JSObject.$dict){ +var si=$DictDict.__setitem__ +for(var attr in obj.js)si(self,attr,obj.js[attr]) +self.$jsobj=obj.js +return $N}} +var $ns=$B.args('dict',0,{},[],args,{},'args','kw') +var args=$ns['args'] +var kw=$ns['kw'] +if(args.length>0){if(isinstance(args[0],dict)){$B.$copy_dict(self,args[0]) +return $N} +if(Array.isArray(args[0])){var src=args[0] +var i=src.length -1 +var si=$DictDict.__setitem__ +while(i-->0)si(self,src[i-1][0],src[i-1][1])}else{var iterable=iter(args[0]) +while(1){try{var elt=next(iterable) +var key=getattr(elt,'__getitem__')(0) +var value=getattr(elt,'__getitem__')(1) +$DictDict.__setitem__(self,key,value)}catch(err){if(err.__name__==='StopIteration'){break} +throw err}}}} +if($DictDict.__len__(kw)> 0)$copy_dict(self,kw) +return $N} +var $dict_iterator=$B.$iterator_class('dict iterator') +$DictDict.__iter__=function(self){return $DictDict.keys(self)} +$DictDict.__len__=function(self){var _count=0 +if(self.$jsobj){for(var attr in self.$jsobj){if(attr.charAt(0)!='$'){_count++}} +return _count} +for(var k in self.$numeric_dict)_count++ +for(var k in self.$string_dict)_count++ +for(var k in self.$object_dict)_count+=self.$object_dict[k].length +return _count} +$DictDict.__mro__=[$DictDict,$ObjectDict] +$DictDict.__ne__=function(self,other){return !$DictDict.__eq__(self,other)} +$DictDict.__next__=function(self){if(self.$iter==null){self.$iter=new $item_generator(self)} +try{ +return self.$iter.next()}catch(err){if(err.__name__ !=="StopIteration"){throw err }}} +$DictDict.__repr__=function(self){if(self===undefined)return "" +if(self.$jsobj){ +var res=[] +for(var attr in self.$jsobj){if(attr.charAt(0)=='$' ||attr=='__class__'){continue} +else{try{res.push("'"+attr+"': "+_b_.repr(self.$jsobj[attr]))}catch(err){}}} +return '{'+res.join(', ')+'}'} +var _objs=[self] +var res=[],pos=0 +var items=new $item_generator(self).as_list() +for(var i=0;i < items.length;i++){var itm=items[i] +if(itm[1]===self){res[pos++]=repr(itm[0])+': {...}'} +else{res[pos++]=repr(itm[0])+': '+repr(itm[1])}} +return '{'+ res.join(', ')+'}'} +$DictDict.__setitem__=function(self,key,value){var $=$B.args('__setitem__',3,{self:null,key:null,value:null},['self','key','value'],arguments,{},null,null),self=$.self,key=$.key,value=$.value +if(self.$jsobj){self.$jsobj[key]=value;return} +switch(typeof key){case 'string': +self.$string_dict[key]=value +self.$str_hash[str_hash(key)]=key +return $N +case 'number': +self.$numeric_dict[key]=value +return $N} +var _key=hash(key) +var _eq=getattr(key,'__eq__') 
+if(self.$numeric_dict[_key]!==undefined && _eq(_key)){self.$numeric_dict[_key]=value +return $N} +var sk=self.$str_hash[_key] +if(sk!==undefined && _eq(sk)){self.$string_dict[sk]=value +return $N} +var obj_ref=self.$object_dict[_key] +if(obj_ref!==undefined){ +_eq(self.$object_dict[_key][0])} +self.$object_dict[_key]=[key,value] +return $N} +$DictDict.__str__=$DictDict.__repr__ +$B.make_rmethods($DictDict) +$DictDict.clear=function(){ +var $=$B.args('clear',1,{self:null},['self'],arguments,{},null,null),self=$.self +self.$numeric_dict={} +self.$string_dict={} +self.$str_hash={} +self.$object_dict={} +if(self.$jsobj)self.$jsobj={} +return $N} +$DictDict.copy=function(self){ +var $=$B.args('copy',1,{self:null},['self'],arguments,{},null,null),self=$.self,res=_b_.dict() +$copy_dict(res,self) +return res} +$DictDict.fromkeys=function(){var $=$B.args('fromkeys',3,{cls:null,keys:null,value:null},['cls','keys','value'],arguments,{value:_b_.None},null,null),keys=$.keys,value=$.value +var res=$.cls() +var keys_iter=_b_.iter(keys) +while(1){try{var key=_b_.next(keys_iter) +$DictDict.__setitem__(res,key,value)}catch(err){if($B.is_exc(err,[_b_.StopIteration])){return res} +throw err}}} +$DictDict.fromkeys.$type='classmethod' +$DictDict.get=function(){var $=$B.args('get',3,{self:null,key:null,_default:null},['self','key','_default'],arguments,{_default:$N},null,null) +try{return $DictDict.__getitem__($.self,$.key)} +catch(err){if(_b_.isinstance(err,_b_.KeyError)){return $._default} +else{throw err}}} +var $dict_itemsDict=$B.$iterator_class('dict_items') +$DictDict.items=function(self){if(arguments.length > 1){var _len=arguments.length - 1 +var _msg="items() takes no arguments ("+_len+" given)" +throw _b_.TypeError(_msg)} +return $iterator_wrapper(new $item_iterator(self),$dict_itemsDict)} +var $dict_keysDict=$B.$iterator_class('dict_keys') +$DictDict.keys=function(self){if(arguments.length > 1){var _len=arguments.length - 1 +var _msg="keys() takes no arguments ("+_len+" given)" +throw _b_.TypeError(_msg)} +return $iterator_wrapper(new $key_iterator(self),$dict_keysDict)} +$DictDict.pop=function(){var $=$B.args('pop',3,{self:null,key: null,_default:null},['self','key','_default'],arguments,{_default:$N},null,null),self=$.self,key=$.key,_default=$._default +try{var res=$DictDict.__getitem__(self,key) +$DictDict.__delitem__(self,key) +return res}catch(err){if(err.__name__==='KeyError'){if(_default!==undefined)return _default +throw err} +throw err}} +$DictDict.popitem=function(self){try{var itm=new $item_iterator(self).next() +$DictDict.__delitem__(self,itm[0]) +return _b_.tuple(itm)}catch(err){if(err.__name__=="StopIteration"){throw KeyError("'popitem(): dictionary is empty'")}}} +$DictDict.setdefault=function(){var $=$B.args('setdefault',3,{self:null,key: null,_default:null},['self','key','_default'],arguments,{},null,null),self=$.self,key=$.key,_default=$._default +try{return $DictDict.__getitem__(self,key)} +catch(err){if(_default===undefined)_default=None +$DictDict.__setitem__(self,key,_default) +return _default}} +$DictDict.update=function(self){var $=$B.args('update',1,{'self':null},['self'],arguments,{},'args','kw'),self=$.self,args=$.args,kw=$.kw +if(args.length>0){var o=args[0] +if(isinstance(o,dict)){$copy_dict(self,o)}else if(hasattr(o,'__getitem__')&& hasattr(o,'keys')){var _keys=_b_.list(getattr(o,'keys')()) +var si=$DictDict.__setitem__ +var i=_keys.length +while(i--){ +var _value=getattr(o,'__getitem__')(_keys[i]) +si(self,_keys[i],_value)}}} +$copy_dict(self,kw) +return $N} +var 
$dict_valuesDict=$B.$iterator_class('dict_values') +$DictDict.values=function(self){if(arguments.length > 1){var _len=arguments.length - 1 +var _msg="values() takes no arguments ("+_len+" given)" +throw _b_.TypeError(_msg)} +return $iterator_wrapper(new $value_iterator(self),$dict_valuesDict)} +function dict(args,second){var res={__class__:$DictDict,$numeric_dict :{},$object_dict :{},$string_dict :{},$str_hash:{},length: 0} +if(args===undefined){return res} +if(second===undefined){if(Array.isArray(args)){ +var i=-1,stop=args.length-1 +var si=$DictDict.__setitem__ +while(i++=0;i--){if(isNaN(self.$items[i])){return true}} +return false}else{return self.$items.indexOf(item)>-1}} +if(self.$str &&(typeof item=='string')){return self.$items.indexOf(item)>-1} +if(! _b_.isinstance(item,set)){_b_.hash(item)} +var eq_func=_b_.getattr(item,'__eq__') +for(var i=0,_len_i=self.$items.length;i < _len_i;i++){if(_.getattr(self.$items[i],'__eq__')(item))return true} +return false} +$SetDict.__eq__=function(self,other){ +if(other===undefined)return self===set +if(_.isinstance(other,_.set)){if(other.$items.length==self.$items.length){for(var i=0,_len_i=self.$items.length;i < _len_i;i++){if($SetDict.__contains__(self,other.$items[i])===false)return false} +return true} +return false} +if(_.isinstance(other,[_.list])){if(_.len(other)!=self.$items.length)return false +for(var i=0,_len_i=_.len(other);i < _len_i;i++){var _value=getattr(other,'__getitem__')(i) +if($SetDict.__contains__(self,_value)===false)return false} +return true} +if(_.hasattr(other,'__iter__')){ +if(_.len(other)!=self.$items.length)return false +var _it=_.iter(other) +while(1){try{ +var e=_.next(_it) +if(!$SetDict.__contains__(self,e))return false}catch(err){if(err.__name__=="StopIteration"){break} +throw err}} +return true} +return false} +$SetDict.__format__=function(self,format_string){return $SetDict.__str__(self)} +$SetDict.__ge__=function(self,other){if(_b_.isinstance(other,[set,frozenset])){return !$SetDict.__lt__(self,other)}else{return _b_.object.$dict.__ge__(self,other)}} +$SetDict.__gt__=function(self,other){if(_b_.isinstance(other,[set,frozenset])){return !$SetDict.__le__(self,other)}else{return _b_.object.$dict.__gt__(self,other)}} +$SetDict.__init__=function(self){var $=$B.args('__init__',2,{self:null,iterable:null},['self','iterable'],arguments,{iterable:[]},null,null),self=$.self,iterable=$.iterable +if(_.isinstance(iterable,[set,frozenset])){self.$items=iterable.$items +return $N} +var it=_b_.iter(iterable),obj={$items:[],$str:true,$num:true} +while(1){try{var item=_.next(it) +$SetDict.add(obj,item)}catch(err){if(_b_.isinstance(err,_b_.StopIteration)){break} +throw err}} +self.$items=obj.$items +return $N} +var $set_iterator=$B.$iterator_class('set iterator') +$SetDict.__iter__=function(self){var it=$B.$iterator(self.$items,$set_iterator),len=self.$items.length,nxt=it.__next__ +it.__next__=function(){if(it.__len__()!=len){throw _b_.RuntimeError("size changed during iteration")} +return nxt()} +return it} +$SetDict.__le__=function(self,other){if(_b_.isinstance(other,[set,frozenset])){var cfunc=_.getattr(other,'__contains__') +for(var i=0,_len_i=self.$items.length;i < _len_i;i++){if(!cfunc(self.$items[i]))return false} +return true}else{return _b_.object.$dict.__le__(self,other)}} +$SetDict.__len__=function(self){return self.$items.length} +$SetDict.__lt__=function(self,other){if(_b_.isinstance(other,[set,frozenset])){return($SetDict.__le__(self,other)&& +$SetDict.__len__(self)<_.getattr(other,'__len__')())}else{return 
_b_.object.$dict['__lt__'](self,other)}} +$SetDict.__mro__=[$SetDict,_.object.$dict] +$SetDict.__ne__=function(self,other){return !$SetDict.__eq__(self,other)} +$SetDict.__or__=function(self,other,accept_iter){ +var res=clone(self) +var func=_.getattr(_.iter(other),'__next__') +while(1){try{$SetDict.add(res,func())} +catch(err){if(_.isinstance(err,_.StopIteration)){break} +throw err}} +res.__class__=self.__class__ +return res} +$SetDict.__str__=$SetDict.toString=$SetDict.__repr__=function(self){frozen=self.$real==='frozen' +self.$cycle=self.$cycle===undefined ? 0 : self.$cycle+1 +if(self.$items.length===0){if(frozen)return 'frozenset()' +return 'set()'} +var klass_name=$B.get_class(self).__name__,head=klass_name+'({',tail='})' +if(head=='set('){head='{';tail='}'} +var res=[] +if(self.$cycle){self.$cycle-- +return klass_name+'(...)'} +for(var i=0,_len_i=self.$items.length;i < _len_i;i++){var r=_.repr(self.$items[i]) +if(r===self||r===self.$items[i]){res.push('{...}')} +else{res.push(r)}} +res=res.join(', ') +self.$cycle-- +return head+res+tail} +$SetDict.__sub__=function(self,other,accept_iter){ +$test(accept_iter,other,'-') +var res=create_type(self) +var cfunc=_.getattr(other,'__contains__') +for(var i=0,_len_i=self.$items.length;i < _len_i;i++){if(!cfunc(self.$items[i])){res.$items.push(self.$items[i])}} +return res} +$SetDict.__xor__=function(self,other,accept_iter){ +$test(accept_iter,other,'^') +var res=create_type(self) +var cfunc=_.getattr(other,'__contains__') +for(var i=0,_len_i=self.$items.length;i < _len_i;i++){if(!cfunc(self.$items[i])){$SetDict.add(res,self.$items[i])}} +for(var i=0,_len_i=other.$items.length;i < _len_i;i++){if(!$SetDict.__contains__(self,other.$items[i])){$SetDict.add(res,other.$items[i])}} +return res} +function $test(accept_iter,other,op){if(accept_iter===undefined && !_.isinstance(other,[set,frozenset])){throw _b_.TypeError("unsupported operand type(s) for "+op+ +": 'set' and '"+$B.get_class(other).__name__+"'")}} +$B.make_rmethods($SetDict) +$SetDict.add=function(){var $=$B.args('add',2,{self:null,item:null},['self','item'],arguments,{},null,null),self=$.self,item=$.item +_b_.hash(item) +if(self.$str && !(typeof item=='string')){self.$str=false} +if(self.$num && !(typeof item=='number')){self.$num=false} +if(self.$num||self.$str){if(self.$items.indexOf(item)==-1){self.$items.push(item)} +return $N} +var cfunc=_.getattr(item,'__eq__') +for(var i=0,_len_i=self.$items.length;i < _len_i;i++){if(cfunc(self.$items[i]))return} +self.$items.push(item) +return $N} +$SetDict.clear=function(){var $=$B.args('clear',1,{self:null},['self'],arguments,{},null,null) +$.self.$items=[]; +return $N} +$SetDict.copy=function(){var $=$B.args('copy',1,{self:null},['self'],arguments,{},null,null) +if(_b_.isinstance($.self,frozenset)){return $.self} +var res=set() +for(var i=0,_len_i=$.self.$items.length;i < _len_i;i++){res.$items[i]=$.self.$items[i]} +return res} +$SetDict.difference_update=function(self){var $=$B.args('difference_update',1,{self:null},['self'],arguments,{},'args',null) +for(var i=0;i<$.args.length;i++){var s=set($.args[i]),_next=_b_.getattr(_b_.iter(s),'__next__'),item +while(true){try{item=_next() +var _type=typeof item +if(_type=='string' ||_type=="number"){var _index=self.$items.indexOf(item) +if(_index > -1){self.$items.splice(_index,1)}}else{ +for(var j=0;j < self.$items.length;j++){if(getattr(self.$items[j],'__eq__')(item)){self.$items.splice(j,1)}}}}catch(err){if(_b_.isinstance(err,_b_.StopIteration)){break} +throw err}}} +return $N} 
+$SetDict.discard=function(){var $=$B.args('discard',2,{self:null,item:null},['self','item'],arguments,{},null,null) +try{$SetDict.remove($.self,$.item)} +catch(err){if(!_b_.isinstance(err,[_b_.KeyError,_b_.LookupError])){throw err}} +return $N} +$SetDict.intersection_update=function(){ +var $=$B.args('intersection_update',1,{self:null},['self'],arguments,{},'args',null),self=$.self +for(var i=0;i<$.args.length;i++){var remove=[],s=set($.args[i]) +for(var j=0;j -1){remove.push(_index)}else{add.push(item)}}else{ +var found=false +for(var j=0;!found && j < self.$items.length;j++){if(_b_.getattr(self.$items[j],'__eq__')(item)){remove.push(j) +found=true}} +if(!found){add.push(item)}}}catch(err){if(_b_.isinstance(err,_b_.StopIteration)){break} +throw err}} +remove.sort().reverse() +for(var i=0;i 0)) +)}catch(err){return false}} +var $DOMEventAttrs_W3C=['NONE','CAPTURING_PHASE','AT_TARGET','BUBBLING_PHASE','type','target','currentTarget','eventPhase','bubbles','cancelable','timeStamp','stopPropagation','preventDefault','initEvent'] +var $DOMEventAttrs_IE=['altKey','altLeft','button','cancelBubble','clientX','clientY','contentOverflow','ctrlKey','ctrlLeft','data','dataFld','dataTransfer','fromElement','keyCode','nextPage','offsetX','offsetY','origin','propertyName','reason','recordset','repeat','screenX','screenY','shiftKey','shiftLeft','source','srcElement','srcFilter','srcUrn','toElement','type','url','wheelDelta','x','y'] +$B.$isEvent=function(obj){var flag=true +for(var i=0;i<$DOMEventAttrs_W3C.length;i++){if(obj[$DOMEventAttrs_W3C[i]]===undefined){flag=false;break}} +if(flag)return true +for(var i=0;i<$DOMEventAttrs_IE.length;i++){if(obj[$DOMEventAttrs_IE[i]]===undefined)return false} +return true} +var $NodeTypes={1:"ELEMENT",2:"ATTRIBUTE",3:"TEXT",4:"CDATA_SECTION",5:"ENTITY_REFERENCE",6:"ENTITY",7:"PROCESSING_INSTRUCTION",8:"COMMENT",9:"DOCUMENT",10:"DOCUMENT_TYPE",11:"DOCUMENT_FRAGMENT",12:"NOTATION"} +var $DOMEventDict={__class__:$B.$type,__name__:'DOMEvent'} +$DOMEventDict.__mro__=[$DOMEventDict,$ObjectDict] +$DOMEventDict.__getattribute__=function(self,attr){switch(attr){case 'x': +return $mouseCoords(self).x +case 'y': +return $mouseCoords(self).y +case 'data': +if(self.dataTransfer!==undefined)return $Clipboard(self.dataTransfer) +return self['data'] +case 'target': +if(self.target===undefined)return DOMNode(self.target) +return DOMNode(self.target) +case 'char': +return String.fromCharCode(self.which)} +var res=self[attr] +if(res!==undefined){if(typeof res=='function'){var func=function(){return res.apply(self,arguments)} +func.$infos={__name__:res.toString().substr(9,res.toString().search('{'))} +return func} +return $B.$JS2Py(res)} +throw _b_.AttributeError("object DOMEvent has no attribute '"+attr+"'")} +function $DOMEvent(ev){ev.__class__=$DOMEventDict +if(ev.preventDefault===undefined){ev.preventDefault=function(){ev.returnValue=false}} +if(ev.stopPropagation===undefined){ev.stopPropagation=function(){ev.cancelBubble=true}} +ev.__repr__=function(){return ''} +ev.toString=ev.__str__=ev.__repr__ +return ev} +$B.$DOMEvent=$DOMEvent +$B.DOMEvent=function(evt_name){ +return $DOMEvent(new Event(evt_name))} +$B.DOMEvent.__class__=$B.$factory +$B.DOMEvent.$dict=$DOMEventDict +$DOMEventDict.$factory=$B.DOMEvent +var $ClipboardDict={__class__:$B.$type,__name__:'Clipboard'} +$ClipboardDict.__getitem__=function(self,name){return self.data.getData(name)} +$ClipboardDict.__mro__=[$ClipboardDict,$ObjectDict] 
+$ClipboardDict.__setitem__=function(self,name,value){self.data.setData(name,value)} +function $Clipboard(data){ +return{ +data : data,__class__ : $ClipboardDict,}} +function $EventsList(elt,evt,arg){ +this.elt=elt +this.evt=evt +if(isintance(arg,list)){this.callbacks=arg} +else{this.callbacks=[arg]} +this.remove=function(callback){var found=false +for(var i=0;i"} +dom.FileReader.__class__=$B.$type +dom.FileReader.__str__=function(){return ""} +function $Options(parent){return{ +__class__:$OptionsDict,parent:parent}} +var $OptionsDict={__class__:$B.$type,__name__:'Options'} +$OptionsDict.__delitem__=function(self,arg){self.parent.options.remove(arg.elt)} +$OptionsDict.__getitem__=function(self,key){return DOMNode(self.parent.options[key])} +$OptionsDict.__len__=function(self){return self.parent.options.length} +$OptionsDict.__mro__=[$OptionsDict,$ObjectDict] +$OptionsDict.__setattr__=function(self,attr,value){self.parent.options[attr]=value} +$OptionsDict.__setitem__=function(self,attr,value){self.parent.options[attr]=$B.$JS2Py(value)} +$OptionsDict.__str__=function(self){return ""} +$OptionsDict.append=function(self,element){self.parent.options.add(element.elt)} +$OptionsDict.insert=function(self,index,element){if(index===undefined){self.parent.options.add(element.elt)} +else{self.parent.options.add(element.elt,index)}} +$OptionsDict.item=function(self,index){return self.parent.options.item(index)} +$OptionsDict.namedItem=function(self,name){return self.parent.options.namedItem(name)} +$OptionsDict.remove=function(self,arg){self.parent.options.remove(arg.elt)} +var $StyleDict={__class__:$B.$type,__name__:'CSSProperty'} +$StyleDict.__mro__=[$StyleDict,$ObjectDict] +$StyleDict.__getattr__=function(self,attr){return $ObjectDict.__getattribute__(self.js,attr)} +$StyleDict.__setattr__=function(self,attr,value){if(attr.toLowerCase()==='float'){self.js.cssFloat=value +self.js.styleFloat=value}else{switch(attr){case 'top': +case 'left': +case 'height': +case 'width': +case 'borderWidth': +if(isinstance(value,_b_.int))value=value+'px'} +self.js[attr]=value}} +function $Style(style){ +return{__class__:$StyleDict,js:style}} +$Style.__class__=$B.$factory +$Style.$dict=$StyleDict +$StyleDict.$factory=$Style +var DOMNode=$B.DOMNode=function(elt){ +var res={} +res.$dict={} +res.elt=elt +if(elt['$brython_id']===undefined||elt.nodeType===9){ +elt.$brython_id='DOM-'+$B.UUID() +res.__repr__=res.__str__=res.toString=function(){var res=""}} +res.__class__=DOMNodeDict +return res} +DOMNodeDict={__class__ : $B.$type,__name__ : 'DOMNode'} +DOMNode.__class__=$B.$factory +DOMNode.$dict=DOMNodeDict +DOMNodeDict.$factory=DOMNode +DOMNodeDict.__mro__=[DOMNodeDict,_b_.object.$dict] +DOMNodeDict.__add__=function(self,other){ +var res=$TagSum() +res.children=[self],pos=1 +if(isinstance(other,$TagSum)){res.children=res.children.concat(other.children)}else if(isinstance(other,[_b_.str,_b_.int,_b_.float,_b_.list,_b_.dict,_b_.set,_b_.tuple])){res.children[pos++]=DOMNode(document.createTextNode(_b_.str(other)))}else if(isinstance(other,DOMNode)){res.children[pos++]=other}else{ +try{res.children=res.children.concat(_b_.list(other))} +catch(err){throw _b_.TypeError("can't add '"+ +$B.get_class(other).__name__+"' object to DOMNode instance")}} +return res} +DOMNodeDict.__bool__=function(self){return true} +DOMNodeDict.__class__=$B.$type +DOMNodeDict.__contains__=function(self,key){try{DOMNodeDict.__getitem__(self,key);return True} +catch(err){return False}} +DOMNodeDict.__del__=function(self){ +if(!self.elt.parentNode){throw 
_b_.ValueError("can't delete "+str(elt))} +self.elt.parentNode.removeChild(self.elt)} +DOMNodeDict.__delitem__=function(self,key){if(self.elt.nodeType===9){ +var res=self.elt.getElementById(key) +if(res){res.parentNode.removeChild(res)} +else{throw KeyError(key)}}else{ +console.log('delitem') +self.elt.parentNode.removeChild(self.elt)}} +DOMNodeDict.__eq__=function(self,other){return self.elt==other.elt} +DOMNodeDict.__getattribute__=function(self,attr){switch(attr){case 'class_name': +case 'children': +case 'html': +case 'id': +case 'parent': +case 'query': +case 'text': +case 'value': +return DOMNodeDict[attr](self) +case 'height': +case 'left': +case 'top': +case 'width': +if(self.elt instanceof SVGElement){return self.elt.getAttributeNS(null,attr)} +return DOMNodeDict[attr].__get__(self) +break +case 'clear': +case 'closest': +case 'remove': +return function(){return DOMNodeDict[attr](self,arguments[0])} +case 'headers': +if(self.elt.nodeType==9){ +var req=new XMLHttpRequest(); +req.open('GET',document.location,false); +req.send(null); +var headers=req.getAllResponseHeaders(); +headers=headers.split('\r\n') +var res=_b_.dict() +for(var i=0;i0){var res=$TagSum() +var pos=res.children.length +for(var i=0;i" +var res=""} +DOMNodeDict.__setattr__=function(self,attr,value){if(attr.substr(0,2)=='on'){ +if(!_b_.bool(value)){ +DOMNodeDict.unbind(self,attr.substr(2))}else{ +DOMNodeDict.bind(self,attr.substr(2),value)}}else{if(DOMNodeDict['set_'+attr]!==undefined){return DOMNodeDict['set_'+attr](self,value)} +var attr1=attr.replace('_','-').toLowerCase() +if(self.elt instanceof SVGElement && +self.elt.getAttributeNS(null,attr1)!==null){self.elt.setAttributeNS(null,attr1,value) +return} +if(self.elt[attr1]!==undefined){self.elt[attr1]=value;return} +if(typeof self.elt.getAttribute=='function' && +typeof self.elt.setAttribute=='function'){var res=self.elt.getAttribute(attr1) +if(res!==undefined&&res!==null&&res!=''){if(value===false){self.elt.removeAttribute(attr1)}else{self.elt.setAttribute(attr1,value)} +console.log(self.elt) +return}} +self.elt[attr]=value}} +DOMNodeDict.__setitem__=function(self,key,value){self.elt.childNodes[key]=value} +DOMNodeDict.abs_left={__get__: function(self){return $getPosition(self.elt).left},__set__: function(){throw _b_.AttributeError("'DOMNode' objectattribute 'abs_left' is read-only")}} +DOMNodeDict.abs_top={__get__: function(self){return $getPosition(self.elt).top},__set__: function(){throw _b_.AttributeError("'DOMNode' objectattribute 'abs_top' is read-only")}} +DOMNodeDict.bind=function(self,event){ +var _id +if(self.elt.nodeType===9){_id=0} +else{_id=self.elt.$brython_id} +var _d=_b_.dict.$dict +if(!_d.__contains__($B.events,_id)){_d.__setitem__($B.events,_id,dict())} +var item=_d.__getitem__($B.events,_id) +if(!_d.__contains__(item,event)){_d.__setitem__(item,event,[])} +var evlist=_d.__getitem__(item,event) +var pos=evlist.length +for(var i=2;i=0;i--){elt.removeChild(elt.childNodes[i])}} +DOMNodeDict.Class=function(self){if(self.elt.className !==undefined)return self.elt.className +return None} +DOMNodeDict.class_name=function(self){return DOMNodeDict.Class(self)} +DOMNodeDict.clone=function(self){res=DOMNode(self.elt.cloneNode(true)) +res.elt.$brython_id='DOM-' + $B.UUID() +var _d=_b_.dict.$dict +if(_d.__contains__($B.events,self.elt.$brython_id)){var events=_d.__getitem__($B.events,self.elt.$brython_id) +var items=_b_.list(_d.items(events)) +for(var i=0;i=0;i--)res.splice(to_delete[i],1)} +return res} +DOMNodeDict.getContext=function(self){ 
+if(!('getContext' in self.elt)){throw _b_.AttributeError("object has no attribute 'getContext'")} +var obj=self.elt +return function(ctx){return JSObject(obj.getContext(ctx))}} +DOMNodeDict.getSelectionRange=function(self){ +if(self.elt['getSelectionRange']!==undefined){return self.elt.getSelectionRange.apply(null,arguments)}} +DOMNodeDict.height={'__get__': function(self){ +if(self.elt.tagName=='CANVAS'){return self.elt.height} +var res=parseInt(self.elt.style.height) +if(isNaN(res)){return self.elt.offsetHeight} +return res},'__set__': function(obj,self,value){if(self.elt.tagName=='CANVAS'){self.elt.height=value} +self.elt.style.height=value+'px'}} +DOMNodeDict.html=function(self){return self.elt.innerHTML} +DOMNodeDict.id=function(self){if(self.elt.id !==undefined)return self.elt.id +return None} +DOMNodeDict.inside=function(self,other){ +other=other.elt +var elt=self.elt +while(true){if(other===elt){return true} +elt=elt.parentElement +if(!elt){return false}}} +DOMNodeDict.options=function(self){ +return new $OptionsClass(self.elt)} +DOMNodeDict.parent=function(self){if(self.elt.parentElement)return DOMNode(self.elt.parentElement) +return None} +DOMNodeDict.left={'__get__': function(self){var res=parseInt(self.elt.style.left) +if(isNaN(res)){throw _b_.AttributeError("node has no attribute 'left'")} +return res},'__set__': function(obj,self,value){self.elt.style.left=value+'px'}} +DOMNodeDict.remove=function(self,child){ +var elt=self.elt,flag=false,ch_elt=child.elt +if(self.elt.nodeType==9){elt=self.elt.body} +while(ch_elt.parentElement){if(ch_elt.parentElement===elt){elt.removeChild(ch_elt) +flag=true +break}else{ch_elt=ch_elt.parentElement}} +if(!flag){throw _b_.ValueError('element '+child+' is not inside '+self)}} +DOMNodeDict.reset=function(self){ +return function(){self.elt.reset()}} +DOMNodeDict.style=function(self){ +self.elt.style.float=self.elt.style.cssFloat ||self.style.styleFloat +return $B.JSObject(self.elt.style)} +DOMNodeDict.top={'__get__': function(self){var res=parseInt(self.elt.style.top) +if(isNaN(res)){throw _b_.AttributeError("node has no attribute 'top'")} +return res},'__set__': function(obj,self,value){self.elt.style.top=value+'px'}} +DOMNodeDict.setSelectionRange=function(self){ +if(this['setSelectionRange']!==undefined){return(function(obj){return function(){return obj.setSelectionRange.apply(obj,arguments)}})(this)}else if(this['createTextRange']!==undefined){return(function(obj){return function(start_pos,end_pos){if(end_pos==undefined){end_pos=start_pos} +var range=obj.createTextRange(); +range.collapse(true); +range.moveEnd('character',start_pos); +range.moveStart('character',end_pos); +range.select();}})(this)}} +DOMNodeDict.set_class_name=function(self,arg){self.elt.setAttribute('class',arg)} +DOMNodeDict.set_html=function(self,value){self.elt.innerHTML=str(value)} +DOMNodeDict.set_style=function(self,style){ +if(!_b_.isinstance(style,_b_.dict)){throw TypeError('style must be dict, not '+$B.get_class(style).__name__)} +var items=_b_.list(_b_.dict.$dict.items(style)) +for(var i=0;i-1} +$QueryDict.__getitem__=function(self,key){ +var result=self._values[key] +if(result===undefined)throw KeyError(key) +if(result.length==1)return result[0] +return result} +var $QueryDict_iterator=$B.$iterator_class('query string iterator') +$QueryDict.__iter__=function(self){return $B.$iterator(self._keys,$QueryDict_iterator)} +$QueryDict.__mro__=[$QueryDict,$ObjectDict] +$QueryDict.getfirst=function(self,key,_default){ +var result=self._values[key] 
+if(result===undefined){if(_default===undefined)return None +return _default} +return result[0]} +$QueryDict.getlist=function(self,key){ +var result=self._values[key] +if(result===undefined)return[] +return result} +$QueryDict.getvalue=function(self,key,_default){try{return $QueryDict.__getitem__(self,key)} +catch(err){if(_default===undefined)return None +return _default}} +$QueryDict.keys=function(self){return self._keys} +DOMNodeDict.query=function(self){var res={__class__:$QueryDict,_keys :[],_values :{}} +var qs=location.search.substr(1).split('&') +for(var i=0;i-1){res._values[key].push(value)} +else{res._keys.push(key) +res._values[key]=[value]}} +return res} +var $TagSumDict={__class__ : $B.$type,__name__:'TagSum'} +$TagSumDict.appendChild=function(self,child){self.children.push(child)} +$TagSumDict.__add__=function(self,other){if($B.get_class(other)===$TagSumDict){self.children=self.children.concat(other.children)}else if(isinstance(other,[_b_.str,_b_.int,_b_.float,_b_.dict,_b_.set,_b_.list])){self.children=self.children.concat(DOMNode(document.createTextNode(other)))}else{self.children.push(other)} +return self} +$TagSumDict.__mro__=[$TagSumDict,$ObjectDict] +$TagSumDict.__radd__=function(self,other){var res=$TagSum() +res.children=self.children.concat(DOMNode(document.createTextNode(other))) +return res} +$TagSumDict.__repr__=function(self){var res=' ' +for(var i=0;i'}} +$B.GeneratorBreak={} +$B.$GeneratorSendError={} +var $GeneratorReturn={} +$B.generator_return=function(){return{__class__:$GeneratorReturn}} +function in_loop(node){ +while(node){if(node.loop_start!==undefined)return node +node=node.parent} +return false} +function in_try(node){ +var tries=[],pnode=node.parent,pos=0 +while(pnode){if(pnode.is_try){tries[pos++]=pnode} +pnode=pnode.parent} +return tries} +var $BRGeneratorDict={__class__:$B.$type,__name__:'generator'} +$BRGeneratorDict.__iter__=function(self){return self} +$BRGeneratorDict.__enter__=function(self){console.log("generator.__enter__ called")} +$BRGeneratorDict.__exit__=function(self){console.log("generator.__exit__ called")} +function clear_ns(iter_id){delete $B.vars[iter_id] +delete $B.modules[iter_id] +delete $B.bound[iter_id] +delete $B.generators[iter_id] +delete $B.$generators[iter_id]} +$BRGeneratorDict.__next__=function(self){ +var scope_id=self.func_root.scope.id +if(self._next===undefined){ +var src=self.func_root.src()+'\n)()' +try{eval(src)} +catch(err){console.log("cant eval\n"+src+'\n'+err) +clear_ns(self.iter_id) +throw err} +self._next=$B.$generators[self.iter_id]} +if(self.gi_running){throw _b_.ValueError("ValueError: generator already executing")} +self.gi_running=true +for(var i=0;i'} +$BRGeneratorDict.close=function(self,value){self.sent_value=_b_.GeneratorExit() +try{var res=$BRGeneratorDict.__next__(self) +if(res!==_b_.None){throw _b_.RuntimeError("closed generator returned a value")}}catch(err){if($B.is_exc(err,[_b_.StopIteration,_b_.GeneratorExit])){return _b_.None} +throw err}} +$BRGeneratorDict.send=function(self,value){self.sent_value=value +return $BRGeneratorDict.__next__(self)} +$BRGeneratorDict.$$throw=function(self,value){if(_b_.isinstance(value,_b_.type))value=value() +self.sent_value={__class__:$B.$GeneratorSendError,err:value} +return $BRGeneratorDict.__next__(self)} +$B.$BRgenerator=function(env,func_name,func,def_id){ +if(func.$def_node){var def_node=func.$def_node +delete $B.modules[def_id]}else{var def_node=func.$def_node=$B.modules[def_id]} +if(def_node===undefined){console.log('def node undef',def_id)} +var 
def_ctx=def_node.C.tree[0] +var counter=0 +$B.generators=$B.generators ||{} +$B.$generators=$B.$generators ||{} +var module=def_node.module +var res=function(){var args=[],pos=0 +for(var i=0,_len_i=arguments.length;i<_len_i;i++){args[pos++]=arguments[i]} +var iter_id=def_id+'_'+counter++ +$B.bound[iter_id]={} +for(var attr in $B.bound[def_id]){$B.bound[iter_id][attr]=true} +var func_root=new $B.genNode(def_ctx.to_js('$B.$generators["'+iter_id+'"]')) +func_root.scope=env[0][1] +func_root.module=module +func_root.yields=[] +func_root.loop_ends={} +func_root.def_id=def_id +func_root.iter_id=iter_id +for(var i=0,_len_i=def_node.children.length;i < _len_i;i++){func_root.addChild($B.make_node(func_root,def_node.children[i]))} +var func_node=func_root.children[1].children[0] +var obj={__class__ : $BRGeneratorDict,args:args,def_id:def_id,def_ctx:def_ctx,def_node:def_node,env:env,func:func,func_name:func_name,func_root:func_root,module:module,func_node:func_node,next_root:func_root,gi_running:false,iter_id:iter_id,id:iter_id,num:0} +$B.modules[iter_id]=obj +obj.parent_block=def_node.parent_block +return obj} +res.__call__=function(){console.log('call generator');return res.apply(null,arguments)} +res.__repr__=function(){return ""} +return res} +$B.$BRgenerator.__repr__=function(){return ""} +$B.$BRgenerator.__str__=function(){return ""} +$B.$BRgenerator.__class__=$B.$type})(__BRYTHON__) +;(function($B){var modules={} +modules['browser']={$package: true,$is_package: true,__package__:'browser',__file__:$B.brython_path.replace(/\/*$/g,'')+ +'/Lib/browser/__init__.py',alert:function(message){window.alert($B.builtins.str(message))},confirm: $B.JSObject(window.confirm),console:$B.JSObject(window.console),document:$B.DOMNode(document),doc: $B.DOMNode(document), +DOMEvent:$B.DOMEvent,DOMNode:$B.DOMNode,mouseCoords: function(ev){return $B.JSObject($mouseCoords(ev))},prompt: function(message,default_value){return $B.JSObject(window.prompt(message,default_value||''))},reload: function(){ +var scripts=document.getElementsByTagName('script'),js_scripts=[] +for(var i=0;i"} +$B.imported[name]=$B.modules[name]=module_obj} +for(var attr in modules){load(attr,modules[attr])} +modules['browser'].html=modules['browser.html'] +$B.builtins.__builtins__=$B.$ModuleDict.$factory('__builtins__','Python builtins') +for(var attr in $B.builtins){$B.builtins.__builtins__[attr]=$B.builtins[attr]} +$B.builtins.__builtins__.__setattr__=function(attr,value){console.log('set attr of builtins',attr) +$B.builtins[attr]=value} +$B.bound.__builtins__.__builtins__=$B.builtins.__builtins__})(__BRYTHON__) +;(function($B){var _b_=$B.builtins, +$sys=$B.imported['_sys']; +function import_hooks(mod_name,_path,module,blocking){ +var is_none=$B.is_none +if(is_none(module)){module=undefined;} +var _meta_path=_b_.getattr($sys,'meta_path'); +var spec=undefined; +for(var i=0,_len_i=_meta_path.length;i < _len_i && is_none(spec);i++){var _finder=_meta_path[i],find_spec=_b_.getattr(_finder,'find_spec',null) +if(find_spec !==null){spec=_b_.getattr(find_spec,'__call__')(mod_name,_path,undefined); +spec.blocking=blocking}} +if(is_none(spec)){ +throw _b_.ImportError('No module named '+mod_name);} +var _loader=_b_.getattr(spec,'loader',_b_.None),_sys_modules=$B.imported,_spec_name=_b_.getattr(spec,'name'); +if(is_none(module)){ +if(!is_none(_loader)){var create_module=_b_.getattr(_loader,'create_module',_b_.None); +if(!is_none(create_module)){module=_b_.getattr(create_module,'__call__')(spec);}} +if(module===undefined){throw _b_.ImportError(mod_name)} 
+if(is_none(module)){ +module=$B.$ModuleDict.$factory(mod_name); +var mod_desc=_b_.getattr(spec,'origin'); +if(_b_.getattr(spec,'has_location')){mod_desc="from '" + mod_desc + "'";} +else{ +mod_desc='(' + mod_desc + ')';} +module.toString=module.__repr__=module.__str__= +function(){return ""}}} +module.__name__=_spec_name; +module.__loader__=_loader; +module.__package__=_b_.getattr(spec,'parent',''); +module.__spec__=spec; +var locs=_b_.getattr(spec,'submodule_search_locations'); +if(module.$is_package=!is_none(locs)){module.__path__=locs;} +if(_b_.getattr(spec,'has_location')){module.__file__=_b_.getattr(spec,'origin') +$B.$py_module_path[module.__name__]=module.__file__;} +var cached=_b_.getattr(spec,'cached'); +if(!is_none(cached)){module.__cached__=cached;} +if(is_none(_loader)){if(!is_none(locs)){$B.modules[_spec_name]=_sys_modules[_spec_name]=module;} +else{ +throw _b_.ImportError(mod_name);}} +else{ +var exec_module=_b_.getattr(_loader,'exec_module',_b_.None); +if(is_none(exec_module)){ +module=_b_.getattr(_b_.getattr(_loader,'load_module'),'__call__')(_spec_name);} +else{ +$B.modules[_spec_name]=_sys_modules[_spec_name]=module; +try{_b_.getattr(exec_module,'__call__')(module,blocking)} +catch(e){delete $B.modules[_spec_name]; +delete _sys_modules[_spec_name]; +throw e;}}} +return _sys_modules[_spec_name];} +$B.import_hooks=import_hooks})(__BRYTHON__) +;(function($B){_b_=$B.builtins +$B.execution_object={} +$B.execution_object.queue=[] +$B.execution_object.start_flag=true +$B.execution_object.$execute_next_segment=function(){if($B.execution_object.queue.length==0){return} +$B.execution_object.start_flag=false +var element=$B.execution_object.queue.shift() +var code=element[0] +var delay=10 +if(element.length==2)delay=element[1] +setTimeout(function(){ +console.log(code) +try{eval(code)}catch(e){console.log(e)} +$B.execution_object.start_flag=$B.execution_object.queue.length==0;},delay);} +$B.execution_object.$append=function(code,delay){$B.execution_object.queue.push([code,delay]); +if($B.execution_object.start_flag)$B.execution_object.$execute_next_segment()} +$B.execution_object.source_conversion=function(js){js=js.replace("\n","",'g') +js=js.replace("'","\\'",'g') +js=js.replace('"','\\"','g') +js=js.replace("@@","\'",'g') +js+="';$B.execution_object.$append($jscode, 10); " +js+="$B.execution_object.$execute_next_segment(); " +return "var $jscode='" + js} +_b_['brython_block']=function(f,sec){if(sec===undefined ||sec==_b_.None)sec=1 +return f} +$B.builtin_funcs['brython_block']=true +$B.bound['__builtins__']['brython_block']=true +_b_['brython_async']=function(f){return f} +$B.builtin_funcs['brython_async']=true +$B.bound['__builtins__']['brython_async']=true})(__BRYTHON__) diff --git a/app/assets/javascripts/groups.js.coffee b/app/assets/javascripts/groups.js.coffee new file mode 100644 diff --git a/app/assets/javascripts/local_jquery.js b/app/assets/javascripts/local_jquery.js --- a/app/assets/javascripts/local_jquery.js +++ b/app/assets/javascripts/local_jquery.js @@ -1,13 +1,3 @@ -//= require jquery -//= require jquery_ujs -//= require jquery.ui.all -//= require jquery.ui.datepicker -//= require jquery.ui.slider -//= require jquery-ui-timepicker-addon -//= require jquery-tablesorter -//= require best_in_place -//= require best_in_place.jquery-ui - $(document).ready(function() { /* Activating Best In Place */ jQuery(".best_in_place").best_in_place(); diff --git a/app/assets/javascripts/submissions.js.coffee b/app/assets/javascripts/submissions.js.coffee new file mode 100644 
--- /dev/null +++ b/app/assets/javascripts/submissions.js.coffee @@ -0,0 +1,29 @@ +# Place all the behaviors and hooks related to the matching controller here. +# All this logic will automatically be available in application.js. +# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/ + + +$ -> + $("#live_submit").on "click", (event) -> + h = $("#editor_text") + e = ace.edit("editor") + h.val(e.getValue()) + + $("#language_id").on "change", (event) -> + text = $("#language_id option:selected").text() + mode = 'ace/mode/c_cpp' + switch text + when 'Pascal' then mode = 'ace/mode/pascal' + when 'C++','C' then mode = 'ace/mode/c_cpp' + when 'Ruby' then mode = 'ace/mode/ruby' + when 'Python' then mode = 'ace/mode/python' + when 'Java' then mode = 'ace/mode/java' + editor = ace.edit('editor') + editor.getSession().setMode(mode) + + e = ace.edit("editor") + + + + + return diff --git a/app/assets/javascripts/tags.coffee b/app/assets/javascripts/tags.coffee new file mode 100644 --- /dev/null +++ b/app/assets/javascripts/tags.coffee @@ -0,0 +1,3 @@ +# Place all the behaviors and hooks related to the matching controller here. +# All this logic will automatically be available in application.js. +# You can use CoffeeScript in this file: http://coffeescript.org/ diff --git a/app/assets/javascripts/testcases.js.coffee b/app/assets/javascripts/testcases.js.coffee new file mode 100644 diff --git a/app/assets/stylesheets/application.css.sass b/app/assets/stylesheets/application.css.sass deleted file mode 100644 --- a/app/assets/stylesheets/application.css.sass +++ /dev/null @@ -1,433 +0,0 @@ -/* - * This is a manifest file that'll be compiled into application.css, which will include all the files - * listed below. - * - * Any CSS and SCSS file within this directory, lib/assets/stylesheets, vendor/assets/stylesheets, - * or any plugin's vendor/assets/stylesheets directory can be referenced here using a relative path. - * - * You're free to add application-wide styles to this file and they'll appear at the bottom of the - * compiled file so the styles you add here take precedence over styles defined in any styles - * defined in the other CSS/SCSS files in this directory. It is generally better to create a new - * file per style scope. - * - // bootstrap says that we should not do this, but @import each file instead - # *= require_tree . 
- # *= require_self - */ - -@import jquery.ui.all -@import jquery.ui.core -@import jquery.ui.theme -@import jquery.ui.datepicker -@import jquery.ui.slider -@import jquery-ui-timepicker-addon -@import jquery-tablesorter/theme.metro-dark -@import jquery.countdown -@import tablesorter-theme.cafe - -//bootstrap -@import bootstrap-sprockets -@import bootstrap -@import select2 -@import select2-bootstrap -//@import bootstrap3-switch -@import bootstrap-toggle -@import bootstrap-sortable - -//bootstrap navbar color (from) -$bgDefault : #19197b -$bgHighlight : #06064b -$colDefault : #8e8eb4 -$colHighlight : #ffffff -$dropDown : false -.navbar-default - background-color: $bgDefault - border-color: $bgHighlight - .navbar-brand - color: $colDefault - &:hover, &:focus - color: $colHighlight - .navbar-text - color: $colDefault - .navbar-nav - > li - > a - color: $colDefault - &:hover, &:focus - color: $colHighlight - @if $dropDown - > .dropdown-menu - background-color: $bgDefault - > li - > a - color: $colDefault - &:hover, &:focus - color: $colHighlight - background-color: $bgHighlight - > .divider - background-color: $bgHighlight - @if $dropDown - .open .dropdown-menu > .active - > a, > a:hover, > a:focus - color: $colHighlight - background-color: $bgHighlight - > .active - > a, > a:hover, > a:focus - color: $colHighlight - background-color: $bgHighlight - > .open - > a, > a:hover, > a:focus - color: $colHighlight - background-color: $bgHighlight - .navbar-toggle - border-color: $bgHighlight - &:hover, &:focus - background-color: $bgHighlight - .icon-bar - background-color: $colDefault - .navbar-collapse, - .navbar-form - border-color: $colDefault - .navbar-link - color: $colDefault - &:hover - color: $colHighlight -@media (max-width: 767px) - .navbar-default .navbar-nav .open .dropdown-menu - > li > a - color: $colDefault - &:hover, &:focus - color: $colHighlight - > .active - > a, > a:hover, > a:focus - color: $colHighlight - background-color: $bgHighlight - -.secondnavbar - top: 50px - - -// --------------- bootstrap file upload ---------------------- -.btn-file - position: relative - overflow: hidden - -.btn-file input[type=file] - position: absolute - top: 0 - right: 0 - min-width: 100% - min-height: 100% - font-size: 100px - text-align: right - filter: alpha(opacity=0) - opacity: 0 - outline: none - background: white - cursor: inherit - display: block - -body - background: white image-url("topbg.jpg") repeat-x top center - //font-size: 13px - //font-family: Tahoma, "sans-serif" - margin: 10px - padding: 10px - padding-top: 60px - -// ------------------ bootstrap sortable -------------------- -table.sortable th - padding-right: 20px !important - span.sign - right: -15px !important - &.text-right - padding-left: 20px !important - padding-right: 8px !important - &:after, span.sign - left: -15px !important - -input - font-family: Tahoma, "sans-serif" - - -h1 - font-size: 24px - color: #334488 - line-height: 2em - - -h2 - font-size: 18px - color: #5566bb - line-height: 1.5em - - -hr - border-top: 1px solid #dddddd - border-bottom: 1px solid #eeeeee - - -//#a -// color: #6666cc -// text-decoration: none -// -// &:link, &:visited -// color: #6666cc -// text-decoration: none -// -// &:hover, &:focus -// color: #111166 -// text-decoration: none - - -div - &.userbar - line-height: 1.5em - text-align: right - font-size: 12px - - &.title - padding: 10px 0px - line-height: 1.5em - font-size: 13px - - span.contest-over-msg - font-size: 15px - color: red - - table - width: 100% - font-weight: bold - - td - 
&.left-col - text-align: left - vertical-align: top - color: #444444 - - &.right-col - text-align: right - vertical-align: top - font-size: 18px - color: #116699 - - -table.info - margin: 10px 0 - border: 1px solid #666666 - border-collapse: collapse - font-size: 12px - - th - border: 1px solid #666666 - line-height: 1.5em - padding: 0 0.5em - - td - border-left: 1px solid #666666 - border-right: 1px solid #666666 - line-height: 1.5em - padding: 0 0.5em - - -tr - &.info-head - background: #777777 - color: white - - &.info-odd - background: #eeeeee - - &.info-even - background: #fcfcfc - -=basicbox - background: #eeeeff - border: 1px dotted #99aaee - padding: 5px - margin: 10px 0px - color: black - font-size: 13px - -.infobox - +basicbox - -.submitbox - +basicbox - -.errorExplanation - border: 1px dotted gray - color: #bb2222 - padding: 5px 15px 5px 15px - margin-bottom: 5px - background-color: white - font-weight: normal - - h2 - color: #cc1111 - font-weight: bold - - -table.uinfo - border-collapse: collapse - border: 1px solid black - font-size: 13px - - -td.uinfo - vertical-align: top - border: 1px solid black - padding: 5px - - -th.uinfo - background: lightgreen - vertical-align: top - text-align: right - border: 1px solid black - padding: 5px - - -div - &.compilermsgbody - font-family: monospace - - &.task-menu - text-align: center - font-size: 13px - line-height: 1.75em - font-weight: bold - border-top: 1px dashed gray - border-bottom: 1px dashed gray - margin-top: 2px - margin-bottom: 4px - - -table.taskdesc - border: 2px solid #dddddd - border-collapse: collapse - margin: 10px auto - width: 90% - font-size: 13px - - p - font-size: 13px - - tr.name - border: 2px solid #dddddd - background: #dddddd - color: #333333 - font-weight: bold - font-size: 14px - line-height: 1.5em - text-align: center - - td - &.desc-odd - padding: 5px - padding-left: 20px - background: #fefeee - - &.desc-even - padding: 5px - padding-left: 20px - background: #feeefe - - -.announcementbox - margin: 10px 0px - background: #bbddee - padding: 1px - - span.title - font-weight: bold - color: #224455 - padding-left: 10px - line-height: 1.6em - -.announcement - margin: 2px - background: white - padding: 1px - padding-left: 10px - padding-right: 10px - padding-top: 5px - padding-bottom: 5px - - -.announcement p - font-size: 12px - margin: 2px - - -.pub-info - text-align: right - font-style: italic - font-size: 9px - - p - text-align: right - font-style: italic - font-size: 9px - - -.announcement - .toggles - font-weight: normal - float: right - font-size: 80% - - .announcement-title - font-weight: bold - - -div - &.message - margin: 10px 0 0 - - div - &.message - margin: 0 0 0 30px - - &.body - border: 2px solid #dddddd - background: #fff8f8 - padding-left: 5px - - &.reply-body - border: 2px solid #bbbbbb - background: #fffff8 - padding-left: 5px - - &.stat - font-size: 10px - line-height: 1.75em - padding: 0 5px - color: #333333 - background: #dddddd - font-weight: bold - - &.message div.stat - font-size: 10px - line-height: 1.75em - padding: 0 5px - color: #444444 - background: #bbbbbb - font-weight: bold - - &.contest-title - color: white - text-align: center - line-height: 2em - - &.registration-desc, &.test-desc - border: 1px dotted gray - background: #f5f5f5 - padding: 5px - margin: 10px 0 - font-size: 12px - line-height: 1.5em - -h2.contest-title - margin-top: 5px - margin-bottom: 5px diff --git a/app/assets/stylesheets/application.css.scss b/app/assets/stylesheets/application.css.scss new file mode 100644 --- 
/dev/null +++ b/app/assets/stylesheets/application.css.scss @@ -0,0 +1,557 @@ +/* This is a manifest file that'll be compiled into application.css, which will include all the files + * listed below. + * + * Any CSS and SCSS file within this directory, lib/assets/stylesheets, vendor/assets/stylesheets, + * or any plugin's vendor/assets/stylesheets directory can be referenced here using a relative path. + * + * You're free to add application-wide styles to this file and they'll appear at the bottom of the + * compiled file so the styles you add here take precedence over styles defined in any styles + * defined in the other CSS/SCSS files in this directory. It is generally better to create a new + * file per style scope. + * + * // bootstrap says that we should not do this, but @import each file instead + * # *= require_tree . + * # *= require_self + */ + +@import "jquery-ui"; +//@import "jquery.ui.core"; +//@import "jquery.ui.theme"; +//@import "jquery.ui.datepicker"; +//@import "jquery.ui.slider"; +@import "jquery-ui-timepicker-addon"; +@import "jquery-tablesorter/theme.metro-dark"; +@import "jquery.countdown"; +@import "tablesorter-theme.cafe"; + +//bootstrap +@import "bootstrap-sprockets"; +@import "bootstrap"; +@import "select2"; +@import "select2-bootstrap"; + +//@import bootstrap3-switch +@import "bootstrap-toggle"; +@import "bootstrap-sortable"; +@import "bootstrap-datepicker3"; +@import "bootstrap-datetimepicker"; +@import "dataTables/bootstrap/3/jquery.dataTables.bootstrap"; + +//bootstrap navbar color (from) +$bgDefault: #19197b; +$bgHighlight: #06064b; +$colDefault: #8e8eb4; +$colHighlight: #ffffff; +$dropDown: false; + +@font-face { + font-family: 'Glyphicons Halflings'; + src: font-path('bootstrap/glyphicons-halflings-regular.eot'); + src: font-path('bootstrap/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), + font-path('bootstrap/glyphicons-halflings-regular.woff') format('woff'), + font-path('bootstrap/glyphicons-halflings-regular.ttf') format('truetype'), + font-path('bootstrap/glyphicons-halflings-regular.svg#glyphicons-halflingsregular') format('svg'); +} + + +.navbar-default { + background-color: $bgDefault; + border-color: $bgHighlight; + + .navbar-brand { + color: $colDefault; + + &:hover, &:focus { + color: $colHighlight; + } + } + + .navbar-text { + color: $colDefault; + } + + .navbar-nav { + > li { + > a { + color: $colDefault; + + &:hover, &:focus { + color: $colHighlight; + } + } + + @if $dropDown { + > .dropdown-menu { + background-color: $bgDefault; + + > li { + > a { + color: $colDefault; + + &:hover, &:focus { + color: $colHighlight; + background-color: $bgHighlight; + } + } + + > .divider { + background-color: $bgHighlight; + } + } + } + } + } + + @if $dropDown { + .open .dropdown-menu > .active { + > a, > a:hover, > a:focus { + color: $colHighlight; + background-color: $bgHighlight; + } + } + } + + > .active { + > a, > a:hover, > a:focus { + color: $colHighlight; + background-color: $bgHighlight; + } + } + + > .open { + > a, > a:hover, > a:focus { + color: $colHighlight; + background-color: $bgHighlight; + } + } + } + + .navbar-toggle { + border-color: $bgHighlight; + + &:hover, &:focus { + background-color: $bgHighlight; + } + + .icon-bar { + background-color: $colDefault; + } + } + + .navbar-collapse, + .navbar-form { + border-color: $colDefault; + } + + .navbar-link { + color: $colDefault; + + &:hover { + color: $colHighlight; + } + } +} + +@media (max-width: 767px) { + .navbar-default .navbar-nav .open .dropdown-menu { + > li > a { + 
color: $colDefault; + + &:hover, &:focus { + color: $colHighlight; + } + } + + > .active { + > a, > a:hover, > a:focus { + color: $colHighlight; + background-color: $bgHighlight; + } + } + } +} + +.secondnavbar { + top: 50px; +} + +// --------------- bootstrap file upload ---------------------- +.btn-file { + position: relative; + overflow: hidden; +} + +.btn-file input[type=file] { + position: absolute; + top: 0; + right: 0; + min-width: 100%; + min-height: 100%; + font-size: 100px; + text-align: right; + filter: alpha(opacity = 0); + opacity: 0; + outline: none; + background: white; + cursor: inherit; + display: block; +} + +body { + background: white image-url("topbg.jpg") repeat-x top center; + + //font-size: 13px + //font-family: Tahoma, "sans-serif" + margin: 10px; + padding: 10px; + padding-top: 60px; +} + +// ------------------ bootstrap sortable -------------------- +table.sortable th { + padding-right: 20px !important; + + span.sign { + right: (-15px) !important; + } + + &.text-right { + padding-left: 20px !important; + padding-right: 8px !important; + + &:after, span.sign { + left: (-15px) !important; + } + } +} + +input { + font-family: Tahoma, "sans-serif"; +} + +h1 { + font-size: 24px; + color: #334488; + line-height: 2em; +} + +h2 { + font-size: 18px; + color: #5566bb; + line-height: 1.5em; +} + +hr { + border-top: 1px solid #dddddd; + border-bottom: 1px solid #eeeeee; +} + +//#a +// color: #6666cc +// text-decoration: none +// +// &:link, &:visited +// color: #6666cc +// text-decoration: none +// +// &:hover, &:focus +// color: #111166 +// text-decoration: none + +div { + &.userbar { + line-height: 1.5em; + text-align: right; + font-size: 12px; + } + + &.title { + padding: 10px 0px; + line-height: 1.5em; + font-size: 13px; + + span.contest-over-msg { + font-size: 15px; + color: red; + } + + table { + width: 100%; + font-weight: bold; + } + + td { + &.left-col { + text-align: left; + vertical-align: top; + color: #444444; + } + + &.right-col { + text-align: right; + vertical-align: top; + font-size: 18px; + color: #116699; + } + } + } +} + +table.info { + margin: 10px 0; + border: 1px solid #666666; + border-collapse: collapse; + font-size: 12px; + + th { + border: 1px solid #666666; + line-height: 1.5em; + padding: 0 0.5em; + } + + td { + border-left: 1px solid #666666; + border-right: 1px solid #666666; + line-height: 1.5em; + padding: 0 0.5em; + } +} + +tr { + &.info-head { + background: #777777; + color: white; + } + + &.info-odd { + background: #eeeeee; + } + + &.info-even { + background: #fcfcfc; + } +} + +@mixin basicbox { + background: #eeeeff; + border: 1px dotted #99aaee; + padding: 5px; + margin: 10px 0px; + color: black; + font-size: 13px; +} + +.infobox { + @include basicbox; +} + +.submitbox { + @include basicbox; +} + +.errorExplanation { + border: 1px dotted gray; + color: #bb2222; + padding: 5px 15px 5px 15px; + margin-bottom: 5px; + background-color: white; + font-weight: normal; + + h2 { + color: #cc1111; + font-weight: bold; + } +} + +table.uinfo { + border-collapse: collapse; + border: 1px solid black; + font-size: 13px; +} + +td.uinfo { + vertical-align: top; + border: 1px solid black; + padding: 5px; +} + +th.uinfo { + background: lightgreen; + vertical-align: top; + text-align: right; + border: 1px solid black; + padding: 5px; +} + +div { + &.compilermsgbody { + font-family: monospace; + } + + &.task-menu { + text-align: center; + font-size: 13px; + line-height: 1.75em; + font-weight: bold; + border-top: 1px dashed gray; + border-bottom: 1px dashed 
gray; + margin-top: 2px; + margin-bottom: 4px; + } +} + +table.taskdesc { + border: 2px solid #dddddd; + border-collapse: collapse; + margin: 10px auto; + width: 90%; + font-size: 13px; + + p { + font-size: 13px; + } + + tr.name { + border: 2px solid #dddddd; + background: #dddddd; + color: #333333; + font-weight: bold; + font-size: 14px; + line-height: 1.5em; + text-align: center; + } + + td { + &.desc-odd { + padding: 5px; + padding-left: 20px; + background: #fefeee; + } + + &.desc-even { + padding: 5px; + padding-left: 20px; + background: #feeefe; + } + } +} + +.announcementbox { + margin: 10px 0px; + background: #bbddee; + padding: 1px; + + span.title { + font-weight: bold; + color: #224455; + padding-left: 10px; + line-height: 1.6em; + } +} + +.announcement { + margin: 2px; + background: white; + padding: 1px; + padding-left: 10px; + padding-right: 10px; + padding-top: 5px; + padding-bottom: 5px; +} + +.announcement p { + font-size: 12px; + margin: 2px; +} + +.pub-info { + text-align: right; + font-style: italic; + font-size: 9px; + + p { + text-align: right; + font-style: italic; + font-size: 9px; + } +} + +.announcement { + .toggles { + font-weight: normal; + float: right; + font-size: 80%; + } + + .announcement-title { + font-weight: bold; + } +} + +div { + &.message { + margin: 10px 0 0; + + div { + &.message { + margin: 0 0 0 30px; + } + + &.body { + border: 2px solid #dddddd; + background: #fff8f8; + padding-left: 5px; + } + + &.reply-body { + border: 2px solid #bbbbbb; + background: #fffff8; + padding-left: 5px; + } + + &.stat { + font-size: 10px; + line-height: 1.75em; + padding: 0 5px; + color: #333333; + background: #dddddd; + font-weight: bold; + } + + &.message div.stat { + font-size: 10px; + line-height: 1.75em; + padding: 0 5px; + color: #444444; + background: #bbbbbb; + font-weight: bold; + } + } + } + + &.contest-title { + color: white; + text-align: center; + line-height: 2em; + } + + &.registration-desc, &.test-desc { + border: 1px dotted gray; + background: #f5f5f5; + padding: 5px; + margin: 10px 0; + font-size: 12px; + line-height: 1.5em; + } +} + +h2.contest-title { + margin-top: 5px; + margin-bottom: 5px; +} + + + +.grader-comment { + word-wrap: break-word; +} diff --git a/app/assets/stylesheets/groups.css.scss b/app/assets/stylesheets/groups.css.scss new file mode 100644 diff --git a/app/assets/stylesheets/submissions.css.scss b/app/assets/stylesheets/submissions.css.scss new file mode 100644 --- /dev/null +++ b/app/assets/stylesheets/submissions.css.scss @@ -0,0 +1,4 @@ +// Place all the styles related to the sources controller here. +// They will automatically be included in application.css. +// You can use Sass (SCSS) here: http://sass-lang.com/ + diff --git a/app/assets/stylesheets/tags.scss b/app/assets/stylesheets/tags.scss new file mode 100644 --- /dev/null +++ b/app/assets/stylesheets/tags.scss @@ -0,0 +1,3 @@ +// Place all the styles related to the tags controller here. +// They will automatically be included in application.css. 
+// You can use Sass (SCSS) here: http://sass-lang.com/ diff --git a/app/assets/stylesheets/testcases.css.scss b/app/assets/stylesheets/testcases.css.scss new file mode 100644 diff --git a/app/controllers/announcements_controller.rb b/app/controllers/announcements_controller.rb --- a/app/controllers/announcements_controller.rb +++ b/app/controllers/announcements_controller.rb @@ -7,8 +7,7 @@ # GET /announcements # GET /announcements.xml def index - @announcements = Announcement.find(:all, - :order => "created_at DESC") + @announcements = Announcement.order(created_at: :desc) respond_to do |format| format.html # index.html.erb @@ -46,7 +45,7 @@ # POST /announcements # POST /announcements.xml def create - @announcement = Announcement.new(params[:announcement]) + @announcement = Announcement.new(announcement_params) respond_to do |format| if @announcement.save @@ -66,7 +65,7 @@ @announcement = Announcement.find(params[:id]) respond_to do |format| - if @announcement.update_attributes(params[:announcement]) + if @announcement.update_attributes(announcement_params) flash[:notice] = 'Announcement was successfully updated.' format.html { redirect_to(@announcement) } format.js {} @@ -108,4 +107,10 @@ format.xml { head :ok } end end + + private + + def announcement_params + params.require(:announcement).permit(:author, :body, :published, :frontpage, :contest_only, :title) + end end diff --git a/app/controllers/application_controller.rb b/app/controllers/application_controller.rb --- a/app/controllers/application_controller.rb +++ b/app/controllers/application_controller.rb @@ -1,11 +1,17 @@ class ApplicationController < ActionController::Base protect_from_forgery - before_filter :current_user + before_filter :current_user SINGLE_USER_MODE_CONF_KEY = 'system.single_user_mode' MULTIPLE_IP_LOGIN_CONF_KEY = 'right.multiple_ip_login' + #report and redirect for unauthorized activities + def unauthorized_redirect + flash[:notice] = 'You are not authorized to view the page you requested' + redirect_to :controller => 'main', :action => 'login' + end + # Returns the current logged-in user (if any). def current_user return nil unless session[:user_id] @@ -14,10 +20,9 @@ def admin_authorization return false unless authenticate - user = User.find(session[:user_id], :include => ['roles']) + user = User.includes(:roles).find(session[:user_id]) unless user.admin? - flash[:notice] = 'You are not authorized to view the page you requested' - redirect_to :controller => 'main', :action => 'login' unless user.admin? + unauthorized_redirect return false end return true @@ -27,12 +32,20 @@ return false unless authenticate user = User.find(session[:user_id]) unless user.roles.detect { |role| allowed_roles.member?(role.name) } - flash[:notice] = 'You are not authorized to view the page you requested' - redirect_to :controller => 'main', :action => 'login' + unauthorized_redirect return false end end + def testcase_authorization + #admins always have this privilege + if @current_user.admin? + return true + end + + unauthorized_redirect unless GraderConfiguration["right.view_testcase"] + end + protected def authenticate @@ -45,27 +58,28 @@ return false end + # check if running in single-user mode if GraderConfiguration[SINGLE_USER_MODE_CONF_KEY] - user = User.find_by_id(session[:user_id]) - if user==nil or (not user.admin?) + if @current_user==nil or (not @current_user.admin?) flash[:notice] = 'You cannot log in at this time' redirect_to :controller => 'main', :action => 'login' return false end - unless user.enabled?
- flash[:notice] = 'Your account is disabled' - redirect_to :controller => 'main', :action => 'login' - return false - end return true end + # check if the user is enabled + unless @current_user.enabled? or @current_user.admin? + flash[:notice] = 'Your account is disabled' + redirect_to :controller => 'main', :action => 'login' + return false + end + if GraderConfiguration.multicontests? - user = User.find(session[:user_id]) - return true if user.admin? + return true if @current_user.admin? begin - if user.contest_stat(true).forced_logout + if @current_user.contest_stat(true).forced_logout flash[:notice] = 'You have been automatically logged out.' redirect_to :controller => 'main', :action => 'index' end @@ -97,10 +111,10 @@ return false unless authenticate user = User.find(session[:user_id]) unless user.roles.detect { |role| - role.rights.detect{ |right| - right.controller == self.class.controller_name and - (right.action == 'all' or right.action == action_name) - } + role.rights.detect{ |right| + right.controller == self.class.controller_name and + (right.action == 'all' or right.action == action_name) + } } flash[:notice] = 'You are not authorized to view the page you requested' #request.env['HTTP_REFERER'] ? (redirect_to :back) : (redirect_to :controller => 'login') diff --git a/app/controllers/configurations_controller.rb b/app/controllers/configurations_controller.rb --- a/app/controllers/configurations_controller.rb +++ b/app/controllers/configurations_controller.rb @@ -5,8 +5,8 @@ def index - @configurations = GraderConfiguration.find(:all, - :order => '`key`') + @configurations = GraderConfiguration.order(:key) + @group = GraderConfiguration.pluck("grader_configurations.key").map{ |x| x[0...(x.index('.'))] }.uniq.sort end def reload @@ -18,7 +18,7 @@ @config = GraderConfiguration.find(params[:id]) User.clear_last_login if @config.key == GraderConfiguration::MULTIPLE_IP_LOGIN_KEY and @config.value == 'true' and params[:grader_configuration][:value] == 'false' respond_to do |format| - if @config.update_attributes(params[:grader_configuration]) + if @config.update_attributes(configuration_params) format.json { head :ok } else format.json { respond_with_bip(@config) } @@ -26,4 +26,9 @@ end end +private + def configuration_params + params.require(:grader_configuration).permit(:key,:value_type,:value,:description) + end + end diff --git a/app/controllers/contest_management_controller.rb b/app/controllers/contest_management_controller.rb --- a/app/controllers/contest_management_controller.rb +++ b/app/controllers/contest_management_controller.rb @@ -11,9 +11,9 @@ redirect_to :action => 'index' and return end - @users = User.find(:all) + @users = User.all @start_times = {} - UserContestStat.find(:all).each do |stat| + UserContestStat.all.each do |stat| @start_times[stat.user_id] = stat.started_at end end diff --git a/app/controllers/contests_controller.rb b/app/controllers/contests_controller.rb --- a/app/controllers/contests_controller.rb +++ b/app/controllers/contests_controller.rb @@ -66,7 +66,7 @@ @contest = Contest.find(params[:id]) respond_to do |format| - if @contest.update_attributes(params[:contest]) + if @contest.update_attributes(contests_params) flash[:notice] = 'Contest was successfully updated.' 
format.html { redirect_to(@contest) } format.xml { head :ok } @@ -89,4 +89,10 @@ end end + private + + def contests_params + params.require(:contest).permit(:title,:enabled,:name) + end + end diff --git a/app/controllers/graders_controller.rb b/app/controllers/graders_controller.rb --- a/app/controllers/graders_controller.rb +++ b/app/controllers/graders_controller.rb @@ -1,15 +1,6 @@ class GradersController < ApplicationController - before_filter :admin_authorization, except: [ :submission ] - before_filter(only: [:submission]) { - return false unless authenticate - - if GraderConfiguration["right.user_view_submission"] - return true; - end - - admin_authorization - } + before_filter :admin_authorization verify :method => :post, :only => ['clear_all', 'start_exam', @@ -28,11 +19,10 @@ @terminated_processes = GraderProcess.find_terminated_graders - @last_task = Task.find(:first, - :order => 'created_at DESC') - @last_test_request = TestRequest.find(:first, - :order => 'created_at DESC') + @last_task = Task.last + @last_test_request = TestRequest.last @submission = Submission.order("id desc").limit(20) + @backlog_submission = Submission.where('graded_at is null') end def clear @@ -49,7 +39,7 @@ end def clear_all - GraderProcess.find(:all).each do |p| + GraderProcess.all.each do |p| p.destroy end redirect_to :action => 'list' @@ -71,25 +61,6 @@ @task = Task.find(params[:id]) end - def submission - @submission = Submission.find(params[:id]) - formatter = Rouge::Formatters::HTML.new(css_class: 'highlight', line_numbers: true ) - lexer = case @submission.language.name - when "c" then Rouge::Lexers::C.new - when "cpp" then Rouge::Lexers::Cpp.new - when "pas" then Rouge::Lexers::Pas.new - when "ruby" then Rouge::Lexers::Ruby.new - when "python" then Rouge::Lexers::Python.new - when "java" then Rouge::Lexers::Java.new - when "php" then Rouge::Lexers::PHP.new - end - @formatted_code = formatter.format(lexer.lex(@submission.source)) - @css_style = Rouge::Themes::ThankfulEyes.render(scope: '.highlight') - - user = User.find(session[:user_id]) - SubmissionViewLog.create(user_id: session[:user_id],submission_id: @submission.id) unless user.admin? - - end # various grader controls diff --git a/app/controllers/groups_controller.rb b/app/controllers/groups_controller.rb new file mode 100644 --- /dev/null +++ b/app/controllers/groups_controller.rb @@ -0,0 +1,104 @@ +class GroupsController < ApplicationController + before_action :set_group, only: [:show, :edit, :update, :destroy, + :add_user, :remove_user,:remove_all_user, + :add_problem, :remove_problem,:remove_all_problem, + ] + before_action :authenticate, :admin_authorization + + # GET /groups + def index + @groups = Group.all + end + + # GET /groups/1 + def show + end + + # GET /groups/new + def new + @group = Group.new + end + + # GET /groups/1/edit + def edit + end + + # POST /groups + def create + @group = Group.new(group_params) + + if @group.save + redirect_to @group, notice: 'Group was successfully created.' + else + render :new + end + end + + # PATCH/PUT /groups/1 + def update + if @group.update(group_params) + redirect_to @group, notice: 'Group was successfully updated.' + else + render :edit + end + end + + # DELETE /groups/1 + def destroy + @group.destroy + redirect_to groups_url, notice: 'Group was successfully destroyed.' 
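+    # Group membership is stored in the GroupUser/GroupProblem join models:
+    # the add_user/add_problem actions below push onto the has_many :through
+    # collections and expect the join models' uniqueness validations to raise
+    # (hence the begin/rescue), while remove/clear only delete the join rows.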
+ end + + def remove_user + user = User.find(params[:user_id]) + @group.users.delete(user) + redirect_to group_path(@group), flash: {success: "User #{user.login} was removed from the group #{@group.name}"} + end + + def remove_all_user + @group.users.clear + redirect_to group_path(@group), alert: 'All users removed' + end + + def remove_all_problem + @group.problems.clear + redirect_to group_path(@group), alert: 'All problems removed' + end + + def add_user + user = User.find(params[:user_id]) + begin + @group.users << user + redirect_to group_path(@group), flash: { success: "User #{user.login} was added to the group #{@group.name}"} + rescue => e + redirect_to group_path(@group), alert: e.message + end + end + + def remove_problem + problem = Problem.find(params[:problem_id]) + @group.problems.delete(problem) + redirect_to group_path(@group), flash: {success: "Problem #{problem.name} was removed from the group #{@group.name}" } + end + + def add_problem + problem = Problem.find(params[:problem_id]) + begin + @group.problems << problem + redirect_to group_path(@group), flash: {success: "Problem #{problem.name} was added to the group #{@group.name}" } + rescue => e + redirect_to group_path(@group), alert: e.message + end + end + + private + # Use callbacks to share common setup or constraints between actions. + def set_group + @group = Group.find(params[:id]) + end + + # Only allow a trusted parameter "white list" through. + def group_params + params.require(:group).permit(:name, :description) + end +end diff --git a/app/controllers/heartbeat_controller.rb b/app/controllers/heartbeat_controller.rb --- a/app/controllers/heartbeat_controller.rb +++ b/app/controllers/heartbeat_controller.rb @@ -2,11 +2,11 @@ before_filter :admin_authorization, :only => ['index'] def edit - @user = User.find_by_login(params[:id]) - unless @user - render text: "LOGIN_NOT_FOUND" - return - end + #@user = User.find_by_login(params[:id]) + #unless @user + # render text: "LOGIN_NOT_FOUND" + # return + #end #hb = HeartBeat.where(user_id: @user.id, ip_address: request.remote_ip).first #puts "status = #{params[:status]}" @@ -19,9 +19,24 @@ #else # HeartBeat.creae(user_id: @user.id, ip_address: request.remote_ip) #end - HeartBeat.create(user_id: @user.id, ip_address: request.remote_ip, status: params[:status]) + #HeartBeat.create(user_id: @user.id, ip_address: request.remote_ip, status: params[:status]) + + res = GraderConfiguration['right.heartbeat_response'] + res.strip! if res + full = GraderConfiguration['right.heartbeat_response_full'] + full.strip! if full - render text: (GraderConfiguration['right.heartbeat_response'] || 'OK') + if full and full != '' + l = Login.where(ip_address: request.remote_ip).last + @user = l.user + if @user.solve_all_available_problems? + render text: (full || 'OK') + else + render text: (res || 'OK') + end + else + render text: (GraderConfiguration['right.heartbeat_response'] || 'OK') + end end def index diff --git a/app/controllers/login_controller.rb b/app/controllers/login_controller.rb --- a/app/controllers/login_controller.rb +++ b/app/controllers/login_controller.rb @@ -7,32 +7,38 @@ end def login - if (!GraderConfiguration['right.bypass_agreement']) and (!params[:accept_agree]) + user = User.authenticate(params[:login], params[:password]) + unless user + flash[:notice] = 'Wrong password' + redirect_to :controller => 'main', :action => 'login' + return + end + + if (!GraderConfiguration['right.bypass_agreement']) and (!params[:accept_agree]) and !user.admin?
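+      # non-admin users who have not accepted the agreement are bounced back
+      # to the login page; the 'right.bypass_agreement' setting skips this check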
flash[:notice] = 'You must accept the agreement before logging in' redirect_to :controller => 'main', :action => 'login' - elsif user = User.authenticate(params[:login], params[:password]) - session[:user_id] = user.id - session[:admin] = user.admin? + return + end + + #process logging in + session[:user_id] = user.id + session[:admin] = user.admin? - # clear forced logout flag for multicontests contest change - if GraderConfiguration.multicontests? - contest_stat = user.contest_stat - if contest_stat.respond_to? :forced_logout - if contest_stat.forced_logout - contest_stat.forced_logout = false - contest_stat.save - end + # clear forced logout flag for multicontests contest change + if GraderConfiguration.multicontests? + contest_stat = user.contest_stat + if contest_stat.respond_to? :forced_logout + if contest_stat.forced_logout + contest_stat.forced_logout = false + contest_stat.save end end - - #save login information - Login.create(user_id: user.id, ip_address: request.remote_ip) + end - redirect_to :controller => 'main', :action => 'list' - else - flash[:notice] = 'Wrong password' - redirect_to :controller => 'main', :action => 'login' - end + #save login information + Login.create(user_id: user.id, ip_address: request.remote_ip) + + redirect_to :controller => 'main', :action => 'list' end def site_login diff --git a/app/controllers/main_controller.rb b/app/controllers/main_controller.rb --- a/app/controllers/main_controller.rb +++ b/app/controllers/main_controller.rb @@ -45,7 +45,7 @@ # @hidelogin = true # end - @announcements = Announcement.find_for_frontpage + @announcements = Announcement.frontpage render :action => 'login', :layout => 'empty' end @@ -86,18 +86,18 @@ render :action => 'list' and return end - if @submission.valid? + if @submission.valid?(@current_user) if @submission.save == false - flash[:notice] = 'Error saving your submission' + flash[:notice] = 'Error saving your submission' elsif Task.create(:submission_id => @submission.id, :status => Task::STATUS_INQUEUE) == false - flash[:notice] = 'Error adding your submission to task queue' + flash[:notice] = 'Error adding your submission to task queue' end else prepare_list_information render :action => 'list' and return end - redirect_to :action => 'list' + redirect_to edit_submission_path(@submission) end def source @@ -106,7 +106,7 @@ (submission.problem != nil) and (submission.problem.available)) send_data(submission.source, - {:filename => submission.download_filename, + {:filename => submission.download_filename, :type => 'text/plain'}) else flash[:notice] = 'Error viewing source' @@ -124,23 +124,6 @@ end end - def submission - @user = User.find(session[:user_id]) - @problems = @user.available_problems - if params[:id]==nil - @problem = nil - @submissions = nil - else - @problem = Problem.find_by_id(params[:id]) - if (@problem == nil) or (not @problem.available) - redirect_to :action => 'list' - flash[:notice] = 'Error: submissions for that problem are not viewable.' 
- return - end - @submissions = Submission.find_all_by_user_problem(@user.id, @problem.id) - end - end - def result if !GraderConfiguration.show_grading_result redirect_to :action => 'list' and return @@ -217,9 +200,9 @@ def prepare_announcements(recent=nil) if GraderConfiguration.show_tasks_to?(@user) - @announcements = Announcement.find_published(true) + @announcements = Announcement.published(true) else - @announcements = Announcement.find_published + @announcements = Announcement.published end if recent!=nil recent_id = recent.to_i diff --git a/app/controllers/problems_controller.rb b/app/controllers/problems_controller.rb --- a/app/controllers/problems_controller.rb +++ b/app/controllers/problems_controller.rb @@ -1,21 +1,21 @@ class ProblemsController < ApplicationController - before_filter :authenticate, :authorization + before_action :authenticate, :authorization + before_action :testcase_authorization, only: [:show_testcase] in_place_edit_for :problem, :name in_place_edit_for :problem, :full_name in_place_edit_for :problem, :full_score def index - @problems = Problem.find(:all, :order => 'date_added DESC') + @problems = Problem.order(date_added: :desc) end # GETs should be safe (see http://www.w3.org/2001/tag/doc/whenToUseGet.html) - verify :method => :post, :only => [ :destroy, - :create, :quick_create, + verify :method => :post, :only => [ :create, :quick_create, :do_manage, :do_import, - :update ], + ], :redirect_to => { :action => :index } def show @@ -28,7 +28,7 @@ end def create - @problem = Problem.new(params[:problem]) + @problem = Problem.new(problem_params) @description = Description.new(params[:description]) if @description.body!='' if !@description.save @@ -47,7 +47,7 @@ end def quick_create - @problem = Problem.new(params[:problem]) + @problem = Problem.new(problem_params) @problem.full_name = @problem.name if @problem.full_name == '' @problem.full_score = 100 @problem.available = false @@ -71,14 +71,14 @@ def update @problem = Problem.find(params[:id]) @description = @problem.description - if @description == nil and params[:description][:body]!='' + if @description.nil? and params[:description][:body]!='' @description = Description.new(params[:description]) if !@description.save flash[:notice] = 'Error saving description' render :action => 'edit' and return end @problem.description = @description - elsif @description!=nil + elsif @description if !@description.update_attributes(params[:description]) flash[:notice] = 'Error saving description' render :action => 'edit' and return @@ -88,7 +88,7 @@ flash[:notice] = 'Error: Uploaded file is not PDF' render :action => 'edit' and return end - if @problem.update_attributes(params[:problem]) + if @problem.update_attributes(problem_params) flash[:notice] = 'Problem was successfully updated.' unless params[:file] == nil or params[:file] == '' flash[:notice] = 'Problem was successfully updated and a new PDF file is uploaded.' @@ -115,8 +115,8 @@ end def destroy - Problem.find(params[:id]).destroy - redirect_to action: :index + p = Problem.find(params[:id]).destroy + redirect_to action: :index end def toggle @@ -135,9 +135,16 @@ end end + def toggle_view_testcase + @problem = Problem.find(params[:id]) + @problem.update_attributes(view_testcase: !(@problem.view_testcase?) 
) + respond_to do |format| + format.js { } + end + end + + def turn_all_off - Problem.find(:all, - :conditions => "available = 1").each do |problem| + Problem.available.all.each do |problem| problem.available = false problem.save end @@ -145,8 +152,7 @@ end def turn_all_on - Problem.find(:all, - :conditions => "available = 0").each do |problem| + Problem.where.not(available: true).each do |problem| problem.available = true problem.save end @@ -159,7 +165,7 @@ redirect_to :controller => 'main', :action => 'list' return end - @submissions = Submission.includes(:user).where(problem_id: params[:id]).order(:user_id,:id) + @submissions = Submission.includes(:user).includes(:language).where(problem_id: params[:id]).order(:user_id,:id) #stat summary range =65 @@ -177,7 +183,7 @@ end def manage - @problems = Problem.find(:all, :order => 'date_added DESC') + @problems = Problem.order(date_added: :desc) end def do_manage @@ -189,7 +195,26 @@ set_available(true) elsif params.has_key? 'disable_problem' set_available(false) + elsif params.has_key? 'add_group' + group = Group.find(params[:group_id]) + ok = [] + failed = [] + get_problems_from_params.each do |p| + begin + group.problems << p + ok << p.full_name + rescue => e + failed << p.full_name + end + end + flash[:success] = "The following problems are added to the group #{group.name}: " + ok.join(', ') if ok.count > 0 + flash[:alert] = "The following problems are already in the group #{group.name}: " + failed.join(', ') if failed.count > 0 + elsif params.has_key? 'add_tags' + get_problems_from_params.each do |p| + p.tag_ids += params[:tag_ids] + end end + redirect_to :action => 'manage' end @@ -237,10 +262,7 @@ def change_date_added problems = get_problems_from_params - year = params[:date_added][:year].to_i - month = params[:date_added][:month].to_i - day = params[:date_added][:day].to_i - date = Date.new(year,month,day) + date = Date.parse(params[:date_added]) problems.each do |p| p.date_added = date p.save @@ -279,4 +301,10 @@ def get_problems_stat end + private + + def problem_params + params.require(:problem).permit(:name, :full_name, :full_score, :date_added, :available, :test_allowed,:output_only, :url, :description, tag_ids:[]) + end + end diff --git a/app/controllers/report_controller.rb b/app/controllers/report_controller.rb --- a/app/controllers/report_controller.rb +++ b/app/controllers/report_controller.rb @@ -1,24 +1,81 @@ +require 'csv' + class ReportController < ApplicationController - before_filter :admin_authorization, only: [:login_stat,:submission_stat, :stuck, :cheat_report, :cheat_scruntinize] + before_filter :authenticate + + before_filter :admin_authorization, only: [:login_stat,:submission_stat, :stuck, :cheat_report, :cheat_scruntinize, :show_max_score, :current_score] before_filter(only: [:problem_hof]) { |c| return false unless authenticate - if GraderConfiguration["right.user_view_submission"] - return true; + admin_authorization unless GraderConfiguration["right.user_view_submission"] + } + + def max_score + end + + def current_score + @problems = Problem.available_problems + @users = User.includes(:contests).includes(:contest_stat).where(enabled: true) + @scorearray = calculate_max_score(@problems, @users,0,0,true) + + #render accordingly + if params[:button] == 'download' then + csv = gen_csv_from_scorearray(@scorearray,@problems) + send_data csv, filename: 'max_score.csv' + else + #render template: 'user_admin/user_stat' + render 'current_score' + end + end + + def show_max_score + #process parameters + #problems + 
@problems = [] + if params[:problem_id] + params[:problem_id].each do |id| + next unless id.strip != "" + pid = Problem.find_by_id(id.to_i) + @problems << pid if pid + end end - admin_authorization - } + #users + @users = if params[:user] == "all" then + User.includes(:contests).includes(:contest_stat) + else + User.includes(:contests).includes(:contest_stat).where(enabled: true) + end + + #set up the range from params + @since_id = params.fetch(:from_id, 0).to_i + @until_id = params.fetch(:to_id, 0).to_i + @since_id = nil if @since_id == 0 + @until_id = nil if @until_id == 0 + + #calculate the score table + @scorearray = calculate_max_score(@problems, @users, @since_id, @until_id) + + #render accordingly + if params[:button] == 'download' then + csv = gen_csv_from_scorearray(@scorearray,@problems) + send_data csv, filename: 'max_score.csv' + else + #render template: 'user_admin/user_stat' + render 'max_score' + end + + end def score if params[:commit] == 'download csv' @problems = Problem.all else - @problems = Problem.find_available_problems + @problems = Problem.available_problems end - @users = User.includes(:contests, :contest_stat).where(enabled: true) #find(:all, :include => [:contests, :contest_stat]).where(enabled: true) + @users = User.includes(:contests, :contest_stat).where(enabled: true) @scorearray = Array.new @users.each do |u| ustat = Array.new @@ -170,7 +227,7 @@ @by_lang[lang.pretty_name][:memory] = { avail: true, user_id: sub.user_id, value: sub.peak_memory, sub_id: sub.id } end - if sub.submitted_at and sub.submitted_at < @by_lang[lang.pretty_name][:first][:value] and + if sub.submitted_at and sub.submitted_at < @by_lang[lang.pretty_name][:first][:value] and sub.user and !sub.user.admin? @by_lang[lang.pretty_name][:first] = { avail: true, user_id: sub.user_id, value: sub.submitted_at, sub_id: sub.id } end @@ -400,5 +457,65 @@ end + protected + + def calculate_max_score(problems, users,since_id,until_id, get_last_score = false) + scorearray = Array.new + users.each do |u| + ustat = Array.new + ustat[0] = u + problems.each do |p| + unless get_last_score + #get max score + max_points = 0 + Submission.find_in_range_by_user_and_problem(u.id,p.id,since_id,until_id).each do |sub| + max_points = sub.points if sub and sub.points and (sub.points > max_points) + end + ustat << [(max_points.to_f*100/p.full_score).round, (max_points>=p.full_score)] + else + #get latest score + sub = Submission.find_last_by_user_and_problem(u.id,p.id) + if (sub!=nil) and (sub.points!=nil) and p and p.full_score + ustat << [(sub.points.to_f*100/p.full_score).round, (sub.points>=p.full_score)] + else + ustat << [0,false] + end + end + end + scorearray << ustat + end + return scorearray + end + + def gen_csv_from_scorearray(scorearray,problems) + CSV.generate do |csv| + #add header + header = ['User','Name', 'Activated?', 'Logged in', 'Contest'] + problems.each { |p| header << p.name } + header += ['Total','Passed'] + csv << header + #add data + scorearray.each do |sc| + total = num_passed = 0 + row = Array.new + sc.each_index do |i| + if i == 0 + row << sc[i].login + row << sc[i].full_name + row << sc[i].activated + row << (sc[i].try(:contest_stat).try(:started_at)!=nil ? 
'yes' : 'no') + row << sc[i].contests.collect {|c| c.name}.join(', ') + else + row << sc[i][0] + total += sc[i][0] + num_passed += 1 if sc[i][1] + end + end + row << total + row << num_passed + csv << row + end + end + end end diff --git a/app/controllers/site_controller.rb b/app/controllers/site_controller.rb --- a/app/controllers/site_controller.rb +++ b/app/controllers/site_controller.rb @@ -4,7 +4,7 @@ def login # Site administrator login - @countries = Country.find(:all, :include => :sites) + @countries = Country.includes(:sites).all @country_select = @countries.collect { |c| [c.name, c.id] } @country_select_with_all = [['Any',0]] @@ -59,4 +59,9 @@ end end + private + def site_params + params.require(:site).permit() + end + end diff --git a/app/controllers/sites_controller.rb b/app/controllers/sites_controller.rb --- a/app/controllers/sites_controller.rb +++ b/app/controllers/sites_controller.rb @@ -5,7 +5,7 @@ # GET /sites # GET /sites.xml def index - @sites = Site.find(:all, :order => 'country_id') + @sites = Site.order(:country_id) respond_to do |format| format.html # index.html.erb @@ -65,7 +65,7 @@ @site.clear_start_time_if_not_started respond_to do |format| - if @site.update_attributes(params[:site]) + if @site.update_attributes(site_params) flash[:notice] = 'Site was successfully updated.' format.html { redirect_to(@site) } format.xml { head :ok } @@ -88,4 +88,10 @@ end end + private + + def site_params + params.require(:site).permit(:name,:started,:start_time,:country_id,:password) + end + end diff --git a/app/controllers/sources_controller.rb b/app/controllers/sources_controller.rb deleted file mode 100644 --- a/app/controllers/sources_controller.rb +++ /dev/null @@ -1,27 +0,0 @@ -class SourcesController < ApplicationController - before_filter :authenticate - - def direct_edit - @problem = Problem.find(params[:pid]) - @source = '' - end - - def direct_edit_submission - @submission = Submission.find(params[:sid]) - @source = @submission.source.to_s - @problem = @submission.problem - @lang_id = @submission.language.id - render 'direct_edit' - end - - def get_latest_submission_status - @problem = Problem.find(params[:pid]) - @submission = Submission.find_last_by_user_and_problem(params[:uid],params[:pid]) - puts User.find(params[:uid]).login - puts Problem.find(params[:pid]).name - puts 'nil' unless @submission - respond_to do |format| - format.js - end - end -end diff --git a/app/controllers/submissions_controller.rb b/app/controllers/submissions_controller.rb new file mode 100644 --- /dev/null +++ b/app/controllers/submissions_controller.rb @@ -0,0 +1,115 @@ +class SubmissionsController < ApplicationController + before_action :authenticate + before_action :submission_authorization, only: [:show, :download, :edit] + before_action :admin_authorization, only: [:rejudge] + + # GET /submissions + # GET /submissions.json + # Show problem selection and user's submission of that problem + def index + @user = @current_user + @problems = @user.available_problems + + if params[:problem_id]==nil + @problem = nil + @submissions = nil + else + @problem = Problem.find_by_id(params[:problem_id]) + if (@problem == nil) or (not @problem.available) + redirect_to main_list_path + flash[:notice] = 'Error: submissions for that problem are not viewable.' 
+ return + end + @submissions = Submission.find_all_by_user_problem(@user.id, @problem.id).order(id: :desc) + end + end + + # GET /submissions/1 + # GET /submissions/1.json + def show + @submission = Submission.find(params[:id]) + + #log the viewing + user = User.find(session[:user_id]) + SubmissionViewLog.create(user_id: session[:user_id],submission_id: @submission.id) unless user.admin? + + @task = @submission.task + end + + def download + @submission = Submission.find(params[:id]) + send_data(@submission.source, {:filename => @submission.download_filename, :type => 'text/plain'}) + end + + def compiler_msg + @submission = Submission.find(params[:id]) + respond_to do |format| + format.js + end + end + + #on-site new submission for a specific problem + def direct_edit_problem + @problem = Problem.find(params[:problem_id]) + unless @current_user.can_view_problem?(@problem) + unauthorized_redirect + return + end + @source = '' + if (params[:view_latest]) + @submission = Submission.find_last_by_user_and_problem(@current_user.id,@problem.id) + @source = @submission.source.to_s if @submission and @submission.source + end + render 'edit' + end + + # GET /submissions/1/edit + def edit + @submission = Submission.find(params[:id]) + @source = @submission.source.to_s + @problem = @submission.problem + @lang_id = @submission.language.id + end + + + def get_latest_submission_status + @problem = Problem.find(params[:pid]) + @submission = Submission.find_last_by_user_and_problem(params[:uid],params[:pid]) + puts User.find(params[:uid]).login + puts Problem.find(params[:pid]).name + puts 'nil' unless @submission + respond_to do |format| + format.js + end + end + + # GET /submissions/:id/rejudge + def rejudge + @submission = Submission.find(params[:id]) + @task = @submission.task + @task.status_inqueue! if @task + respond_to do |format| + format.js + end + end + +protected + + def submission_authorization + #admins always have this privilege + if @current_user.admin? + return true + end + + sub = Submission.find(params[:id]) + if @current_user.available_problems.include? sub.problem + return true if GraderConfiguration["right.user_view_submission"] or sub.user == @current_user + end + + #default to NO + unauthorized_redirect + return false + end + + +end diff --git a/app/controllers/tags_controller.rb b/app/controllers/tags_controller.rb new file mode 100644 --- /dev/null +++ b/app/controllers/tags_controller.rb @@ -0,0 +1,60 @@ +class TagsController < ApplicationController + before_action :set_tag, only: [:show, :edit, :update, :destroy] + + # GET /tags + def index + @tags = Tag.all + end + + # GET /tags/1 + def show + end + + # GET /tags/new + def new + @tag = Tag.new + end + + # GET /tags/1/edit + def edit + end + + # POST /tags + def create + @tag = Tag.new(tag_params) + + if @tag.save + redirect_to @tag, notice: 'Tag was successfully created.' + else + render :new + end + end + + # PATCH/PUT /tags/1 + def update + if @tag.update(tag_params) + redirect_to @tag, notice: 'Tag was successfully updated.' + else + render :edit + end + end + + # DELETE /tags/1 + def destroy + #remove any association + ProblemTag.where(tag_id: @tag.id).destroy_all + @tag.destroy + redirect_to tags_url, notice: 'Tag was successfully destroyed.' + end + + private + # Use callbacks to share common setup or constraints between actions. + def set_tag + @tag = Tag.find(params[:id]) + end + + # Only allow a trusted parameter "white list" through. 
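+  # (Strong parameters replace the old attr_accessible whitelists in this Rails 4
+  #  code; a hypothetical request like { tag: { name: 'dp', admin: true } } comes
+  #  out of tag_params as { "name" => "dp" }, since :admin is not permitted.)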
+ def tag_params + params.require(:tag).permit(:name, :description, :public) + end +end diff --git a/app/controllers/tasks_controller.rb b/app/controllers/tasks_controller.rb --- a/app/controllers/tasks_controller.rb +++ b/app/controllers/tasks_controller.rb @@ -26,7 +26,7 @@ # this has problem-level access control def download problem = Problem.find(params[:id]) - if !problem or !problem.available or !@user.can_view_problem? problem + unless @current_user.can_view_problem? problem redirect_to :action => 'index' and return end diff --git a/app/controllers/testcases_controller.rb b/app/controllers/testcases_controller.rb new file mode 100644 --- /dev/null +++ b/app/controllers/testcases_controller.rb @@ -0,0 +1,32 @@ +class TestcasesController < ApplicationController + before_action :set_testcase, only: [:download_input,:download_sol] + before_action :testcase_authorization + + def download_input + send_data @testcase.input, type: 'text/plain', filename: "#{@testcase.problem.name}.#{@testcase.num}.in" + end + + def download_sol + send_data @testcase.sol, type: 'text/plain', filename: "#{@testcase.problem.name}.#{@testcase.num}.sol" + end + + def show_problem + @problem = Problem.includes(:testcases).find(params[:problem_id]) + unless @current_user.admin? or @problem.view_testcase + flash[:error] = 'You cannot view the testcase of this problem' + redirect_to :controller => 'main', :action => 'list' + end + end + + + private + # Use callbacks to share common setup or constraints between actions. + def set_testcase + @testcase = Testcase.find(params[:id]) + end + + # Only allow a trusted parameter "white list" through. + def testcase_params + params[:testcase] + end +end diff --git a/app/controllers/user_admin_controller.rb b/app/controllers/user_admin_controller.rb --- a/app/controllers/user_admin_controller.rb +++ b/app/controllers/user_admin_controller.rb @@ -7,7 +7,7 @@ before_filter :admin_authorization # GETs should be safe (see http://www.w3.org/2001/tag/doc/whenToUseGet.html) - verify :method => :post, :only => [ :destroy, + verify :method => :post, :only => [ :create, :create_from_list, :update, :manage_contest, @@ -16,10 +16,6 @@ :redirect_to => { :action => :list } def index - list - end - - def list @user_count = User.count if params[:page] == 'all' @users = User.all @@ -28,12 +24,13 @@ @users = User.paginate :page => params[:page] @paginated = true end + @users = User.all @hidden_columns = ['hashed_password', 'salt', 'created_at', 'updated_at'] @contests = Contest.enabled end def active - sessions = ActiveRecord::SessionStore::Session.find(:all, :conditions => ["updated_at >= ?", 60.minutes.ago]) + sessions = ActiveRecord::SessionStore::Session.where("updated_at >= ?", 60.minutes.ago) @users = [] sessions.each do |session| if session.data[:user_id] @@ -51,7 +48,7 @@ end def create - @user = User.new(params[:user]) + @user = User.new(user_params) @user.activated = true if @user.save flash[:notice] = 'User was successfully created.' @@ -78,27 +75,39 @@ if items.length>=2 login = items[0] full_name = items[1] + remark ='' + user_alias = '' added_random_password = false - if items.length>=3 + if items.length >= 3 and items[2].chomp(" ").length > 0; password = items[2].chomp(" ") - user_alias = (items.length>=4) ? items[3] : login else password = random_password - user_alias = (items.length>=4) ? 
items[3] : login - added_random_password = true + added_random_password = true + end + + if items.length>= 4 and items[3].chomp(" ").length > 0; + user_alias = items[3].chomp(" ") + else + user_alias = login + end + + if items.length>=5 + remark = items[4].strip; + end user = User.find_by_login(login) if (user) user.full_name = full_name user.password = password + user.remark = remark else user = User.new({:login => login, :full_name => full_name, :password => password, :password_confirmation => password, - :alias => user_alias}) + :alias => user_alias, + :remark => remark}) end user.activated = true user.save @@ -110,7 +119,7 @@ end end end - flash[:notice] = 'User(s) ' + note.join(', ') + + flash[:success] = 'User(s) ' + note.join(', ') + ' were successfully created. ' + '( (+) - created with random passwords.)' redirect_to :action => 'index' @@ -122,7 +131,7 @@ def update @user = User.find(params[:id]) - if @user.update_attributes(params[:user]) + if @user.update_attributes(user_params) flash[:notice] = 'User was successfully updated.' redirect_to :action => 'show', :id => @user else @@ -139,9 +148,9 @@ if params[:commit] == 'download csv' @problems = Problem.all else - @problems = Problem.find_available_problems + @problems = Problem.available_problems end - @users = User.includes(:contests, :contest_stat).where(enabled: true) #find(:all, :include => [:contests, :contest_stat]).where(enabled: true) + @users = User.includes(:contests, :contest_stat).where(enabled: true) @scorearray = Array.new @users.each do |u| ustat = Array.new @@ -168,9 +177,9 @@ if params[:commit] == 'download csv' @problems = Problem.all else - @problems = Problem.find_available_problems + @problems = Problem.available_problems end - @users = User.find(:all, :include => [:contests, :contest_stat]) + @users = User.includes(:contests).includes(:contest_stat).all @scorearray = Array.new #set up range from param since_id = params.fetch(:since_id, 0).to_i @@ -205,7 +214,7 @@ end def random_all_passwords - users = User.find(:all) + users = User.all @prefix = params[:prefix] || '' @non_admin_users = User.find_non_admin_with_prefix(@prefix) @changed = false @@ -220,6 +229,7 @@ end end + # contest management def contests @@ -328,7 +338,7 @@ # admin management def admin - @admins = User.find(:all).find_all {|user| user.admin? } + @admins = User.all.find_all {|user| user.admin? } end def grant_admin @@ -399,6 +409,56 @@ redirect_to :action => 'mass_mailing' end + #bulk manage + def bulk_manage + + begin + @users = User.where('(login REGEXP ?)
OR (remark REGEXP ?)',params[:regex],params[:regex]) if params[:regex] + @users.count if @users #call count to force the query to run so a malformed regex raises inside this begin block + rescue Exception + flash[:error] = 'Regular Expression is malformed' + @users = nil + end + + if params[:commit] + @action = {} + @action[:set_enable] = params[:enabled] + @action[:enabled] = params[:enable] == "1" + @action[:gen_password] = params[:gen_password] + @action[:add_group] = params[:add_group] + @action[:group_name] = params[:group_name] + end + + if params[:commit] == "Perform" + if @action[:set_enable] + @users.update_all(enabled: @action[:enabled]) + end + if @action[:gen_password] + @users.each do |u| + password = random_password + u.password = password + u.password_confirmation = password + u.save + end + end + if @action[:add_group] and @action[:group_name] + @group = Group.find(@action[:group_name]) + ok = [] + failed = [] + @users.each do |user| + begin + @group.users << user + ok << user.login + rescue => e + failed << user.login + end + end + flash[:success] = "The following users are added to the group '#{@group.name}': " + ok.join(', ') if ok.count > 0 + flash[:alert] = "The following users are already in the group '#{@group.name}': " + failed.join(', ') if failed.count > 0 + end + end + end + protected def random_password(length=5) @@ -525,7 +585,7 @@ row << sc[i].login row << sc[i].full_name row << sc[i].activated - row << (sc[i].try(:contest_stat).try(:started_at)!=nil ? 'yes' : 'no') + row << (sc[i].try(:contest_stat).try(:started_at).nil? ? 'no' : 'yes') row << sc[i].contests.collect {|c| c.name}.join(', ') else row << sc[i][0] @@ -539,4 +599,9 @@ end end end + + private + def user_params + params.require(:user).permit(:login,:password,:password_confirmation,:email, :alias, :full_name,:remark) + end end diff --git a/app/controllers/users_controller.rb b/app/controllers/users_controller.rb --- a/app/controllers/users_controller.rb +++ b/app/controllers/users_controller.rb @@ -52,7 +52,7 @@ redirect_to :controller => 'main', :action => 'login' return end - @user = User.new(params[:user]) + @user = User.new(user_params) @user.password_confirmation = @user.password = User.random_password @user.activated = false if (@user.valid?) and (@user.save) @@ -109,9 +109,10 @@ redirect_to :action => 'forget' end - def profile + def stat @user = User.find(params[:id]) - @submission = Submission.includes(:problem).where(user_id: params[:id]) + @submission = Submission.joins(:problem).where(user_id: params[:id]) + @submission = @submission.where('problems.available = true') unless current_user.admin? range = 120 @histogram = { data: Array.new(range,0), summary: {} } @@ -209,6 +210,10 @@ #finally, we allow only admin admin_authorization end - + + private + def user_params + params.require(:user).permit(:login, :full_name, :email) + end end diff --git a/app/helpers/application_helper.rb b/app/helpers/application_helper.rb --- a/app/helpers/application_helper.rb +++ b/app/helpers/application_helper.rb @@ -1,6 +1,7 @@ # Methods added to this helper will be available to all templates in the application.
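+# Bootstrap-oriented helpers are collected below: navbar menu builders, short
+# and relative time formatting, and flash_messages, which maps flash keys to
+# Bootstrap alert classes via BOOTSTRAP_FLASH_MSG.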
module ApplicationHelper + #new bootstrap header def navbar_user_header left_menu = '' right_menu = '' @@ -27,7 +28,7 @@ result = content_tag(:ul,left_menu.html_safe,class: 'nav navbar-nav') + content_tag(:ul,right_menu.html_safe,class: 'nav navbar-nav navbar-right') end - def add_menu(title, controller, action,html_option = {}) + def add_menu(title, controller, action, html_option = {}) link_option = {controller: controller, action: action} html_option[:class] = (html_option[:class] || '') + " active" if current_page?(link_option) content_tag(:li, link_to(title,link_option),html_option) @@ -84,11 +85,10 @@ end def format_short_time(time) - now = Time.now.gmtime + now = Time.zone.now st = '' - if (time.yday != now.yday) or - (time.year != now.year) - st = time.strftime("%x ") + if (time.yday != now.yday) or (time.year != now.year) + st = time.strftime("%d/%m/%y ") end st + time.strftime("%X") end @@ -99,6 +99,10 @@ return Time.at(d).gmtime.strftime("%X") end + def format_full_time_ago(time) + st = time_ago_in_words(time) + ' ago (' + format_short_time(time) + ')' + end + def read_textfile(fname,max_size=2048) begin File.open(fname).read(max_size) @@ -195,4 +199,26 @@ markdown.to_html.html_safe end + + BOOTSTRAP_FLASH_MSG = { + success: 'alert-success', + error: 'alert-danger', + alert: 'alert-danger', + notice: 'alert-info' + } + + def bootstrap_class_for(flash_type) + BOOTSTRAP_FLASH_MSG.fetch(flash_type.to_sym, flash_type.to_s) + end + + def flash_messages + flash.each do |msg_type, message| + concat(content_tag(:div, message, class: "alert #{bootstrap_class_for(msg_type)} fade in") do + concat content_tag(:button, 'x', class: "close", data: { dismiss: 'alert' }) + concat message + end) + end + nil + end + end diff --git a/app/helpers/groups_helper.rb b/app/helpers/groups_helper.rb new file mode 100644 --- /dev/null +++ b/app/helpers/groups_helper.rb @@ -0,0 +1,2 @@ +module GroupsHelper +end diff --git a/app/helpers/submissions_helper.rb b/app/helpers/submissions_helper.rb new file mode 100644 --- /dev/null +++ b/app/helpers/submissions_helper.rb @@ -0,0 +1,2 @@ +module SubmissionsHelper +end diff --git a/app/helpers/tags_helper.rb b/app/helpers/tags_helper.rb new file mode 100644 --- /dev/null +++ b/app/helpers/tags_helper.rb @@ -0,0 +1,2 @@ +module TagsHelper +end diff --git a/app/helpers/testcases_helper.rb b/app/helpers/testcases_helper.rb new file mode 100644 --- /dev/null +++ b/app/helpers/testcases_helper.rb @@ -0,0 +1,2 @@ +module TestcasesHelper +end diff --git a/app/models/announcement.rb b/app/models/announcement.rb --- a/app/models/announcement.rb +++ b/app/models/announcement.rb @@ -1,21 +1,15 @@ class Announcement < ActiveRecord::Base - def self.find_published(contest_started=false) + def self.published(contest_started=false) if contest_started - Announcement.find(:all, - :conditions => "(published = 1) AND (frontpage = 0)", - :order => "created_at DESC") + where(published: true).where(frontpage: false).order(created_at: :desc) else - Announcement.find(:all, - :conditions => "(published = 1) AND (frontpage = 0) AND (contest_only = 0)", - :order => "created_at DESC") + where(published: true).where(frontpage: false).where(contest_only: false).order(created_at: :desc) end end - def self.find_for_frontpage - Announcement.find(:all, - :conditions => "(published = 1) AND (frontpage = 1)", - :order => "created_at DESC") + def self.frontpage + where(published: 1).where(frontpage: 1).order(created_at: :desc) end end diff --git a/app/models/contest.rb b/app/models/contest.rb --- 
a/app/models/contest.rb +++ b/app/models/contest.rb @@ -3,6 +3,6 @@ has_and_belongs_to_many :users has_and_belongs_to_many :problems - scope :enabled, :conditions => {:enabled => true} + scope :enabled, -> { where(enabled: true) } end diff --git a/app/models/grader_configuration.rb b/app/models/grader_configuration.rb --- a/app/models/grader_configuration.rb +++ b/app/models/grader_configuration.rb @@ -10,6 +10,9 @@ MULTICONTESTS_KEY = 'system.multicontests' CONTEST_TIME_LIMIT_KEY = 'contest.time_limit' MULTIPLE_IP_LOGIN_KEY = 'right.multiple_ip_login' + VIEW_TESTCASE = 'right.view_testcase' + SINGLE_USER_KEY = 'system.single_user_mode' + SYSTEM_USE_PROBLEM_GROUP = 'system.use_problem_group' cattr_accessor :config_cache cattr_accessor :task_grading_info_cache @@ -70,6 +73,10 @@ return (get(SYSTEM_MODE_CONF_KEY)=='analysis') end + def self.show_testcase + return get(VIEW_TESTCASE) + end + def self.allow_test_request(user) mode = get(SYSTEM_MODE_CONF_KEY) early_timeout = get(TEST_REQUEST_EARLY_TIMEOUT_KEY) @@ -113,6 +120,10 @@ def self.analysis_mode? return get(SYSTEM_MODE_CONF_KEY) == 'analysis' end + + def self.use_problem_group? + return get(SYSTEM_USE_PROBLEM_GROUP) + end def self.contest_time_limit contest_time_str = GraderConfiguration[CONTEST_TIME_LIMIT_KEY] @@ -152,7 +163,7 @@ def self.read_config GraderConfiguration.config_cache = {} - GraderConfiguration.find(:all).each do |conf| + GraderConfiguration.all.each do |conf| key = conf.key val = conf.value GraderConfiguration.config_cache[key] = GraderConfiguration.convert_type(val,conf.value_type) diff --git a/app/models/grader_process.rb b/app/models/grader_process.rb --- a/app/models/grader_process.rb +++ b/app/models/grader_process.rb @@ -1,11 +1,7 @@ class GraderProcess < ActiveRecord::Base def self.find_by_host_and_pid(host,pid) - return GraderProcess.find(:first, - :conditions => { - :host => host, - :pid => pid - }) + return GraderProcess.where(host:host).where(pid: pid).first end def self.register(host,pid,mode) @@ -27,20 +23,15 @@ end def self.find_running_graders - GraderProcess.find(:all, - :conditions => {:terminated => 0}) + where(terminated: false) end def self.find_terminated_graders - GraderProcess.find(:all, - :conditions => "`terminated`") + where(terminated: true) end def self.find_stalled_process - GraderProcess.find(:all, - :conditions => ["(`terminated` = 0) AND active AND " + - "(updated_at < ?)", - Time.now.gmtime - GraderProcess.stalled_time]) + where(terminated: false).where(active: true).where("updated_at < ?",Time.now.gmtime - GraderProcess.stalled_time) end def report_active(task=nil) diff --git a/app/models/group.rb b/app/models/group.rb new file mode 100644 --- /dev/null +++ b/app/models/group.rb @@ -0,0 +1,13 @@ +class Group < ActiveRecord::Base + has_many :groups_problems, class_name: GroupProblem + has_many :problems, :through => :groups_problems + + has_many :groups_users, class_name: GroupUser + has_many :users, :through => :groups_users + + #has_and_belongs_to_many :problems + #has_and_belongs_to_many :users + + +end + diff --git a/app/models/group_problem.rb b/app/models/group_problem.rb new file mode 100644 --- /dev/null +++ b/app/models/group_problem.rb @@ -0,0 +1,7 @@ +class GroupProblem < ActiveRecord::Base + self.table_name = 'groups_problems' + + belongs_to :problem + belongs_to :group + validates_uniqueness_of :problem_id, scope: :group_id, message: ->(object, data) { "'#{Problem.find(data[:value]).full_name}' is already in the group" } +end diff --git a/app/models/group_user.rb 
b/app/models/group_user.rb new file mode 100644 --- /dev/null +++ b/app/models/group_user.rb @@ -0,0 +1,7 @@ +class GroupUser < ActiveRecord::Base + self.table_name = 'groups_users' + + belongs_to :user + belongs_to :group + validates_uniqueness_of :user_id, scope: :group_id, message: ->(object, data) { "'#{User.find(data[:value]).full_name}' is already in the group" } +end diff --git a/app/models/language.rb b/app/models/language.rb --- a/app/models/language.rb +++ b/app/models/language.rb @@ -4,7 +4,7 @@ def self.cache_ext_hash @@languages_by_ext = {} - Language.find(:all).each do |language| + Language.all.each do |language| language.common_ext.split(',').each do |ext| @@languages_by_ext[ext] = language end diff --git a/app/models/login.rb b/app/models/login.rb --- a/app/models/login.rb +++ b/app/models/login.rb @@ -1,5 +1,4 @@ class Login < ActiveRecord::Base belongs_to :user - attr_accessible :ip_address, :logged_in_at, :user_id end diff --git a/app/models/message.rb b/app/models/message.rb --- a/app/models/message.rb +++ b/app/models/message.rb @@ -23,10 +23,8 @@ end def self.find_all_system_unreplied_messages - self.find(:all, - :conditions => 'ISNULL(receiver_id) ' + - 'AND (ISNULL(replied) OR replied=0)', - :order => 'created_at') + where('ISNULL(receiver_id) ' + + 'AND (ISNULL(replied) OR replied=0)').order(:created_at) end def self.build_replying_message_hierarchy(*args) diff --git a/app/models/problem.rb b/app/models/problem.rb --- a/app/models/problem.rb +++ b/app/models/problem.rb @@ -2,19 +2,29 @@ belongs_to :description has_and_belongs_to_many :contests, :uniq => true + + #has_and_belongs_to_many :groups + has_many :groups_problems, class_name: GroupProblem + has_many :groups, :through => :groups_problems + + has_many :problems_tags, class_name: ProblemTag + has_many :tags, through: :problems_tags + has_many :test_pairs, :dependent => :delete_all + has_many :testcases, :dependent => :destroy validates_presence_of :name - validates_format_of :name, :with => /^\w+$/ + validates_format_of :name, :with => /\A\w+\z/ validates_presence_of :full_name - scope :available, :conditions => {:available => true} + scope :available, -> { where(available: true) } DEFAULT_TIME_LIMIT = 1 DEFAULT_MEMORY_LIMIT = 32 - def self.find_available_problems - Problem.available.all(:order => "date_added DESC, name ASC") + def self.available_problems + available.order(date_added: :desc).order(:name) + #Problem.available.all(:order => "date_added DESC, name ASC") end def self.create_from_import_form_params(params, old_problem=nil) @@ -59,7 +69,9 @@ result = Hash.new #total number of submission result[:total_sub] = Submission.where(problem_id: self.id).count - result[:attempted_user] = Submission.where(problem_id: self.id).group_by(:user_id) + result[:attempted_user] = Submission.where(problem_id: self.id).group(:user_id) + result[:pass] = Submission.where(problem_id: self.id).where("points >= ?",self.full_score).count + return result end def long_name diff --git a/app/models/problem_tag.rb b/app/models/problem_tag.rb new file mode 100644 --- /dev/null +++ b/app/models/problem_tag.rb @@ -0,0 +1,8 @@ +class ProblemTag < ActiveRecord::Base + self.table_name = 'problems_tags' + + belongs_to :problem + belongs_to :tag + + validates_uniqueness_of :problem_id, scope: :tag_id, message: ->(object, data) { "'#{Problem.find(data[:value]).full_name}' already has this tag" } +end diff --git a/app/models/submission.rb b/app/models/submission.rb --- a/app/models/submission.rb +++ b/app/models/submission.rb @@ -13,14 +13,12 @@ validate
:must_have_valid_problem validate :must_specify_language + has_one :task + before_save :assign_latest_number_if_new_recond def self.find_last_by_user_and_problem(user_id, problem_id) - last_sub = find(:first, - :conditions => {:user_id => user_id, - :problem_id => problem_id}, - :order => 'number DESC') - return last_sub + where("user_id = ? AND problem_id = ?",user_id,problem_id).last end def self.find_all_last_by_problem(problem_id) @@ -36,14 +34,14 @@ def self.find_in_range_by_user_and_problem(user_id, problem_id,since_id,until_id) records = Submission.where(problem_id: problem_id,user_id: user_id) - records = records.where('id >= ?',since_id) if since_id > 0 - records = records.where('id <= ?',until_id) if until_id > 0 + records = records.where('id >= ?',since_id) if since_id and since_id > 0 + records = records.where('id <= ?',until_id) if until_id and until_id > 0 records.all end def self.find_last_for_all_available_problems(user_id) submissions = Array.new - problems = Problem.find_available_problems + problems = Problem.available_problems problems.each do |problem| sub = Submission.find_last_by_user_and_problem(user_id, problem.id) submissions << sub if sub!=nil @@ -52,20 +50,11 @@ end def self.find_by_user_problem_number(user_id, problem_id, number) - Submission.find(:first, - :conditions => { - :user_id => user_id, - :problem_id => problem_id, - :number => number - }) + where("user_id = ? AND problem_id = ? AND number = ?",user_id,problem_id,number).first end def self.find_all_by_user_problem(user_id, problem_id) - Submission.find(:all, - :conditions => { - :user_id => user_id, - :problem_id => problem_id, - }) + where("user_id = ? AND problem_id = ?",user_id,problem_id) end def download_filename @@ -148,7 +137,7 @@ # for output_only tasks return if self.problem!=nil and self.problem.output_only - + if self.language==nil errors.add('source',"Cannot detect language. Did you submit a correct source file?") unless self.language!=nil end @@ -158,8 +147,12 @@ return if self.source==nil if self.problem==nil errors.add('problem',"must be specified.") - elsif (!self.problem.available) and (self.new_record?) - errors.add('problem',"must be valid.") + else + #admin always have right + return if self.user.admin? + + #check if user has the right to submit the problem + errors.add('problem',"must be valid.") if (!self.user.available_problems.include?(self.problem)) and (self.new_record?) end end diff --git a/app/models/submission_view_log.rb b/app/models/submission_view_log.rb --- a/app/models/submission_view_log.rb +++ b/app/models/submission_view_log.rb @@ -1,3 +1,3 @@ class SubmissionViewLog < ActiveRecord::Base - attr_accessible :submission_id, :user_id + #attr_accessible :submission_id, :user_id end diff --git a/app/models/tag.rb b/app/models/tag.rb new file mode 100644 --- /dev/null +++ b/app/models/tag.rb @@ -0,0 +1,4 @@ +class Tag < ActiveRecord::Base + has_many :problems_tags, class_name: ProblemTag + has_many :problems, through: :problems_tags +end diff --git a/app/models/task.rb b/app/models/task.rb --- a/app/models/task.rb +++ b/app/models/task.rb @@ -48,10 +48,7 @@ task = nil begin Task.transaction do - task = Task.find(:first, - :order => "created_at", - :conditions => {:status=> Task::STATUS_INQUEUE}, - :lock => true) + task = Task.where(status: Task::STATUS_INQUEUE).lock(true).first if task!=nil task.status = status task.save! 
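+        # .lock(true) adds FOR UPDATE to the query inside the transaction, so
+        # two grader processes polling the queue cannot claim the same task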
diff --git a/app/models/test_request.rb b/app/models/test_request.rb --- a/app/models/test_request.rb +++ b/app/models/test_request.rb @@ -16,8 +16,7 @@ require 'fileutils' class TestRequest < Task - - set_table_name "test_requests" + self.table_name = "test_requests" belongs_to :user belongs_to :problem @@ -38,9 +37,7 @@ # since there will be only one grader grading TestRequest # we do not need locking (hopefully) - test_request = TestRequest.find(:first, - :order => "created_at", - :conditions => {:status=> Task::STATUS_INQUEUE}) + test_request = TestRequest.where(status: Task::STATUS_INQUEUE).first if test_request!=nil test_request.status = status test_request.save! diff --git a/app/models/testcase.rb b/app/models/testcase.rb new file mode 100644 --- /dev/null +++ b/app/models/testcase.rb @@ -0,0 +1,4 @@ +class Testcase < ActiveRecord::Base + belongs_to :problem + #attr_accessible :group, :input, :num, :score, :sol +end diff --git a/app/models/user.rb b/app/models/user.rb --- a/app/models/user.rb +++ b/app/models/user.rb @@ -8,30 +8,32 @@ has_and_belongs_to_many :roles - has_many :test_requests, :order => "submitted_at DESC" + #has_and_belongs_to_many :groups + has_many :groups_users, class_name: GroupUser + has_many :groups, :through => :groups_users - has_many :messages, + has_many :test_requests, -> {order(submitted_at: DESC)} + + has_many :messages, -> { order(created_at: DESC) }, :class_name => "Message", - :foreign_key => "sender_id", - :order => 'created_at DESC' + :foreign_key => "sender_id" - has_many :replied_messages, + has_many :replied_messages, -> { order(created_at: DESC) }, :class_name => "Message", - :foreign_key => "receiver_id", - :order => 'created_at DESC' + :foreign_key => "receiver_id" has_one :contest_stat, :class_name => "UserContestStat", :dependent => :destroy belongs_to :site belongs_to :country - has_and_belongs_to_many :contests, :uniq => true, :order => 'name' + has_and_belongs_to_many :contests, -> { order(:name); uniq} - scope :activated_users, :conditions => {:activated => true} + scope :activated_users, -> {where activated: true} validates_presence_of :login validates_uniqueness_of :login - validates_format_of :login, :with => /^[\_A-Za-z0-9]+$/ + validates_format_of :login, :with => /\A[\_A-Za-z0-9]+\z/ validates_length_of :login, :within => 3..30 validates_presence_of :full_name @@ -111,6 +113,7 @@ begin http = Net::HTTP.new('www.cas.chula.ac.th', 443) http.use_ssl = true + http.verify_mode = OpenSSL::SSL::VERIFY_NONE result = [ ] http.start do |http| req = Net::HTTP::Post.new('/cas/api/?q=studentAuthenticate') @@ -119,7 +122,7 @@ result = JSON.parse resp.body end return true if result["type"] == "beanStudent" - rescue + rescue => e return false end return false @@ -176,14 +179,14 @@ end def self.find_non_admin_with_prefix(prefix='') - users = User.find(:all) + users = User.all return users.find_all { |u| !(u.admin?) and u.login.index(prefix)==0 } end # Contest information def self.find_users_with_no_contest() - users = User.find(:all) + users = User.all return users.find_all { |u| u.contests.length == 0 } end @@ -239,7 +242,7 @@ def update_start_time stat = self.contest_stat - if (stat.nil?) or (stat.started_at.nil?) + if stat.nil? or stat.started_at.nil? stat ||= UserContestStat.new(:user => self) stat.started_at = Time.now.gmtime stat.save @@ -280,9 +283,23 @@ return contest_problems end + def solve_all_available_problems? 
+ available_problems.each do |p| + u = self + sub = Submission.find_last_by_user_and_problem(u.id,p.id) + return false if !p or !sub or sub.points < p.full_score + end + return true + end + + #get a list of available problem def available_problems if not GraderConfiguration.multicontests? - return Problem.find_available_problems + if GraderConfiguration.use_problem_group? + return available_problems_in_group + else + return Problem.available_problems + end else contest_problems = [] pin = {} @@ -299,12 +316,32 @@ end end + def available_problems_in_group + problem = [] + self.groups.each do |group| + group.problems.where(available: true).each { |p| problem << p } + end + problem.uniq! + if problem + problem.sort! do |a,b| + case + when a.date_added < b.date_added + 1 + when a.date_added > b.date_added + -1 + else + a.name <=> b.name + end + end + return problem + else + return [] + end + end + def can_view_problem?(problem) - if not GraderConfiguration.multicontests? - return problem.available - else - return problem_in_user_contests? problem - end + return true if admin? + return available_problems.include? problem end def self.clear_last_login diff --git a/app/views/announcements/edit.html.erb b/app/views/announcements/edit.html.erb deleted file mode 100644 --- a/app/views/announcements/edit.html.erb +++ /dev/null @@ -1,47 +0,0 @@ -

-<h1>Editing announcement</h1>
-
-<%= error_messages_for :announcement %>
-
-<%= form_for(@announcement) do |f| %>
-  <p>
-    <b>Title</b>
-    <%= f.text_field :title %>
-  </p>
-
-  <p>
-    <b>Notes</b> (shown internally, used to organize announcements)
-    <%= f.text_field :notes %>
-  </p>
-
-  <p>
-    <b>Body</b>
-    <%= f.text_area :body %>
-  </p>
-
-  <p>
-    <b>Author</b>
-    <%= f.text_field :author %>
-  </p>
-
-  <p>
-    <b>Published</b>
-    <%= f.check_box :published %>
-  </p>
-
-  <p>
-    <b>Show on front page?</b>
-    <%= f.check_box :frontpage %>
-  </p>
-
-  <p>
-    <b>Show only in contest?</b>
-    <%= f.check_box :contest_only %>
-  </p>
-
-  <p>
-    <%= f.submit "Update" %>
-  </p>
-<% end %> - -<%= link_to 'Show', @announcement %> | -<%= link_to 'Back', announcements_path %> diff --git a/app/views/announcements/edit.html.haml b/app/views/announcements/edit.html.haml new file mode 100644 --- /dev/null +++ b/app/views/announcements/edit.html.haml @@ -0,0 +1,34 @@ +.container-fluid + %h1 Editing announcement + = error_messages_for :announcement + .row + .col-md-6 + = form_for(@announcement) do |f| + .form-group + %label Title + = f.text_field :title, class: 'form-control' + .form-group + %label Notes + (shown internally, used to organize announcements) + = f.text_field :notes, class: 'form-control' + .form-group + %label Body + = f.text_area :body, class: 'form-control', style: 'height: 200px;' + .form-group + %label Author + = f.text_field :author, class: 'form-control' + .checkbox + %label + = f.check_box :published + Published + .checkbox + %label + = f.check_box :frontpage + Show on front page? + .checkbox + %label + = f.check_box :contest_only + Show only in contest? + = f.submit "Update", class: 'btn btn-primary' + = link_to 'Show', @announcement, class: 'btn btn-default' + = link_to 'Back', announcements_path, class: 'btn btn-default' diff --git a/app/views/announcements/index.html.haml b/app/views/announcements/index.html.haml --- a/app/views/announcements/index.html.haml +++ b/app/views/announcements/index.html.haml @@ -28,8 +28,8 @@ %br/ = h announcement.body %td= h announcement.author - %td= toggle_button(announcement.published?, toggle_announcement_url(@announcement), "announcement_toggle_#{@announcement.id}", {size: 'btn-sm'}) - %td= toggle_button(announcement.frontpage?, toggle_front_announcement_url(@announcement), "announcement_toggle_front_#{@announcement.id}", {size: 'btn-sm'}) + %td= toggle_button(announcement.published?, toggle_announcement_path(@announcement), "announcement_toggle_#{@announcement.id}", {size: 'btn-sm'}) + %td= toggle_button(announcement.frontpage?, toggle_front_announcement_path(@announcement), "announcement_toggle_front_#{@announcement.id}", {size: 'btn-sm'}) %td= link_to 'Edit', edit_announcement_path(announcement), class: 'btn btn-block btn-sm btn-info' %td= link_to 'Destroy', announcement, :confirm => 'Are you sure?', :method => :delete, class: "btn btn-block btn-sm btn-danger" %br diff --git a/app/views/announcements/show.html.erb b/app/views/announcements/show.html.erb --- a/app/views/announcements/show.html.erb +++ b/app/views/announcements/show.html.erb @@ -15,7 +15,7 @@

   Body:
-  <%=h @announcement.body %>
+  <%=h markdown(@announcement.body) %>

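The announcements/show.html.erb hunk above starts passing the announcement body through a markdown view helper before it is rendered. The helper itself is not part of this diff; a minimal sketch of what such a helper could look like, assuming it is backed by RDiscount (an assumption, not something this patch shows), is:

    # Hypothetical sketch of a markdown() view helper, not the project's actual code.
    require 'rdiscount'

    module MarkdownHelper
      # Convert a Markdown string to HTML and mark it as safe for the view layer.
      def markdown(text)
        return ''.html_safe if text.blank?
        RDiscount.new(text.to_s).to_html.html_safe
      end
    end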
diff --git a/app/views/application/_compiler_message.html.erb b/app/views/application/_compiler_message.html.erb new file mode 100644 --- /dev/null +++ b/app/views/application/_compiler_message.html.erb @@ -0,0 +1,15 @@ + +<% if compiler_message == nil or compiler_message.chomp == '' %> + No message +<% else %> +

+
+
+ <%=simple_format(compiler_message) %> +
+
+<% end %> diff --git a/app/views/application/_submission.html.haml b/app/views/application/_submission.html.haml new file mode 100644 --- /dev/null +++ b/app/views/application/_submission.html.haml @@ -0,0 +1,26 @@ + +%tr + %td{:align => "center"} + = submission.number + %td.text-right + = link_to "##{submission.id}", submission_path(submission.id) + %td + = l submission.submitted_at, format: :long + = "( #{time_ago_in_words(submission.submitted_at)} ago)" + %td + = submission.source_filename + = " (#{submission.language.pretty_name}) " + = link_to('[load]',{:action => 'source', :id => submission.id}) + %td + - if submission.graded_at + = "Graded at #{format_short_time(submission.graded_at)}." + %br/ + = "Score: #{(submission.points*100/submission.problem.full_score).to_i} " if GraderConfiguration['ui.show_score'] + = " [" + %tt + = submission.grader_comment + = "]" + %td + = render :partial => 'compiler_message', :locals => {:compiler_message => submission.compiler_message } + %td + = link_to 'Edit', edit_submission_path(submission.id), class: 'btn btn-success' diff --git a/app/views/application/_submission_short.html.haml b/app/views/application/_submission_short.html.haml --- a/app/views/application/_submission_short.html.haml +++ b/app/views/application/_submission_short.html.haml @@ -1,26 +1,28 @@ - - if submission.nil? = "-" - else - - if submission.graded_at.nil? - =t 'main.submitted_at' - = format_short_time(submission.submitted_at.localtime) + %strong= "Submission ID:" + = submission.id + %br + - unless submission.graded_at + %strong= t 'main.submitted_at:' + = format_full_time_ago(submission.submitted_at.localtime) - else - = t 'main.graded_at' - = "#{format_short_time(submission.graded_at.localtime)}, " + %strong= t 'main.graded_at:' + = format_full_time_ago(submission.graded_at.localtime) + %br - if GraderConfiguration['ui.show_score'] - = t 'main.score' + %strong=t 'main.score' = "#{(submission.points*100/submission.problem.full_score).to_i} " = " [" %tt = submission.grader_comment = "]" - - if GraderConfiguration.show_grading_result - = " | " - = link_to '[detailed result]', :action => 'result', :id => submission.id - = " | " - = link_to("[#{t 'main.cmp_msg'}]", {:action => 'compiler_msg', :id => submission.id}, {:popup => true}) - = " | " - = link_to("[#{t 'main.src_link'}]",{:action => 'source', :id => submission.id}) - //= " | " - //= link_to "[#{t 'main.submissions_link'}]", main_submission_path(submission.problem.id) + %br + %strong View: + - if GraderConfiguration.show_grading_result + = link_to '[detailed result]', :action => 'result', :id => submission.id + = link_to "#{t 'main.cmp_msg'}", {:action => 'compiler_msg', :id => submission.id}, {popup: true,class: 'btn btn-xs btn-info'} if submission.graded_at + = link_to "#{t 'main.src_link'}", download_submission_path(submission.id), class: 'btn btn-xs btn-info' + = link_to "#{t 'main.submissions_link'}", problem_submissions_path(problem_id), class: 'btn btn-xs btn-info' + diff --git a/app/views/configurations/index.html.haml b/app/views/configurations/index.html.haml --- a/app/views/configurations/index.html.haml +++ b/app/views/configurations/index.html.haml @@ -3,24 +3,27 @@ %h1 System configuration -%table.info - %tr.info-head - %th Key - %th Type - %th Value - %th Description - - @configurations.each do |conf| - - @grader_configuration = conf - %tr{:class => cycle("info-odd", "info-even")} - %td - /= in_place_editor_field :grader_configuration, :key, {}, :rows=>1 - = @grader_configuration.key - %td - /= 
in_place_editor_field :grader_configuration, :value_type, {}, :rows=>1 - = @grader_configuration.value_type - %td - = best_in_place @grader_configuration, :value, ok_button: "ok", cancel_button: "cancel" - %td= conf.description +- @group.each do |g| + %h2= g + %table.table.table-striped + %thead + %th{style: 'width: 25%'} Key + %th{style: 'width: 10%'}Type + %th{style: 'width: 15%'} Value + %th Description + - @configurations.each do |conf| + - next if conf.key[0...(conf.key.index('.'))] != g + - @grader_configuration = conf + %tr + %td + /= in_place_editor_field :grader_configuration, :key, {}, :rows=>1 + = @grader_configuration.key + %td + /= in_place_editor_field :grader_configuration, :value_type, {}, :rows=>1 + = @grader_configuration.value_type + %td + = best_in_place @grader_configuration, :value, ok_button: "ok", cancel_button: "cancel" + %td= conf.description - if GraderConfiguration.config_cached? %br/ diff --git a/app/views/contests/edit.html.erb b/app/views/contests/edit.html.erb deleted file mode 100644 --- a/app/views/contests/edit.html.erb +++ /dev/null @@ -1,29 +0,0 @@ -

-<h1>Editing contest</h1>
-
-<%= form_for(@contest) do |f| %>
-  <%= f.error_messages %>
-
-  <table>
-    <tr>
-      <td><%= f.label :name %></td>
-      <td><%= f.text_field :name %></td>
-    </tr>
-    <tr>
-      <td><%= f.label :title %></td>
-      <td><%= f.text_field :title %></td>
-    </tr>
-    <tr>
-      <td></td>
-      <td>
-        <%= f.check_box :enabled %>
-        <%= f.label :enabled %>
-      </td>
-    </tr>
-  </table>
-  <p>
-    <%= f.submit 'Update' %>
-  </p>
-<% end %> - -<%= link_to 'Show', @contest %> | -<%= link_to 'Back', contests_path %> diff --git a/app/views/contests/edit.html.haml b/app/views/contests/edit.html.haml new file mode 100644 --- /dev/null +++ b/app/views/contests/edit.html.haml @@ -0,0 +1,20 @@ +%h1 Editing contest += form_for(@contest) do |f| + = f.error_messages + %table + %tr + %td= f.label :name + %td= f.text_field :name + %tr + %td= f.label :title + %td= f.text_field :title + %tr + %td + %td + = f.check_box :enabled + = f.label :enabled + %p + = f.submit 'Update' += link_to 'Show', @contest +| += link_to 'Back', contests_path diff --git a/app/views/contests/index.html.erb b/app/views/contests/index.html.erb deleted file mode 100644 --- a/app/views/contests/index.html.erb +++ /dev/null @@ -1,29 +0,0 @@ -

-<h1>Listing contests</h1>
-
-<div class="infobox">
-  <b>Go back to:</b> [<%= link_to 'contest management', :controller => 'contest_management', :action => 'index' %>]
-</div>
-
-<table>
-  <tr>
-    <th>Name</th>
-    <th>Title</th>
-    <th>Enabled</th>
-  </tr>
-
-<% @contests.each do |contest| %>
-  <% @contest = contest %>
-  <tr>
-    <td><%= in_place_editor_field :contest, :name, {}, :rows => 1 %></td>
-    <td><%= in_place_editor_field :contest, :title, {}, :rows => 1 %></td>
-    <td><%= in_place_editor_field :contest, :enabled, {}, :rows => 1 %></td>
-    <td><%= link_to 'Show', contest %></td>
-    <td><%= link_to 'Edit', edit_contest_path(contest) %></td>
-    <td><%= link_to 'Destroy', contest, :confirm => 'Are you sure?', :method => :delete %></td>
-  </tr>
-<% end %>
-</table>
-
-<br />
- -<%= link_to 'New contest', new_contest_path %> diff --git a/app/views/contests/index.html.haml b/app/views/contests/index.html.haml new file mode 100644 --- /dev/null +++ b/app/views/contests/index.html.haml @@ -0,0 +1,27 @@ +%h1 Listing contests +.infobox + %b Go back to: + [#{link_to 'contest management', :controller => 'contest_management', :action => 'index'}] +%p= link_to 'New contest', new_contest_path, class: 'btn btn-success' +%table.table.table-striped + %tr + %th Name + %th Title + %th Enabled + %th + %th + %th + + - @contests.each do |contest| + - @contest = contest + %tr + -#%td= in_place_editor_field :contest, :name, {}, :rows => 1 + -#%td= in_place_editor_field :contest, :title, {}, :rows => 1 + -#%td= in_place_editor_field :contest, :enabled, {}, :rows => 1 + %td= best_in_place @contest, :name + %td= best_in_place @contest, :title + %td= best_in_place @contest, :enabled + %td= link_to 'Show', contest + %td= link_to 'Edit', edit_contest_path(contest) + %td= link_to 'Destroy', contest, :confirm => 'Are you sure?', :method => :delete +%br/ diff --git a/app/views/contests/new.html.erb b/app/views/contests/new.html.erb deleted file mode 100644 --- a/app/views/contests/new.html.erb +++ /dev/null @@ -1,23 +0,0 @@ -

-<h1>New contest</h1>
-
-<%= form_for(@contest) do |f| %>
-  <%= f.error_messages %>
-
-  <p>
-    <%= f.label :name %><br/>
-    <%= f.text_field :name %>
-  </p>
-  <p>
-    <%= f.label :title %><br/>
-    <%= f.text_field :title %>
-  </p>
-  <p>
-    <%= f.label :enabled %><br/>
-    <%= f.check_box :enabled %>
-  </p>
-  <p>
-    <%= f.submit 'Create' %>
-  </p>
-<% end %> - -<%= link_to 'Back', contests_path %> diff --git a/app/views/contests/new.html.haml b/app/views/contests/new.html.haml new file mode 100644 --- /dev/null +++ b/app/views/contests/new.html.haml @@ -0,0 +1,18 @@ +%h1 New contest += form_for(@contest) do |f| + = f.error_messages + %p + = f.label :name + %br/ + = f.text_field :name + %p + = f.label :title + %br/ + = f.text_field :title + %p + = f.label :enabled + %br/ + = f.check_box :enabled + %p + = f.submit 'Create' += link_to 'Back', contests_path diff --git a/app/views/contests/show.html.erb b/app/views/contests/show.html.erb deleted file mode 100644 --- a/app/views/contests/show.html.erb +++ /dev/null @@ -1,14 +0,0 @@ -

Contest: <%=h @contest.title %>

- -
- Go back to: [<%= link_to 'contest management', :controller => 'contest_management', :action => 'index' %>] -
- -

- Enabled: - <%=h @contest.enabled %> -

- - -<%= link_to 'Edit', edit_contest_path(@contest) %> | -<%= link_to 'Back', contests_path %> diff --git a/app/views/contests/show.html.haml b/app/views/contests/show.html.haml new file mode 100644 --- /dev/null +++ b/app/views/contests/show.html.haml @@ -0,0 +1,11 @@ +%h1 + Contest: #{h @contest.title} +.infobox + %b Go back to: + [#{link_to 'contest management', :controller => 'contest_management', :action => 'index'}] +%p + %b Enabled: + = h @contest.enabled += link_to 'Edit', edit_contest_path(@contest) +| += link_to 'Back', contests_path diff --git a/app/views/graders/_grader.html.haml b/app/views/graders/_grader.html.haml --- a/app/views/graders/_grader.html.haml +++ b/app/views/graders/_grader.html.haml @@ -2,10 +2,10 @@ %td= grader.host %td= grader.pid %td= grader.mode -%td= grader.updated_at.strftime("%H:%M:%S") unless grader.updated_at.nil? +%td= grader.updated_at.strftime("%H:%M:%S") if grader.updated_at %td= grader.task_type %td - - if grader.task_id.nil? + - unless grader.task_id idle - else = link_to "#{grader.task_id}", :action => 'view', :id => grader.task_id, :type => grader.task_type diff --git a/app/views/graders/list.html.haml b/app/views/graders/list.html.haml --- a/app/views/graders/list.html.haml +++ b/app/views/graders/list.html.haml @@ -15,23 +15,6 @@ =link_to 'Stop all running Graders', { action: 'stop_all'}, class: 'btn btn-default', method: 'post' =link_to 'Clear all data', { action: 'clear_all'}, class: 'btn btn-default', method: 'post' -.submitbox - .item - Grader control: - .item - = form_for :clear, :url => {:action => 'start_grading'} do |f| - = submit_tag 'Start graders in grading env' - .item - = form_for :clear, :url => {:action => 'start_exam'} do |f| - = submit_tag 'Start graders in exam env' - .item - = form_for :clear, :url => {:action => 'stop_all'} do |f| - = submit_tag 'Stop all running graders' - .item - = form_for :clear, :url => {:action => 'clear_all'} do |f| - = submit_tag 'Clear all data' - %br{:style => 'clear:both'}/ - .row .col-md-6 - if @last_task @@ -67,15 +50,33 @@ %th Submitted %th Graded %th Result - %th %tbody - @submission.each do |sub| %tr.inactive - %td= link_to sub.id, controller: 'graders' ,action: 'submission', id: sub.id - %td= sub.try(:user).try(:full_name) - %td= sub.try(:problem).try(:full_name) + %td= link_to sub.id, submission_path(sub.id) + %td= ("" unless sub.user) || link_to(sub.try(:user).try(:full_name), stat_user_path(sub.user.id)) + %td= ("" unless sub.problem) || link_to(sub.try(:problem).try(:full_name), stat_problem_path(sub.problem.id)) + %td= "#{time_ago_in_words(sub.submitted_at)} ago" + %td= sub.graded_at ? "#{time_ago_in_words(sub.graded_at)} ago" : " " + %td= sub.grader_comment + %h2 Ungraded submission + %table.table.table-striped.table-condensed + %thead + %th ID + %th User + %th Problem + %th Submitted + %th Graded + %th Result + %tbody + - @backlog_submission.each do |sub| + %tr.inactive + %td= link_to sub.id, submission_path(sub.id) + %td= ("" unless sub.user) || link_to( sub.try(:user).try(:full_name), stat_user_path(sub.user.id)) + %td= ("" unless sub.problem) || link_to( sub.try(:problem).try(:full_name), stat_problem_path(sub.problem.id)) %td= "#{time_ago_in_words(sub.submitted_at)} ago" %td= sub.graded_at ? 
"#{time_ago_in_words(sub.graded_at)} ago" : " " %td= sub.grader_comment + diff --git a/app/views/graders/submission.html.haml b/app/views/graders/submission.html.haml deleted file mode 100644 --- a/app/views/graders/submission.html.haml +++ /dev/null @@ -1,90 +0,0 @@ -//%style{type: "text/css"} -// = @css_style - -%h1= "Submission: #{@submission.id}" - -%textarea#data{style: "display:none;"} - :preserve - #{@submission.source} - -//%div.highlight{:style => "border: 1px solid black;"} -//=@formatted_code.html_safe -.containter - .row - .col-md-7 - %h2 Source Code - .col-md-5 - %h2 Stat - .row - .col-md-7 - %div#editor{ style: "font-size: 14px; height: 400px; border-radius:5px;" } - :javascript - e = ace.edit("editor") - e.setOptions({ maxLines: Infinity }) - e.setValue($("#data").text()) - e.gotoLine(1) - e.getSession().setMode("#{get_ace_mode(@submission.language)}") - e.setReadOnly(true) - .col-md-5 - %table.table.table-striped - %tr - %td.text-right - %strong User - %td - - if @submission.user - = link_to "(#{@submission.user.login})", controller: "users", action: "profile", id: @submission.user - = @submission.user.full_name - - else - = "(n/a)" - %tr - %td.text-right - %strong Task - %td - - if @submission.problem!=nil - = link_to "(#{@submission.problem.name})", controller: "problems", action: "stat", id: @submission.problem - = @submission.problem.full_name - - else - = "(n/a)" - %tr - %td.text-right - %strong Tries - %td= @submission.number - %tr - %td.text-right - %strong Language - %td= @submission.language.pretty_name - %tr - %td.text-right - %strong Submitted - %td #{time_ago_in_words(@submission.submitted_at)} ago (at #{@submission.submitted_at.to_formatted_s(:long)}) - %tr - %td.text-right - %strong Graded - - if @submission.graded_at - %td #{time_ago_in_words(@submission.graded_at)} ago (at #{@submission.graded_at.to_formatted_s(:long)}) - - else - %td - - %tr - %td.text-right - %strong Points - %td #{@submission.points}/#{@submission.problem.full_score} - %tr - %td.text-right - %strong Comment - %td #{@submission.grader_comment} - %tr - %td.text-right - %strong Runtime (s) - %td #{@submission.max_runtime} - %tr - %td.text-right - %strong Memory (kb) - %td #{@submission.peak_memory} - - if session[:admin] - %tr - %td.text-right - %strong IP - %td #{@submission.ip_address} - - - diff --git a/app/views/graders/task.html.haml b/app/views/graders/task.html.haml --- a/app/views/graders/task.html.haml +++ b/app/views/graders/task.html.haml @@ -9,7 +9,7 @@ %br/ = "Submission: #{@task.submission_id}" - if @task.submission !=nil - = link_to '[view submission]', :action => 'submission', :id => @task.submission.id + = link_to '[view submission]', submission_path( @task.submission.id ) %br/ = "Submitted at: #{format_short_time(@task.created_at)}" %br/ diff --git a/app/views/groups/_form.html.haml b/app/views/groups/_form.html.haml new file mode 100644 --- /dev/null +++ b/app/views/groups/_form.html.haml @@ -0,0 +1,16 @@ += form_for @group do |f| + - if @group.errors.any? 
+ #error_explanation + %h2= "#{pluralize(@group.errors.count, "error")} prohibited this group from being saved:" + %ul + - @group.errors.full_messages.each do |msg| + %li= msg + + .form-group.field + = f.label :name + = f.text_field :name, class: 'form-control' + .form-group.field + = f.label :description + = f.text_field :description, class: 'form-control' + .form-group.actions + = f.submit 'Save', class: 'btn btn-primary' diff --git a/app/views/groups/edit.html.haml b/app/views/groups/edit.html.haml new file mode 100644 --- /dev/null +++ b/app/views/groups/edit.html.haml @@ -0,0 +1,7 @@ +%h1 Editing group + += render 'form' + += link_to 'Show', @group +\| += link_to 'Back', groups_path diff --git a/app/views/groups/index.html.haml b/app/views/groups/index.html.haml new file mode 100644 --- /dev/null +++ b/app/views/groups/index.html.haml @@ -0,0 +1,22 @@ +%h1 Groups + +%p + = link_to 'New Group', new_group_path, class: 'btn btn-primary' +%table.table.table-hover + %thead + %tr + %th Name + %th Description + %th + %th + + %tbody + - @groups.each do |group| + %tr + %td= group.name + %td= group.description + %td= link_to 'View', group, class: 'btn btn-default' + %td= link_to 'Destroy', group, :method => :delete, :data => { :confirm => 'Are you sure?' }, class: 'btn btn-danger' + +%br + diff --git a/app/views/groups/new.html.haml b/app/views/groups/new.html.haml new file mode 100644 --- /dev/null +++ b/app/views/groups/new.html.haml @@ -0,0 +1,5 @@ +%h1 New group + += render 'form' + += link_to 'Back', groups_path diff --git a/app/views/groups/show.html.haml b/app/views/groups/show.html.haml new file mode 100644 --- /dev/null +++ b/app/views/groups/show.html.haml @@ -0,0 +1,73 @@ +%p + %b Name: + = @group.name +%p + %b Description: + = @group.description + +%br += link_to 'Edit', edit_group_path(@group) +\| += link_to 'Back', groups_path + +.row + .col-md-12 + %h1 Group details +.row + .col-md-6 + .panel.panel-default + .panel-heading + .panel-title Users in this group + .panel-body + =form_tag add_user_group_path(@group), class: 'form-inline' do + .form-group + =label_tag :user_id, "User" + =select_tag :user_id, options_from_collection_for_select(User.all,'id','full_name'), class: 'select2' + =submit_tag "Add",class: 'btn btn-primary' + + + %table.table.table-hover + %thead + %tr + %th Login + %th Full name + %th Remark + %th= link_to 'Remove All', remove_all_user_group_path(@group), method: :delete, :data => { :confirm => "Remove ALL USERS from group?" }, class: 'btn btn-danger btn-sm' + + %tbody + - @group.users.each do |user| + %tr + %td= user.login + %td= user.full_name + %td= user.remark + %td= link_to 'Remove', remove_user_group_path(@group,user), :method => :delete, :data => { :confirm => "Remove #{user.full_name}?" }, class: 'btn btn-danger btn-sm' + .col-md-6 + .panel.panel-default + .panel-heading + .panel-title Problems + .panel-body + + =form_tag add_problem_group_path(@group), class: 'form-inline' do + .form-group + =label_tag :problem_id, "Problem" + =select_tag :problem_id, options_from_collection_for_select(Problem.all,'id','full_name'), class: 'select2' + =submit_tag "Add",class: 'btn btn-primary' + + + %table.table.table-hover + %thead + %tr + %th name + %th Full name + %th Full score + %th= link_to 'Remove All', remove_all_problem_group_path(@group), method: :delete, :data => { :confirm => "Remove ALL PROBLEMS from group?" 
}, class: 'btn btn-danger btn-sm' + + %tbody + - @group.problems.each do |problem| + %tr + %td= problem.name + %td= problem.full_name + %td= problem.full_score + %td= link_to 'Remove', remove_problem_group_path(@group,problem), :method => :delete, :data => { :confirm => "Remove #{problem.full_name}?" }, class: 'btn btn-danger btn-sm' + + diff --git a/app/views/layouts/_header.html.haml b/app/views/layouts/_header.html.haml --- a/app/views/layouts/_header.html.haml +++ b/app/views/layouts/_header.html.haml @@ -2,20 +2,26 @@ %nav .container-fluid .navbar-header + %button.navbar-toggle.collapsed{ data: {toggle: 'collapse', target: '#navbar-collapse'} } + %span.sr-only Togggle Navigation + %span.icon-bar + %span.icon-bar + %span.icon-bar %a.navbar-brand{href: main_list_path} %span.glyphicon.glyphicon-home MAIN - .collapse.navbar-collapse + .collapse.navbar-collapse#navbar-collapse %ul.nav.navbar-nav + / submission - if (@current_user!=nil) and (GraderConfiguration.show_tasks_to?(@current_user)) - //= add_menu("#{I18n.t 'menu.tasks'}", 'tasks', 'list') %li.dropdown %a.dropdown-toggle{href: '#', data: {toggle:'dropdown'}, aria: {haspopup:"true", expanded:"false"}, role: "button"} = "#{I18n.t 'menu.submissions'}" %span.caret %ul.dropdown-menu - = add_menu("View", 'main', 'submission') + = add_menu("View", 'submissions', 'index') = add_menu("Self Test", 'test', 'index') + / hall of fame - if GraderConfiguration['right.user_hall_of_fame'] = add_menu("#{I18n.t 'menu.hall_of_fame'}", 'report', 'problem_hof') / display MODE button (with countdown in contest mode) @@ -32,6 +38,7 @@ $("#countdown").countdown({until: "+#{@current_user.contest_time_left.to_i}s", layout: 'Time left: {hnn}:{mnn}:{snn}'}); / admin section - if (@current_user!=nil) and (session[:admin]) + / management %li.dropdown %a.dropdown-toggle{href: '#', data: {toggle:'dropdown'}, aria: {haspopup:"true", expanded:"false"}, role: "button"} Manage @@ -39,7 +46,9 @@ %ul.dropdown-menu = add_menu( 'Announcements', 'announcements', 'index') = add_menu( 'Problems', 'problems', 'index') + = add_menu( 'Tags', 'tags', 'index') = add_menu( 'Users', 'user_admin', 'index') + = add_menu( 'User Groups', 'groups', 'index') = add_menu( 'Graders', 'graders', 'list') = add_menu( 'Message ', 'messages', 'console') %li.divider{role: 'separator'} @@ -47,12 +56,14 @@ %li.divider{role: 'separator'} = add_menu( 'Sites', 'sites', 'index') = add_menu( 'Contests', 'contest_management', 'index') + / report %li.dropdown %a.dropdown-toggle{href: '#', data: {toggle:'dropdown'}, aria: {haspopup:"true", expanded:"false"}, role: "button"} Report %span.caret %ul.dropdown-menu - = add_menu( 'Results', 'user_admin', 'user_stat') + = add_menu( 'Current Score', 'report', 'current_score') + = add_menu( 'Score Report', 'report', 'max_score') = add_menu( 'Report', 'report', 'multiple_login') - if (ungraded = Submission.where('graded_at is null').where('submitted_at < ?', 1.minutes.ago).count) > 0 =link_to "#{ungraded} backlogs!", diff --git a/app/views/layouts/application.html.haml b/app/views/layouts/application.html.haml --- a/app/views/layouts/application.html.haml +++ b/app/views/layouts/application.html.haml @@ -11,5 +11,6 @@ %body = render 'layouts/header' - = content_tag(:p,flash[:notice],class: 'alert alert-success') if flash[:notice]!=nil + /= content_tag(:p,flash[:notice],class: 'alert alert-success') if flash[:notice]!=nil + = flash_messages = yield diff --git a/app/views/main/_announcement.html.haml b/app/views/main/_announcement.html.haml --- 
a/app/views/main/_announcement.html.haml +++ b/app/views/main/_announcement.html.haml @@ -1,6 +1,8 @@ %li.list-group-item %strong = announcement.title + - if @current_user and @current_user.admin? + = link_to 'Edit', edit_announcement_path(announcement), class: 'btn btn-xs btn-default' %small= "(updated #{time_ago_in_words(announcement.updated_at)} ago on #{announcement.updated_at})" %br diff --git a/app/views/main/_compiler_message.html.erb b/app/views/main/_compiler_message.html.erb deleted file mode 100644 --- a/app/views/main/_compiler_message.html.erb +++ /dev/null @@ -1,15 +0,0 @@ - -<% if compiler_message == nil or compiler_message.chomp == '' %> - No message -<% else %> -
-
-
- <%=simple_format(compiler_message) %> -
-
-<% end %> diff --git a/app/views/main/_login_box.html.haml b/app/views/main/_login_box.html.haml --- a/app/views/main/_login_box.html.haml +++ b/app/views/main/_login_box.html.haml @@ -12,22 +12,26 @@ %hr/ %div{ :style => "border: solid 1px gray; padding: 4px; background: #eeeeff;"} - = form_tag :controller => 'login', :action => 'login' do - %table - %tr - %td{:align => "right"} - ="#{t 'login_label'}:" - %td= text_field_tag 'login' - %tr - %td{:align => "right"} - ="#{t 'password_label'}:" - %td= password_field_tag - - unless GraderConfiguration['right.bypass_agreement'] - %tr - %td{:align => "right"}= check_box_tag 'accept_agree' - %td ยอมรับข้อตกลงการใช้งาน - - = submit_tag t('login.login_submit') + = form_tag login_login_path, {class: 'form-horizontal'} do + .form-group + =label_tag :login, "Login",class: 'col-sm-3 control-label' + .col-sm-9 + =text_field_tag :login, nil, class: 'form-control' + .form-group + =label_tag :password, "Password", class: 'col-sm-3 control-label' + .col-sm-9 + =password_field_tag :password, nil, class: 'form-control' + - unless GraderConfiguration['right.bypass_agreement'] + .form-group + .col-sm-offset-3.col-sm-9 + .checkbox + %label + = check_box_tag 'accept_agree' + ยอมรับข้อตกลงการใช้งาน + + .form-group + .col-sm-offset-3.col-sm-9 + = submit_tag t('login.login_submit'), class: 'btn btn-primary' %br/ - if GraderConfiguration['system.online_registration'] diff --git a/app/views/main/_problem.html.haml b/app/views/main/_problem.html.haml --- a/app/views/main/_problem.html.haml +++ b/app/views/main/_problem.html.haml @@ -1,17 +1,22 @@ %tr %td - = "#{problem.name}" + - if @current_user and @current_user.admin? + = link_to problem.name, stat_problem_path(problem) + - else + = "#{problem.name}" %td = "#{problem.full_name}" + + %br = link_to_description_if_any "[#{t 'main.problem_desc'}] ".html_safe, problem %td = @prob_submissions[problem.id][:count] - = link_to "[subs]", main_submission_path(problem.id) + -#= link_to "[subs]", main_submission_path(problem.id) %td = render :partial => 'submission_short', - :locals => {:submission => @prob_submissions[problem.id][:submission], :problem_name => problem.name } + :locals => {:submission => @prob_submissions[problem.id][:submission], :problem_name => problem.name, :problem_id => problem.id } %td - if @prob_submissions[problem.id][:submission] - = link_to 'Edit', direct_edit_submission_path(@prob_submissions[problem.id][:submission]), class: 'btn btn-success' + = link_to 'Edit', edit_submission_path(@prob_submissions[problem.id][:submission]), class: 'btn btn-success' - else - = link_to 'New', direct_edit_path(problem.id), class: 'btn btn-success' + = link_to 'New', direct_edit_problem_submissions_path(problem.id), class: 'btn btn-success' diff --git a/app/views/main/_submission.html.haml b/app/views/main/_submission.html.haml deleted file mode 100644 --- a/app/views/main/_submission.html.haml +++ /dev/null @@ -1,26 +0,0 @@ - -%tr - %td{:align => "center"} - = submission_counter+1 - %td{:align => "center"} - = link_to "##{submission.id}", controller: :graders, action: :submission, id: submission.id - %td - = l submission.submitted_at, format: :long - = "( #{time_ago_in_words(submission.submitted_at)} ago)" - %td - = submission.source_filename - = " (#{submission.language.pretty_name}) " - = link_to('[load]',{:action => 'source', :id => submission.id}) - %td - - if submission.graded_at - = "Graded at #{format_short_time(submission.graded_at)}." 
- %br/ - = "Score: #{(submission.points*100/submission.problem.full_score).to_i} " if GraderConfiguration['ui.show_score'] - = " [" - %tt - = submission.grader_comment - = "]" - %td - = render :partial => 'compiler_message', :locals => {:compiler_message => submission.compiler_message } - %td - = link_to 'Edit', direct_edit_submission_path(submission.id), class: 'btn btn-success' diff --git a/app/views/main/_submission_box.html.haml b/app/views/main/_submission_box.html.haml --- a/app/views/main/_submission_box.html.haml +++ b/app/views/main/_submission_box.html.haml @@ -8,7 +8,7 @@ %li= msg .form-group = label_tag :submission, 'Problem:' - = select 'submission', 'problem_id', [[(t 'main.specified_in_header'),'-1']] + @problems.collect {|p| ["[#{p.name}] #{p.full_name}", p.id]}, {:selected => '-1'}, { class: 'select2 form-control' } + = select 'submission', 'problem_id', [[(t 'main.specified_in_header'),'-1']] + @problems.collect {|p| ["[#{p.name}] #{p.full_name}", p.id]}, {:selected => '-1'}, { class: 'select2 form-control', style: "width: 100%" } .form-group = label_tag :file, 'File:' .input-group diff --git a/app/views/main/_submission_short.html.haml b/app/views/main/_submission_short.html.haml --- a/app/views/main/_submission_short.html.haml +++ b/app/views/main/_submission_short.html.haml @@ -2,25 +2,28 @@ - if submission.nil? = "-" - else - - if submission.graded_at.nil? - =t 'main.submitted_at' + - unless submission.graded_at + = t 'main.submitted_at' = format_short_time(submission.submitted_at.localtime) - else - = t 'main.graded_at' - = "#{format_short_time(submission.graded_at.localtime)}, " + %strong= t 'main.graded_at' + = "#{format_short_time(submission.graded_at.localtime)} " + %br - if GraderConfiguration['ui.show_score'] - = t 'main.score' + %strong=t 'main.score' = "#{(submission.points*100/submission.problem.full_score).to_i} " = " [" - %tt + %tt.grader-comment = submission.grader_comment = "]" + %br + %strong View: - if GraderConfiguration.show_grading_result - = " | " = link_to '[detailed result]', :action => 'result', :id => submission.id - = " | " - = link_to("[#{t 'main.cmp_msg'}]", {:action => 'compiler_msg', :id => submission.id}, {:popup => true}) - = " | " - = link_to("[#{t 'main.src_link'}]",{:action => 'source', :id => submission.id}) - //= " | " - //= link_to "[#{t 'main.submissions_link'}]", main_submission_path(submission.problem.id) + /= link_to "#{t 'main.cmp_msg'}", {:action => 'compiler_msg', :id => submission.id}, {popup: true,class: 'btn btn-xs btn-info'} + = link_to "#{t 'main.cmp_msg'}", compiler_msg_submission_path(submission.id), {popup: true,remote: true,class: 'btn btn-xs btn-info'} + = link_to "#{t 'main.src_link'}",{:action => 'source', :id => submission.id}, class: 'btn btn-xs btn-info' + = link_to "#{t 'main.submissions_link'}", problem_submissions_path(problem_id), class: 'btn btn-xs btn-info' + - if GraderConfiguration.show_testcase + = link_to "testcases", show_problem_testcases_path(problem_id), class: 'btn btn-xs btn-info' + diff --git a/app/views/main/list.html.haml b/app/views/main/list.html.haml --- a/app/views/main/list.html.haml +++ b/app/views/main/list.html.haml @@ -43,6 +43,7 @@ .panel.panel-info .panel-heading Announcement + = link_to 'Manage', announcements_path, class: 'btn btn-xs btn-default' %ul.list-group = render :partial => 'announcement', :collection => @announcements @@ -50,3 +51,14 @@ = "Announcement.refreshUrl = '#{url_for :controller => 'main', :action => 'announcements'}';" Announcement.registerRefreshEventTimer(); 
+.modal.fade#compiler{tabindex: -1,role: 'dialog'} + .modal-dialog.modal-lg{role:'document'} + .modal-content + .modal-header + %button.close{type: 'button', data: {dismissed: :modal}, aria: {label: 'close'}} + %span{aria: {hidden: 'true'}, data: {dismiss: 'modal'}} × + %h4 Compiler message + .modal-body + %pre#compiler_msg + .modal-footer + %button.btn.btn-default{type: 'button', data: {dismiss: 'modal'}} Close diff --git a/app/views/main/login.html.haml b/app/views/main/login.html.haml --- a/app/views/main/login.html.haml +++ b/app/views/main/login.html.haml @@ -1,12 +1,11 @@ %h1= GraderConfiguration['ui.front.title'] -%table - %tr - %td - - if @announcements.length!=0 - .announcementbox{:style => 'margin-top: 0px'} - %span{:class => 'title'} - Announcements - = render :partial => 'announcement', :collection => @announcements - %td{:style => 'vertical-align: top; width: 40%; padding-left: 20px;'} - = render :partial => 'login_box' +.row + .col-md-6 + - if @announcements.length!=0 + .announcementbox{:style => 'margin-top: 0px'} + %span{:class => 'title'} + Announcements + = render :partial => 'announcement', :collection => @announcements + .col-md-4{style: "padding-left: 20px;"} + = render :partial => 'login_box' diff --git a/app/views/main/submission.html.haml b/app/views/main/submission.html.haml deleted file mode 100644 --- a/app/views/main/submission.html.haml +++ /dev/null @@ -1,31 +0,0 @@ -= user_title_bar(@user) - -.panel.panel-info - .panel-heading - Select Problems - .panel-body - .form-inline - = select 'submission', - 'problem_id', - @problems.collect {|p| ["[#{p.name}] #{p.full_name}", main_submission_url(p.id)]}, - { selected: (@problem ? main_submission_url(@problem) : -1) }, - { class: 'select2 form-control'} - %button.btn.btn-primary.btn-sm.go-button#problem_go{data: {source: '#submission_problem_id'}} Go - -- if @problem!=nil - %h2= "Task: #{@problem.full_name} (#{@problem.name})" - -- if @submissions!=nil - - if @submissions.length>0 - %table.table - %thead - %th No. - %th # - %th At - %th Source - %th Result - %th{:width => "300px"} Compiler message - %th - = render :partial => 'submission', :collection => @submissions - - else - No submission diff --git a/app/views/problems/_form.html.erb b/app/views/problems/_form.html.erb deleted file mode 100644 --- a/app/views/problems/_form.html.erb +++ /dev/null @@ -1,54 +0,0 @@ -<%= error_messages_for 'problem' %> - - -


-<%= text_field 'problem', 'name' %>

- -


-<%= text_field 'problem', 'full_name' %>

- -


-<%= text_field 'problem', 'full_score' %>

- -


-<%= date_select 'problem', 'date_added' %>

- -<% -# TODO: these should be put in model Problem, but I can't think of -# nice default values for them. These values look fine only -# in this case (of lazily adding new problems). -@problem.available = true if @problem!=nil and @problem.available==nil -@problem.test_allowed = true if @problem!=nil and @problem.test_allowed==nil -@problem.output_only = false if @problem!=nil and @problem.output_only==nil -%> - -

- -<%= check_box :problem, :available %> - - -<%= check_box :problem, :test_allowed %> - - -<%= check_box :problem, :output_only %> -

- -<%= error_messages_for 'description' %> - -


-<%= text_area :description, :body, :rows => 10, :cols => 80 %>

- -

-<%= select "description", - "markdowned", - [['True',true],['False',false]], - {:selected => (@description) ? @description.markdowned : false } -%>

- -


-<%= text_field 'problem', 'url' %>

- -

Task PDF <%= file_field_tag 'file' %>

- - - diff --git a/app/views/problems/_form.html.haml b/app/views/problems/_form.html.haml new file mode 100644 --- /dev/null +++ b/app/views/problems/_form.html.haml @@ -0,0 +1,55 @@ += error_messages_for 'problem' +/ [form:problem] +.form-group + %label{:for => "problem_name"} Name + = text_field 'problem', 'name', class: 'form-control' + %small + Do not directly edit the problem name, unless you know what you are doing. If you want to change the name, use the name change button in the problem management menu instead. +.form-group + %label{:for => "problem_full_name"} Full name + = text_field 'problem', 'full_name', class: 'form-control' +.form-group + %label{:for => "problem_full_score"} Full score + = text_field 'problem', 'full_score', class: 'form-control' +.form-group + %label{:for => "problem_full_score"} Tags + = collection_select(:problem, :tag_ids, Tag.all, :id, :name, {}, {multiple: true, class: 'form-control select2'}) +.form-group + %label{:for => "problem_date_added"} Date added + = date_select 'problem', 'date_added', class: 'form-control' +- # TODO: these should be put in model Problem, but I can't think of +- # nice default values for them. These values look fine only +- # in this case (of lazily adding new problems). +- @problem.available = true if @problem!=nil and @problem.available==nil +- @problem.test_allowed = true if @problem!=nil and @problem.test_allowed==nil +- @problem.output_only = false if @problem!=nil and @problem.output_only==nil +.checkbox + %label{:for => "problem_available"} + = check_box :problem, :available + Available? +.checkbox + %label{:for => "problem_test_allowed"} + = check_box :problem, :test_allowed + Test allowed? +.checkbox + %label{:for => "problem_output_only"} + = check_box :problem, :output_only + Output only? += error_messages_for 'description' +.form-group + %label{:for => "description_body"} Description + %br/ + = text_area :description, :body, :rows => 10, :cols => 80,class: 'form-control' +.form-group + %label{:for => "description_markdowned"} Markdowned? + = select "description", | + "markdowned", | + [['True',true],['False',false]], | + {:selected => (@description) ? @description.markdowned : false } | +.form-group + %label{:for => "problem_url"} URL + %br/ + = text_field 'problem', 'url',class: 'form-control' +%p + Task PDF #{file_field_tag 'file'} +/ [eoform:problem] diff --git a/app/views/problems/do_import.html.haml b/app/views/problems/do_import.html.haml --- a/app/views/problems/do_import.html.haml +++ b/app/views/problems/do_import.html.haml @@ -10,7 +10,7 @@ = "Note that the full score has been set to #{@problem.full_score}." %p - = link_to '[Back to problem list]', :action => 'list' + = link_to '[Back to problem list]', problems_path = link_to '[Import other problems]', :action => 'import' %h3 Import log diff --git a/app/views/problems/edit.html.erb b/app/views/problems/edit.html.erb deleted file mode 100644 --- a/app/views/problems/edit.html.erb +++ /dev/null @@ -1,9 +0,0 @@ -

-<h1>Editing problem</h1>

- -<%= form_tag({action: 'update', id: @problem},multipart: true) do %> - <%= render :partial => 'form' %> - <%= submit_tag 'Edit' %> -<% end %> - -<%= link_to 'Show', :action => 'show', :id => @problem %> | -<%= link_to 'Back', :action => 'list' %> diff --git a/app/views/problems/edit.html.haml b/app/views/problems/edit.html.haml new file mode 100644 --- /dev/null +++ b/app/views/problems/edit.html.haml @@ -0,0 +1,14 @@ +.container-fluid + = form_for @problem,url:{action: 'update'},html: {multipart: true} do + .row + .col-md-6 + %h1 Editing problem + = render :partial => 'form' + .row + .col-md-4 + = submit_tag 'Edit', class: 'btn btn-primary btn-block' + .col-md-4 + = link_to 'Show', {:action => 'show', :id => @problem}, class: 'btn btn-default btn-block' + .col-md-4 + = link_to 'Back', problems_path, class: 'btn btn-default btn-block' +.div{style: 'height: 5em'} diff --git a/app/views/problems/import.html.haml b/app/views/problems/import.html.haml --- a/app/views/problems/import.html.haml +++ b/app/views/problems/import.html.haml @@ -3,7 +3,7 @@ %h1 Import problems -%p= link_to '[Back to problem list]', :action => 'list' +%p= link_to '[Back to problem list]', problems_path - if @problem and @problem.errors =error_messages_for 'problem' diff --git a/app/views/problems/index.html.haml b/app/views/problems/index.html.haml --- a/app/views/problems/index.html.haml +++ b/app/views/problems/index.html.haml @@ -1,10 +1,10 @@ - content_for :head do = stylesheet_link_tag 'problems' -%h1 Listing problems +%h1 Problems %p - = link_to 'New problem', new_problem_path, class: 'btn btn-default btn-sm' - = link_to 'Manage problems', { action: 'manage'}, class: 'btn btn-default btn-sm' - = link_to 'Import problems', {:action => 'import'}, class: 'btn btn-default btn-sm' + = link_to 'Import problems', {:action => 'import'}, class: 'btn btn-success btn-sm' + = link_to 'New problem', new_problem_path, class: 'btn btn-success btn-sm' + = link_to 'Bulk Manage', { action: 'manage'}, class: 'btn btn-info btn-sm' = link_to 'Turn off all problems', {:action => 'turn_all_off'}, class: 'btn btn-default btn-sm' = link_to 'Turn on all problems', {:action => 'turn_all_on'}, class: 'btn btn-default btn-sm' .submitbox @@ -21,11 +21,18 @@ %th Name %th Full name %th.text-right Full score + %th Tags + %th + Submit + %sup{class: 'text-primary',data: {toggle: 'tooltip'}, title: 'Admin can always submit to any problem' } [?] %th Date added %th.text-center Avail? %sup{class: 'text-primary',data: {toggle: 'tooltip'}, title: 'Let user submits to this problem?' } [?] %th.text-center + View Data? + %sup{class: 'text-primary',data: {toggle: 'tooltip'}, title: 'Let user view the testcase of this problem?' } [?] + %th.text-center Test? %sup{class: 'text-primary',data: {toggle: 'tooltip'}, title: 'Let user uses test interface on this problem?' } [?] - if GraderConfiguration.multicontests? @@ -33,18 +40,26 @@ - for problem in @problems %tr{:class => "#{(problem.available) ? 
"success" : "danger"}", :id => "prob-#{problem.id}", :name => "prob-#{problem.id}"} - @problem=problem - %td= in_place_editor_field :problem, :name, {}, :rows=>1 - %td= in_place_editor_field :problem, :full_name, {}, :rows=>1 - %td.text-right= in_place_editor_field :problem, :full_score, {}, :rows=>1 + %td= problem.name #in_place_editor_field :problem, :name, {}, :rows=>1 + %td + = problem.full_name #in_place_editor_field :problem, :full_name, {}, :rows=>1 + = link_to_description_if_any "[#{t 'main.problem_desc'}] ".html_safe, problem + %td.text-right= problem.full_score #in_place_editor_field :problem, :full_score, {}, :rows=>1 + %td + - problem.tags.each do |t| + - #%button.btn.btn-default.btn-xs= t.name + %span.label.label-default= t.name + %td= link_to "Submit", direct_edit_problem_submissions_path(problem,@current_user.id), class: 'btn btn-xs btn-primary' %td= problem.date_added - %td= toggle_button(@problem.available?, toggle_problem_url(@problem), "problem-avail-#{@problem.id}") - %td= toggle_button(@problem.test_allowed?, toggle_test_problem_url(@problem), "problem-test-#{@problem.id}") + %td= toggle_button(@problem.available?, toggle_problem_path(@problem), "problem-avail-#{@problem.id}") + %td= toggle_button(@problem.view_testcase?, toggle_view_testcase_problem_path(@problem), "problem-view-testcase-#{@problem.id}") + %td= toggle_button(@problem.test_allowed?, toggle_test_problem_path(@problem), "problem-test-#{@problem.id}") - if GraderConfiguration.multicontests? %td = problem.contests.collect { |c| c.name }.join(', ') %td= link_to 'Stat', {:action => 'stat', :id => problem.id}, class: 'btn btn-info btn-xs btn-block' %td= link_to 'Show', {:action => 'show', :id => problem}, class: 'btn btn-info btn-xs btn-block' %td= link_to 'Edit', {:action => 'edit', :id => problem}, class: 'btn btn-info btn-xs btn-block' - %td= link_to 'Destroy', { :action => 'destroy', :id => problem }, :confirm => 'Are you sure?', :method => :post, class: 'btn btn-danger btn-xs btn-block' + %td= link_to 'Destroy', { :action => 'destroy', :id => problem }, :confirm => 'Are you sure?', :method => :delete, class: 'btn btn-danger btn-xs btn-block' %br/ = link_to '[New problem]', :action => 'new' diff --git a/app/views/problems/manage.html.haml b/app/views/problems/manage.html.haml --- a/app/views/problems/manage.html.haml +++ b/app/views/problems/manage.html.haml @@ -36,50 +36,83 @@ %h1 Manage problems -%p= link_to '[Back to problem list]', :action => 'list' +%p= link_to '[Back to problem list]', problems_path = form_tag :action=>'do_manage' do - .submitbox - What do you want to do to the selected problem? - %br/ - (You can shift-click to select a range of problems) - %ul - %li - Change date added to - = select_date Date.current, :prefix => 'date_added' -     - = submit_tag 'Change', :name => 'change_date_added' - %li - Set available to - = submit_tag 'True', :name => 'enable_problem' - = submit_tag 'False', :name => 'disable_problem' + .panel.panel-primary + .panel-heading + Action + .panel-body + .submit-box + What do you want to do to the selected problem? 
+ %br/ + (You can shift-click to select a range of problems) + %ul.form-inline + %li + Change "Date added" to + .input-group.date + = text_field_tag :date_added, class: 'form-control' + %span.input-group-addon + %span.glyphicon.glyphicon-calendar + -# = select_date Date.current, :prefix => 'date_added' +     + = submit_tag 'Change', :name => 'change_date_added', class: 'btn btn-primary btn-sm' + %li + Set "Available" to + = submit_tag 'True', :name => 'enable_problem', class: 'btn btn-primary btn-sm' + = submit_tag 'False', :name => 'disable_problem', class: 'btn btn-primary btn-sm' - - if GraderConfiguration.multicontests? - %li - Add to - = select("contest","id",Contest.all.collect {|c| [c.title, c.id]}) - = submit_tag 'Add', :name => 'add_to_contest' + - if GraderConfiguration.multicontests? + %li + Add selected problems to contest + = select("contest","id",Contest.all.collect {|c| [c.title, c.id]}) + = submit_tag 'Add', :name => 'add_to_contest', class: 'btn btn-primary btn-sm' + %li + Add selected problems to user group + = select_tag "group_id", options_from_collection_for_select( Group.all, 'id','name',params[:group_name]), id: 'group_name',class: 'select2' + = submit_tag 'Add', name: 'add_group', class: 'btn btn-primary' + %li + Add the following tags to the selected problems + = select_tag "tag_ids", options_from_collection_for_select( Tag.all, 'id','name'), id: 'tags_name',class: 'select2', multiple: true, data: {placeholder: 'Select tags by clicking', width: "200px"} + = submit_tag 'Add', name: 'add_tags', class: 'btn btn-primary' - %table - %tr{style: "text-align: left;"} - %th= check_box_tag 'select_all' - %th Name - %th Full name - %th Available - %th Date added - - if GraderConfiguration.multicontests? - %th Contests + %table.table.table-hover.datatable + %thead + %tr{style: "text-align: left;"} + %th= check_box_tag 'select_all' + %th Name + %th Full name + %th Tags + %th Available + %th Date added + - if GraderConfiguration.multicontests? + %th Contests - - num = 0 - - for problem in @problems - - num += 1 - %tr{:id => "row-prob-#{problem.id}", :name=> "prob-#{problem.id}"} - %td= check_box_tag "prob-#{problem.id}-#{num}" - %td= problem.name - %td= problem.full_name - %td= problem.available - %td= problem.date_added - - if GraderConfiguration.multicontests? + %tbody + - num = 0 + - for problem in @problems + - num += 1 + %tr{:id => "row-prob-#{problem.id}", :name=> "prob-#{problem.id}"} + %td= check_box_tag "prob-#{problem.id}-#{num}" + %td= problem.name + %td= problem.full_name %td - - problem.contests.each do |contest| - = "(#{contest.name} [#{link_to 'x', :action => 'remove_contest', :id => problem.id, :contest_id => contest.id }])" + - problem.tags.each do |t| + %span.label.label-default= t.name + %td= problem.available + %td= problem.date_added + - if GraderConfiguration.multicontests? + %td + - problem.contests.each do |contest| + = "(#{contest.name} [#{link_to 'x', :action => 'remove_contest', :id => problem.id, :contest_id => contest.id }])" + +:javascript + $('.input-group.date').datetimepicker({ + format: 'DD/MMM/YYYY', + showTodayButton: true, + widgetPositioning: {horizontal: 'auto', vertical: 'bottom'}, + + }); + $('.datatable').DataTable({ + paging: false + }); diff --git a/app/views/problems/show.html.erb b/app/views/problems/show.html.erb --- a/app/views/problems/show.html.erb +++ b/app/views/problems/show.html.erb @@ -21,4 +21,4 @@

<%= link_to 'Edit', :action => 'edit', :id => @problem %> | -<%= link_to 'Back', :action => 'list' %> +<%= link_to 'Back', problems_path %> diff --git a/app/views/problems/stat.html.haml b/app/views/problems/stat.html.haml --- a/app/views/problems/stat.html.haml +++ b/app/views/problems/stat.html.haml @@ -25,13 +25,14 @@ %h2 Submissions - if @submissions and @submissions.count > 0 - %table.info#main_table + %table#main_table.table.table-condensed.table-striped %thead - %tr.info-head + %tr %th ID %th Login %th Name %th Submitted_at + %th language %th Points %th comment %th IP @@ -40,14 +41,19 @@ - @submissions.each do |sub| - next unless sub.user - row_odd,curr = !row_odd, sub.user if curr != sub.user - %tr{class: row_odd ? "info-odd" : "info-even"} - %td= link_to sub.id, controller: 'graders', action: 'submission', id: sub.id - %td= link_to sub.user.login, controller: :users, action: :profile, id: sub.user.id + %tr + %td= link_to sub.id, submission_path(sub) + %td= link_to sub.user.login, stat_user_path(sub.user) %td= sub.user.full_name - %td= time_ago_in_words(sub.submitted_at) + " ago" + %td{data: {order: sub.submitted_at}}= time_ago_in_words(sub.submitted_at) + " ago" + %td= sub.language.name %td= sub.points %td.fix-width= sub.grader_comment %td= sub.ip_address - else No submission +:javascript + $("#main_table").DataTable({ + paging: false + }); diff --git a/app/views/problems/toggle_view_testcase.js.haml b/app/views/problems/toggle_view_testcase.js.haml new file mode 100644 --- /dev/null +++ b/app/views/problems/toggle_view_testcase.js.haml @@ -0,0 +1,2 @@ += render partial: 'toggle_button', + locals: {button_id: "#problem-view-testcase-#{@problem.id}",button_on: @problem.view_testcase?} diff --git a/app/views/report/_score_table.html.haml b/app/views/report/_score_table.html.haml new file mode 100644 --- /dev/null +++ b/app/views/report/_score_table.html.haml @@ -0,0 +1,69 @@ +%table.table.sortable.table-striped.table-bordered.table-condensed + %thead + %tr + %th Login + %th Name + / %th Activated? + / %th Logged_in + / %th Contest(s) + %th Remark + - @problems.each do |p| + %th.text-right= p.name.gsub('_',' ') + %th.text-right Total + %th.text-right Passed + %tbody + - sum = Array.new(@scorearray[0].count,0) + - nonzero = Array.new(@scorearray[0].count,0) + - full = Array.new(@scorearray[0].count,0) + - @scorearray.each do |sc| + %tr + - total,num_passed = 0,0 + - sc.each_index do |i| + - if i == 0 + %td= link_to sc[i].login, stat_user_path(sc[i]) + %td= sc[i].full_name + / %td= sc[i].activated + / %td= sc[i].try(:contest_stat).try(:started_at) ? 
'yes' : 'no' + / %td= sc[i].contests.collect {|c| c.name}.join(', ') + %td= sc[i].remark + - else + %td.text-right= sc[i][0] + - total += sc[i][0] + - num_passed += 1 if sc[i][1] + - sum[i] += sc[i][0] + - nonzero[i] += 1 if sc[i][0] > 0 + - full[i] += 1 if sc[i][1] + %td.text-right= total + %td.text-right= num_passed + %tfoot + %tr + %td Summation + %td + %td + - sum.each.with_index do |s,i| + - next if i == 0 + %td.text-right= number_with_delimiter(s) + %td + %td + %tr + %td partial solver + %td + %td + - nonzero.each.with_index do |s,i| + - next if i == 0 + %td.text-right= number_with_delimiter(s) + %td + %td + %tr + %td Full solver + %td + %td + - full.each.with_index do |s,i| + - next if i == 0 + %td.text-right= number_with_delimiter(s) + %td + %td + + +:javascript + $.bootstrapSortable(true,'reversed') diff --git a/app/views/report/_task_hof.html.haml b/app/views/report/_task_hof.html.haml --- a/app/views/report/_task_hof.html.haml +++ b/app/views/report/_task_hof.html.haml @@ -44,14 +44,14 @@ %tr %td.info_param Best Runtime %td - by #{link_to @best[:runtime][:user], controller:'users', action:'profile', id:@best[:memory][:user_id]} + by #{link_to @best[:runtime][:user], stat_user_path(@best[:runtime][:user_id])} %br using #{@best[:runtime][:lang]} %br with #{@best[:runtime][:value] * 1000} milliseconds %br at submission - = link_to("#" + @best[:runtime][:sub_id].to_s, controller: 'graders', action: 'submission', id:@best[:runtime][:sub_id]) + = link_to "#" + @best[:runtime][:sub_id].to_s, submission_path(@best[:runtime][:sub_id]) %tr %td.info_param @@ -62,39 +62,39 @@ title: "This counts only for submission with 100% score. Right now, java is excluded from memory usage competition. (Because it always uses 2GB memory...)"} [?] %td - by #{link_to @best[:memory][:user], controller:'users', action:'profile', id:@best[:memory][:user_id]} + by #{link_to @best[:memory][:user], stat_user_path(@best[:memory][:user_id])} %br using #{@best[:memory][:lang]} %br with #{number_with_delimiter(@best[:memory][:value])} kbytes %br at submission - = link_to("#" + @best[:memory][:sub_id].to_s, controller: 'graders' , action: 'submission', id:@best[:memory][:sub_id]) + = link_to "#" + @best[:memory][:sub_id].to_s, submission_path(@best[:memory][:sub_id]) %tr %td.info_param Shortest Code %td - by #{link_to @best[:length][:user], controller:'users', action:'profile', id:@best[:length][:user_id]} + by #{link_to @best[:length][:user], stat_user_path(@best[:length][:user_id])} %br using #{@best[:length][:lang]} %br with #{@best[:length][:value]} bytes %br at submission - = link_to("#" + @best[:length][:sub_id].to_s, controller: 'graders' , action: 'submission', id: @best[:length][:sub_id]) + = link_to "#" + @best[:length][:sub_id].to_s, submission_path(@best[:length][:sub_id]) %tr %td.info_param First solver %td - if @best[:first][:user] != '(NULL)' - #{link_to @best[:first][:user], controller:'users', action:'profile', id:@best[:first][:user_id]} is the first solver + #{link_to @best[:first][:user], stat_user_path(@best[:first][:user_id])} is the first solver %br using #{@best[:first][:lang]} %br on #{@best[:first][:value]} %br at submission - = link_to("#" + @best[:first][:sub_id].to_s, controller: 'graders' , action: 'submission', id: @best[:first][:sub_id]) + = link_to "#" + @best[:first][:sub_id].to_s, submission_path( @best[:first][:sub_id]) - else no first solver .col-md-8 @@ -113,24 +113,24 @@ %tr %td= lang %td - = link_to value[:runtime][:user], controller: 'users', action: 'profile', id: 
value[:runtime][:user_id] + = link_to value[:runtime][:user], stat_user_path(value[:runtime][:user_id]) %br - = "(#{(value[:runtime][:value] * 1000).to_i} @" - = "#{link_to("#" + value[:runtime][:sub_id].to_s, controller: 'graders' , action: 'submission', id: value[:runtime][:sub_id])} )".html_safe + = "#{(value[:runtime][:value] * 1000).to_i} @" + = link_to "#" + value[:runtime][:sub_id].to_s, submission_path( value[:runtime][:sub_id]) %td - = link_to value[:memory][:user], controller: 'users', action: 'profile', id: value[:memory][:user_id] + = link_to value[:memory][:user], stat_user_path( value[:memory][:user_id]) %br - = "(#{number_with_delimiter(value[:memory][:value])} @" - = "#{link_to("#" + value[:memory][:sub_id].to_s, controller: 'graders' , action: 'submission', id: value[:memory][:sub_id])} )".html_safe + = "#{number_with_delimiter(value[:memory][:value])} @" + = link_to "#" + value[:memory][:sub_id].to_s, submission_path(value[:memory][:sub_id]) %td - = link_to value[:length][:user], controller: 'users', action: 'profile', id: value[:length][:user_id] + = link_to value[:length][:user], stat_user_path(value[:length][:user_id]) %br - = "(#{value[:length][:value]} @" - = "#{link_to("#" + value[:length][:sub_id].to_s, controller: 'graders' , action: 'submission', id: value[:length][:sub_id])} )".html_safe + = "#{value[:length][:value]} @" + = link_to "#" + value[:length][:sub_id].to_s, submission_path(value[:length][:sub_id]) %td - if value[:first][:user] != '(NULL)' #TODO: i know... this is wrong... - = link_to value[:first][:user], controller: 'users', action: 'profile', id: value[:first][:user_id] + = link_to value[:first][:user], stat_user_path(value[:first][:user_id]) %br - = "(#{value[:first][:value]} @" - = "#{link_to("#" + value[:first][:sub_id].to_s, controller: 'graders' , action: 'submission', id: value[:first][:sub_id])} )".html_safe + = "#{value[:first][:value]} @" + = link_to "#" + value[:first][:sub_id].to_s, submission_path( value[:first][:sub_id]) diff --git a/app/views/report/current_score.html.haml b/app/views/report/current_score.html.haml new file mode 100644 --- /dev/null +++ b/app/views/report/current_score.html.haml @@ -0,0 +1,3 @@ +%h1 Current Score + += render "score_table" diff --git a/app/views/report/max_score.html.haml b/app/views/report/max_score.html.haml new file mode 100644 --- /dev/null +++ b/app/views/report/max_score.html.haml @@ -0,0 +1,49 @@ +%h1 Maximum score + += form_tag report_show_max_score_path +.row + .col-md-4 + .panel.panel-primary + .panel-heading + Problems + .panel-body + %p + Select problem(s) that we wish to know the score. + = label_tag :problem_id, "Problems" + = select_tag 'problem_id[]', + options_for_select(Problem.all.collect {|p| ["[#{p.name}] #{p.full_name}", p.id]},params[:problem_id]), + { class: 'select2 form-control', multiple: "true" } + .col-md-4 + .panel.panel-primary + .panel-heading + Submission range + .panel-body + %p + Input minimum and maximum range of submission ID that should be included. A blank value for min and max means -1 and infinity, respectively. 
+ .form-group + = label_tag :from, "Min" + = text_field_tag 'from_id', @since_id, class: "form-control" + .form-group + = label_tag :from, "Max" + = text_field_tag 'to_id', @until_id, class: "form-control" + .col-md-4 + .panel.panel-primary + .panel-heading + Users + .panel-body + .radio + %label + = radio_button_tag 'users', 'all', (params[:users] == "all") + All users + .radio + %label + = radio_button_tag 'users', 'enabled', (params[:users] == "enabled") + Only enabled users +.row + .col-md-12 + = button_tag 'Show', class: "btn btn-primary btn-large", value: "show" + = button_tag 'Download CSV', class: "btn btn-primary btn-large", value: "download" + +- if @scorearray + %h2 Result + =render "score_table" diff --git a/app/views/report/show_max_score.html.haml b/app/views/report/show_max_score.html.haml new file mode 100644 diff --git a/app/views/sites/edit.html.haml b/app/views/sites/edit.html.haml --- a/app/views/sites/edit.html.haml +++ b/app/views/sites/edit.html.haml @@ -1,24 +1,36 @@ %h1 Editing site = error_messages_for :site = form_for(@site) do |f| - %p - %b Name - %br/ - = f.text_field :name - %p - %b Password - %br/ - = f.text_field :password - %p - %b Started - %br/ - = f.check_box :started - %p - %b Start time - %br/ - = f.datetime_select :start_time, :include_blank => true - %p - = f.submit "Update" + .row + .col-md-4 + .form-group.field + = f.label :name, "Name" + = f.text_field :name, class: 'form-control' + .form-group.field + = f.label :password, "Password" + = f.text_field :password, class: 'form-control' + .form-group.field + = f.label :started, "Started" + = f.check_box :started, class: 'form-control' + .form-group.field + = f.label :start_time, "Start time" + -# = f.datetime_select :start_time, :include_blank => true + .input-group.date + = f.text_field :start_time, class:'form-control' , value: (@site.start_time ? 
@site.start_time.strftime('%d/%b/%Y %H:%M') : '') + %span.input-group-addon + %span.glyphicon.glyphicon-calendar + .actions + = f.submit "Update", class: 'btn btn-primary' + .col-md-8 + = link_to 'Show', @site | = link_to 'Back', sites_path + + +:javascript + $('.input-group.date').datetimepicker({ + format: 'DD/MMM/YYYY HH:mm', + showTodayButton: true, + }); + diff --git a/app/views/sources/direct_edit.html.haml b/app/views/sources/direct_edit.html.haml deleted file mode 100644 --- a/app/views/sources/direct_edit.html.haml +++ /dev/null @@ -1,264 +0,0 @@ -%h2 Live submit -%br - -%textarea#text_haha{style: "display:none"}~ @source -.container - .row - .col-md-12 - .alert.alert-info - Write your code in the following box, choose language, and click submit button when finished - .row - .col-md-8 - %div#editor{style: 'height: 500px; border-radius: 7px; font-size: 14px;'} - .col-md-4 - = form_tag({controller: :main, :action => 'submit'}, :multipart => true, class: 'form') do - - = hidden_field_tag 'editor_text', @source - = hidden_field_tag 'submission[problem_id]', @problem.id - .form-group - = label_tag "Task:" - = text_field_tag 'asdf', "#{@problem.long_name}", class: 'form-control', disabled: true - - .form-group - = label_tag 'Language' - = select_tag 'language_id', options_from_collection_for_select(Language.all, 'id', 'pretty_name', @lang_id || Language.find_by_pretty_name("Python").id || Language.first.id), class: 'form-control select', style: "width: 100px" - .form-group - = submit_tag 'Submit', class: 'btn btn-success', id: 'live_submit', - data: {confirm: "Submitting this source code for task #{@problem.long_name}?"} - .panel.panel-info - .panel-heading - Latest Submission Status - .panel-body - - if @submission - = render :partial => 'submission_short', - :locals => {:submission => @submission, :problem_name => @problem.name } - .row - .col-md-12 - %h2 Console - %textarea#console{style: 'height: 100%; width: 100%;background-color:#000;color:#fff;font-family: consolas, monaco, "Droid Sans Mono";',rows: 20} - -:javascript - $(document).ready(function() { - brython(); - }); - - -%script#__main__{type:'text/python3'} - :plain - import sys - import traceback - - from browser import document as doc - from browser import window, alert, console - - _credits = """ Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands - for supporting Python development. See www.python.org for more information.""" - - _copyright = """Copyright (c) 2012, Pierre Quentel pierre.quentel@gmail.com - All Rights Reserved. - - Copyright (c) 2001-2013 Python Software Foundation. - All Rights Reserved. - - Copyright (c) 2000 BeOpen.com. - All Rights Reserved. - - Copyright (c) 1995-2001 Corporation for National Research Initiatives. - All Rights Reserved. - - Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam. - All Rights Reserved.""" - - _license = """Copyright (c) 2012, Pierre Quentel pierre.quentel@gmail.com - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. Redistributions in binary - form must reproduce the above copyright notice, this list of conditions and - the following disclaimer in the documentation and/or other materials provided - with the distribution. 
- Neither the name of the nor the names of its contributors may - be used to endorse or promote products derived from this software without - specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - POSSIBILITY OF SUCH DAMAGE. - """ - - def credits(): - print(_credits) - credits.__repr__ = lambda:_credits - - def copyright(): - print(_copyright) - copyright.__repr__ = lambda:_copyright - - def license(): - print(_license) - license.__repr__ = lambda:_license - - def write(data): - doc['console'].value += str(data) - - - sys.stdout.write = sys.stderr.write = write - history = [] - current = 0 - _status = "main" # or "block" if typing inside a block - - # execution namespace - editor_ns = {'credits':credits, - 'copyright':copyright, - 'license':license, - '__name__':'__main__'} - - def cursorToEnd(*args): - pos = len(doc['console'].value) - doc['console'].setSelectionRange(pos, pos) - doc['console'].scrollTop = doc['console'].scrollHeight - - def get_col(area): - # returns the column num of cursor - sel = doc['console'].selectionStart - lines = doc['console'].value.split('\n') - for line in lines[:-1]: - sel -= len(line) + 1 - return sel - - - def myKeyPress(event): - global _status, current - if event.keyCode == 9: # tab key - event.preventDefault() - doc['console'].value += " " - elif event.keyCode == 13: # return - src = doc['console'].value - if _status == "main": - currentLine = src[src.rfind('>>>') + 4:] - elif _status == "3string": - currentLine = src[src.rfind('>>>') + 4:] - currentLine = currentLine.replace('\n... ', '\n') - else: - currentLine = src[src.rfind('...') + 4:] - if _status == 'main' and not currentLine.strip(): - doc['console'].value += '\n>>> ' - event.preventDefault() - return - doc['console'].value += '\n' - history.append(currentLine) - current = len(history) - if _status == "main" or _status == "3string": - try: - _ = editor_ns['_'] = eval(currentLine, editor_ns) - if _ is not None: - write(repr(_)+'\n') - doc['console'].value += '>>> ' - _status = "main" - except IndentationError: - doc['console'].value += '... ' - _status = "block" - except SyntaxError as msg: - if str(msg) == 'invalid syntax : triple string end not found' or \ - str(msg).startswith('Unbalanced bracket'): - doc['console'].value += '... ' - _status = "3string" - elif str(msg) == 'eval() argument must be an expression': - try: - exec(currentLine, editor_ns) - except: - traceback.print_exc() - doc['console'].value += '>>> ' - _status = "main" - elif str(msg) == 'decorator expects function': - doc['console'].value += '... 
' - _status = "block" - else: - traceback.print_exc() - doc['console'].value += '>>> ' - _status = "main" - except: - traceback.print_exc() - doc['console'].value += '>>> ' - _status = "main" - elif currentLine == "": # end of block - block = src[src.rfind('>>>') + 4:].splitlines() - block = [block[0]] + [b[4:] for b in block[1:]] - block_src = '\n'.join(block) - # status must be set before executing code in globals() - _status = "main" - try: - _ = exec(block_src, editor_ns) - if _ is not None: - print(repr(_)) - except: - traceback.print_exc() - doc['console'].value += '>>> ' - else: - doc['console'].value += '... ' - - cursorToEnd() - event.preventDefault() - - def myKeyDown(event): - global _status, current - if event.keyCode == 37: # left arrow - sel = get_col(doc['console']) - if sel < 5: - event.preventDefault() - event.stopPropagation() - elif event.keyCode == 36: # line start - pos = doc['console'].selectionStart - col = get_col(doc['console']) - doc['console'].setSelectionRange(pos - col + 4, pos - col + 4) - event.preventDefault() - elif event.keyCode == 38: # up - if current > 0: - pos = doc['console'].selectionStart - col = get_col(doc['console']) - # remove current line - doc['console'].value = doc['console'].value[:pos - col + 4] - current -= 1 - doc['console'].value += history[current] - event.preventDefault() - elif event.keyCode == 40: # down - if current < len(history) - 1: - pos = doc['console'].selectionStart - col = get_col(doc['console']) - # remove current line - doc['console'].value = doc['console'].value[:pos - col + 4] - current += 1 - doc['console'].value += history[current] - event.preventDefault() - elif event.keyCode == 8: # backspace - src = doc['console'].value - lstart = src.rfind('\n') - if (lstart == -1 and len(src) < 5) or (len(src) - lstart < 6): - event.preventDefault() - event.stopPropagation() - - - doc['console'].bind('keypress', myKeyPress) - doc['console'].bind('keydown', myKeyDown) - doc['console'].bind('click', cursorToEnd) - v = sys.implementation.version - doc['console'].value = "Brython %s.%s.%s on %s %s\n>>> " % ( - v[0], v[1], v[2], window.navigator.appName, window.navigator.appVersion) - #doc['console'].value += 'Type "copyright", "credits" or "license" for more information.' - doc['console'].focus() - cursorToEnd() - - - - diff --git a/app/views/sources/get_latest_submission_status.js.haml b/app/views/sources/get_latest_submission_status.js.haml deleted file mode 100644 --- a/app/views/sources/get_latest_submission_status.js.haml +++ /dev/null @@ -1,2 +0,0 @@ -:javascript - $("#latest_status").html("#{j render({partial: 'submission_short', locals: {submission: @submission, problem_name: @problem.name}})}") diff --git a/app/views/submissions/_form.html.haml b/app/views/submissions/_form.html.haml new file mode 100644 --- /dev/null +++ b/app/views/submissions/_form.html.haml @@ -0,0 +1,10 @@ += form_for @submission do |f| + - if @submission.errors.any? 
+ #error_explanation + %h2= "#{pluralize(@submission.errors.count, "error")} prohibited this submission from being saved:" + %ul + - @submission.errors.full_messages.each do |msg| + %li= msg + + .actions + = f.submit 'Save' diff --git a/app/views/submissions/compiler_msg.js.haml b/app/views/submissions/compiler_msg.js.haml new file mode 100644 --- /dev/null +++ b/app/views/submissions/compiler_msg.js.haml @@ -0,0 +1,4 @@ +:plain + $("#compiler_msg").html("#{j @submission.compiler_message}"); + $("#compiler").modal(); + diff --git a/app/views/submissions/edit.html.haml b/app/views/submissions/edit.html.haml new file mode 100644 --- /dev/null +++ b/app/views/submissions/edit.html.haml @@ -0,0 +1,272 @@ +%h2 Live submit +%br + +%textarea#text_sourcecode{style: "display:none"}~ @source +.container + .row + .col-md-12 + .alert.alert-info + Write your code in the following box, choose language, and click submit button when finished + .row + .col-md-8 + %div#editor{style: 'height: 500px; border-radius: 7px; font-size: 14px;'} + .col-md-4 + - # submission form + = form_tag({controller: :main, :action => 'submit'}, :multipart => true, class: 'form') do + + = hidden_field_tag 'editor_text', @source + = hidden_field_tag 'submission[problem_id]', @problem.id + .form-group + = label_tag "Task:" + = text_field_tag 'asdf', "#{@problem.long_name}", class: 'form-control', disabled: true + + .form-group + = label_tag 'Language' + = select_tag 'language_id', options_from_collection_for_select(Language.all, 'id', 'pretty_name', @lang_id || Language.find_by_pretty_name("Python").id || Language.first.id), class: 'form-control select', style: "width: 100px" + .form-group + = submit_tag 'Submit', class: 'btn btn-success', id: 'live_submit', + data: {confirm: "Submitting this source code for task #{@problem.long_name}?"} + - # latest submission status + .panel{class: (@submission && @submission.graded_at) ? "panel-info" : "panel-warning"} + .panel-heading + Latest Submission Status + = link_to "Refresh",get_latest_submission_status_submissions_path(@submission.user,@problem), class: "btn btn-default btn-sm", remote: true if @submission + .panel-body + %div#latest_status + - if @submission + = render :partial => 'submission_short', + :locals => {submission: @submission, problem_name: @problem.name, problem_id: @problem.id } + .row + .col-md-12 + %h2 Console + %textarea#console{style: 'height: 100%; width: 100%;background-color:#000;color:#fff;font-family: consolas, monaco, "Droid Sans Mono";',rows: 20} + +:javascript + $(document).ready(function() { + e = ace.edit("editor") + e.setValue($("#text_sourcecode").val()); + e.gotoLine(1); + $("#language_id").trigger('change'); + brython(); + }); + + +%script#__main__{type:'text/python3'} + :plain + import sys + import traceback + + from browser import document as doc + from browser import window, alert, console + + _credits = """ Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands + for supporting Python development. See www.python.org for more information.""" + + _copyright = """Copyright (c) 2012, Pierre Quentel pierre.quentel@gmail.com + All Rights Reserved. + + Copyright (c) 2001-2013 Python Software Foundation. + All Rights Reserved. + + Copyright (c) 2000 BeOpen.com. + All Rights Reserved. + + Copyright (c) 1995-2001 Corporation for National Research Initiatives. + All Rights Reserved. + + Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam. 
+ All Rights Reserved.""" + + _license = """Copyright (c) 2012, Pierre Quentel pierre.quentel@gmail.com + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. Redistributions in binary + form must reproduce the above copyright notice, this list of conditions and + the following disclaimer in the documentation and/or other materials provided + with the distribution. + Neither the name of the nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + """ + + def credits(): + print(_credits) + credits.__repr__ = lambda:_credits + + def copyright(): + print(_copyright) + copyright.__repr__ = lambda:_copyright + + def license(): + print(_license) + license.__repr__ = lambda:_license + + def write(data): + doc['console'].value += str(data) + + + sys.stdout.write = sys.stderr.write = write + history = [] + current = 0 + _status = "main" # or "block" if typing inside a block + + # execution namespace + editor_ns = {'credits':credits, + 'copyright':copyright, + 'license':license, + '__name__':'__main__'} + + def cursorToEnd(*args): + pos = len(doc['console'].value) + doc['console'].setSelectionRange(pos, pos) + doc['console'].scrollTop = doc['console'].scrollHeight + + def get_col(area): + # returns the column num of cursor + sel = doc['console'].selectionStart + lines = doc['console'].value.split('\n') + for line in lines[:-1]: + sel -= len(line) + 1 + return sel + + + def myKeyPress(event): + global _status, current + if event.keyCode == 9: # tab key + event.preventDefault() + doc['console'].value += " " + elif event.keyCode == 13: # return + src = doc['console'].value + if _status == "main": + currentLine = src[src.rfind('>>>') + 4:] + elif _status == "3string": + currentLine = src[src.rfind('>>>') + 4:] + currentLine = currentLine.replace('\n... ', '\n') + else: + currentLine = src[src.rfind('...') + 4:] + if _status == 'main' and not currentLine.strip(): + doc['console'].value += '\n>>> ' + event.preventDefault() + return + doc['console'].value += '\n' + history.append(currentLine) + current = len(history) + if _status == "main" or _status == "3string": + try: + _ = editor_ns['_'] = eval(currentLine, editor_ns) + if _ is not None: + write(repr(_)+'\n') + doc['console'].value += '>>> ' + _status = "main" + except IndentationError: + doc['console'].value += '... 
' + _status = "block" + except SyntaxError as msg: + if str(msg) == 'invalid syntax : triple string end not found' or \ + str(msg).startswith('Unbalanced bracket'): + doc['console'].value += '... ' + _status = "3string" + elif str(msg) == 'eval() argument must be an expression': + try: + exec(currentLine, editor_ns) + except: + traceback.print_exc() + doc['console'].value += '>>> ' + _status = "main" + elif str(msg) == 'decorator expects function': + doc['console'].value += '... ' + _status = "block" + else: + traceback.print_exc() + doc['console'].value += '>>> ' + _status = "main" + except: + traceback.print_exc() + doc['console'].value += '>>> ' + _status = "main" + elif currentLine == "": # end of block + block = src[src.rfind('>>>') + 4:].splitlines() + block = [block[0]] + [b[4:] for b in block[1:]] + block_src = '\n'.join(block) + # status must be set before executing code in globals() + _status = "main" + try: + _ = exec(block_src, editor_ns) + if _ is not None: + print(repr(_)) + except: + traceback.print_exc() + doc['console'].value += '>>> ' + else: + doc['console'].value += '... ' + + cursorToEnd() + event.preventDefault() + + def myKeyDown(event): + global _status, current + if event.keyCode == 37: # left arrow + sel = get_col(doc['console']) + if sel < 5: + event.preventDefault() + event.stopPropagation() + elif event.keyCode == 36: # line start + pos = doc['console'].selectionStart + col = get_col(doc['console']) + doc['console'].setSelectionRange(pos - col + 4, pos - col + 4) + event.preventDefault() + elif event.keyCode == 38: # up + if current > 0: + pos = doc['console'].selectionStart + col = get_col(doc['console']) + # remove current line + doc['console'].value = doc['console'].value[:pos - col + 4] + current -= 1 + doc['console'].value += history[current] + event.preventDefault() + elif event.keyCode == 40: # down + if current < len(history) - 1: + pos = doc['console'].selectionStart + col = get_col(doc['console']) + # remove current line + doc['console'].value = doc['console'].value[:pos - col + 4] + current += 1 + doc['console'].value += history[current] + event.preventDefault() + elif event.keyCode == 8: # backspace + src = doc['console'].value + lstart = src.rfind('\n') + if (lstart == -1 and len(src) < 5) or (len(src) - lstart < 6): + event.preventDefault() + event.stopPropagation() + + + doc['console'].bind('keypress', myKeyPress) + doc['console'].bind('keydown', myKeyDown) + doc['console'].bind('click', cursorToEnd) + v = sys.implementation.version + doc['console'].value = "Brython %s.%s.%s on %s %s\n>>> " % ( + v[0], v[1], v[2], window.navigator.appName, window.navigator.appVersion) + #doc['console'].value += 'Type "copyright", "credits" or "license" for more information.' 
+ doc['console'].focus() + cursorToEnd() + + + + diff --git a/app/views/submissions/get_latest_submission_status.js.haml b/app/views/submissions/get_latest_submission_status.js.haml new file mode 100644 --- /dev/null +++ b/app/views/submissions/get_latest_submission_status.js.haml @@ -0,0 +1,2 @@ +:plain + $("#latest_status").html("#{j render({partial: 'submission_short', locals: {submission: @submission, problem_name: @problem.name, problem_id: @problem.id}})}") diff --git a/app/views/submissions/index.html.haml b/app/views/submissions/index.html.haml new file mode 100644 --- /dev/null +++ b/app/views/submissions/index.html.haml @@ -0,0 +1,29 @@ +.panel.panel-info + .panel-heading + Select Problems + .panel-body + .form-inline + = select 'submission', + 'problem_id', + @problems.collect {|p| ["[#{p.name}] #{p.full_name}", problem_submissions_url(p.id)]}, + { selected: (@problem ? problem_submissions_url(@problem) : -1) }, + { class: 'select2 form-control'} + %button.btn.btn-primary.btn-sm.go-button#problem_go{data: {source: '#submission_problem_id'}} Go + +- if @problem!=nil + %h2= "Task: #{@problem.full_name} (#{@problem.name})" + +- if @submissions!=nil + - if @submissions.length>0 + %table.table + %thead + %th No. + %th.text-right # + %th At + %th Source + %th Result + %th{:width => "300px"} Compiler message + %th + = render :partial => 'submission', :collection => @submissions + - else + No submission diff --git a/app/views/submissions/new.html.haml b/app/views/submissions/new.html.haml new file mode 100644 --- /dev/null +++ b/app/views/submissions/new.html.haml @@ -0,0 +1,5 @@ +%h1 New submission + += render 'form' + += link_to 'Back', submissions_path diff --git a/app/views/submissions/rejudge.js.haml b/app/views/submissions/rejudge.js.haml new file mode 100644 --- /dev/null +++ b/app/views/submissions/rejudge.js.haml @@ -0,0 +1,2 @@ +:plain + $("body").prepend("
Submission #{@submission.id}'s task status has been changed to \"#{@task.status_str}\". It will be re-judged soon.
") diff --git a/app/views/submissions/show.html.haml b/app/views/submissions/show.html.haml new file mode 100644 --- /dev/null +++ b/app/views/submissions/show.html.haml @@ -0,0 +1,112 @@ +%h1= "Submission: #{@submission.id}" + +%textarea#data{style: "display:none;"} + :preserve + #{@submission.source} + +//%div.highlight{:style => "border: 1px solid black;"} +//=@formatted_code.html_safe + + +.containter + .row + .col-md-7 + %h2 Source Code + .col-md-5 + %h2 Stat + .row + .col-md-7 + %div#editor{ style: "font-size: 14px; height: 400px; border-radius:5px;" } + :javascript + e = ace.edit("editor") + e.setOptions({ maxLines: Infinity }) + e.setValue($("#data").text()) + e.gotoLine(1) + e.getSession().setMode("#{get_ace_mode(@submission.language)}") + e.setReadOnly(true) + .col-md-5 + %table.table.table-striped + %tr + %td.text-right + %strong User + %td + - if @submission.user + = link_to "#{@submission.user.login}", stat_user_path(@submission.user) + = @submission.user.full_name + - else + = "(n/a)" + %tr + %td.text-right + %strong Task + %td + - if @submission.problem!=nil + = link_to "[#{@submission.problem.name}]", stat_problem_path(@submission.problem) + = @submission.problem.full_name + - else + = "(n/a)" + %tr + %td.text-right + %strong Tries + %td= @submission.number + %tr + %td.text-right + %strong Language + %td= @submission.language.pretty_name + %tr + %td.text-right + %strong Submitted + %td #{time_ago_in_words(@submission.submitted_at)} ago (at #{@submission.submitted_at.to_formatted_s(:long)}) + %tr + %td.text-right + %strong Graded + - if @submission.graded_at + %td #{time_ago_in_words(@submission.graded_at)} ago (at #{@submission.graded_at.to_formatted_s(:long)}) + - else + %td - + %tr + %td.text-right + %strong Points + %td #{@submission.points}/#{@submission.try(:problem).try(:full_score)} + %tr + %td.text-right + %strong Comment + %td #{@submission.grader_comment} + %tr + %td.text-right + %strong Runtime (s) + %td #{@submission.max_runtime} + %tr + %td.text-right + %strong Memory (kb) + %td #{@submission.peak_memory} + %tr + %td.text-right + %strong Compiler result + %td + %button.btn.btn-info.btn-xs{type: 'button', data: {toggle: 'modal', target: '#compiler'}} + view + - if session[:admin] + %tr + %td.text-right + %strong IP + %td #{@submission.ip_address} + %tr + %td.text-right + %strong Grading Task Status + %td + = @task.status_str if @task + - if session[:admin] + = link_to "rejudge", rejudge_submission_path, data: {remote: true}, class: 'btn btn-info btn-xs' + + +.modal.fade#compiler{tabindex: -1,role: 'dialog'} + .modal-dialog.modal-lg{role:'document'} + .modal-content + .modal-header + %button.close{type: 'button', data: {dismissed: :modal}, aria: {label: 'close'}} + %span{aria: {hidden: 'true'}, data: {dismiss: 'modal'}} × + %h4 Compiler message + .modal-body + %pre#compiler_msg= @submission.compiler_message + .modal-footer + %button.btn.btn-default{type: 'button', data: {dismiss: 'modal'}} Close diff --git a/app/views/tags/_form.html.haml b/app/views/tags/_form.html.haml new file mode 100644 --- /dev/null +++ b/app/views/tags/_form.html.haml @@ -0,0 +1,22 @@ += form_for @tag do |f| + - if @tag.errors.any? 
+ #error_explanation + %h2= "#{pluralize(@tag.errors.count, "error")} prohibited this tag from being saved:" + %ul + - @tag.errors.full_messages.each do |msg| + %li= msg + + .row + .col-md-6 + .form-group.field + = f.label :name + = f.text_field :name, class: 'form-control' + .form-group.field + = f.label :description + = f.text_area :description, class: 'form-control' + .form-group.field + = f.label :public + = f.text_field :public, class: 'form-control' + .actions + = f.submit 'Save', class: 'btn btn-primary' + .col-md-6 diff --git a/app/views/tags/edit.html.haml b/app/views/tags/edit.html.haml new file mode 100644 --- /dev/null +++ b/app/views/tags/edit.html.haml @@ -0,0 +1,7 @@ +%h1 Editing tag + += render 'form' + += link_to 'Show', @tag +\| += link_to 'Back', tags_path diff --git a/app/views/tags/index.html.haml b/app/views/tags/index.html.haml new file mode 100644 --- /dev/null +++ b/app/views/tags/index.html.haml @@ -0,0 +1,26 @@ +%h1 Tags + += link_to 'New Tag', new_tag_path, class: 'btn btn-success' + +%table.table.table-hover + %thead + %tr + %th Name + %th Description + %th Public + %th + %th + %th + + %tbody + - @tags.each do |tag| + %tr + %td= tag.name + %td= tag.description + %td= tag.public + %td= link_to 'Show', tag + %td= link_to 'Edit', edit_tag_path(tag) + %td= link_to 'Destroy', tag, :method => :delete, :data => { :confirm => 'Are you sure?' } + +%br + diff --git a/app/views/tags/new.html.haml b/app/views/tags/new.html.haml new file mode 100644 --- /dev/null +++ b/app/views/tags/new.html.haml @@ -0,0 +1,5 @@ +%h1 New tag + += render 'form' + += link_to 'Back', tags_path diff --git a/app/views/tags/show.html.haml b/app/views/tags/show.html.haml new file mode 100644 --- /dev/null +++ b/app/views/tags/show.html.haml @@ -0,0 +1,15 @@ +%p#notice= notice + +%p + %b Name: + = @tag.name +%p + %b Description: + = @tag.description +%p + %b Public: + = @tag.public + += link_to 'Edit', edit_tag_path(@tag) +\| += link_to 'Back', tags_path diff --git a/app/views/testcases/show_problem.html.haml b/app/views/testcases/show_problem.html.haml new file mode 100644 --- /dev/null +++ b/app/views/testcases/show_problem.html.haml @@ -0,0 +1,25 @@ +%h1 Test cases +%h2= @problem.long_name + +/navbar +%ul.nav.nav-pills{role: :tablist} + - @problem.testcases.each.with_index do |tc,id| + %li{role: :presentation, class: ('active' if id == 0)} + %a{href:"#tc#{tc.id}", role: 'tab', data: {toggle: 'tab'}}= tc.num + +/actual data +.tab-content + - @problem.testcases.each.with_index do |tc,id| + .tab-pane{id: "tc#{tc.id}",class: ('active' if id == 0)} + .row + .col-md-6 + %h3 Input + = link_to "Download",download_input_testcase_path(tc),class: 'btn btn-info btn-sm' + .col-md-6 + %h3 Output + = link_to "Download",download_sol_testcase_path(tc),class: 'btn btn-info btn-sm' + .row + .col-md-6 + %textarea{ rows: 25,readonly: true,style: "width:100%;resize=none;overflow-y: scroll;"}= tc.input + .col-md-6 + %textarea{ rows: 25,readonly: true,style: "width:100%;resize=none;overflow-y: scroll;"}= tc.sol diff --git a/app/views/user_admin/_form.html.erb b/app/views/user_admin/_form.html.erb deleted file mode 100644 --- a/app/views/user_admin/_form.html.erb +++ /dev/null @@ -1,25 +0,0 @@ -<%= error_messages_for 'user' %> - - -


-<%= text_field 'user', 'login' %>
-
-<%= text_field 'user', 'full_name' %>
-
-<%= password_field 'user', 'password' %>
-
-<%= password_field 'user', 'password_confirmation' %>
-
-<%= email_field 'user', 'email' %>
-
-<%= text_field 'user', 'alias' %>
-
-<%= text_field 'user', 'remark' %>

- - diff --git a/app/views/user_admin/_form.html.haml b/app/views/user_admin/_form.html.haml new file mode 100644 --- /dev/null +++ b/app/views/user_admin/_form.html.haml @@ -0,0 +1,38 @@ += error_messages_for 'user' +/ [form:user] +.form-group + %label.col-md-2.control-label{for: :login} Login + .col-md-4 + = text_field 'user', 'login', class: 'form-control' + .col-md-6 +.form-group + %label.col-md-2.control-label{for: :full_name} Full name + .col-md-4 + = text_field 'user', 'full_name', class: 'form-control' + .col-md-6 +.form-group + %label.col-md-2.control-label{for: :password} Password + .col-md-4 + = password_field 'user', 'password', class: 'form-control' + .col-md-6 +.form-group + %label.col-md-2.control-label{for: :password_confirmation} Password (confirm) + .col-md-4 + = password_field 'user', 'password_confirmation', class: 'form-control' + .col-md-6 +.form-group + %label.col-md-2.control-label{for: :email} E-mail + .col-md-4 + = email_field 'user', 'email', class: 'form-control' + .col-md-6 +.form-group + %label.col-md-2.control-label{for: :alias} Alias + .col-md-4 + = text_field 'user', 'alias', class: 'form-control' + .col-md-6 +.form-group + %label.col-md-2.control-label{for: :remark} Remark + .col-md-4 + = text_field 'user', 'remark', class: 'form-control' + .col-md-6 +/ [eoform:user] diff --git a/app/views/user_admin/bulk_manage.html.haml b/app/views/user_admin/bulk_manage.html.haml new file mode 100644 --- /dev/null +++ b/app/views/user_admin/bulk_manage.html.haml @@ -0,0 +1,86 @@ +%h1 Bulk Manage User + += form_tag bulk_manage_user_admin_path +.row + .col-md-6 + .panel.panel-primary + .panel-title.panel-heading + Filter User + .panel-body + Filtering users whose login match the following MySQL regex + .form-group + = label_tag "regex", 'Regex Pattern' + = text_field_tag "regex", params[:regex], class: 'form-control' + %p + Example + %ul + %li + %code root + matches every user whose login contains "root" + %li + %code ^56 + matches every user whose login starts with "56" + %li + %code 21$ + matches every user whose login ends with "21" + .col-md-6 + .panel.panel-primary + .panel-title.panel-heading + Action + .panel-body + .row.form-group + .col-md-6 + %label.checkbox-inline + = check_box_tag "enabled", true, params[:enabled] + Change "Enabled" to + .col-md-3 + %label.radio-inline + = radio_button_tag "enable", 1, params[:enable] == '1', id: 'enable-yes' + Yes + .col-md-3 + %label.radio-inline + = radio_button_tag "enable", 0, params[:enable] == '0', id: 'enable-no' + No + .row.form-group + .col-md-6 + %label.checkbox-inline + = check_box_tag "gen_password", true, params[:gen_password] + Generate new random password + .row.form-group + .col-md-4 + %label.checkbox-inline + = check_box_tag "add_group", true, params[:add_group] + Add users to group + %label.col-md-3.control-label.text-right Group name + .col-md-5 + = select_tag "group_name", options_from_collection_for_select( Group.all, 'id','name',params[:group_name]), id: 'group_name',class: 'form-control select2' + + +.row + .col-md-12 + = submit_tag "Preview Result", class: 'btn btn-default' +- if @users + .row + .col-md-4 + - if @action + %h2 Confirmation + - if @action[:set_enable] + .alert.alert-info The following users will be set #{(@action[:enabled] ? 'enable' : 'disable')}. + - if @action[:gen_password] + .alert.alert-info The password of the following users will be randomly generated. 
+ .row + .col-md-4 + = submit_tag "Perform", class: 'btn btn-primary' + .row + .col-md-12 + The pattern matches #{@users.count} following users. + %br + - @users.each do |user| + = user.login + = ' ' + = user.full_name + = ' ' + = "(#{user.remark})" if user.remark + %br + + diff --git a/app/views/user_admin/edit.html.haml b/app/views/user_admin/edit.html.haml --- a/app/views/user_admin/edit.html.haml +++ b/app/views/user_admin/edit.html.haml @@ -1,9 +1,11 @@ %h1 Editing user -= form_tag :action => 'update', :id => @user do += form_tag( {:action => 'update', :id => @user}, {class: 'form-horizontal'}) do = error_messages_for 'user' = render partial: "form" - = submit_tag "Edit" + .form-group + .col-md-offset-2.col-md-4 + = submit_tag "Edit", class: 'btn btn-primary' = link_to 'Show', :action => 'show', :id => @user diff --git a/app/views/user_admin/index.html.haml b/app/views/user_admin/index.html.haml --- a/app/views/user_admin/index.html.haml +++ b/app/views/user_admin/index.html.haml @@ -1,4 +1,4 @@ -%h1 Listing users +%h1 Users .panel.panel-primary .panel-title.panel-heading @@ -41,6 +41,7 @@ %p = link_to '+ New user', { :action => 'new' }, { class: 'btn btn-success '} = link_to '+ New list of users', { :action => 'new_list' }, { class: 'btn btn-success '} + = link_to 'Bulk Manage', bulk_manage_user_admin_path , { class: 'btn btn-default btn-info'} = link_to 'View administrators',{ :action => 'admin'}, { class: 'btn btn-default '} = link_to 'Random passwords',{ :action => 'random_all_passwords'}, { class: 'btn btn-default '} = link_to 'View active users',{ :action => 'active'}, { class: 'btn btn-default '} @@ -55,17 +56,17 @@ = link_to "[#{contest.name}]", :action => 'contests', :id => contest.id = link_to "[no contest]", :action => 'contests', :id => 'none' -Total #{@user_count} users | -- if !@paginated - Display all users. - \#{link_to '[show in pages]', :action => 'index', :page => '1'} -- else - Display in pages. - \#{link_to '[display all]', :action => 'index', :page => 'all'} | - \#{will_paginate @users, :container => false} +-# Total #{@user_count} users | +-# - if !@paginated +-# Display all users. +-# \#{link_to '[show in pages]', :action => 'index', :page => '1'} +-# - else +-# Display in pages. 
+-# \#{link_to '[display all]', :action => 'index', :page => 'all'} | +-# \#{will_paginate @users, :container => false} -%table.table.table-hover.table-condense +%table.table.table-hover.table-condense.datatable %thead %th Login %th Full name @@ -84,17 +85,22 @@ %th - for user in @users %tr - %td= link_to user.login, controller: :users, :action => 'profile', :id => user + %td= link_to user.login, stat_user_path(user) %td= user.full_name %td= user.email %td= user.remark - %td= toggle_button(user.activated?, toggle_activate_user_url(user),"toggle_activate_user_#{user.id}") - %td= toggle_button(user.enabled?, toggle_enable_user_url(user),"toggle_enable_user_#{user.id}") + %td= toggle_button(user.activated?, toggle_activate_user_path(user),"toggle_activate_user_#{user.id}") + %td= toggle_button(user.enabled?, toggle_enable_user_path(user),"toggle_enable_user_#{user.id}") %td= user.last_ip %td= link_to 'Clear IP', {:action => 'clear_last_ip', :id => user, :page=>params[:page]}, :confirm => 'This will reset last logging in ip of the user, are you sure?', class: 'btn btn-default btn-xs btn-block' %td= link_to 'Show', {:action => 'show', :id => user}, class: 'btn btn-default btn-xs btn-block' %td= link_to 'Edit', {:action => 'edit', :id => user}, class: 'btn btn-default btn-xs btn-block' - %td= link_to 'Destroy', { :action => 'destroy', :id => user }, :confirm => 'Are you sure?', :method => :post, class: 'btn btn-danger btn-xs btn-block' + %td= link_to 'Destroy', user_admin_destroy_path(user), data: {confirm: 'Are you sure?'}, method: :delete, class: 'btn btn-danger btn-xs btn-block' %br/ = link_to '+ New user', { :action => 'new' }, { class: 'btn btn-success '} = link_to '+ New list of users', { :action => 'new_list' }, { class: 'btn btn-success '} + +:javascript + $('.datatable').DataTable({ + 'pageLength': 50 + }); diff --git a/app/views/user_admin/new.html.erb b/app/views/user_admin/new.html.erb deleted file mode 100644 --- a/app/views/user_admin/new.html.erb +++ /dev/null @@ -1,8 +0,0 @@ -

-<h1>New user</h1>

- -<%= form_tag :action => 'create' do %> - <%= render :partial => 'form' %> - <%= submit_tag "Create" %> -<% end %> - -<%= link_to 'Back', :action => 'list' %> diff --git a/app/views/user_admin/new.html.haml b/app/views/user_admin/new.html.haml new file mode 100644 --- /dev/null +++ b/app/views/user_admin/new.html.haml @@ -0,0 +1,7 @@ +%h1 New user += form_tag( {action: 'create'}, { class: 'form-horizontal'}) do + = render :partial => 'form' + .form-group + .col-md-offset-2.col-md-10 + = submit_tag "Create", class: 'btn btn-primary' += link_to 'Back', :action => 'index' diff --git a/app/views/user_admin/new_list.html.erb b/app/views/user_admin/new_list.html.erb --- a/app/views/user_admin/new_list.html.erb +++ b/app/views/user_admin/new_list.html.erb @@ -1,8 +1,9 @@

 <h1>Adding list of users</h1>

<%= form_tag :action => 'create_from_list' do %> -<%= submit_tag 'create users' %>
-List of user information in this format: user_id,name(,passwd(,alias))
-Note that passwd and alias is optional.
+ <%= submit_tag 'create users', class: 'btn btn-success' %>
+List of user information in this format: user_id,name(,passwd(,alias(,remark)))
+Note that passwd, alias and remark are optional.
+When passwd or alias is empty, the original value will be used instead.
<%= text_area_tag 'user_list', nil, :rows => 50, :cols => 80 %> <% end %> diff --git a/app/views/user_admin/show.html.erb b/app/views/user_admin/show.html.erb deleted file mode 100644 --- a/app/views/user_admin/show.html.erb +++ /dev/null @@ -1,10 +0,0 @@ -

-<h1>User information</h1>

-<% for column in User.content_columns %>
-  <%= column.human_name %>: <%=h @user.send(column.name) %>
-<% end %> - -<%= link_to 'Edit', :action => 'edit', :id => @user %> | -<%= link_to 'Back', :action => 'list' %> diff --git a/app/views/user_admin/show.html.haml b/app/views/user_admin/show.html.haml new file mode 100644 --- /dev/null +++ b/app/views/user_admin/show.html.haml @@ -0,0 +1,14 @@ +%h1 User information +- for column in User.content_columns + %p + %b + = column.human_name + \: + = h @user.send(column.name) +%p + %strong Group + \: + = @user.groups.map{ |x| link_to(x.name,group_path(x)).html_safe}.join(', ').html_safe += link_to 'Edit', :action => 'edit', :id => @user +| += link_to 'Back', :action => 'index' diff --git a/app/views/user_admin/user_stat.html.haml b/app/views/user_admin/user_stat.html.haml --- a/app/views/user_admin/user_stat.html.haml +++ b/app/views/user_admin/user_stat.html.haml @@ -48,7 +48,7 @@ %td= link_to sc[i].login, controller: 'users', action: 'profile', id: sc[i] %td= sc[i].full_name %td= sc[i].activated - %td= sc[i].try(:contest_stat).try(:started_at)!=nil ? 'yes' : 'no' + %td= sc[i].try(:contest_stat).try(:started_at) ? 'yes' : 'no' %td= sc[i].contests.collect {|c| c.name}.join(', ') %td= sc[i].remark - else diff --git a/app/views/users/profile.html.haml b/app/views/users/stat.html.haml rename from app/views/users/profile.html.haml rename to app/views/users/stat.html.haml --- a/app/views/users/profile.html.haml +++ b/app/views/users/stat.html.haml @@ -36,7 +36,7 @@ =render partial: 'application/bar_graph', locals: {histogram: @histogram, param: {bar_width: 7}} -%table.tablesorter-cafe#submission_table +%table#submission_table.table.table-striped %thead %tr %th ID @@ -52,8 +52,8 @@ - @submission.each do |s| - next unless s.problem %tr - %td= link_to "#{s.id}", controller: "graders", action: "submission", id: s.id - %td= link_to s.problem.name, controller: "problems", action: "stat", id: s.problem + %td= link_to s.id, submission_path(s) + %td= link_to s.problem.name, stat_problem_path(s.problem) %td= s.problem.full_name %td= s.language.pretty_name %td #{s.submitted_at.strftime('%Y-%m-%d %H:%M')} (#{time_ago_in_words(s.submitted_at)} ago) @@ -64,3 +64,7 @@ +:javascript + $("#submission_table").DataTable({ + paging: false + }); diff --git a/config/application.rb.SAMPLE b/config/application.rb.SAMPLE --- a/config/application.rb.SAMPLE +++ b/config/application.rb.SAMPLE @@ -47,23 +47,25 @@ # like if you have constraints or database-specific column types # config.active_record.schema_format = :sql - # Enforce whitelist mode for mass assignment. - # This will create an empty whitelist of attributes available for mass-assignment for all models - # in your app. As such, your models will need to explicitly whitelist or blacklist accessible - # parameters by using an attr_accessible or attr_protected declaration. 
-  config.active_record.whitelist_attributes = false
-
   # Enable the asset pipeline
   config.assets.enabled = true
   # Version of your assets, change this if you want to expire all your assets
   config.assets.version = '1.0'
+  # ---------------- IMPORTANT ----------------------
+  # If we deploy the app into a subdir named "grader", be sure to do "rake assets:precompile RAILS_RELATIVE_URL_ROOT=/grader"
+  # moreover, using the following line instead is also known to work
+  #config.action_controller.relative_url_root = '/grader'
+
+  #font path
+  config.assets.paths << "#{Rails.root}/vendor/assets/fonts"
+
   config.assets.precompile += ['announcement_refresh.js','effects.js','site_update.js']
   config.assets.precompile += ['local_jquery.js','tablesorter-theme.cafe.css']
-  %w( announcements configurations contests contest_management graders heartbeat
+  %w( announcements submissions configurations contests contest_management graders heartbeat
     login main messages problems report site sites sources tasks
-    test user_admin users ).each do |controller|
+    test user_admin users testcases).each do |controller|
     config.assets.precompile += ["#{controller}.js", "#{controller}.css"]
   end
 end
diff --git a/config/environments/development.rb b/config/environments/development.rb
--- a/config/environments/development.rb
+++ b/config/environments/development.rb
@@ -6,8 +6,8 @@
   # since you don't have to restart the web server when you make code changes.
   config.cache_classes = false
-  # Log error messages when you accidentally call methods on nil.
-  config.whiny_nils = true
+  # Log error messages when you accidentally call methods on nil. //DEPRECATED
+  # config.whiny_nils = true // DEPRECATED
   # Show full error reports and disable caching
   config.consider_all_requests_local = true
@@ -23,11 +23,11 @@
   config.action_dispatch.best_standards_support = :builtin
   # Raise exception on mass assignment protection for Active Record models
-  config.active_record.mass_assignment_sanitizer = :strict
+  # config.active_record.mass_assignment_sanitizer = :strict //DEPRECATED
-  # Log the query plan for queries taking more than this (works
-  # with SQLite, MySQL, and PostgreSQL)
-  config.active_record.auto_explain_threshold_in_seconds = 0.5
+  # Log the query plan for queries taking more than this (works // DEPRECATED
+  # with SQLite, MySQL, and PostgreSQL) // DEPRECATED
+  # config.active_record.auto_explain_threshold_in_seconds = 0.5 // DEPRECATED
   # Do not compress assets
   config.assets.compress = false
@@ -36,5 +36,7 @@
   config.assets.debug = true
   # Prevents assets from rendering twice
-  config.serve_static_assets = true
+  config.serve_static_files = true
+
+  config.eager_load = false
 end
diff --git a/config/environments/production.rb b/config/environments/production.rb
--- a/config/environments/production.rb
+++ b/config/environments/production.rb
@@ -9,7 +9,7 @@
   config.action_controller.perform_caching = true
   # Disable Rails's static asset server (Apache or nginx will already do this)
-  config.serve_static_assets = false
+  config.serve_static_files = false
   # Compress JavaScripts and CSS
   config.assets.compress = true
@@ -64,4 +64,6 @@
   # Log the query plan for queries taking more than this (works
   # with SQLite, MySQL, and PostgreSQL)
   # config.active_record.auto_explain_threshold_in_seconds = 0.5
+
+  config.eager_load = true
 end
diff --git a/config/environments/test.rb b/config/environments/test.rb
--- a/config/environments/test.rb
+++ b/config/environments/test.rb
@@ -8,7 +8,7 @@
   config.cache_classes = true
   # Configure static asset server for tests with Cache-Control for performance
-  config.serve_static_assets = true
+  config.serve_static_files = true
   config.static_cache_control = "public, max-age=3600"
   # Log error messages when you accidentally call methods on nil
@@ -30,8 +30,14 @@
   config.action_mailer.delivery_method = :test
   # Raise exception on mass assignment protection for Active Record models
-  config.active_record.mass_assignment_sanitizer = :strict
+  #config.active_record.mass_assignment_sanitizer = :strict // DEPRECATED
   # Print deprecation notices to the stderr
   config.active_support.deprecation = :stderr
+
+  config.eager_load = false
+
+  #test order
+  config.active_support.test_order = :sorted
+
 end
diff --git a/config/initializers/assets.rb b/config/initializers/assets.rb
new file mode 100644
--- /dev/null
+++ b/config/initializers/assets.rb
@@ -0,0 +1,23 @@
+# Be sure to restart your server when you modify this file.
+
+# Version of your assets, change this if you want to expire all your assets.
+Rails.application.config.assets.version = '1.0'
+
+# Add additional assets to the asset load path.
+# Rails.application.config.assets.paths << Emoji.images_path
+# Add Yarn node_modules folder to the asset load path.
+Rails.application.config.assets.paths << Rails.root.join('node_modules')
+Rails.application.config.assets.paths << Rails.root.join('vendor/assets/fonts')
+
+# Precompile additional assets.
+# application.js, application.css, and all non-JS/CSS in the app/assets
+# folder are already added.
+# Rails.application.config.assets.precompile += %w( admin.js admin.css )
+
+Rails.application.config.assets.precompile += ['announcement_refresh.js','effects.js','site_update.js']
+Rails.application.config.assets.precompile += ['local_jquery.js','tablesorter-theme.cafe.css']
+%w( announcements submissions configurations contests contest_management graders heartbeat
+    login main messages problems report site sites sources tasks groups
+    test user_admin users tags testcases).each do |controller|
+  Rails.application.config.assets.precompile += ["#{controller}.js", "#{controller}.css"]
+end
diff --git a/config/initializers/mime_types.rb b/config/initializers/mime_types.rb
--- a/config/initializers/mime_types.rb
+++ b/config/initializers/mime_types.rb
@@ -3,4 +3,3 @@
 # Add new mime types for use in respond_to blocks:
 # Mime::Type.register "text/richtext", :rtf
 # Mime::Type.register_alias "text/html", :iphone
-Mime::Type.register 'application/pdf', :pdf
diff --git a/config/initializers/secret_token.rb b/config/initializers/secret_token.rb
deleted file mode 100644
--- a/config/initializers/secret_token.rb
+++ /dev/null
@@ -1,7 +0,0 @@
-# Be sure to restart your server when you modify this file.
-
-# Your secret key for verifying the integrity of signed cookies.
-# If you change this key, all old signed cookies will become invalid!
-# Make sure the secret is at least 30 characters and all random,
-# no regular words or you'll be exposed to dictionary attacks.
-CafeGrader::Application.config.secret_token = '7f85485d3d652fc6336dfe3cd98917d9bd7a323b32096bf7635d26b98ccd0480816cc2d12b5c10805cecf7d8fb322104e2bda71eb60dd871c5c537e56a063038' diff --git a/config/routes.rb b/config/routes.rb --- a/config/routes.rb +++ b/config/routes.rb @@ -1,8 +1,12 @@ CafeGrader::Application.routes.draw do + resources :tags get "sources/direct_edit" root :to => 'main#login' + #logins + get 'login/login', to: 'login#login' + resources :contests resources :sites @@ -17,6 +21,8 @@ member do get 'toggle' get 'toggle_test' + get 'toggle_view_testcase' + get 'stat' end collection do get 'turn_all_off' @@ -26,38 +32,83 @@ end end + resources :groups do + member do + post 'add_user', to: 'groups#add_user', as: 'add_user' + delete 'remove_user/:user_id', to: 'groups#remove_user', as: 'remove_user' + delete 'remove_all_user', to: 'groups#remove_all_user', as: 'remove_all_user' + post 'add_problem', to: 'groups#add_problem', as: 'add_problem' + delete 'remove_problem/:problem_id', to: 'groups#remove_problem', as: 'remove_problem' + delete 'remove_all_problem', to: 'groups#remove_all_problem', as: 'remove_all_problem' + end + collection do + + end + end + + resources :testcases, only: [] do + member do + get 'download_input' + get 'download_sol' + end + collection do + get 'show_problem/:problem_id(/:test_num)' => 'testcases#show_problem', as: 'show_problem' + end + end + resources :grader_configuration, controller: 'configurations' resources :users do member do get 'toggle_activate', 'toggle_enable' + get 'stat' end end - #source code edit - get 'sources/direct_edit/:pid', to: 'sources#direct_edit', as: 'direct_edit' - get 'sources/direct_edit_submission/:sid', to: 'sources#direct_edit_submission', as: 'direct_edit_submission' - get 'sources/get_latest_submission_status/:uid/:pid', to: 'sources#get_latest_submission_status', as: 'get_latest_submission_status' + resources :submissions do + member do + get 'download' + get 'compiler_msg' + get 'rejudge' + end + collection do + get 'prob/:problem_id', to: 'submissions#index', as: 'problem' + get 'direct_edit_problem/:problem_id(/:user_id)', to: 'submissions#direct_edit_problem', as: 'direct_edit_problem' + get 'get_latest_submission_status/:uid/:pid', to: 'submissions#get_latest_submission_status', as: 'get_latest_submission_status' + end + end - match 'tasks/view/:file.:ext' => 'tasks#view' - match 'tasks/download/:id/:file.:ext' => 'tasks#download' - match 'heartbeat/:id/edit' => 'heartbeat#edit' #main get "main/list" get 'main/submission(/:id)', to: 'main#submission', as: 'main_submission' + #user admin + get 'user_admin/bulk_manage', to: 'user_admin#bulk_manage', as: 'bulk_manage_user_admin' + post 'user_admin', to: 'user_admin#create' + delete 'user_admin/:id', to: 'user_admin#destroy', as: 'user_admin_destroy' + #report + get 'report/current_score', to: 'report#current_score', as: 'report_current_score' get 'report/problem_hof(/:id)', to: 'report#problem_hof', as: 'report_problem_hof' get "report/login" + get 'report/max_score', to: 'report#max_score', as: 'report_max_score' + post 'report/show_max_score', to: 'report#show_max_score', as: 'report_show_max_score' + + + # + get 'tasks/view/:file.:ext' => 'tasks#view' + get 'tasks/download/:id/:file.:ext' => 'tasks#download' + get 'heartbeat/:id/edit' => 'heartbeat#edit' #grader get 'graders/list', to: 'graders#list', as: 'grader_list' + # See how all your routes lay out with "rake routes" # This is a legacy wild controller route that's not recommended for RESTful applications. 
# Note: This route will make all actions in every controller accessible via GET requests. - match ':controller(/:action(/:id))(.:format)' + match ':controller(/:action(/:id))(.:format)', via: [:get, :post] end diff --git a/config/secrets.yml.SAMPLE b/config/secrets.yml.SAMPLE new file mode 100644 --- /dev/null +++ b/config/secrets.yml.SAMPLE @@ -0,0 +1,8 @@ +development: + secret_key_base: '444f426d08add8e2d7cbd76e2057e521e06091231eb4d5472af6ba5654ea1124ce6a636f549be6827ce09561c314181226ad840d44e4677e1077942ee0dc82bd' + +test: + secret_key_base: 'd52f411b06a79cc9f56d92e10d27e670cf0f0c3357e7caf9018ec23091b5c452ea9266c03a5c9e37b72c358702d4d460e957f90dcc553c9fc73a98adb520e781' + +production: + secret_key_base: '7f85485d3d652fc6336dfe3cd98917d9bd7a323b32096bf7635d26b98ccd0480816cc2d12b5c10805cecf7d8fb322104e2bda71eb60dd871c5c537e56a063038' diff --git a/db/migrate/011_add_language_ext.rb b/db/migrate/011_add_language_ext.rb --- a/db/migrate/011_add_language_ext.rb +++ b/db/migrate/011_add_language_ext.rb @@ -3,7 +3,7 @@ add_column :languages, :ext, :string, :limit => 10 Language.reset_column_information - langs = Language.find(:all) + langs = Language.all langs.each do |l| l.ext = l.name l.save diff --git a/db/migrate/015_add_status_to_tasks.rb b/db/migrate/015_add_status_to_tasks.rb --- a/db/migrate/015_add_status_to_tasks.rb +++ b/db/migrate/015_add_status_to_tasks.rb @@ -4,7 +4,7 @@ add_column :tasks, :updated_at, :datetime Task.reset_column_information - Task.find(:all).each do |task| + Task.all.each do |task| task.status_complete task.save end diff --git a/db/migrate/018_add_number_to_submissions.rb b/db/migrate/018_add_number_to_submissions.rb --- a/db/migrate/018_add_number_to_submissions.rb +++ b/db/migrate/018_add_number_to_submissions.rb @@ -9,8 +9,7 @@ last_problem_id = nil current_number = 0 - Submission.find(:all, - :order => 'user_id, problem_id, submitted_at').each do |submission| + Submission.order('user_id, problem_id, submitted_at').each do |submission| if submission.user_id==last_user_id and submission.problem_id==last_problem_id current_number += 1 else diff --git a/db/migrate/025_add_site_to_user_and_add_default_site.rb b/db/migrate/025_add_site_to_user_and_add_default_site.rb --- a/db/migrate/025_add_site_to_user_and_add_default_site.rb +++ b/db/migrate/025_add_site_to_user_and_add_default_site.rb @@ -7,7 +7,7 @@ add_column :users, :site_id, :integer User.reset_column_information - User.find(:all).each do |user| + User.all.each do |user| class << user def valid? 
diff --git a/db/migrate/028_refactor_problem_body_to_description.rb b/db/migrate/028_refactor_problem_body_to_description.rb --- a/db/migrate/028_refactor_problem_body_to_description.rb +++ b/db/migrate/028_refactor_problem_body_to_description.rb @@ -3,7 +3,7 @@ add_column :problems, :description_id, :integer Problem.reset_column_information - Problem.find(:all).each do |problem| + Problem.all.each do |problem| if problem.body!=nil description = Description.new description.body = problem.body @@ -21,7 +21,7 @@ add_column :problems, :body, :text Problem.reset_column_information - Problem.find(:all).each do |problem| + Problem.all.each do |problem| if problem.description_id != nil problem.body = Description.find(problem.description_id).body problem.save diff --git a/db/migrate/029_add_test_allowed_to_problems.rb b/db/migrate/029_add_test_allowed_to_problems.rb --- a/db/migrate/029_add_test_allowed_to_problems.rb +++ b/db/migrate/029_add_test_allowed_to_problems.rb @@ -3,7 +3,7 @@ add_column :problems, :test_allowed, :boolean Problem.reset_column_information - Problem.find(:all).each do |problem| + Problem.all.each do |problem| problem.test_allowed = true problem.save end diff --git a/db/migrate/20081204122651_add_activated_to_users.rb b/db/migrate/20081204122651_add_activated_to_users.rb --- a/db/migrate/20081204122651_add_activated_to_users.rb +++ b/db/migrate/20081204122651_add_activated_to_users.rb @@ -4,7 +4,7 @@ User.reset_column_information - User.find(:all).each do |user| + User.all.each do |user| # disable validation class < 'cpp,cc', 'pas' => 'pas' } - Language.find(:all).each do |lang| + Language.all.each do |lang| lang.common_ext = common_ext[lang.name] lang.save end diff --git a/db/migrate/20140913060729_add_section_to_users.rb b/db/migrate/20140913060729_add_section_to_users.rb new file mode 100644 --- /dev/null +++ b/db/migrate/20140913060729_add_section_to_users.rb @@ -0,0 +1,5 @@ +class AddSectionToUsers < ActiveRecord::Migration + def change + add_column :users, :section, :string + end +end diff --git a/db/migrate/20161008050135_modify_grader_process.rb b/db/migrate/20161008050135_modify_grader_process.rb new file mode 100644 --- /dev/null +++ b/db/migrate/20161008050135_modify_grader_process.rb @@ -0,0 +1,9 @@ +class ModifyGraderProcess < ActiveRecord::Migration + def up + change_column :grader_processes, :host, :string + end + + def down + change_column :grader_processes, :host, :string, limit: 20 + end +end diff --git a/db/migrate/20161014091417_create_testcases.rb b/db/migrate/20161014091417_create_testcases.rb new file mode 100644 --- /dev/null +++ b/db/migrate/20161014091417_create_testcases.rb @@ -0,0 +1,15 @@ +class CreateTestcases < ActiveRecord::Migration + def change + create_table :testcases do |t| + t.references :problem + t.integer :num + t.integer :group + t.integer :score + t.text :input + t.text :sol + + t.timestamps + end + add_index :testcases, :problem_id + end +end diff --git a/db/migrate/20161031063337_add_config_view_test.rb b/db/migrate/20161031063337_add_config_view_test.rb new file mode 100644 --- /dev/null +++ b/db/migrate/20161031063337_add_config_view_test.rb @@ -0,0 +1,16 @@ +class AddConfigViewTest < ActiveRecord::Migration + def up + GraderConfiguration.create key: 'right.view_testcase', value_type: 'boolean', value:'true', description:'When true, any user can view/download test data' + #uglily and dirtily and shamelessly check other config and inifialize + GraderConfiguration.where(key: 'right.user_hall_of_fame').first_or_create(value_type: 
'boolean', value: 'false', + description: 'If true, any user can access hall of fame page.') + GraderConfiguration.where(key: 'right.multiple_ip_login').first_or_create(value_type: 'boolean', value: 'false', + description: 'When change from true to false, a user can login from the first IP they logged into afterward.') + GraderConfiguration.where(key: 'right.user_view_submission').first_or_create(value_type: 'boolean', value: 'false', + description: 'If true, any user can view submissions of every one.') + end + + def down + GraderConfiguration.where(key: 'right.view_testcase').destroy_all; + end +end diff --git a/db/migrate/20170123162543_change_testcase_size.rb b/db/migrate/20170123162543_change_testcase_size.rb new file mode 100644 --- /dev/null +++ b/db/migrate/20170123162543_change_testcase_size.rb @@ -0,0 +1,6 @@ +class ChangeTestcaseSize < ActiveRecord::Migration + def change + change_column :testcases, :input, :text, :limit => 4294967295 + change_column :testcases, :sol, :text, :limit => 4294967295 + end +end diff --git a/db/migrate/20170124024527_add_view_testcase_to_problem.rb b/db/migrate/20170124024527_add_view_testcase_to_problem.rb new file mode 100644 --- /dev/null +++ b/db/migrate/20170124024527_add_view_testcase_to_problem.rb @@ -0,0 +1,5 @@ +class AddViewTestcaseToProblem < ActiveRecord::Migration + def change + add_column :problems, :view_testcase, :bool + end +end diff --git a/db/migrate/20170310110146_add_index_to_task.rb b/db/migrate/20170310110146_add_index_to_task.rb new file mode 100644 --- /dev/null +++ b/db/migrate/20170310110146_add_index_to_task.rb @@ -0,0 +1,5 @@ +class AddIndexToTask < ActiveRecord::Migration + def change + add_index :tasks, :submission_id + end +end diff --git a/db/migrate/20170427070345_add_heart_beat_full.rb b/db/migrate/20170427070345_add_heart_beat_full.rb new file mode 100644 --- /dev/null +++ b/db/migrate/20170427070345_add_heart_beat_full.rb @@ -0,0 +1,9 @@ +class AddHeartBeatFull < ActiveRecord::Migration + def up + GraderConfiguration.create key: 'right.heartbeat_response_full', value_type: 'string', value:'RESTART', description:'Heart beat response text when user got full score (set this value to the empty string to disable this feature)' + end + + def down + + end +end diff --git a/db/migrate/20170911091143_create_groups.rb b/db/migrate/20170911091143_create_groups.rb new file mode 100644 --- /dev/null +++ b/db/migrate/20170911091143_create_groups.rb @@ -0,0 +1,30 @@ +class CreateGroups < ActiveRecord::Migration + + def change + create_table :groups do |t| + t.string :name + t.string :description + end + + create_join_table :groups, :users do |t| + # t.index [:group_id, :user_id] + t.index [:user_id, :group_id] + end + + create_join_table :problems, :groups do |t| + # t.index [:problem_id, :group_id] + t.index [:group_id, :problem_id] + end + + reversible do |change| + change.up do + GraderConfiguration.where(key: 'system.use_problem_group').first_or_create(value_type: 'boolean', value: 'false', + description: 'If true, available problem to the user will be only ones associated with the group of the user'); + end + + change.down do + GraderConfiguration.where(key: 'system.use_problem_group').destroy_all + end + end + end +end diff --git a/db/migrate/20170914150545_create_tags.rb b/db/migrate/20170914150545_create_tags.rb new file mode 100644 --- /dev/null +++ b/db/migrate/20170914150545_create_tags.rb @@ -0,0 +1,11 @@ +class CreateTags < ActiveRecord::Migration + def change + create_table :tags do |t| + t.string :name, null: false 
+ t.text :description + t.boolean :public + + t.timestamps null: false + end + end +end diff --git a/db/migrate/20170914150742_create_problem_tags.rb b/db/migrate/20170914150742_create_problem_tags.rb new file mode 100644 --- /dev/null +++ b/db/migrate/20170914150742_create_problem_tags.rb @@ -0,0 +1,10 @@ +class CreateProblemTags < ActiveRecord::Migration + def change + create_table :problems_tags do |t| + t.references :problem, index: true, foreign_key: true + t.references :tag, index: true, foreign_key: true + + t.index [:problem_id,:tag_id], unique: true + end + end +end diff --git a/db/schema.rb b/db/schema.rb --- a/db/schema.rb +++ b/db/schema.rb @@ -9,258 +9,313 @@ # from scratch. The latter is a flawed and unsustainable approach (the more migrations # you'll amass, the slower it'll run and the greater likelihood for issues). # -# It's strongly recommended to check this file into your version control system. +# It's strongly recommended that you check this file into your version control system. -ActiveRecord::Schema.define(:version => 20150916054105) do +ActiveRecord::Schema.define(version: 20170914150742) do - create_table "announcements", :force => true do |t| - t.string "author" - t.text "body" + create_table "announcements", force: :cascade do |t| + t.string "author", limit: 255 + t.text "body", limit: 16777215 t.boolean "published" - t.datetime "created_at", :null => false - t.datetime "updated_at", :null => false - t.boolean "frontpage", :default => false - t.boolean "contest_only", :default => false - t.string "title" - t.string "notes" + t.datetime "created_at", null: false + t.datetime "updated_at", null: false + t.boolean "frontpage", default: false + t.boolean "contest_only", default: false + t.string "title", limit: 255 + t.string "notes", limit: 255 end - create_table "contests", :force => true do |t| - t.string "title" + create_table "contests", force: :cascade do |t| + t.string "title", limit: 255 t.boolean "enabled" - t.datetime "created_at", :null => false - t.datetime "updated_at", :null => false - t.string "name" + t.datetime "created_at", null: false + t.datetime "updated_at", null: false + t.string "name", limit: 255 end - create_table "contests_problems", :id => false, :force => true do |t| - t.integer "contest_id" - t.integer "problem_id" + create_table "contests_problems", id: false, force: :cascade do |t| + t.integer "contest_id", limit: 4 + t.integer "problem_id", limit: 4 end - create_table "contests_users", :id => false, :force => true do |t| - t.integer "contest_id" - t.integer "user_id" + create_table "contests_users", id: false, force: :cascade do |t| + t.integer "contest_id", limit: 4 + t.integer "user_id", limit: 4 end - create_table "countries", :force => true do |t| - t.string "name" - t.datetime "created_at", :null => false - t.datetime "updated_at", :null => false + create_table "countries", force: :cascade do |t| + t.string "name", limit: 255 + t.datetime "created_at", null: false + t.datetime "updated_at", null: false end - create_table "descriptions", :force => true do |t| - t.text "body" + create_table "descriptions", force: :cascade do |t| + t.text "body", limit: 16777215 t.boolean "markdowned" - t.datetime "created_at", :null => false - t.datetime "updated_at", :null => false + t.datetime "created_at", null: false + t.datetime "updated_at", null: false end - create_table "grader_configurations", :force => true do |t| - t.string "key" - t.string "value_type" - t.string "value" - t.datetime "created_at", :null => false - t.datetime 
"updated_at", :null => false - t.text "description" + create_table "grader_configurations", force: :cascade do |t| + t.string "key", limit: 255 + t.string "value_type", limit: 255 + t.string "value", limit: 255 + t.datetime "created_at", null: false + t.datetime "updated_at", null: false + t.text "description", limit: 16777215 end - create_table "grader_processes", :force => true do |t| - t.string "host", :limit => 20 - t.integer "pid" - t.string "mode" + create_table "grader_processes", force: :cascade do |t| + t.string "host", limit: 255 + t.integer "pid", limit: 4 + t.string "mode", limit: 255 t.boolean "active" - t.datetime "created_at", :null => false - t.datetime "updated_at", :null => false - t.integer "task_id" - t.string "task_type" + t.datetime "created_at", null: false + t.datetime "updated_at", null: false + t.integer "task_id", limit: 4 + t.string "task_type", limit: 255 t.boolean "terminated" end - add_index "grader_processes", ["host", "pid"], :name => "index_grader_processes_on_ip_and_pid" + add_index "grader_processes", ["host", "pid"], name: "index_grader_processes_on_ip_and_pid", using: :btree + + create_table "groups", force: :cascade do |t| + t.string "name", limit: 255 + t.string "description", limit: 255 + end - create_table "heart_beats", :force => true do |t| - t.integer "user_id" - t.string "ip_address" - t.datetime "created_at", :null => false - t.datetime "updated_at", :null => false - t.string "status" + create_table "groups_problems", id: false, force: :cascade do |t| + t.integer "problem_id", limit: 4, null: false + t.integer "group_id", limit: 4, null: false + end + + add_index "groups_problems", ["group_id", "problem_id"], name: "index_groups_problems_on_group_id_and_problem_id", using: :btree + + create_table "groups_users", id: false, force: :cascade do |t| + t.integer "group_id", limit: 4, null: false + t.integer "user_id", limit: 4, null: false end - add_index "heart_beats", ["updated_at"], :name => "index_heart_beats_on_updated_at" + add_index "groups_users", ["user_id", "group_id"], name: "index_groups_users_on_user_id_and_group_id", using: :btree - create_table "languages", :force => true do |t| - t.string "name", :limit => 10 - t.string "pretty_name" - t.string "ext", :limit => 10 - t.string "common_ext" + create_table "heart_beats", force: :cascade do |t| + t.integer "user_id", limit: 4 + t.string "ip_address", limit: 255 + t.datetime "created_at", null: false + t.datetime "updated_at", null: false + t.string "status", limit: 255 end - create_table "logins", :force => true do |t| - t.integer "user_id" - t.string "ip_address" - t.datetime "created_at", :null => false - t.datetime "updated_at", :null => false + add_index "heart_beats", ["updated_at"], name: "index_heart_beats_on_updated_at", using: :btree + + create_table "languages", force: :cascade do |t| + t.string "name", limit: 10 + t.string "pretty_name", limit: 255 + t.string "ext", limit: 10 + t.string "common_ext", limit: 255 end - create_table "messages", :force => true do |t| - t.integer "sender_id" - t.integer "receiver_id" - t.integer "replying_message_id" - t.text "body" - t.boolean "replied" - t.datetime "created_at", :null => false - t.datetime "updated_at", :null => false + create_table "logins", force: :cascade do |t| + t.integer "user_id", limit: 4 + t.string "ip_address", limit: 255 + t.datetime "created_at", null: false + t.datetime "updated_at", null: false end - create_table "problems", :force => true do |t| - t.string "name", :limit => 30 - t.string "full_name" - t.integer 
"full_score" + create_table "messages", force: :cascade do |t| + t.integer "sender_id", limit: 4 + t.integer "receiver_id", limit: 4 + t.integer "replying_message_id", limit: 4 + t.text "body", limit: 16777215 + t.boolean "replied" + t.datetime "created_at", null: false + t.datetime "updated_at", null: false + end + + create_table "problems", force: :cascade do |t| + t.string "name", limit: 30 + t.string "full_name", limit: 255 + t.integer "full_score", limit: 4 t.date "date_added" t.boolean "available" - t.string "url" - t.integer "description_id" + t.string "url", limit: 255 + t.integer "description_id", limit: 4 t.boolean "test_allowed" t.boolean "output_only" - t.string "description_filename" + t.string "description_filename", limit: 255 + t.boolean "view_testcase" end - create_table "rights", :force => true do |t| - t.string "name" - t.string "controller" - t.string "action" + create_table "problems_tags", force: :cascade do |t| + t.integer "problem_id", limit: 4 + t.integer "tag_id", limit: 4 end - create_table "rights_roles", :id => false, :force => true do |t| - t.integer "right_id" - t.integer "role_id" + add_index "problems_tags", ["problem_id", "tag_id"], name: "index_problems_tags_on_problem_id_and_tag_id", unique: true, using: :btree + add_index "problems_tags", ["problem_id"], name: "index_problems_tags_on_problem_id", using: :btree + add_index "problems_tags", ["tag_id"], name: "index_problems_tags_on_tag_id", using: :btree + + create_table "rights", force: :cascade do |t| + t.string "name", limit: 255 + t.string "controller", limit: 255 + t.string "action", limit: 255 end - add_index "rights_roles", ["role_id"], :name => "index_rights_roles_on_role_id" - - create_table "roles", :force => true do |t| - t.string "name" + create_table "rights_roles", id: false, force: :cascade do |t| + t.integer "right_id", limit: 4 + t.integer "role_id", limit: 4 end - create_table "roles_users", :id => false, :force => true do |t| - t.integer "role_id" - t.integer "user_id" + add_index "rights_roles", ["role_id"], name: "index_rights_roles_on_role_id", using: :btree + + create_table "roles", force: :cascade do |t| + t.string "name", limit: 255 end - add_index "roles_users", ["user_id"], :name => "index_roles_users_on_user_id" + create_table "roles_users", id: false, force: :cascade do |t| + t.integer "role_id", limit: 4 + t.integer "user_id", limit: 4 + end - create_table "sessions", :force => true do |t| - t.string "session_id" - t.text "data" + add_index "roles_users", ["user_id"], name: "index_roles_users_on_user_id", using: :btree + + create_table "sessions", force: :cascade do |t| + t.string "session_id", limit: 255 + t.text "data", limit: 16777215 t.datetime "updated_at" end - add_index "sessions", ["session_id"], :name => "index_sessions_on_session_id" - add_index "sessions", ["updated_at"], :name => "index_sessions_on_updated_at" + add_index "sessions", ["session_id"], name: "index_sessions_on_session_id", using: :btree + add_index "sessions", ["updated_at"], name: "index_sessions_on_updated_at", using: :btree - create_table "sites", :force => true do |t| - t.string "name" + create_table "sites", force: :cascade do |t| + t.string "name", limit: 255 t.boolean "started" t.datetime "start_time" - t.datetime "created_at", :null => false - t.datetime "updated_at", :null => false - t.integer "country_id" - t.string "password" + t.datetime "created_at", null: false + t.datetime "updated_at", null: false + t.integer "country_id", limit: 4 + t.string "password", limit: 255 end - create_table 
"submission_view_logs", :force => true do |t| - t.integer "user_id" - t.integer "submission_id" - t.datetime "created_at", :null => false - t.datetime "updated_at", :null => false + create_table "submission_view_logs", force: :cascade do |t| + t.integer "user_id", limit: 4 + t.integer "submission_id", limit: 4 + t.datetime "created_at", null: false + t.datetime "updated_at", null: false end - create_table "submissions", :force => true do |t| - t.integer "user_id" - t.integer "problem_id" - t.integer "language_id" - t.text "source" - t.binary "binary" + create_table "submissions", force: :cascade do |t| + t.integer "user_id", limit: 4 + t.integer "problem_id", limit: 4 + t.integer "language_id", limit: 4 + t.text "source", limit: 16777215 + t.binary "binary", limit: 65535 t.datetime "submitted_at" t.datetime "compiled_at" - t.text "compiler_message" + t.text "compiler_message", limit: 16777215 t.datetime "graded_at" - t.integer "points" - t.text "grader_comment" - t.integer "number" - t.string "source_filename" - t.float "max_runtime" - t.integer "peak_memory" - t.integer "effective_code_length" - t.string "ip_address" + t.integer "points", limit: 4 + t.text "grader_comment", limit: 16777215 + t.integer "number", limit: 4 + t.string "source_filename", limit: 255 + t.float "max_runtime", limit: 24 + t.integer "peak_memory", limit: 4 + t.integer "effective_code_length", limit: 4 + t.string "ip_address", limit: 255 end - add_index "submissions", ["user_id", "problem_id", "number"], :name => "index_submissions_on_user_id_and_problem_id_and_number", :unique => true - add_index "submissions", ["user_id", "problem_id"], :name => "index_submissions_on_user_id_and_problem_id" + add_index "submissions", ["user_id", "problem_id", "number"], name: "index_submissions_on_user_id_and_problem_id_and_number", unique: true, using: :btree + add_index "submissions", ["user_id", "problem_id"], name: "index_submissions_on_user_id_and_problem_id", using: :btree - create_table "tasks", :force => true do |t| - t.integer "submission_id" + create_table "tags", force: :cascade do |t| + t.string "name", limit: 255, null: false + t.text "description", limit: 65535 + t.boolean "public" + t.datetime "created_at", null: false + t.datetime "updated_at", null: false + end + + create_table "tasks", force: :cascade do |t| + t.integer "submission_id", limit: 4 t.datetime "created_at" - t.integer "status" + t.integer "status", limit: 4 t.datetime "updated_at" end - create_table "test_pairs", :force => true do |t| - t.integer "problem_id" - t.text "input", :limit => 16777215 - t.text "solution", :limit => 16777215 - t.datetime "created_at", :null => false - t.datetime "updated_at", :null => false + add_index "tasks", ["submission_id"], name: "index_tasks_on_submission_id", using: :btree + + create_table "test_pairs", force: :cascade do |t| + t.integer "problem_id", limit: 4 + t.text "input", limit: 4294967295 + t.text "solution", limit: 4294967295 + t.datetime "created_at", null: false + t.datetime "updated_at", null: false end - create_table "test_requests", :force => true do |t| - t.integer "user_id" - t.integer "problem_id" - t.integer "submission_id" - t.string "input_file_name" - t.string "output_file_name" - t.string "running_stat" - t.integer "status" - t.datetime "updated_at", :null => false + create_table "test_requests", force: :cascade do |t| + t.integer "user_id", limit: 4 + t.integer "problem_id", limit: 4 + t.integer "submission_id", limit: 4 + t.string "input_file_name", limit: 255 + t.string "output_file_name", 
limit: 255 + t.string "running_stat", limit: 255 + t.integer "status", limit: 4 + t.datetime "updated_at", null: false t.datetime "submitted_at" t.datetime "compiled_at" - t.text "compiler_message" + t.text "compiler_message", limit: 16777215 t.datetime "graded_at" - t.string "grader_comment" - t.datetime "created_at", :null => false - t.float "running_time" - t.string "exit_status" - t.integer "memory_usage" + t.string "grader_comment", limit: 255 + t.datetime "created_at", null: false + t.float "running_time", limit: 24 + t.string "exit_status", limit: 255 + t.integer "memory_usage", limit: 4 end - add_index "test_requests", ["user_id", "problem_id"], :name => "index_test_requests_on_user_id_and_problem_id" + add_index "test_requests", ["user_id", "problem_id"], name: "index_test_requests_on_user_id_and_problem_id", using: :btree - create_table "user_contest_stats", :force => true do |t| - t.integer "user_id" + create_table "testcases", force: :cascade do |t| + t.integer "problem_id", limit: 4 + t.integer "num", limit: 4 + t.integer "group", limit: 4 + t.integer "score", limit: 4 + t.text "input", limit: 4294967295 + t.text "sol", limit: 4294967295 + t.datetime "created_at", null: false + t.datetime "updated_at", null: false + end + + add_index "testcases", ["problem_id"], name: "index_testcases_on_problem_id", using: :btree + + create_table "user_contest_stats", force: :cascade do |t| + t.integer "user_id", limit: 4 t.datetime "started_at" - t.datetime "created_at", :null => false - t.datetime "updated_at", :null => false + t.datetime "created_at", null: false + t.datetime "updated_at", null: false t.boolean "forced_logout" end - create_table "users", :force => true do |t| - t.string "login", :limit => 50 - t.string "full_name" - t.string "hashed_password" - t.string "salt", :limit => 5 - t.string "alias" - t.string "email" - t.integer "site_id" - t.integer "country_id" - t.boolean "activated", :default => false + create_table "users", force: :cascade do |t| + t.string "login", limit: 50 + t.string "full_name", limit: 255 + t.string "hashed_password", limit: 255 + t.string "salt", limit: 5 + t.string "alias", limit: 255 + t.string "email", limit: 255 + t.integer "site_id", limit: 4 + t.integer "country_id", limit: 4 + t.boolean "activated", default: false t.datetime "created_at" t.datetime "updated_at" - t.boolean "enabled", :default => true - t.string "remark" - t.string "last_ip" + t.string "section", limit: 255 + t.boolean "enabled", default: true + t.string "remark", limit: 255 + t.string "last_ip", limit: 255 end - add_index "users", ["login"], :name => "index_users_on_login", :unique => true + add_index "users", ["login"], name: "index_users_on_login", unique: true, using: :btree + add_foreign_key "problems_tags", "problems" + add_foreign_key "problems_tags", "tags" end diff --git a/db/seeds.rb b/db/seeds.rb --- a/db/seeds.rb +++ b/db/seeds.rb @@ -53,6 +53,7 @@ :description => 'If the server is in contest mode and this option is true, on the log in of the admin a menu for site selections is shown.' }, + #---------------------------- right -------------------------------- { :key => 'right.user_hall_of_fame', :value_type => 'boolean', @@ -74,6 +75,33 @@ :description => 'If true, any user can view submissions of every one.' 
}, + { + :key => 'right.bypass_agreement', + :value_type => 'boolean', + :default_value => 'true', + :description => 'When false, a user must accept usage agreement before login' + }, + + { + :key => 'right.heartbeat_response', + :value_type => 'string', + :default_value => 'OK', + :description => 'Heart beat response text' + }, + + { + :key => 'right.heartbeat_response_full', + :value_type => 'string', + :default_value => 'OK', + :description => 'Heart beat response text when user got full score (set this value to the empty string to disable this feature)' + }, + + { + :key => 'right.view_testcase', + :value_type => 'boolean', + :default_value => 'false', + :description => 'When true, any user can view/download test data' + }, # If Configuration['system.online_registration'] is true, the # system allows online registration, and will use these # information for sending confirmation emails. @@ -135,7 +163,16 @@ :value_type => 'string', :default_value => 'none', :description => "New user will be assigned to this contest automatically, if it exists. Set to 'none' if there is no default contest." - } + }, + + { + :key => 'system.use_problem_group', + :value_type => 'boolean', + :default_value => 'false', + :description => "If true, available problem to the user will be only ones associated with the group of the user." + }, + + ] diff --git a/lib/assets/Lib/VFS_import.py b/lib/assets/Lib/VFS_import.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/VFS_import.py @@ -0,0 +1,87 @@ +import os +from browser import doc + +#_scripts=doc.createElement('script') +#_scripts.src="/src/py_VFS.js" +#_scripts.type="text/javascript" +#doc.get(tag='head')[0].appendChild(_scripts) + +VFS=dict(JSObject(__BRYTHON__.py_VFS)) +class VFSModuleFinder: + def __init__(self, path_entry): + print("in VFSModuleFinder") + if path_entry.startswith('/libs') or path_entry.startswith('/Lib'): + self.path_entry=path_entry + else: + raise ImportError() + + def __str__(self): + return '<%s for "%s">' % (self.__class__.__name__, self.path_entry) + + def find_module(self, fullname, path=None): + path = path or self.path_entry + #print('looking for "%s" in %s ...' % (fullname, path)) + for _ext in ['js', 'pyj', 'py']: + _filepath=os.path.join(self.path_entry, '%s.%s' % (fullname, _ext)) + if _filepath in VFS: + print("module found at %s:%s" % (_filepath, fullname)) + return VFSModuleLoader(_filepath, fullname) + + print('module %s not found' % fullname) + raise ImportError() + return None + +class VFSModuleLoader: + """Load source for modules""" + + def __init__(self, filepath, name): + self._filepath=filepath + self._name=name + + def get_source(self): + if self._filepath in VFS: + return JSObject(readFromVFS(self._filepath)) + + raise ImportError('could not find source for %s' % fullname) + + def is_package(self): + return '.' 
in self._name + + def load_module(self): + if self._name in sys.modules: + #print('reusing existing module from previous import of "%s"' % fullname) + mod = sys.modules[self._name] + return mod + + _src=self.get_source() + if self._filepath.endswith('.js'): + mod=JSObject(import_js_module(_src, self._filepath, self._name)) + elif self._filepath.endswith('.py'): + mod=JSObject(import_py_module(_src, self._filepath, self._name)) + elif self._filepath.endswith('.pyj'): + mod=JSObject(import_pyj_module(_src, self._filepath, self._name)) + else: + raise ImportError('Invalid Module: %s' % self._filepath) + + # Set a few properties required by PEP 302 + mod.__file__ = self._filepath + mod.__name__ = self._name + mod.__path__ = os.path.abspath(self._filepath) + mod.__loader__ = self + mod.__package__ = '.'.join(self._name.split('.')[:-1]) + + if self.is_package(): + print('adding path for package') + # Set __path__ for packages + # so we can find the sub-modules. + mod.__path__ = [ self.path_entry ] + else: + print('imported as regular module') + + print('creating a new module object for "%s"' % self._name) + sys.modules.setdefault(self._name, mod) + JSObject(__BRYTHON__.imported)[self._name]=mod + + return mod + +JSObject(__BRYTHON__.path_hooks.insert(0, VFSModuleFinder)) diff --git a/lib/assets/Lib/__future__.py b/lib/assets/Lib/__future__.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/__future__.py @@ -0,0 +1,134 @@ +"""Record of phased-in incompatible language changes. + +Each line is of the form: + + FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease "," + CompilerFlag ")" + +where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples +of the same form as sys.version_info: + + (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int + PY_MINOR_VERSION, # the 1; an int + PY_MICRO_VERSION, # the 0; an int + PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string + PY_RELEASE_SERIAL # the 3; an int + ) + +OptionalRelease records the first release in which + + from __future__ import FeatureName + +was accepted. + +In the case of MandatoryReleases that have not yet occurred, +MandatoryRelease predicts the release in which the feature will become part +of the language. + +Else MandatoryRelease records when the feature became part of the language; +in releases at or after that, modules no longer need + + from __future__ import FeatureName + +to use the feature in question, but may continue to use such imports. + +MandatoryRelease may also be None, meaning that a planned feature got +dropped. + +Instances of class _Feature have two corresponding methods, +.getOptionalRelease() and .getMandatoryRelease(). + +CompilerFlag is the (bitfield) flag that should be passed in the fourth +argument to the builtin function compile() to enable the feature in +dynamically compiled code. This flag is stored in the .compiler_flag +attribute on _Future instances. These values must match the appropriate +#defines of CO_xxx flags in Include/compile.h. + +No feature line is ever to be deleted from this file. +""" + +all_feature_names = [ + "nested_scopes", + "generators", + "division", + "absolute_import", + "with_statement", + "print_function", + "unicode_literals", + "barry_as_FLUFL", +] + +__all__ = ["all_feature_names"] + all_feature_names + +# The CO_xxx symbols are defined here under the same names used by +# compile.h, so that an editor search will find them here. However, +# they're not exported in __all__, because they don't really belong to +# this module. 
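The module docstring above explains that each _Feature record exposes its release tuples and a compiler flag that can be passed as the fourth argument to compile(). A small usage sketch against the definitions that follow in this file; the example is illustrative and not part of the vendored source:

    import __future__

    feat = __future__.division
    feat.getOptionalRelease()   # (2, 2, 0, "alpha", 2)
    feat.getMandatoryRelease()  # (3, 0, 0, "alpha", 0)

    # The flag enables the feature in dynamically compiled code:
    code = compile("x = 1 / 2", "<example>", "exec", feat.compiler_flag)
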
+CO_NESTED = 0x0010 # nested_scopes +CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000) +CO_FUTURE_DIVISION = 0x2000 # division +CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default +CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement +CO_FUTURE_PRINT_FUNCTION = 0x10000 # print function +CO_FUTURE_UNICODE_LITERALS = 0x20000 # unicode string literals +CO_FUTURE_BARRY_AS_BDFL = 0x40000 + +class _Feature: + def __init__(self, optionalRelease, mandatoryRelease, compiler_flag): + self.optional = optionalRelease + self.mandatory = mandatoryRelease + self.compiler_flag = compiler_flag + + def getOptionalRelease(self): + """Return first release in which this feature was recognized. + + This is a 5-tuple, of the same form as sys.version_info. + """ + + return self.optional + + def getMandatoryRelease(self): + """Return release in which this feature will become mandatory. + + This is a 5-tuple, of the same form as sys.version_info, or, if + the feature was dropped, is None. + """ + + return self.mandatory + + def __repr__(self): + return "_Feature" + repr((self.optional, + self.mandatory, + self.compiler_flag)) + +nested_scopes = _Feature((2, 1, 0, "beta", 1), + (2, 2, 0, "alpha", 0), + CO_NESTED) + +generators = _Feature((2, 2, 0, "alpha", 1), + (2, 3, 0, "final", 0), + CO_GENERATOR_ALLOWED) + +division = _Feature((2, 2, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_DIVISION) + +absolute_import = _Feature((2, 5, 0, "alpha", 1), + (3, 0, 0, "alpha", 0), + CO_FUTURE_ABSOLUTE_IMPORT) + +with_statement = _Feature((2, 5, 0, "alpha", 1), + (2, 6, 0, "alpha", 0), + CO_FUTURE_WITH_STATEMENT) + +print_function = _Feature((2, 6, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_PRINT_FUNCTION) + +unicode_literals = _Feature((2, 6, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_UNICODE_LITERALS) + +barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2), + (3, 9, 0, "alpha", 0), + CO_FUTURE_BARRY_AS_BDFL) diff --git a/lib/assets/Lib/_abcoll.py b/lib/assets/Lib/_abcoll.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_abcoll.py @@ -0,0 +1,209 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) for collections, according to PEP 3119. + +DON'T USE THIS MODULE DIRECTLY! The classes here should be imported +via collections; they are defined here only to alleviate certain +bootstrapping issues. Unit tests are in test_collections. +""" + +from abc import ABCMeta, abstractmethod +import sys + +__all__ = ["Hashable", "Iterable", "Iterator", + "Sized", "Container", "Callable", + "Set", "MutableSet", + "Mapping", "MutableMapping", + "MappingView", "KeysView", "ItemsView", "ValuesView", + "Sequence", "MutableSequence", + "ByteString", + ] + +""" +### collection related types which are not exposed through builtin ### +## iterators ## +#fixme brython +#bytes_iterator = type(iter(b'')) +bytes_iterator = type(iter('')) +#fixme brython +#bytearray_iterator = type(iter(bytearray())) +#callable_iterator = ??? 
+dict_keyiterator = type(iter({}.keys())) +dict_valueiterator = type(iter({}.values())) +dict_itemiterator = type(iter({}.items())) +list_iterator = type(iter([])) +list_reverseiterator = type(iter(reversed([]))) +range_iterator = type(iter(range(0))) +set_iterator = type(iter(set())) +str_iterator = type(iter("")) +tuple_iterator = type(iter(())) +zip_iterator = type(iter(zip())) +## views ## +dict_keys = type({}.keys()) +dict_values = type({}.values()) +dict_items = type({}.items()) +## misc ## +dict_proxy = type(type.__dict__) +""" + +def abstractmethod(self): + return self + +### ONE-TRICK PONIES ### + + +#class Iterable(metaclass=ABCMeta): +class Iterable: + + @abstractmethod + def __iter__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterable: + if any("__iter__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +#class Sized(metaclass=ABCMeta): +class Sized: + + @abstractmethod + def __len__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Sized: + if any("__len__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +#class Container(metaclass=ABCMeta): +class Container: + + @abstractmethod + def __contains__(self, x): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Container: + if any("__contains__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + +### MAPPINGS ### + + +class Mapping(Sized, Iterable, Container): + + @abstractmethod + def __getitem__(self, key): + raise KeyError + + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + def __contains__(self, key): + try: + self[key] + except KeyError: + return False + else: + return True + + def keys(self): + return KeysView(self) + + def items(self): + return ItemsView(self) + + def values(self): + return ValuesView(self) + + def __eq__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + return dict(self.items()) == dict(other.items()) + + def __ne__(self, other): + return not (self == other) + + +class MutableMapping(Mapping): + + @abstractmethod + def __setitem__(self, key, value): + raise KeyError + + @abstractmethod + def __delitem__(self, key): + raise KeyError + + __marker = object() + + def pop(self, key, default=__marker): + try: + value = self[key] + except KeyError: + if default is self.__marker: + raise + return default + else: + del self[key] + return value + + def popitem(self): + try: + key = next(iter(self)) + except StopIteration: + raise KeyError + value = self[key] + del self[key] + return key, value + + def clear(self): + try: + while True: + self.popitem() + except KeyError: + pass + + def update(*args, **kwds): + if len(args) > 2: + raise TypeError("update() takes at most 2 positional " + "arguments ({} given)".format(len(args))) + elif not args: + raise TypeError("update() takes at least 1 argument (0 given)") + self = args[0] + other = args[1] if len(args) >= 2 else () + + if isinstance(other, Mapping): + for key in other: + self[key] = other[key] + elif hasattr(other, "keys"): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + def setdefault(self, key, default=None): + try: + return self[key] + except KeyError: + self[key] = default + return default + +#MutableMapping.register(dict) diff --git a/lib/assets/Lib/_codecs.py 
b/lib/assets/Lib/_codecs.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_codecs.py @@ -0,0 +1,181 @@ + +def ascii_decode(*args,**kw): + pass + +def ascii_encode(*args,**kw): + pass + +def charbuffer_encode(*args,**kw): + pass + +def charmap_build(*args,**kw): + pass + +def charmap_decode(*args,**kw): + pass + +def charmap_encode(*args,**kw): + pass + +def decode(*args,**kw): + """decode(obj, [encoding[,errors]]) -> object + Decodes obj using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a ValueError. Other possible values are 'ignore' and 'replace' + as well as any other name registered with codecs.register_error that is + able to handle ValueErrors.""" + pass + +def encode(*args,**kw): + """encode(obj, [encoding[,errors]]) -> object + Encodes obj using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a ValueError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that can handle ValueErrors.""" + obj = args[0] + if len(args)==2: + encoding = args[1] + else: + encoding = 'utf-8' + if isinstance(obj, str): + return obj.encode(encoding) + +def escape_decode(*args,**kw): + pass + +def escape_encode(*args,**kw): + pass + +def latin_1_decode(*args,**kw): + pass + +def latin_1_encode(*args,**kw): + pass + +def lookup(encoding): + """lookup(encoding) -> CodecInfo + Looks up a codec tuple in the Python codec registry and returns + a CodecInfo object.""" + + if encoding in ('utf-8', 'utf_8'): + from javascript import console + console.log('encoding', encoding) + import encodings.utf_8 + return encodings.utf_8.getregentry() + + LookupError(encoding) + +def lookup_error(*args,**kw): + """lookup_error(errors) -> handler + Return the error handler for the specified error handling name + or raise a LookupError, if no handler exists under this name.""" + pass + +def mbcs_decode(*args,**kw): + pass + +def mbcs_encode(*args,**kw): + pass + +def raw_unicode_escape_decode(*args,**kw): + pass + +def raw_unicode_escape_encode(*args,**kw): + pass + +def readbuffer_encode(*args,**kw): + pass + +def register(*args,**kw): + """register(search_function) + Register a codec search function. Search functions are expected to take + one argument, the encoding name in all lower case letters, and return + a tuple of functions (encoder, decoder, stream_reader, stream_writer) + (or a CodecInfo object).""" + pass + +def register_error(*args,**kw): + """register_error(errors, handler) + Register the specified error handler under the name + errors. 
handler must be a callable object, that + will be called with an exception instance containing + information about the location of the encoding/decoding + error and must return a (replacement, new position) tuple.""" + pass + +def unicode_escape_decode(*args,**kw): + pass + +def unicode_escape_encode(*args,**kw): + pass + +def unicode_internal_decode(*args,**kw): + pass + +def unicode_internal_encode(*args,**kw): + pass + +def utf_16_be_decode(*args,**kw): + pass + +def utf_16_be_encode(*args,**kw): + pass + +def utf_16_decode(*args,**kw): + pass + +def utf_16_encode(*args,**kw): + pass + +def utf_16_ex_decode(*args,**kw): + pass + +def utf_16_le_decode(*args,**kw): + pass + +def utf_16_le_encode(*args,**kw): + pass + +def utf_32_be_decode(*args,**kw): + pass + +def utf_32_be_encode(*args,**kw): + pass + +def utf_32_decode(*args,**kw): + pass + +def utf_32_encode(*args,**kw): + pass + +def utf_32_ex_decode(*args,**kw): + pass + +def utf_32_le_decode(*args,**kw): + pass + +def utf_32_le_encode(*args,**kw): + pass + +def utf_7_decode(*args,**kw): + pass + +def utf_7_encode(*args,**kw): + pass + +def utf_8_decode(*args,**kw): + pass + +def utf_8_encode(*args,**kw): + input=args[0] + if len(args) == 2: + errors = args[1] + else: + errors=kw.get('errors', 'strict') + + #todo need to deal with errors, but for now assume all is well. + + return (bytes([_f for _f in input], 'utf-8'), len(input)) diff --git a/lib/assets/Lib/_collections.py b/lib/assets/Lib/_collections.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_collections.py @@ -0,0 +1,572 @@ +# "High performance data structures +# " +# copied from pypy repo + +# +# Copied and completed from the sandbox of CPython +# (nondist/sandbox/collections/pydeque.py rev 1.1, Raymond Hettinger) +# +# edited for Brython line 558 : catch ImportError instead of AttributeError + +import operator +#try: +# from thread import get_ident as _thread_ident +#except ImportError: +def _thread_ident(): + return -1 + + +n = 30 +LFTLNK = n +RGTLNK = n+1 +BLOCKSIZ = n+2 + +# The deque's size limit is d.maxlen. The limit can be zero or positive, or +# None. After an item is added to a deque, we check to see if the size has +# grown past the limit. If it has, we get the size back down to the limit by +# popping an item off of the opposite end. The methods that can trigger this +# are append(), appendleft(), extend(), and extendleft(). 
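The comment block above states the size-limit rule: when append(), appendleft(), extend(), or extendleft() pushes the deque past maxlen, one item is popped from the opposite end. A quick illustration of that rule using the standard collections.deque, which this pure-Python class is meant to mirror (example only, not part of the vendored file):

    from collections import deque

    d = deque(maxlen=3)
    for x in (1, 2, 3, 4):
        d.append(x)        # once full, each append() drops one item from the left
    print(list(d))         # [2, 3, 4]

    d.appendleft(0)        # appendleft() drops one item from the right instead
    print(list(d))         # [0, 2, 3]
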
+ +#class deque(object): +class deque: + + def __new__(cls, iterable=(), *args, **kw): + #fixme + #self = super(deque, cls).__new__(cls, *args, **kw) + self=object.__new__(cls, *args, **kw) + self.clear() + return self + + def __init__(self, iterable=(), maxlen=None): + object.__init__(self) + self.clear() + if maxlen is not None: + if maxlen < 0: + raise ValueError("maxlen must be non-negative") + self._maxlen = maxlen + add = self.append + for elem in iterable: + add(elem) + + @property + def maxlen(self): + return self._maxlen + + def clear(self): + self.right = self.left = [None] * BLOCKSIZ + self.rightndx = n//2 # points to last written element + self.leftndx = n//2+1 + self.length = 0 + self.state = 0 + + def append(self, x): + self.state += 1 + self.rightndx += 1 + if self.rightndx == n: + newblock = [None] * BLOCKSIZ + self.right[RGTLNK] = newblock + newblock[LFTLNK] = self.right + self.right = newblock + self.rightndx = 0 + self.length += 1 + self.right[self.rightndx] = x + if self.maxlen is not None and self.length > self.maxlen: + self.popleft() + + def appendleft(self, x): + self.state += 1 + self.leftndx -= 1 + if self.leftndx == -1: + newblock = [None] * BLOCKSIZ + self.left[LFTLNK] = newblock + newblock[RGTLNK] = self.left + self.left = newblock + self.leftndx = n-1 + self.length += 1 + self.left[self.leftndx] = x + if self.maxlen is not None and self.length > self.maxlen: + self.pop() + + def extend(self, iterable): + if iterable is self: + iterable = list(iterable) + for elem in iterable: + self.append(elem) + + def extendleft(self, iterable): + if iterable is self: + iterable = list(iterable) + for elem in iterable: + self.appendleft(elem) + + def pop(self): + if self.left is self.right and self.leftndx > self.rightndx: + #raise IndexError, "pop from an empty deque" # does not work in brython + raise IndexError("pop from an empty deque") + x = self.right[self.rightndx] + self.right[self.rightndx] = None + self.length -= 1 + self.rightndx -= 1 + self.state += 1 + if self.rightndx == -1: + prevblock = self.right[LFTLNK] + if prevblock is None: + # the deque has become empty; recenter instead of freeing block + self.rightndx = n//2 + self.leftndx = n//2+1 + else: + prevblock[RGTLNK] = None + self.right[LFTLNK] = None + self.right = prevblock + self.rightndx = n-1 + return x + + def popleft(self): + if self.left is self.right and self.leftndx > self.rightndx: + #raise IndexError, "pop from an empty deque" + raise IndexError("pop from an empty deque") + x = self.left[self.leftndx] + self.left[self.leftndx] = None + self.length -= 1 + self.leftndx += 1 + self.state += 1 + if self.leftndx == n: + prevblock = self.left[RGTLNK] + if prevblock is None: + # the deque has become empty; recenter instead of freeing block + self.rightndx = n//2 + self.leftndx = n//2+1 + else: + prevblock[LFTLNK] = None + self.left[RGTLNK] = None + self.left = prevblock + self.leftndx = 0 + return x + + def count(self, value): + c = 0 + for item in self: + if item == value: + c += 1 + return c + + def remove(self, value): + # Need to be defensive for mutating comparisons + for i in range(len(self)): + if self[i] == value: + del self[i] + return + raise ValueError("deque.remove(x): x not in deque") + + def rotate(self, n=1): + length = len(self) + if length == 0: + return + halflen = (length+1) >> 1 + if n > halflen or n < -halflen: + n %= length + if n > halflen: + n -= length + elif n < -halflen: + n += length + while n > 0: + self.appendleft(self.pop()) + n -= 1 + while n < 0: + 
self.append(self.popleft()) + n += 1 + + def reverse(self): + "reverse *IN PLACE*" + leftblock = self.left + rightblock = self.right + leftindex = self.leftndx + rightindex = self.rightndx + for i in range(self.length // 2): + # Validate that pointers haven't met in the middle + assert leftblock != rightblock or leftindex < rightindex + + # Swap + (rightblock[rightindex], leftblock[leftindex]) = ( + leftblock[leftindex], rightblock[rightindex]) + + # Advance left block/index pair + leftindex += 1 + if leftindex == n: + leftblock = leftblock[RGTLNK] + assert leftblock is not None + leftindex = 0 + + # Step backwards with the right block/index pair + rightindex -= 1 + if rightindex == -1: + rightblock = rightblock[LFTLNK] + assert rightblock is not None + rightindex = n - 1 + + def __repr__(self): + threadlocalattr = '__repr' + str(_thread_ident()) + if threadlocalattr in self.__dict__: + return 'deque([...])' + else: + self.__dict__[threadlocalattr] = True + try: + if self.maxlen is not None: + return 'deque(%r, maxlen=%s)' % (list(self), self.maxlen) + else: + return 'deque(%r)' % (list(self),) + finally: + del self.__dict__[threadlocalattr] + + def __iter__(self): + return deque_iterator(self, self._iter_impl) + + def _iter_impl(self, original_state, giveup): + if self.state != original_state: + giveup() + block = self.left + while block: + l, r = 0, n + if block is self.left: + l = self.leftndx + if block is self.right: + r = self.rightndx + 1 + for elem in block[l:r]: + yield elem + if self.state != original_state: + giveup() + block = block[RGTLNK] + + def __reversed__(self): + return deque_iterator(self, self._reversed_impl) + + def _reversed_impl(self, original_state, giveup): + if self.state != original_state: + giveup() + block = self.right + while block: + l, r = 0, n + if block is self.left: + l = self.leftndx + if block is self.right: + r = self.rightndx + 1 + for elem in reversed(block[l:r]): + yield elem + if self.state != original_state: + giveup() + block = block[LFTLNK] + + def __len__(self): + #sum = 0 + #block = self.left + #while block: + # sum += n + # block = block[RGTLNK] + #return sum + self.rightndx - self.leftndx + 1 - n + return self.length + + def __getref(self, index): + if index >= 0: + block = self.left + while block: + l, r = 0, n + if block is self.left: + l = self.leftndx + if block is self.right: + r = self.rightndx + 1 + span = r-l + if index < span: + return block, l+index + index -= span + block = block[RGTLNK] + else: + block = self.right + while block: + l, r = 0, n + if block is self.left: + l = self.leftndx + if block is self.right: + r = self.rightndx + 1 + negative_span = l-r + if index >= negative_span: + return block, r+index + index -= negative_span + block = block[LFTLNK] + raise IndexError("deque index out of range") + + def __getitem__(self, index): + block, index = self.__getref(index) + return block[index] + + def __setitem__(self, index, value): + block, index = self.__getref(index) + block[index] = value + + def __delitem__(self, index): + length = len(self) + if index >= 0: + if index >= length: + raise IndexError("deque index out of range") + self.rotate(-index) + self.popleft() + self.rotate(index) + else: + #index = ~index #todo until bit wise operators are in bython + index= index^(2**31) + if index >= length: + raise IndexError("deque index out of range") + self.rotate(index) + self.pop() + self.rotate(-index) + + def __reduce_ex__(self, proto): + return type(self), (list(self), self.maxlen) + + def __hash__(self): + #raise 
TypeError, "deque objects are unhashable" + raise TypeError("deque objects are unhashable") + + def __copy__(self): + return self.__class__(self, self.maxlen) + + # XXX make comparison more efficient + def __eq__(self, other): + if isinstance(other, deque): + return list(self) == list(other) + else: + return NotImplemented + + def __ne__(self, other): + if isinstance(other, deque): + return list(self) != list(other) + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, deque): + return list(self) < list(other) + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, deque): + return list(self) <= list(other) + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, deque): + return list(self) > list(other) + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, deque): + return list(self) >= list(other) + else: + return NotImplemented + + def __iadd__(self, other): + self.extend(other) + return self + + +class deque_iterator(object): + + def __init__(self, deq, itergen): + self.counter = len(deq) + def giveup(): + self.counter = 0 + #raise RuntimeError, "deque mutated during iteration" + raise RuntimeError("deque mutated during iteration") + self._gen = itergen(deq.state, giveup) + + def next(self): + res = self._gen.next() + self.counter -= 1 + return res + + def __iter__(self): + return self + +class defaultdict(dict): + + def __init__(self, *args, **kwds): + if len(args) > 0: + default_factory = args[0] + args = args[1:] + if not callable(default_factory) and default_factory is not None: + raise TypeError("first argument must be callable") + else: + default_factory = None + dict.__init__(self, args, kwds) + self.default_factory = default_factory + self.update(args, kwds) + super(defaultdict, self).__init__(*args, **kwds) + + #fixme.. had to add this function to get defaultdict working with brython correctly + #def __getitem__(self, key): + # if self.__contains__(key): + # return dict.__getitem__(self,key) + # + # return self.__missing__(key) + + def __missing__(self, key): + # from defaultdict docs + if self.default_factory is None: + raise KeyError(key) + self[key] = value = self.default_factory() + return value + + def __repr__(self, recurse=set()): + if id(self) in recurse: + return "defaultdict(...)" + try: + recurse.add(id(self)) + return "defaultdict(%s, %s)" % (repr(self.default_factory), super(defaultdict, self).__repr__()) + finally: + recurse.remove(id(self)) + + def copy(self): + return type(self)(self.default_factory, self) + + def __copy__(self): + return self.copy() + + def __reduce__(self): + # + #__reduce__ must return a 5-tuple as follows: + # + # - factory function + # - tuple of args for the factory function + # - additional state (here None) + # - sequence iterator (here None) + # - dictionary iterator (yielding successive (key, value) pairs + + # This API is used by pickle.py and copy.py. + # + return (type(self), (self.default_factory,), None, None, self.items()) + +from operator import itemgetter as _itemgetter +from keyword import iskeyword as _iskeyword +import sys as _sys + +def namedtuple(typename, field_names, verbose=False, rename=False): + """Returns a new subclass of tuple with named fields. 
+ + >>> Point = namedtuple('Point', 'x y') + >>> Point.__doc__ # docstring for the new class + 'Point(x, y)' + >>> p = Point(11, y=22) # instantiate with positional args or keywords + >>> p[0] + p[1] # indexable like a plain tuple + 33 + >>> x, y = p # unpack like a regular tuple + >>> x, y + (11, 22) + >>> p.x + p.y # fields also accessable by name + 33 + >>> d = p._asdict() # convert to a dictionary + >>> d['x'] + 11 + >>> Point(**d) # convert from a dictionary + Point(x=11, y=22) + >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields + Point(x=100, y=22) + + """ + + # Parse and validate the field names. Validation serves two purposes, + # generating informative error messages and preventing template injection attacks. + if isinstance(field_names, str): + field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas + field_names = tuple(map(str, field_names)) + if rename: + names = list(field_names) + seen = set() + for i, name in enumerate(names): + if (not min(c.isalnum() or c=='_' for c in name) or _iskeyword(name) + or not name or name[0].isdigit() or name.startswith('_') + or name in seen): + names[i] = '_%d' % i + seen.add(name) + field_names = tuple(names) + for name in (typename,) + field_names: + if not min(c.isalnum() or c=='_' for c in name): + raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name) + if _iskeyword(name): + raise ValueError('Type names and field names cannot be a keyword: %r' % name) + if name[0].isdigit(): + raise ValueError('Type names and field names cannot start with a number: %r' % name) + seen_names = set() + for name in field_names: + if name.startswith('_') and not rename: + raise ValueError('Field names cannot start with an underscore: %r' % name) + if name in seen_names: + raise ValueError('Encountered duplicate field name: %r' % name) + seen_names.add(name) + + # Create and fill-in the class template + numfields = len(field_names) + argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes + reprtxt = ', '.join('%s=%%r' % name for name in field_names) + + template = '''class %(typename)s(tuple): + '%(typename)s(%(argtxt)s)' \n + __slots__ = () \n + _fields = %(field_names)r \n + def __new__(_cls, %(argtxt)s): + return tuple.__new__(_cls, (%(argtxt)s)) \n + @classmethod + def _make(cls, iterable, new=tuple.__new__, len=len): + 'Make a new %(typename)s object from a sequence or iterable' + result = new(cls, iterable) + if len(result) != %(numfields)d: + raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result)) + return result \n + def __repr__(self): + return '%(typename)s(%(reprtxt)s)' %% self \n + def _asdict(self): + 'Return a new dict which maps field names to their values' + return dict(zip(self._fields, self)) \n + def _replace(_self, **kwds): + 'Return a new %(typename)s object replacing specified fields with new values' + result = _self._make(map(kwds.pop, %(field_names)r, _self)) + if kwds: + raise ValueError('Got unexpected field names: %%r' %% kwds.keys()) + return result \n + def __getnewargs__(self): + return tuple(self) \n\n''' % locals() + for i, name in enumerate(field_names): + template += ' %s = _property(_itemgetter(%d))\n' % (name, i) + + if verbose: + print(template) + + # Execute the template string in a temporary namespace + namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename, + _property=property, _tuple=tuple) + try: + 
exec(template,namespace) + except SyntaxError as e: + raise SyntaxError(e.message + ':\n' + template) + result = namespace[typename] + + # For pickling to work, the __module__ variable needs to be set to the frame + # where the named tuple is created. Bypass this step in enviroments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + try: + result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + + return result + +if __name__ == '__main__': + Point = namedtuple('Point', ['x', 'y']) + p = Point(11, y=22) + print(p[0]+p[1]) + x,y=p + print(x,y) + print(p.x+p.y) + print(p) diff --git a/lib/assets/Lib/_csv.py b/lib/assets/Lib/_csv.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_csv.py @@ -0,0 +1,594 @@ +"""CSV parsing and writing. + +[Copied from PyPy +https://bitbucket-assetroot.s3.amazonaws.com/pypy/pypy/1400171824.19/641/_csv.py?Signature=cc%2Bc8m06cBMbsxt2e15XXXUDACk%3D&Expires=1404136251&AWSAccessKeyId=0EMWEFSGA12Z1HF1TZ82 +and adapted to Python 3 syntax for Brython] + + +This module provides classes that assist in the reading and writing +of Comma Separated Value (CSV) files, and implements the interface +described by PEP 305. Although many CSV files are simple to parse, +the format is not formally defined by a stable specification and +is subtle enough that parsing lines of a CSV file with something +like line.split(\",\") is bound to fail. The module supports three +basic APIs: reading, writing, and registration of dialects. + + +DIALECT REGISTRATION: + +Readers and writers support a dialect argument, which is a convenient +handle on a group of settings. When the dialect argument is a string, +it identifies one of the dialects previously registered with the module. +If it is a class or instance, the attributes of the argument are used as +the settings for the reader or writer: + + class excel: + delimiter = ',' + quotechar = '\"' + escapechar = None + doublequote = True + skipinitialspace = False + lineterminator = '\\r\\n' + quoting = QUOTE_MINIMAL + +SETTINGS: + + * quotechar - specifies a one-character string to use as the + quoting character. It defaults to '\"'. + * delimiter - specifies a one-character string to use as the + field separator. It defaults to ','. + * skipinitialspace - specifies how to interpret whitespace which + immediately follows a delimiter. It defaults to False, which + means that whitespace immediately following a delimiter is part + of the following field. + * lineterminator - specifies the character sequence which should + terminate rows. + * quoting - controls when quotes should be generated by the writer. + It can take on any of the following module constants: + + csv.QUOTE_MINIMAL means only when required, for example, when a + field contains either the quotechar or the delimiter + csv.QUOTE_ALL means that quotes are always placed around fields. + csv.QUOTE_NONNUMERIC means that quotes are always placed around + fields which do not parse as integers or floating point + numbers. + csv.QUOTE_NONE means that quotes are never placed around fields. + * escapechar - specifies a one-character string used to escape + the delimiter when quoting is set to QUOTE_NONE. + * doublequote - controls the handling of quotes inside fields. When + True, two consecutive quotes are interpreted as one during read, + and when writing, each quote character embedded in the data is + written as two quotes. 
+""" + +__version__ = "1.0" + +QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE = range(4) +_dialects = {} +_field_limit = 128 * 1024 # max parsed field size + +class Error(Exception): + pass + +class Dialect(object): + """CSV dialect + + The Dialect type records CSV parsing and generation options.""" + + __slots__ = ["_delimiter", "_doublequote", "_escapechar", + "_lineterminator", "_quotechar", "_quoting", + "_skipinitialspace", "_strict"] + + def __new__(cls, dialect, **kwargs): + + for name in kwargs: + if '_' + name not in Dialect.__slots__: + raise TypeError("unexpected keyword argument '%s'" % + (name,)) + + if dialect is not None: + if isinstance(dialect, str): + dialect = get_dialect(dialect) + + # Can we reuse this instance? + if (isinstance(dialect, Dialect) + and all(value is None for value in kwargs.values())): + return dialect + + self = object.__new__(cls) + + + def set_char(x): + if x is None: + return None + if isinstance(x, str) and len(x) <= 1: + return x + raise TypeError("%r must be a 1-character string" % (name,)) + def set_str(x): + if isinstance(x, str): + return x + raise TypeError("%r must be a string" % (name,)) + def set_quoting(x): + if x in range(4): + return x + raise TypeError("bad 'quoting' value") + + attributes = {"delimiter": (',', set_char), + "doublequote": (True, bool), + "escapechar": (None, set_char), + "lineterminator": ("\r\n", set_str), + "quotechar": ('"', set_char), + "quoting": (QUOTE_MINIMAL, set_quoting), + "skipinitialspace": (False, bool), + "strict": (False, bool), + } + + # Copy attributes + notset = object() + for name in Dialect.__slots__: + name = name[1:] + value = notset + if name in kwargs: + value = kwargs[name] + elif dialect is not None: + value = getattr(dialect, name, notset) + + # mapping by name: (default, converter) + if value is notset: + value = attributes[name][0] + if name == 'quoting' and not self.quotechar: + value = QUOTE_NONE + else: + converter = attributes[name][1] + if converter: + value = converter(value) + + setattr(self, '_' + name, value) + + if not self.delimiter: + raise TypeError("delimiter must be set") + + if self.quoting != QUOTE_NONE and not self.quotechar: + raise TypeError("quotechar must be set if quoting enabled") + + if not self.lineterminator: + raise TypeError("lineterminator must be set") + + return self + + delimiter = property(lambda self: self._delimiter) + doublequote = property(lambda self: self._doublequote) + escapechar = property(lambda self: self._escapechar) + lineterminator = property(lambda self: self._lineterminator) + quotechar = property(lambda self: self._quotechar) + quoting = property(lambda self: self._quoting) + skipinitialspace = property(lambda self: self._skipinitialspace) + strict = property(lambda self: self._strict) + + +def _call_dialect(dialect_inst, kwargs): + return Dialect(dialect_inst, **kwargs) + +def register_dialect(name, dialect=None, **kwargs): + """Create a mapping from a string name to a dialect class. + dialect = csv.register_dialect(name, dialect)""" + if not isinstance(name, str): + raise TypeError("dialect name must be a string or unicode") + + dialect = _call_dialect(dialect, kwargs) + _dialects[name] = dialect + +def unregister_dialect(name): + """Delete the name/dialect mapping associated with a string name.\n + csv.unregister_dialect(name)""" + try: + del _dialects[name] + except KeyError: + raise Error("unknown dialect") + +def get_dialect(name): + """Return the dialect instance associated with name. 
+ dialect = csv.get_dialect(name)""" + try: + return _dialects[name] + except KeyError: + raise Error("unknown dialect") + +def list_dialects(): + """Return a list of all know dialect names + names = csv.list_dialects()""" + return list(_dialects) + +class Reader(object): + """CSV reader + + Reader objects are responsible for reading and parsing tabular data + in CSV format.""" + + + (START_RECORD, START_FIELD, ESCAPED_CHAR, IN_FIELD, + IN_QUOTED_FIELD, ESCAPE_IN_QUOTED_FIELD, QUOTE_IN_QUOTED_FIELD, + EAT_CRNL) = range(8) + + def __init__(self, iterator, dialect=None, **kwargs): + self.dialect = _call_dialect(dialect, kwargs) + + # null characters are not allowed to be in the string so we can use + # it as a fall back + self._delimiter = self.dialect.delimiter if self.dialect.delimiter else '\0' + self._quotechar = self.dialect.quotechar if self.dialect.quotechar else '\0' + self._escapechar = self.dialect.escapechar if self.dialect.escapechar else '\0' + self._doublequote = self.dialect.doublequote + self._quoting = self.dialect.quoting + self._skipinitialspace = self.dialect.skipinitialspace + self._strict = self.dialect.strict + + self.input_iter = iter(iterator) + self.line_num = 0 + + self._parse_reset() + + def _parse_reset(self): + self.field = '' + self.fields = [] + self.state = self.START_RECORD + self.numeric_field = False + + def __iter__(self): + return self + + def __next__(self): + self._parse_reset() + while True: + try: + line = next(self.input_iter) + except StopIteration: + # End of input OR exception + if len(self.field) > 0: + raise Error("newline inside string") + raise + + self.line_num += 1 + + if '\0' in line: + raise Error("line contains NULL byte") + self._parse_process_char(line) + self._parse_eol() + + if self.state == self.START_RECORD: + break + + fields = self.fields + self.fields = [] + return fields + + def _parse_process_char(self, line): + pos = 0 + while pos < len(line): + if self.state == self.IN_FIELD: + # in unquoted field and have already found one character when starting the field + pos2 = pos + while pos2 < len(line): + if line[pos2] == '\n' or line[pos2] == '\r': + # end of line - return [fields] + if pos2 > pos: + self._parse_add_str(line[pos:pos2]) + pos = pos2 + self._parse_save_field() + self.state = self.EAT_CRNL + break + elif line[pos2] == self._escapechar[0]: + # possible escaped character + if pos2 > pos: + self._parse_add_str(line[pos:pos2]) + pos = pos2 + self.state = self.ESCAPED_CHAR + break + elif line[pos2] == self._delimiter[0]: + # save field - wait for new field + if pos2 > pos: + self._parse_add_str(line[pos:pos2]) + pos = pos2 + self._parse_save_field() + self.state = self.START_FIELD + break + # normal character - save in field + pos2 += 1 + else: + if pos2 > pos: + self._parse_add_str(line[pos:pos2]) + pos = pos2 + continue + + elif self.state == self.START_RECORD: + if line[pos] == '\n' or line[pos] == '\r': + self.state = self.EAT_CRNL + else: + self.state = self.START_FIELD + # restart process + continue + + elif self.state == self.START_FIELD: + if line[pos] == '\n' or line[pos] == '\r': + # save empty field - return [fields] + self._parse_save_field() + self.state = self.EAT_CRNL + elif (line[pos] == self._quotechar[0] + and self._quoting != QUOTE_NONE): + # start quoted field + self.state = self.IN_QUOTED_FIELD + elif line[pos] == self._escapechar[0]: + # possible escaped character + self.state = self.ESCAPED_CHAR + elif self._skipinitialspace and line[pos] == ' ': + # ignore space at start of field + pass + elif 
line[pos] == self._delimiter[0]: + # save empty field + self._parse_save_field() + else: + # begin new unquoted field + if self._quoting == QUOTE_NONNUMERIC: + self.numeric_field = True + self.state = self.IN_FIELD + continue + + elif self.state == self.ESCAPED_CHAR: + self._parse_add_char(line[pos]) + self.state = self.IN_FIELD + + elif self.state == self.IN_QUOTED_FIELD: + if line[pos] == self._escapechar: + # possible escape character + self.state = self.ESCAPE_IN_QUOTED_FIELD + elif (line[pos] == self._quotechar + and self._quoting != QUOTE_NONE): + if self._doublequote: + # doublequote; " represented by "" + self.state = self.QUOTE_IN_QUOTED_FIELD + else: + #end of quote part of field + self.state = self.IN_FIELD + else: + # normal character - save in field + self._parse_add_char(line[pos]) + + elif self.state == self.ESCAPE_IN_QUOTED_FIELD: + self._parse_add_char(line[pos]) + self.state = self.IN_QUOTED_FIELD + + elif self.state == self.QUOTE_IN_QUOTED_FIELD: + # doublequote - seen a quote in a quoted field + if (line[pos] == self._quotechar + and self._quoting != QUOTE_NONE): + # save "" as " + self._parse_add_char(line[pos]) + self.state = self.IN_QUOTED_FIELD + elif line[pos] == self._delimiter[0]: + # save field - wait for new field + self._parse_save_field() + self.state = self.START_FIELD + elif line[pos] == '\r' or line[pos] == '\n': + # end of line - return [fields] + self._parse_save_field() + self.state = self.EAT_CRNL + elif not self._strict: + self._parse_add_char(line[pos]) + self.state = self.IN_FIELD + else: + raise Error("'%c' expected after '%c'" % + (self._delimiter, self._quotechar)) + + elif self.state == self.EAT_CRNL: + if line[pos] == '\r' or line[pos] == '\n': + pass + else: + raise Error("new-line character seen in unquoted field - " + "do you need to open the file " + "in universal-newline mode?") + + else: + raise RuntimeError("unknown state: %r" % (self.state,)) + + pos += 1 + + def _parse_eol(self): + if self.state == self.EAT_CRNL: + self.state = self.START_RECORD + elif self.state == self.START_RECORD: + # empty line - return [] + pass + elif self.state == self.IN_FIELD: + # in unquoted field + # end of line - return [fields] + self._parse_save_field() + self.state = self.START_RECORD + elif self.state == self.START_FIELD: + # save empty field - return [fields] + self._parse_save_field() + self.state = self.START_RECORD + elif self.state == self.ESCAPED_CHAR: + self._parse_add_char('\n') + self.state = self.IN_FIELD + elif self.state == self.IN_QUOTED_FIELD: + pass + elif self.state == self.ESCAPE_IN_QUOTED_FIELD: + self._parse_add_char('\n') + self.state = self.IN_QUOTED_FIELD + elif self.state == self.QUOTE_IN_QUOTED_FIELD: + # end of line - return [fields] + self._parse_save_field() + self.state = self.START_RECORD + else: + raise RuntimeError("unknown state: %r" % (self.state,)) + + def _parse_save_field(self): + field, self.field = self.field, '' + if self.numeric_field: + self.numeric_field = False + field = float(field) + self.fields.append(field) + + def _parse_add_char(self, c): + if len(self.field) + 1 > _field_limit: + raise Error("field larget than field limit (%d)" % (_field_limit)) + self.field += c + + def _parse_add_str(self, s): + if len(self.field) + len(s) > _field_limit: + raise Error("field larget than field limit (%d)" % (_field_limit)) + self.field += s + + +class Writer(object): + """CSV writer + + Writer objects are responsible for generating tabular data + in CSV format from sequence input.""" + + def __init__(self, file, 
dialect=None, **kwargs): + if not (hasattr(file, 'write') and callable(file.write)): + raise TypeError("argument 1 must have a 'write' method") + self.writeline = file.write + self.dialect = _call_dialect(dialect, kwargs) + + def _join_reset(self): + self.rec = [] + self.num_fields = 0 + + def _join_append(self, field, quoted, quote_empty): + dialect = self.dialect + # If this is not the first field we need a field separator + if self.num_fields > 0: + self.rec.append(dialect.delimiter) + + if dialect.quoting == QUOTE_NONE: + need_escape = tuple(dialect.lineterminator) + ( + dialect.escapechar, # escapechar always first + dialect.delimiter, dialect.quotechar) + + else: + for c in tuple(dialect.lineterminator) + ( + dialect.delimiter, dialect.escapechar): + if c and c in field: + quoted = True + + need_escape = () + if dialect.quotechar in field: + if dialect.doublequote: + field = field.replace(dialect.quotechar, + dialect.quotechar * 2) + quoted = True + else: + need_escape = (dialect.quotechar,) + + + for c in need_escape: + if c and c in field: + if not dialect.escapechar: + raise Error("need to escape, but no escapechar set") + field = field.replace(c, dialect.escapechar + c) + + # If field is empty check if it needs to be quoted + if field == '' and quote_empty: + if dialect.quoting == QUOTE_NONE: + raise Error("single empty field record must be quoted") + quoted = 1 + + if quoted: + field = dialect.quotechar + field + dialect.quotechar + + self.rec.append(field) + self.num_fields += 1 + + + + def writerow(self, row): + dialect = self.dialect + try: + rowlen = len(row) + except TypeError: + raise Error("sequence expected") + + # join all fields in internal buffer + self._join_reset() + + for field in row: + quoted = False + if dialect.quoting == QUOTE_NONNUMERIC: + try: + float(field) + except: + quoted = True + # This changed since 2.5: + # quoted = not isinstance(field, (int, long, float)) + elif dialect.quoting == QUOTE_ALL: + quoted = True + + if field is None: + self._join_append("", quoted, rowlen == 1) + else: + self._join_append(str(field), quoted, rowlen == 1) + + # add line terminator + self.rec.append(dialect.lineterminator) + + self.writeline(''.join(self.rec)) + + def writerows(self, rows): + for row in rows: + self.writerow(row) + +def reader(*args, **kwargs): + """ + csv_reader = reader(iterable [, dialect='excel'] + [optional keyword args]) + for row in csv_reader: + process(row) + + The "iterable" argument can be any object that returns a line + of input for each iteration, such as a file object or a list. The + optional \"dialect\" parameter is discussed below. The function + also accepts optional keyword arguments which override settings + provided by the dialect. + + The returned object is an iterator. Each iteration returns a row + of the CSV file (which can span multiple input lines)""" + + return Reader(*args, **kwargs) + +def writer(*args, **kwargs): + """ + csv_writer = csv.writer(fileobj [, dialect='excel'] + [optional keyword args]) + for row in sequence: + csv_writer.writerow(row) + + [or] + + csv_writer = csv.writer(fileobj [, dialect='excel'] + [optional keyword args]) + csv_writer.writerows(rows) + + The \"fileobj\" argument can be any object that supports the file API.""" + return Writer(*args, **kwargs) + + +undefined = object() +def field_size_limit(limit=undefined): + """Sets an upper limit on parsed fields. + csv.field_size_limit([limit]) + + Returns old limit. 
If limit is not given, no new limit is set and + the old limit is returned""" + + global _field_limit + old_limit = _field_limit + + if limit is not undefined: + if not isinstance(limit, (int, long)): + raise TypeError("int expected, got %s" % + (limit.__class__.__name__,)) + _field_limit = limit + + return old_limit diff --git a/lib/assets/Lib/_dummy_thread.py b/lib/assets/Lib/_dummy_thread.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_dummy_thread.py @@ -0,0 +1,151 @@ +"""Drop-in replacement for the thread module. + +Meant to be used as a brain-dead substitute so that threaded code does +not need to be rewritten for when the thread module is not present. + +Suggested usage is:: + + try: + import _thread + except ImportError: + import _dummy_thread as _thread + +""" +# Exports only things specified by thread documentation; +# skipping obsolete synonyms allocate(), start_new(), exit_thread(). +__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock', + 'interrupt_main', 'LockType'] + +# A dummy value +TIMEOUT_MAX = 2**31 + +# NOTE: this module can be imported early in the extension building process, +# and so top level imports of other modules should be avoided. Instead, all +# imports are done when needed on a function-by-function basis. Since threads +# are disabled, the import lock should not be an issue anyway (??). + +error = RuntimeError + +def start_new_thread(function, args, kwargs={}): + """Dummy implementation of _thread.start_new_thread(). + + Compatibility is maintained by making sure that ``args`` is a + tuple and ``kwargs`` is a dictionary. If an exception is raised + and it is SystemExit (which can be done by _thread.exit()) it is + caught and nothing is done; all other exceptions are printed out + by using traceback.print_exc(). + + If the executed function calls interrupt_main the KeyboardInterrupt will be + raised when the function returns. + + """ + if type(args) != type(tuple()): + raise TypeError("2nd arg must be a tuple") + if type(kwargs) != type(dict()): + raise TypeError("3rd arg must be a dict") + global _main + _main = False + try: + function(*args, **kwargs) + except SystemExit: + pass + except: + import traceback + traceback.print_exc() + _main = True + global _interrupt + if _interrupt: + _interrupt = False + raise KeyboardInterrupt + +def exit(): + """Dummy implementation of _thread.exit().""" + raise SystemExit + +def get_ident(): + """Dummy implementation of _thread.get_ident(). + + Since this module should only be used when _threadmodule is not + available, it is safe to assume that the current process is the + only thread. Thus a constant can be safely returned. + """ + return -1 + +def allocate_lock(): + """Dummy implementation of _thread.allocate_lock().""" + return LockType() + +def stack_size(size=None): + """Dummy implementation of _thread.stack_size().""" + if size is not None: + raise error("setting thread stack size not supported") + return 0 + +class LockType(object): + """Class implementing dummy implementation of _thread.LockType. + + Compatibility is maintained by maintaining self.locked_status + which is a boolean that stores the state of the lock. Pickling of + the lock, though, should not be done since if the _thread module is + then used with an unpickled ``lock()`` from here problems could + occur from this class not having atomic methods. + + """ + + def __init__(self): + self.locked_status = False + + def acquire(self, waitflag=None, timeout=-1): + """Dummy implementation of acquire(). 
+ + For blocking calls, self.locked_status is automatically set to + True and returned appropriately based on value of + ``waitflag``. If it is non-blocking, then the value is + actually checked and not set if it is already acquired. This + is all done so that threading.Condition's assert statements + aren't triggered and throw a little fit. + + """ + if waitflag is None or waitflag: + self.locked_status = True + return True + else: + if not self.locked_status: + self.locked_status = True + return True + else: + if timeout > 0: + import time + time.sleep(timeout) + return False + + __enter__ = acquire + + def __exit__(self, typ, val, tb): + self.release() + + def release(self): + """Release the dummy lock.""" + # XXX Perhaps shouldn't actually bother to test? Could lead + # to problems for complex, threaded code. + if not self.locked_status: + raise error + self.locked_status = False + return True + + def locked(self): + return self.locked_status + +# Used to signal that interrupt_main was called in a "thread" +_interrupt = False +# True when not executing in a "thread" +_main = True + +def interrupt_main(): + """Set _interrupt flag to True to have start_new_thread raise + KeyboardInterrupt upon exiting.""" + if _main: + raise KeyboardInterrupt + else: + global _interrupt + _interrupt = True diff --git a/lib/assets/Lib/_functools.py b/lib/assets/Lib/_functools.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_functools.py @@ -0,0 +1,21 @@ +def partial(func, *args, **keywords): + def newfunc(*fargs, **fkeywords): + newkeywords = keywords.copy() + newkeywords.update(fkeywords) + return func(*(args + fargs), **newkeywords) + newfunc.func = func + newfunc.args = args + newfunc.keywords = keywords + return newfunc + +def reduce(func,iterable,initializer=None): + args = iter(iterable) + if initializer is not None: + res = initializer + else: + res = next(args) + while True: + try: + res = func(res,next(args)) + except StopIteration: + return res diff --git a/lib/assets/Lib/_imp.py b/lib/assets/Lib/_imp.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_imp.py @@ -0,0 +1,54 @@ +"""(Extremely) low-level import machinery bits as used by importlib and imp.""" + + +class __loader__(object):pass + +def _fix_co_filename(*args,**kw): + raise NotImplementedError("%s:not implemented" % ('_imp.py:_fix_co_filename')) + +def acquire_lock(*args,**kw): + """acquire_lock() -> None Acquires the interpreter's import lock for the current thread. + This lock should be used by import hooks to ensure thread-safety + when importing modules. 
+ On platforms without threads, this function does nothing.""" + pass #assume we are a platform without threads + #raise NotImplementedError("%s:not implemented" % ('_imp.py:acquire_lock')) + +def extension_suffixes(*args,**kw): + """extension_suffixes() -> list of strings Returns the list of file suffixes used to identify extension modules.""" + return ['.pyd'] + +def get_frozen_object(*args,**kw): + raise NotImplementedError("%s:not implemented" % ('_imp.py:get_frozen_object')) + +def init_builtin(module,*args,**kw): + return __import__(module) + +def init_frozen(*args,**kw): + raise NotImplementedError("%s:not implemented" % ('_imp.py:init_frozen')) + +def is_builtin(*args,**kw): + raise NotImplementedError("%s:not implemented" % ('_imp.py:is_builtin')) + +def is_frozen(*args,**kw): + raise NotImplementedError("%s:not implemented" % ('_imp.py:is_frozen')) + +def is_frozen_package(*args,**kw): + raise NotImplementedError("%s:not implemented" % ('_imp.py:is_frozen_package')) + +def load_dynamic(*args,**kw): + raise NotImplementedError("%s:not implemented" % ('_imp.py:load_dynamic')) + +def lock_held(*args,**kw): + """lock_held() -> boolean Return True if the import lock is currently held, else False. + On platforms without threads, return False.""" + + return False + #raise NotImplementedError("%s:not implemented" % ('_imp.py:lock_held')) + +def release_lock(*args,**kw): + """release_lock() -> None Release the interpreter's import lock. + On platforms without threads, this function does nothing.""" + + pass #assume no threads + #raise NotImplementedError("%s:not implemented" % ('_imp.py:release_lock')) diff --git a/lib/assets/Lib/_io.py b/lib/assets/Lib/_io.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_io.py @@ -0,0 +1,2088 @@ +""" +Python implementation of the io module. +""" + +import os +import abc +import codecs +import errno +# Import _thread instead of threading to reduce startup cost +try: + from _thread import allocate_lock as Lock +except ImportError: + from _dummy_thread import allocate_lock as Lock + +import io +#brython fix me +#from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END) +SEEK_SET=0 +SEEK_CUR=1 +SEEK_END=2 + +valid_seek_flags = {0, 1, 2} # Hardwired values +if hasattr(os, 'SEEK_HOLE') : + valid_seek_flags.add(os.SEEK_HOLE) + valid_seek_flags.add(os.SEEK_DATA) + +# open() uses st_blksize whenever we can +DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes + +# NOTE: Base classes defined here are registered with the "official" ABCs +# defined in io.py. We don't use real inheritance though, because we don't +# want to inherit the C implementations. + +# Rebind for compatibility +BlockingIOError = BlockingIOError + + +def __open(file, mode="r", buffering=-1, encoding=None, errors=None, + newline=None, closefd=True, opener=None): + + r"""Open file and return a stream. Raise IOError upon failure. + + file is either a text or byte string giving the name (and the path + if the file isn't in the current working directory) of the file to + be opened or an integer file descriptor of the file to be + wrapped. (If a file descriptor is given, it is closed when the + returned I/O object is closed, unless closefd is set to False.) + + mode is an optional string that specifies the mode in which the file is + opened. It defaults to 'r' which means open for reading in text mode. 
Other + common values are 'w' for writing (truncating the file if it already + exists), 'x' for exclusive creation of a new file, and 'a' for appending + (which on some Unix systems, means that all writes append to the end of the + file regardless of the current seek position). In text mode, if encoding is + not specified the encoding used is platform dependent. (For reading and + writing raw bytes use binary mode and leave encoding unspecified.) The + available modes are: + + ========= =============================================================== + Character Meaning + --------- --------------------------------------------------------------- + 'r' open for reading (default) + 'w' open for writing, truncating the file first + 'x' create a new file and open it for writing + 'a' open for writing, appending to the end of the file if it exists + 'b' binary mode + 't' text mode (default) + '+' open a disk file for updating (reading and writing) + 'U' universal newline mode (for backwards compatibility; unneeded + for new code) + ========= =============================================================== + + The default mode is 'rt' (open for reading text). For binary random + access, the mode 'w+b' opens and truncates the file to 0 bytes, while + 'r+b' opens the file without truncation. The 'x' mode implies 'w' and + raises an `FileExistsError` if the file already exists. + + Python distinguishes between files opened in binary and text modes, + even when the underlying operating system doesn't. Files opened in + binary mode (appending 'b' to the mode argument) return contents as + bytes objects without any decoding. In text mode (the default, or when + 't' is appended to the mode argument), the contents of the file are + returned as strings, the bytes having been first decoded using a + platform-dependent encoding or using the specified encoding if given. + + buffering is an optional integer used to set the buffering policy. + Pass 0 to switch buffering off (only allowed in binary mode), 1 to select + line buffering (only usable in text mode), and an integer > 1 to indicate + the size of a fixed-size chunk buffer. When no buffering argument is + given, the default buffering policy works as follows: + + * Binary files are buffered in fixed-size chunks; the size of the buffer + is chosen using a heuristic trying to determine the underlying device's + "block size" and falling back on `io.DEFAULT_BUFFER_SIZE`. + On many systems, the buffer will typically be 4096 or 8192 bytes long. + + * "Interactive" text files (files for which isatty() returns True) + use line buffering. Other text files use the policy described above + for binary files. + + encoding is the str name of the encoding used to decode or encode the + file. This should only be used in text mode. The default encoding is + platform dependent, but any encoding supported by Python can be + passed. See the codecs module for the list of supported encodings. + + errors is an optional string that specifies how encoding errors are to + be handled---this argument should not be used in binary mode. Pass + 'strict' to raise a ValueError exception if there is an encoding error + (the default of None has the same effect), or pass 'ignore' to ignore + errors. (Note that ignoring encoding errors can lead to data loss.) + See the documentation for codecs.register for a list of the permitted + encoding error strings. + + newline is a string controlling how universal newlines works (it only + applies to text mode). 
It can be None, '', '\n', '\r', and '\r\n'. It works + as follows: + + * On input, if newline is None, universal newlines mode is + enabled. Lines in the input can end in '\n', '\r', or '\r\n', and + these are translated into '\n' before being returned to the + caller. If it is '', universal newline mode is enabled, but line + endings are returned to the caller untranslated. If it has any of + the other legal values, input lines are only terminated by the given + string, and the line ending is returned to the caller untranslated. + + * On output, if newline is None, any '\n' characters written are + translated to the system default line separator, os.linesep. If + newline is '', no translation takes place. If newline is any of the + other legal values, any '\n' characters written are translated to + the given string. + + closedfd is a bool. If closefd is False, the underlying file descriptor will + be kept open when the file is closed. This does not work when a file name is + given and must be True in that case. + + A custom opener can be used by passing a callable as *opener*. The + underlying file descriptor for the file object is then obtained by calling + *opener* with (*file*, *flags*). *opener* must return an open file + descriptor (passing os.open as *opener* results in functionality similar to + passing None). + + open() returns a file object whose type depends on the mode, and + through which the standard file operations such as reading and writing + are performed. When open() is used to open a file in a text mode ('w', + 'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open + a file in a binary mode, the returned class varies: in read binary + mode, it returns a BufferedReader; in write binary and append binary + modes, it returns a BufferedWriter, and in read/write mode, it returns + a BufferedRandom. + + It is also possible to use a string or bytearray as a file for both + reading and writing. For strings StringIO can be used like a file + opened in a text mode, and for bytes a BytesIO can be used like a file + opened in a binary mode. 
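+
+    A short illustrative sketch (the file name and contents are hypothetical):
+
+        with open('notes.txt', 'w', encoding='utf-8') as f:
+            f.write('spam and eggs\n')
+        with open('notes.txt', encoding='utf-8') as f:
+            print(f.read())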
+ """ + if not isinstance(file, (str, bytes, int)): + raise TypeError("invalid file: %r" % file) + if not isinstance(mode, str): + raise TypeError("invalid mode: %r" % mode) + if not isinstance(buffering, int): + raise TypeError("invalid buffering: %r" % buffering) + if encoding is not None and not isinstance(encoding, str): + raise TypeError("invalid encoding: %r" % encoding) + if errors is not None and not isinstance(errors, str): + raise TypeError("invalid errors: %r" % errors) + modes = set(mode) + if modes - set("axrwb+tU") or len(mode) > len(modes): + raise ValueError("invalid mode: %r" % mode) + creating = "x" in modes + reading = "r" in modes + writing = "w" in modes + appending = "a" in modes + updating = "+" in modes + text = "t" in modes + binary = "b" in modes + if "U" in modes: + if creating or writing or appending: + raise ValueError("can't use U and writing mode at once") + reading = True + if text and binary: + raise ValueError("can't have text and binary mode at once") + if creating + reading + writing + appending > 1: + raise ValueError("can't have read/write/append mode at once") + if not (creating or reading or writing or appending): + raise ValueError("must have exactly one of read/write/append mode") + if binary and encoding is not None: + raise ValueError("binary mode doesn't take an encoding argument") + if binary and errors is not None: + raise ValueError("binary mode doesn't take an errors argument") + if binary and newline is not None: + raise ValueError("binary mode doesn't take a newline argument") + raw = FileIO(file, + (creating and "x" or "") + + (reading and "r" or "") + + (writing and "w" or "") + + (appending and "a" or "") + + (updating and "+" or ""), + closefd, opener=opener) + line_buffering = False + if buffering == 1 or buffering < 0 and raw.isatty(): + buffering = -1 + line_buffering = True + if buffering < 0: + buffering = DEFAULT_BUFFER_SIZE + try: + bs = os.fstat(raw.fileno()).st_blksize + except (os.error, AttributeError): + pass + else: + if bs > 1: + buffering = bs + if buffering < 0: + raise ValueError("invalid buffering size") + if buffering == 0: + if binary: + return raw + raise ValueError("can't have unbuffered text I/O") + if updating: + buffer = BufferedRandom(raw, buffering) + elif creating or writing or appending: + buffer = BufferedWriter(raw, buffering) + elif reading: + buffer = BufferedReader(raw, buffering) + else: + raise ValueError("unknown mode: %r" % mode) + if binary: + return buffer + text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering) + text.mode = mode + return text + + +class DocDescriptor: + """Helper for builtins.open.__doc__ + """ + def __get__(self, obj, typ): + return ( + "open(file, mode='r', buffering=-1, encoding=None, " + "errors=None, newline=None, closefd=True)\n\n" + + open.__doc__) + +class OpenWrapper: + """Wrapper for builtins.open + + Trick so that open won't become a bound method when stored + as a class variable (as dbm.dumb does). + + See initstdio() in Python/pythonrun.c. + """ + __doc__ = DocDescriptor() + + def __new__(cls, *args, **kwargs): + return open(*args, **kwargs) + + +# In normal operation, both `UnsupportedOperation`s should be bound to the +# same object. +try: + UnsupportedOperation = io.UnsupportedOperation +except AttributeError: + class UnsupportedOperation(ValueError, IOError): + pass + + +class IOBase(metaclass=abc.ABCMeta): + + """The abstract base class for all I/O classes, acting on streams of + bytes. There is no public constructor. 
+ + This class provides dummy implementations for many methods that + derived classes can override selectively; the default implementations + represent a file that cannot be read, written or seeked. + + Even though IOBase does not declare read, readinto, or write because + their signatures will vary, implementations and clients should + consider those methods part of the interface. Also, implementations + may raise UnsupportedOperation when operations they do not support are + called. + + The basic type used for binary data read from or written to a file is + bytes. bytearrays are accepted too, and in some cases (such as + readinto) needed. Text I/O classes work with str data. + + Note that calling any method (even inquiries) on a closed stream is + undefined. Implementations may raise IOError in this case. + + IOBase (and its subclasses) support the iterator protocol, meaning + that an IOBase object can be iterated over yielding the lines in a + stream. + + IOBase also supports the :keyword:`with` statement. In this example, + fp is closed after the suite of the with statement is complete: + + with open('spam.txt', 'r') as fp: + fp.write('Spam and eggs!') + """ + + ### Internal ### + + def _unsupported(self, name): + """Internal: raise an IOError exception for unsupported operations.""" + raise UnsupportedOperation("%s.%s() not supported" % + (self.__class__.__name__, name)) + + ### Positioning ### + + def seek(self, pos, whence=0): + """Change stream position. + + Change the stream position to byte offset pos. Argument pos is + interpreted relative to the position indicated by whence. Values + for whence are ints: + + * 0 -- start of stream (the default); offset should be zero or positive + * 1 -- current stream position; offset may be negative + * 2 -- end of stream; offset is usually negative + Some operating systems / file systems could provide additional values. + + Return an int indicating the new absolute position. + """ + self._unsupported("seek") + + def tell(self): + """Return an int indicating the current stream position.""" + return self.seek(0, 1) + + def truncate(self, pos=None): + """Truncate file to size bytes. + + Size defaults to the current IO position as reported by tell(). Return + the new size. + """ + self._unsupported("truncate") + + ### Flush and close ### + + def flush(self): + """Flush write buffers, if applicable. + + This is not implemented for read-only and non-blocking streams. + """ + self._checkClosed() + # XXX Should this return the number of bytes written??? + + __closed = False + + def close(self): + """Flush and close the IO object. + + This method has no effect if the file is already closed. + """ + if not self.__closed: + try: + self.flush() + finally: + self.__closed = True + + def __del__(self): + """Destructor. Calls close().""" + # The try/except block is in case this is called at program + # exit time, when it's possible that globals have already been + # deleted, and then the close() call might fail. Since + # there's nothing we can do about such failures and they annoy + # the end users, we suppress the traceback. + try: + self.close() + except: + pass + + ### Inquiries ### + + def seekable(self): + """Return a bool indicating whether object supports random access. + + If False, seek(), tell() and truncate() will raise UnsupportedOperation. + This method may need to do a test seek(). 
+ """ + return False + + def _checkSeekable(self, msg=None): + """Internal: raise UnsupportedOperation if file is not seekable + """ + if not self.seekable(): + raise UnsupportedOperation("File or stream is not seekable." + if msg is None else msg) + + def readable(self): + """Return a bool indicating whether object was opened for reading. + + If False, read() will raise UnsupportedOperation. + """ + return False + + def _checkReadable(self, msg=None): + """Internal: raise UnsupportedOperation if file is not readable + """ + if not self.readable(): + raise UnsupportedOperation("File or stream is not readable." + if msg is None else msg) + + def writable(self): + """Return a bool indicating whether object was opened for writing. + + If False, write() and truncate() will raise UnsupportedOperation. + """ + return False + + def _checkWritable(self, msg=None): + """Internal: raise UnsupportedOperation if file is not writable + """ + if not self.writable(): + raise UnsupportedOperation("File or stream is not writable." + if msg is None else msg) + + @property + def closed(self): + """closed: bool. True iff the file has been closed. + + For backwards compatibility, this is a property, not a predicate. + """ + return self.__closed + + def _checkClosed(self, msg=None): + """Internal: raise an ValueError if file is closed + """ + if self.closed: + raise ValueError("I/O operation on closed file." + if msg is None else msg) + + ### Context manager ### + + def __enter__(self): # That's a forward reference + """Context management protocol. Returns self (an instance of IOBase).""" + self._checkClosed() + return self + + def __exit__(self, *args): + """Context management protocol. Calls close()""" + self.close() + + ### Lower-level APIs ### + + # XXX Should these be present even if unimplemented? + + def fileno(self): + """Returns underlying file descriptor (an int) if one exists. + + An IOError is raised if the IO object does not use a file descriptor. + """ + self._unsupported("fileno") + + def isatty(self): + """Return a bool indicating whether this is an 'interactive' stream. + + Return False if it can't be determined. + """ + self._checkClosed() + return False + + ### Readline[s] and writelines ### + + def readline(self, limit=-1): + r"""Read and return a line of bytes from the stream. + + If limit is specified, at most limit bytes will be read. + Limit should be an int. + + The line terminator is always b'\n' for binary files; for text + files, the newlines argument to open can be used to select the line + terminator(s) recognized. + """ + # For backwards compatibility, a (slowish) readline(). + if hasattr(self, "peek"): + def nreadahead(): + readahead = self.peek(1) + if not readahead: + return 1 + n = (readahead.find(b"\n") + 1) or len(readahead) + if limit >= 0: + n = min(n, limit) + return n + else: + def nreadahead(): + return 1 + if limit is None: + limit = -1 + elif not isinstance(limit, int): + raise TypeError("limit must be an integer") + res = bytearray() + while limit < 0 or len(res) < limit: + b = self.read(nreadahead()) + if not b: + break + res += b + if res.endswith(b"\n"): + break + return bytes(res) + + def __iter__(self): + self._checkClosed() + return self + + def __next__(self): + line = self.readline() + if not line: + raise StopIteration + return line + + def readlines(self, hint=None): + """Return a list of lines from the stream. 
+ + hint can be specified to control the number of lines read: no more + lines will be read if the total size (in bytes/characters) of all + lines so far exceeds hint. + """ + if hint is None or hint <= 0: + return list(self) + n = 0 + lines = [] + for line in self: + lines.append(line) + n += len(line) + if n >= hint: + break + return lines + + def writelines(self, lines): + self._checkClosed() + for line in lines: + self.write(line) + +#fix me brython +#io.IOBase.register(IOBase) + + +class RawIOBase(IOBase): + + """Base class for raw binary I/O.""" + + # The read() method is implemented by calling readinto(); derived + # classes that want to support read() only need to implement + # readinto() as a primitive operation. In general, readinto() can be + # more efficient than read(). + + # (It would be tempting to also provide an implementation of + # readinto() in terms of read(), in case the latter is a more suitable + # primitive operation, but that would lead to nasty recursion in case + # a subclass doesn't implement either.) + + def read(self, n=-1): + """Read and return up to n bytes, where n is an int. + + Returns an empty bytes object on EOF, or None if the object is + set not to block and has no data to read. + """ + if n is None: + n = -1 + if n < 0: + return self.readall() + b = bytearray(n.__index__()) + n = self.readinto(b) + if n is None: + return None + del b[n:] + return bytes(b) + + def readall(self): + """Read until EOF, using multiple read() call.""" + res = bytearray() + while True: + data = self.read(DEFAULT_BUFFER_SIZE) + if not data: + break + res += data + if res: + return bytes(res) + else: + # b'' or None + return data + + def readinto(self, b): + """Read up to len(b) bytes into bytearray b. + + Returns an int representing the number of bytes read (0 for EOF), or + None if the object is set not to block and has no data to read. + """ + self._unsupported("readinto") + + def write(self, b): + """Write the given buffer to the IO stream. + + Returns the number of bytes written, which may be less than len(b). + """ + self._unsupported("write") + +#io.RawIOBase.register(RawIOBase) +#fix me brython +#from _io import FileIO +#RawIOBase.register(FileIO) + + +class BufferedIOBase(IOBase): + + """Base class for buffered IO objects. + + The main difference with RawIOBase is that the read() method + supports omitting the size argument, and does not have a default + implementation that defers to readinto(). + + In addition, read(), readinto() and write() may raise + BlockingIOError if the underlying raw stream is in non-blocking + mode and not ready; unlike their raw counterparts, they will never + return None. + + A typical implementation should not inherit from a RawIOBase + implementation, but wrap one. + """ + + def read(self, n=None): + """Read and return up to n bytes, where n is an int. + + If the argument is omitted, None, or negative, reads and + returns all data until EOF. + + If the argument is positive, and the underlying raw stream is + not 'interactive', multiple raw reads may be issued to satisfy + the byte count (unless EOF is reached first). But for + interactive raw streams (XXX and for pipes?), at most one raw + read will be issued, and a short result does not imply that + EOF is imminent. + + Returns an empty bytes array on EOF. + + Raises BlockingIOError if the underlying raw stream has no + data at the moment. + """ + self._unsupported("read") + + def read1(self, n=None): + """Read up to n bytes with at most one read() system call, + where n is an int. 
+ """ + self._unsupported("read1") + + def readinto(self, b): + """Read up to len(b) bytes into bytearray b. + + Like read(), this may issue multiple reads to the underlying raw + stream, unless the latter is 'interactive'. + + Returns an int representing the number of bytes read (0 for EOF). + + Raises BlockingIOError if the underlying raw stream has no + data at the moment. + """ + # XXX This ought to work with anything that supports the buffer API + data = self.read(len(b)) + n = len(data) + try: + b[:n] = data + except TypeError as err: + import array + if not isinstance(b, array.array): + raise err + b[:n] = array.array('b', data) + return n + + def write(self, b): + """Write the given bytes buffer to the IO stream. + + Return the number of bytes written, which is never less than + len(b). + + Raises BlockingIOError if the buffer is full and the + underlying raw stream cannot accept more data at the moment. + """ + self._unsupported("write") + + def detach(self): + """ + Separate the underlying raw stream from the buffer and return it. + + After the raw stream has been detached, the buffer is in an unusable + state. + """ + self._unsupported("detach") + +#fix me brython +#io.BufferedIOBase.register(BufferedIOBase) + + +class _BufferedIOMixin(BufferedIOBase): + + """A mixin implementation of BufferedIOBase with an underlying raw stream. + + This passes most requests on to the underlying raw stream. It + does *not* provide implementations of read(), readinto() or + write(). + """ + + def __init__(self, raw): + self._raw = raw + + ### Positioning ### + + def seek(self, pos, whence=0): + new_position = self.raw.seek(pos, whence) + if new_position < 0: + raise IOError("seek() returned an invalid position") + return new_position + + def tell(self): + pos = self.raw.tell() + if pos < 0: + raise IOError("tell() returned an invalid position") + return pos + + def truncate(self, pos=None): + # Flush the stream. We're mixing buffered I/O with lower-level I/O, + # and a flush may be necessary to synch both views of the current + # file state. + self.flush() + + if pos is None: + pos = self.tell() + # XXX: Should seek() be used, instead of passing the position + # XXX directly to truncate? 
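+        # Hand the (now absolute) position to the raw stream's truncate();
+        # the buffers were flushed above, so both views of the file agree.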
+ return self.raw.truncate(pos) + + ### Flush and close ### + + def flush(self): + if self.closed: + raise ValueError("flush of closed file") + self.raw.flush() + + def close(self): + if self.raw is not None and not self.closed: + try: + # may raise BlockingIOError or BrokenPipeError etc + self.flush() + finally: + self.raw.close() + + def detach(self): + if self.raw is None: + raise ValueError("raw stream already detached") + self.flush() + raw = self._raw + self._raw = None + return raw + + ### Inquiries ### + + def seekable(self): + return self.raw.seekable() + + def readable(self): + return self.raw.readable() + + def writable(self): + return self.raw.writable() + + @property + def raw(self): + return self._raw + + @property + def closed(self): + return self.raw.closed + + @property + def name(self): + return self.raw.name + + @property + def mode(self): + return self.raw.mode + + def __getstate__(self): + raise TypeError("can not serialize a '{0}' object" + .format(self.__class__.__name__)) + + def __repr__(self): + clsname = self.__class__.__name__ + try: + name = self.name + except AttributeError: + return "<_io.{0}>".format(clsname) + else: + return "<_io.{0} name={1!r}>".format(clsname, name) + + ### Lower-level APIs ### + + def fileno(self): + return self.raw.fileno() + + def isatty(self): + return self.raw.isatty() + + +class BytesIO(BufferedIOBase): + + """Buffered I/O implementation using an in-memory bytes buffer.""" + + def __init__(self, initial_bytes=None): + buf = bytearray() + if initial_bytes is not None: + buf += initial_bytes + self._buffer = buf + self._pos = 0 + + def __getstate__(self): + if self.closed: + raise ValueError("__getstate__ on closed file") + return self.__dict__.copy() + + def getvalue(self): + """Return the bytes value (contents) of the buffer + """ + if self.closed: + raise ValueError("getvalue on closed file") + return bytes(self._buffer) + + def getbuffer(self): + """Return a readable and writable view of the buffer. + """ + return memoryview(self._buffer) + + def read(self, n=None): + if self.closed: + raise ValueError("read from closed file") + if n is None: + n = -1 + if n < 0: + n = len(self._buffer) + if len(self._buffer) <= self._pos: + return b"" + newpos = min(len(self._buffer), self._pos + n) + b = self._buffer[self._pos : newpos] + self._pos = newpos + return bytes(b) + + def read1(self, n): + """This is the same as read. + """ + return self.read(n) + + def write(self, b): + if self.closed: + raise ValueError("write to closed file") + if isinstance(b, str): + raise TypeError("can't write str to binary stream") + n = len(b) + if n == 0: + return 0 + pos = self._pos + if pos > len(self._buffer): + # Inserts null bytes between the current end of the file + # and the new write position. 
+ padding = b'\x00' * (pos - len(self._buffer)) + self._buffer += padding + self._buffer[pos:pos + n] = b + self._pos += n + return n + + def seek(self, pos, whence=0): + if self.closed: + raise ValueError("seek on closed file") + try: + pos.__index__ + except AttributeError as err: + raise TypeError("an integer is required") from err + if whence == 0: + if pos < 0: + raise ValueError("negative seek position %r" % (pos,)) + self._pos = pos + elif whence == 1: + self._pos = max(0, self._pos + pos) + elif whence == 2: + self._pos = max(0, len(self._buffer) + pos) + else: + raise ValueError("unsupported whence value") + return self._pos + + def tell(self): + if self.closed: + raise ValueError("tell on closed file") + return self._pos + + def truncate(self, pos=None): + if self.closed: + raise ValueError("truncate on closed file") + if pos is None: + pos = self._pos + else: + try: + pos.__index__ + except AttributeError as err: + raise TypeError("an integer is required") from err + if pos < 0: + raise ValueError("negative truncate position %r" % (pos,)) + del self._buffer[pos:] + return pos + + def readable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") + return True + + def writable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") + return True + + def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") + return True + + +class BufferedReader(_BufferedIOMixin): + + """BufferedReader(raw[, buffer_size]) + + A buffer for a readable, sequential BaseRawIO object. + + The constructor creates a BufferedReader for the given readable raw + stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE + is used. + """ + + def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE): + """Create a new buffered reader using the given readable raw IO object. + """ + if not raw.readable(): + raise IOError('"raw" argument must be readable.') + + _BufferedIOMixin.__init__(self, raw) + if buffer_size <= 0: + raise ValueError("invalid buffer size") + self.buffer_size = buffer_size + self._reset_read_buf() + self._read_lock = Lock() + + def _reset_read_buf(self): + self._read_buf = b"" + self._read_pos = 0 + + def read(self, n=None): + """Read n bytes. + + Returns exactly n bytes of data unless the underlying raw IO + stream reaches EOF or if the call would block in non-blocking + mode. If n is negative, read until EOF or until read() would + block. + """ + if n is not None and n < -1: + raise ValueError("invalid number of bytes to read") + with self._read_lock: + return self._read_unlocked(n) + + def _read_unlocked(self, n=None): + nodata_val = b"" + empty_values = (b"", None) + buf = self._read_buf + pos = self._read_pos + + # Special case for when the number of bytes to read is unspecified. + if n is None or n == -1: + self._reset_read_buf() + if hasattr(self.raw, 'readall'): + chunk = self.raw.readall() + if chunk is None: + return buf[pos:] or None + else: + return buf[pos:] + chunk + chunks = [buf[pos:]] # Strip the consumed bytes. + current_size = 0 + while True: + # Read until EOF or until read() would block. + try: + chunk = self.raw.read() + except InterruptedError: + continue + if chunk in empty_values: + nodata_val = chunk + break + current_size += len(chunk) + chunks.append(chunk) + return b"".join(chunks) or nodata_val + + # The number of bytes to read is specified, return at most n bytes. + avail = len(buf) - pos # Length of the available buffered data. 
+ if n <= avail: + # Fast path: the data to read is fully buffered. + self._read_pos += n + return buf[pos:pos+n] + # Slow path: read from the stream until enough bytes are read, + # or until an EOF occurs or until read() would block. + chunks = [buf[pos:]] + wanted = max(self.buffer_size, n) + while avail < n: + try: + chunk = self.raw.read(wanted) + except InterruptedError: + continue + if chunk in empty_values: + nodata_val = chunk + break + avail += len(chunk) + chunks.append(chunk) + # n is more then avail only when an EOF occurred or when + # read() would have blocked. + n = min(n, avail) + out = b"".join(chunks) + self._read_buf = out[n:] # Save the extra data in the buffer. + self._read_pos = 0 + return out[:n] if out else nodata_val + + def peek(self, n=0): + """Returns buffered bytes without advancing the position. + + The argument indicates a desired minimal number of bytes; we + do at most one raw read to satisfy it. We never return more + than self.buffer_size. + """ + with self._read_lock: + return self._peek_unlocked(n) + + def _peek_unlocked(self, n=0): + want = min(n, self.buffer_size) + have = len(self._read_buf) - self._read_pos + if have < want or have <= 0: + to_read = self.buffer_size - have + while True: + try: + current = self.raw.read(to_read) + except InterruptedError: + continue + break + if current: + self._read_buf = self._read_buf[self._read_pos:] + current + self._read_pos = 0 + return self._read_buf[self._read_pos:] + + def read1(self, n): + """Reads up to n bytes, with at most one read() system call.""" + # Returns up to n bytes. If at least one byte is buffered, we + # only return buffered bytes. Otherwise, we do one raw read. + if n < 0: + raise ValueError("number of bytes to read must be positive") + if n == 0: + return b"" + with self._read_lock: + self._peek_unlocked(1) + return self._read_unlocked( + min(n, len(self._read_buf) - self._read_pos)) + + def tell(self): + return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos + + def seek(self, pos, whence=0): + if whence not in valid_seek_flags: + raise ValueError("invalid whence value") + with self._read_lock: + if whence == 1: + pos -= len(self._read_buf) - self._read_pos + pos = _BufferedIOMixin.seek(self, pos, whence) + self._reset_read_buf() + return pos + +class BufferedWriter(_BufferedIOMixin): + + """A buffer for a writeable sequential RawIO object. + + The constructor creates a BufferedWriter for the given writeable raw + stream. If the buffer_size is not given, it defaults to + DEFAULT_BUFFER_SIZE. + """ + + def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE): + if not raw.writable(): + raise IOError('"raw" argument must be writable.') + + _BufferedIOMixin.__init__(self, raw) + if buffer_size <= 0: + raise ValueError("invalid buffer size") + self.buffer_size = buffer_size + self._write_buf = bytearray() + self._write_lock = Lock() + + def write(self, b): + if self.closed: + raise ValueError("write to closed file") + if isinstance(b, str): + raise TypeError("can't write str to binary stream") + with self._write_lock: + # XXX we can implement some more tricks to try and avoid + # partial writes + if len(self._write_buf) > self.buffer_size: + # We're full, so let's pre-flush the buffer. (This may + # raise BlockingIOError with characters_written == 0.) 
+ self._flush_unlocked() + before = len(self._write_buf) + self._write_buf.extend(b) + written = len(self._write_buf) - before + if len(self._write_buf) > self.buffer_size: + try: + self._flush_unlocked() + except BlockingIOError as e: + if len(self._write_buf) > self.buffer_size: + # We've hit the buffer_size. We have to accept a partial + # write and cut back our buffer. + overage = len(self._write_buf) - self.buffer_size + written -= overage + self._write_buf = self._write_buf[:self.buffer_size] + raise BlockingIOError(e.errno, e.strerror, written) + return written + + def truncate(self, pos=None): + with self._write_lock: + self._flush_unlocked() + if pos is None: + pos = self.raw.tell() + return self.raw.truncate(pos) + + def flush(self): + with self._write_lock: + self._flush_unlocked() + + def _flush_unlocked(self): + if self.closed: + raise ValueError("flush of closed file") + while self._write_buf: + try: + n = self.raw.write(self._write_buf) + except InterruptedError: + continue + except BlockingIOError: + raise RuntimeError("self.raw should implement RawIOBase: it " + "should not raise BlockingIOError") + if n is None: + raise BlockingIOError( + errno.EAGAIN, + "write could not complete without blocking", 0) + if n > len(self._write_buf) or n < 0: + raise IOError("write() returned incorrect number of bytes") + del self._write_buf[:n] + + def tell(self): + return _BufferedIOMixin.tell(self) + len(self._write_buf) + + def seek(self, pos, whence=0): + if whence not in valid_seek_flags: + raise ValueError("invalid whence value") + with self._write_lock: + self._flush_unlocked() + return _BufferedIOMixin.seek(self, pos, whence) + + +class BufferedRWPair(BufferedIOBase): + + """A buffered reader and writer object together. + + A buffered reader object and buffered writer object put together to + form a sequential IO object that can read and write. This is typically + used with a socket or two-way pipe. + + reader and writer are RawIOBase objects that are readable and + writeable respectively. If the buffer_size is omitted it defaults to + DEFAULT_BUFFER_SIZE. + """ + + # XXX The usefulness of this (compared to having two separate IO + # objects) is questionable. + + def __init__(self, reader, writer, buffer_size=DEFAULT_BUFFER_SIZE): + """Constructor. + + The arguments are two RawIO instances. + """ + if not reader.readable(): + raise IOError('"reader" argument must be readable.') + + if not writer.writable(): + raise IOError('"writer" argument must be writable.') + + self.reader = BufferedReader(reader, buffer_size) + self.writer = BufferedWriter(writer, buffer_size) + + def read(self, n=None): + if n is None: + n = -1 + return self.reader.read(n) + + def readinto(self, b): + return self.reader.readinto(b) + + def write(self, b): + return self.writer.write(b) + + def peek(self, n=0): + return self.reader.peek(n) + + def read1(self, n): + return self.reader.read1(n) + + def readable(self): + return self.reader.readable() + + def writable(self): + return self.writer.writable() + + def flush(self): + return self.writer.flush() + + def close(self): + self.writer.close() + self.reader.close() + + def isatty(self): + return self.reader.isatty() or self.writer.isatty() + + @property + def closed(self): + return self.writer.closed + + +class BufferedRandom(BufferedWriter, BufferedReader): + + """A buffered interface to random access streams. + + The constructor creates a reader and writer for a seekable stream, + raw, given in the first argument. 
If the buffer_size is omitted it + defaults to DEFAULT_BUFFER_SIZE. + """ + + def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE): + raw._checkSeekable() + BufferedReader.__init__(self, raw, buffer_size) + BufferedWriter.__init__(self, raw, buffer_size) + + def seek(self, pos, whence=0): + if whence not in valid_seek_flags: + raise ValueError("invalid whence value") + self.flush() + if self._read_buf: + # Undo read ahead. + with self._read_lock: + self.raw.seek(self._read_pos - len(self._read_buf), 1) + # First do the raw seek, then empty the read buffer, so that + # if the raw seek fails, we don't lose buffered data forever. + pos = self.raw.seek(pos, whence) + with self._read_lock: + self._reset_read_buf() + if pos < 0: + raise IOError("seek() returned invalid position") + return pos + + def tell(self): + if self._write_buf: + return BufferedWriter.tell(self) + else: + return BufferedReader.tell(self) + + def truncate(self, pos=None): + if pos is None: + pos = self.tell() + # Use seek to flush the read buffer. + return BufferedWriter.truncate(self, pos) + + def read(self, n=None): + if n is None: + n = -1 + self.flush() + return BufferedReader.read(self, n) + + def readinto(self, b): + self.flush() + return BufferedReader.readinto(self, b) + + def peek(self, n=0): + self.flush() + return BufferedReader.peek(self, n) + + def read1(self, n): + self.flush() + return BufferedReader.read1(self, n) + + def write(self, b): + if self._read_buf: + # Undo readahead + with self._read_lock: + self.raw.seek(self._read_pos - len(self._read_buf), 1) + self._reset_read_buf() + return BufferedWriter.write(self, b) + + +class TextIOBase(IOBase): + + """Base class for text I/O. + + This class provides a character and line based interface to stream + I/O. There is no readinto method because Python's character strings + are immutable. There is no public constructor. + """ + + def read(self, n=-1): + """Read at most n characters from stream, where n is an int. + + Read from underlying buffer until we have n characters or we hit EOF. + If n is negative or omitted, read until EOF. + + Returns a string. + """ + self._unsupported("read") + + def write(self, s): + """Write string s to stream and returning an int.""" + self._unsupported("write") + + def truncate(self, pos=None): + """Truncate size to pos, where pos is an int.""" + self._unsupported("truncate") + + def readline(self): + """Read until newline or EOF. + + Returns an empty string if EOF is hit immediately. + """ + self._unsupported("readline") + + def detach(self): + """ + Separate the underlying buffer from the TextIOBase and return it. + + After the underlying buffer has been detached, the TextIO is in an + unusable state. + """ + self._unsupported("detach") + + @property + def encoding(self): + """Subclasses should override.""" + return None + + @property + def newlines(self): + """Line endings translated so far. + + Only line endings translated during reading are considered. + + Subclasses should override. + """ + return None + + @property + def errors(self): + """Error setting of the decoder or encoder. + + Subclasses should override.""" + return None + +#fix me brython +#io.TextIOBase.register(TextIOBase) + + +class IncrementalNewlineDecoder(codecs.IncrementalDecoder): + r"""Codec used when reading a file in universal newlines mode. It wraps + another incremental decoder, translating \r\n and \r into \n. It also + records the types of newlines encountered. 
When used with + translate=False, it ensures that the newline sequence is returned in + one piece. + """ + def __init__(self, decoder, translate, errors='strict'): + codecs.IncrementalDecoder.__init__(self, errors=errors) + self.translate = translate + self.decoder = decoder + self.seennl = 0 + self.pendingcr = False + + def decode(self, input, final=False): + # decode input (with the eventual \r from a previous pass) + if self.decoder is None: + output = input + else: + output = self.decoder.decode(input, final=final) + if self.pendingcr and (output or final): + output = "\r" + output + self.pendingcr = False + + # retain last \r even when not translating data: + # then readline() is sure to get \r\n in one pass + if output.endswith("\r") and not final: + output = output[:-1] + self.pendingcr = True + + # Record which newlines are read + crlf = output.count('\r\n') + cr = output.count('\r') - crlf + lf = output.count('\n') - crlf + self.seennl |= (lf and self._LF) | (cr and self._CR) \ + | (crlf and self._CRLF) + + if self.translate: + if crlf: + output = output.replace("\r\n", "\n") + if cr: + output = output.replace("\r", "\n") + + return output + + def getstate(self): + if self.decoder is None: + buf = b"" + flag = 0 + else: + buf, flag = self.decoder.getstate() + flag <<= 1 + if self.pendingcr: + flag |= 1 + return buf, flag + + def setstate(self, state): + buf, flag = state + self.pendingcr = bool(flag & 1) + if self.decoder is not None: + self.decoder.setstate((buf, flag >> 1)) + + def reset(self): + self.seennl = 0 + self.pendingcr = False + if self.decoder is not None: + self.decoder.reset() + + _LF = 1 + _CR = 2 + _CRLF = 4 + + @property + def newlines(self): + return (None, + "\n", + "\r", + ("\r", "\n"), + "\r\n", + ("\n", "\r\n"), + ("\r", "\r\n"), + ("\r", "\n", "\r\n") + )[self.seennl] + + +class TextIOWrapper(TextIOBase): + + r"""Character and line based layer over a BufferedIOBase object, buffer. + + encoding gives the name of the encoding that the stream will be + decoded or encoded with. It defaults to locale.getpreferredencoding(False). + + errors determines the strictness of encoding and decoding (see the + codecs.register) and defaults to "strict". + + newline can be None, '', '\n', '\r', or '\r\n'. It controls the + handling of line endings. If it is None, universal newlines is + enabled. With this enabled, on input, the lines endings '\n', '\r', + or '\r\n' are translated to '\n' before being returned to the + caller. Conversely, on output, '\n' is translated to the system + default line separator, os.linesep. If newline is any other of its + legal values, that newline becomes the newline when the file is read + and it is returned untranslated. On output, '\n' is converted to the + newline. + + If line_buffering is True, a call to flush is implied when a call to + write contains a newline character. + """ + + _CHUNK_SIZE = 2048 + + # The write_through argument has no effect here since this + # implementation always writes through. The argument is present only + # so that the signature can match the signature of the C version. 
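+    # Editor's note -- illustrative sketch, not part of the original patch.
+    # A typical use is to wrap a binary buffer (here the BytesIO defined in
+    # this module) and let the wrapper do the encoding and newline
+    # translation:
+    #
+    #   b = BytesIO()
+    #   t = TextIOWrapper(b, encoding="utf-8", newline="\r\n")
+    #   t.write("a\nb\n")          # each "\n" is written as "\r\n"
+    #   t.flush()
+    #   assert b.getvalue() == b"a\r\nb\r\n"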
+ def __init__(self, buffer, encoding=None, errors=None, newline=None, + line_buffering=False, write_through=False): + if newline is not None and not isinstance(newline, str): + raise TypeError("illegal newline type: %r" % (type(newline),)) + if newline not in (None, "", "\n", "\r", "\r\n"): + raise ValueError("illegal newline value: %r" % (newline,)) + if encoding is None: + try: + encoding = os.device_encoding(buffer.fileno()) + except (AttributeError, UnsupportedOperation): + pass + if encoding is None: + try: + import locale + except ImportError: + # Importing locale may fail if Python is being built + encoding = "ascii" + else: + encoding = locale.getpreferredencoding(False) + + if not isinstance(encoding, str): + raise ValueError("invalid encoding: %r" % encoding) + + if errors is None: + errors = "strict" + else: + if not isinstance(errors, str): + raise ValueError("invalid errors: %r" % errors) + + self._buffer = buffer + self._line_buffering = line_buffering + self._encoding = encoding + self._errors = errors + self._readuniversal = not newline + self._readtranslate = newline is None + self._readnl = newline + self._writetranslate = newline != '' + self._writenl = newline or os.linesep + self._encoder = None + self._decoder = None + self._decoded_chars = '' # buffer for text returned from decoder + self._decoded_chars_used = 0 # offset into _decoded_chars for read() + self._snapshot = None # info for reconstructing decoder state + self._seekable = self._telling = self.buffer.seekable() + self._has_read1 = hasattr(self.buffer, 'read1') + self._b2cratio = 0.0 + + if self._seekable and self.writable(): + position = self.buffer.tell() + if position != 0: + try: + self._get_encoder().setstate(0) + except LookupError: + # Sometimes the encoder doesn't exist + pass + + # self._snapshot is either None, or a tuple (dec_flags, next_input) + # where dec_flags is the second (integer) item of the decoder state + # and next_input is the chunk of input bytes that comes next after the + # snapshot point. We use this to reconstruct decoder states in tell(). + + # Naming convention: + # - "bytes_..." for integer variables that count input bytes + # - "chars_..." 
for integer variables that count decoded characters + + def __repr__(self): + result = "<_io.TextIOWrapper" + try: + name = self.name + except AttributeError: + pass + else: + result += " name={0!r}".format(name) + try: + mode = self.mode + except AttributeError: + pass + else: + result += " mode={0!r}".format(mode) + return result + " encoding={0!r}>".format(self.encoding) + + @property + def encoding(self): + return self._encoding + + @property + def errors(self): + return self._errors + + @property + def line_buffering(self): + return self._line_buffering + + @property + def buffer(self): + return self._buffer + + def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") + return self._seekable + + def readable(self): + return self.buffer.readable() + + def writable(self): + return self.buffer.writable() + + def flush(self): + self.buffer.flush() + self._telling = self._seekable + + def close(self): + if self.buffer is not None and not self.closed: + try: + self.flush() + finally: + self.buffer.close() + + @property + def closed(self): + return self.buffer.closed + + @property + def name(self): + return self.buffer.name + + def fileno(self): + return self.buffer.fileno() + + def isatty(self): + return self.buffer.isatty() + + def write(self, s): + 'Write data, where s is a str' + if self.closed: + raise ValueError("write to closed file") + if not isinstance(s, str): + raise TypeError("can't write %s to text stream" % + s.__class__.__name__) + length = len(s) + haslf = (self._writetranslate or self._line_buffering) and "\n" in s + if haslf and self._writetranslate and self._writenl != "\n": + s = s.replace("\n", self._writenl) + encoder = self._encoder or self._get_encoder() + # XXX What if we were just reading? + b = encoder.encode(s) + self.buffer.write(b) + if self._line_buffering and (haslf or "\r" in s): + self.flush() + self._snapshot = None + if self._decoder: + self._decoder.reset() + return length + + def _get_encoder(self): + make_encoder = codecs.getincrementalencoder(self._encoding) + self._encoder = make_encoder(self._errors) + return self._encoder + + def _get_decoder(self): + make_decoder = codecs.getincrementaldecoder(self._encoding) + decoder = make_decoder(self._errors) + if self._readuniversal: + decoder = IncrementalNewlineDecoder(decoder, self._readtranslate) + self._decoder = decoder + return decoder + + # The following three methods implement an ADT for _decoded_chars. + # Text returned from the decoder is buffered here until the client + # requests it by calling our read() or readline() method. + def _set_decoded_chars(self, chars): + """Set the _decoded_chars buffer.""" + self._decoded_chars = chars + self._decoded_chars_used = 0 + + def _get_decoded_chars(self, n=None): + """Advance into the _decoded_chars buffer.""" + offset = self._decoded_chars_used + if n is None: + chars = self._decoded_chars[offset:] + else: + chars = self._decoded_chars[offset:offset + n] + self._decoded_chars_used += len(chars) + return chars + + def _rewind_decoded_chars(self, n): + """Rewind the _decoded_chars buffer.""" + if self._decoded_chars_used < n: + raise AssertionError("rewind decoded_chars out of bounds") + self._decoded_chars_used -= n + + def _read_chunk(self): + """ + Read and decode the next chunk of data from the BufferedReader. + """ + + # The return value is True unless EOF was reached. The decoded + # string is placed in self._decoded_chars (replacing its previous + # value). 
The entire input chunk is sent to the decoder, though + # some of it may remain buffered in the decoder, yet to be + # converted. + + if self._decoder is None: + raise ValueError("no decoder") + + if self._telling: + # To prepare for tell(), we need to snapshot a point in the + # file where the decoder's input buffer is empty. + + dec_buffer, dec_flags = self._decoder.getstate() + # Given this, we know there was a valid snapshot point + # len(dec_buffer) bytes ago with decoder state (b'', dec_flags). + + # Read a chunk, decode it, and put the result in self._decoded_chars. + if self._has_read1: + input_chunk = self.buffer.read1(self._CHUNK_SIZE) + else: + input_chunk = self.buffer.read(self._CHUNK_SIZE) + eof = not input_chunk + decoded_chars = self._decoder.decode(input_chunk, eof) + self._set_decoded_chars(decoded_chars) + if decoded_chars: + self._b2cratio = len(input_chunk) / len(self._decoded_chars) + else: + self._b2cratio = 0.0 + + if self._telling: + # At the snapshot point, len(dec_buffer) bytes before the read, + # the next input to be decoded is dec_buffer + input_chunk. + self._snapshot = (dec_flags, dec_buffer + input_chunk) + + return not eof + + def _pack_cookie(self, position, dec_flags=0, + bytes_to_feed=0, need_eof=0, chars_to_skip=0): + # The meaning of a tell() cookie is: seek to position, set the + # decoder flags to dec_flags, read bytes_to_feed bytes, feed them + # into the decoder with need_eof as the EOF flag, then skip + # chars_to_skip characters of the decoded result. For most simple + # decoders, tell() will often just give a byte offset in the file. + return (position | (dec_flags<<64) | (bytes_to_feed<<128) | + (chars_to_skip<<192) | bool(need_eof)<<256) + + def _unpack_cookie(self, bigint): + rest, position = divmod(bigint, 1<<64) + rest, dec_flags = divmod(rest, 1<<64) + rest, bytes_to_feed = divmod(rest, 1<<64) + need_eof, chars_to_skip = divmod(rest, 1<<64) + return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip + + def tell(self): + if not self._seekable: + raise UnsupportedOperation("underlying stream is not seekable") + if not self._telling: + raise IOError("telling position disabled by next() call") + self.flush() + position = self.buffer.tell() + decoder = self._decoder + if decoder is None or self._snapshot is None: + if self._decoded_chars: + # This should never happen. + raise AssertionError("pending decoded text") + return position + + # Skip backward to the snapshot point (see _read_chunk). + dec_flags, next_input = self._snapshot + position -= len(next_input) + + # How many decoded characters have been used up since the snapshot? + chars_to_skip = self._decoded_chars_used + if chars_to_skip == 0: + # We haven't moved from the snapshot point. + return self._pack_cookie(position, dec_flags) + + # Starting from the snapshot position, we will walk the decoder + # forward until it gives us enough decoded characters. + saved_state = decoder.getstate() + try: + # Fast search for an acceptable start point, close to our + # current pos. + # Rationale: calling decoder.decode() has a large overhead + # regardless of chunk size; we want the number of such calls to + # be O(1) in most situations (common decoders, non-crazy input). + # Actually, it will be exactly 1 for fixed-size codecs (all + # 8-bit codecs, also UTF-16 and UTF-32). 
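+            # Editor's note (illustrative, not part of the original patch):
+            # for an 8-bit codec such as latin-1, _b2cratio is 1.0, so
+            # skip_bytes == chars_to_skip and the loop below settles on a
+            # start point with a single decode; for a variable-width codec
+            # such as UTF-8 the ratio is only an estimate and a few more
+            # iterations may be needed.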
+ skip_bytes = int(self._b2cratio * chars_to_skip) + skip_back = 1 + assert skip_bytes <= len(next_input) + while skip_bytes > 0: + decoder.setstate((b'', dec_flags)) + # Decode up to temptative start point + n = len(decoder.decode(next_input[:skip_bytes])) + if n <= chars_to_skip: + b, d = decoder.getstate() + if not b: + # Before pos and no bytes buffered in decoder => OK + dec_flags = d + chars_to_skip -= n + break + # Skip back by buffered amount and reset heuristic + skip_bytes -= len(b) + skip_back = 1 + else: + # We're too far ahead, skip back a bit + skip_bytes -= skip_back + skip_back = skip_back * 2 + else: + skip_bytes = 0 + decoder.setstate((b'', dec_flags)) + + # Note our initial start point. + start_pos = position + skip_bytes + start_flags = dec_flags + if chars_to_skip == 0: + # We haven't moved from the start point. + return self._pack_cookie(start_pos, start_flags) + + # Feed the decoder one byte at a time. As we go, note the + # nearest "safe start point" before the current location + # (a point where the decoder has nothing buffered, so seek() + # can safely start from there and advance to this location). + bytes_fed = 0 + need_eof = 0 + # Chars decoded since `start_pos` + chars_decoded = 0 + for i in range(skip_bytes, len(next_input)): + bytes_fed += 1 + chars_decoded += len(decoder.decode(next_input[i:i+1])) + dec_buffer, dec_flags = decoder.getstate() + if not dec_buffer and chars_decoded <= chars_to_skip: + # Decoder buffer is empty, so this is a safe start point. + start_pos += bytes_fed + chars_to_skip -= chars_decoded + start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0 + if chars_decoded >= chars_to_skip: + break + else: + # We didn't get enough decoded data; signal EOF to get more. + chars_decoded += len(decoder.decode(b'', final=True)) + need_eof = 1 + if chars_decoded < chars_to_skip: + raise IOError("can't reconstruct logical file position") + + # The returned cookie corresponds to the last safe start point. + return self._pack_cookie( + start_pos, start_flags, bytes_fed, need_eof, chars_to_skip) + finally: + decoder.setstate(saved_state) + + def truncate(self, pos=None): + self.flush() + if pos is None: + pos = self.tell() + return self.buffer.truncate(pos) + + def detach(self): + if self.buffer is None: + raise ValueError("buffer is already detached") + self.flush() + buffer = self._buffer + self._buffer = None + return buffer + + def seek(self, cookie, whence=0): + if self.closed: + raise ValueError("tell on closed file") + if not self._seekable: + raise UnsupportedOperation("underlying stream is not seekable") + if whence == 1: # seek relative to current position + if cookie != 0: + raise UnsupportedOperation("can't do nonzero cur-relative seeks") + # Seeking to the current position should attempt to + # sync the underlying buffer with the current position. + whence = 0 + cookie = self.tell() + if whence == 2: # seek relative to end of file + if cookie != 0: + raise UnsupportedOperation("can't do nonzero end-relative seeks") + self.flush() + position = self.buffer.seek(0, 2) + self._set_decoded_chars('') + self._snapshot = None + if self._decoder: + self._decoder.reset() + return position + if whence != 0: + raise ValueError("unsupported whence (%r)" % (whence,)) + if cookie < 0: + raise ValueError("negative seek position %r" % (cookie,)) + self.flush() + + # The strategy of seek() is to go back to the safe start point + # and replay the effect of read(chars_to_skip) from there. 
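+        # Editor's note (illustrative, not part of the original patch): the
+        # intended round trip is
+        #
+        #   cookie = t.tell()
+        #   ...                      # read further
+        #   t.seek(cookie)           # restores position *and* decoder state
+        #
+        # so that reads after the seek return exactly the characters that
+        # followed the original tell().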
+ start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \ + self._unpack_cookie(cookie) + + # Seek back to the safe start point. + self.buffer.seek(start_pos) + self._set_decoded_chars('') + self._snapshot = None + + # Restore the decoder to its state from the safe start point. + if cookie == 0 and self._decoder: + self._decoder.reset() + elif self._decoder or dec_flags or chars_to_skip: + self._decoder = self._decoder or self._get_decoder() + self._decoder.setstate((b'', dec_flags)) + self._snapshot = (dec_flags, b'') + + if chars_to_skip: + # Just like _read_chunk, feed the decoder and save a snapshot. + input_chunk = self.buffer.read(bytes_to_feed) + self._set_decoded_chars( + self._decoder.decode(input_chunk, need_eof)) + self._snapshot = (dec_flags, input_chunk) + + # Skip chars_to_skip of the decoded characters. + if len(self._decoded_chars) < chars_to_skip: + raise IOError("can't restore logical file position") + self._decoded_chars_used = chars_to_skip + + # Finally, reset the encoder (merely useful for proper BOM handling) + try: + encoder = self._encoder or self._get_encoder() + except LookupError: + # Sometimes the encoder doesn't exist + pass + else: + if cookie != 0: + encoder.setstate(0) + else: + encoder.reset() + return cookie + + def read(self, n=None): + self._checkReadable() + if n is None: + n = -1 + decoder = self._decoder or self._get_decoder() + try: + n.__index__ + except AttributeError as err: + raise TypeError("an integer is required") from err + if n < 0: + # Read everything. + result = (self._get_decoded_chars() + + decoder.decode(self.buffer.read(), final=True)) + self._set_decoded_chars('') + self._snapshot = None + return result + else: + # Keep reading chunks until we have n characters to return. + eof = False + result = self._get_decoded_chars(n) + while len(result) < n and not eof: + eof = not self._read_chunk() + result += self._get_decoded_chars(n - len(result)) + return result + + def __next__(self): + self._telling = False + line = self.readline() + if not line: + self._snapshot = None + self._telling = self._seekable + raise StopIteration + return line + + def readline(self, limit=None): + if self.closed: + raise ValueError("read from closed file") + if limit is None: + limit = -1 + elif not isinstance(limit, int): + raise TypeError("limit must be an integer") + + # Grab all the decoded text (we will rewind any extra bits later). + line = self._get_decoded_chars() + + start = 0 + # Make the decoder if it doesn't already exist. + if not self._decoder: + self._get_decoder() + + pos = endpos = None + while True: + if self._readtranslate: + # Newlines are already translated, only search for \n + pos = line.find('\n', start) + if pos >= 0: + endpos = pos + 1 + break + else: + start = len(line) + + elif self._readuniversal: + # Universal newline search. Find any of \r, \r\n, \n + # The decoder ensures that \r\n are not split in two pieces + + # In C we'd look for these in parallel of course. 
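+                # Editor's note (illustrative, not part of the original
+                # patch): e.g. for line == "a\r\nb" the search below finds
+                # crpos == 1 and nlpos == 2, takes the nlpos == crpos + 1
+                # branch, and the line returned is "a\r\n" (endpos == 3).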
+ nlpos = line.find("\n", start) + crpos = line.find("\r", start) + if crpos == -1: + if nlpos == -1: + # Nothing found + start = len(line) + else: + # Found \n + endpos = nlpos + 1 + break + elif nlpos == -1: + # Found lone \r + endpos = crpos + 1 + break + elif nlpos < crpos: + # Found \n + endpos = nlpos + 1 + break + elif nlpos == crpos + 1: + # Found \r\n + endpos = crpos + 2 + break + else: + # Found \r + endpos = crpos + 1 + break + else: + # non-universal + pos = line.find(self._readnl) + if pos >= 0: + endpos = pos + len(self._readnl) + break + + if limit >= 0 and len(line) >= limit: + endpos = limit # reached length limit + break + + # No line ending seen yet - get more data' + while self._read_chunk(): + if self._decoded_chars: + break + if self._decoded_chars: + line += self._get_decoded_chars() + else: + # end of file + self._set_decoded_chars('') + self._snapshot = None + return line + + if limit >= 0 and endpos > limit: + endpos = limit # don't exceed limit + + # Rewind _decoded_chars to just after the line ending we found. + self._rewind_decoded_chars(len(line) - endpos) + return line[:endpos] + + @property + def newlines(self): + return self._decoder.newlines if self._decoder else None + + +class StringIO(TextIOWrapper): + """Text I/O implementation using an in-memory buffer. + + The initial_value argument sets the value of object. The newline + argument is like the one of TextIOWrapper's constructor. + """ + + def __init__(self, initial_value="", newline="\n"): + super(StringIO, self).__init__(BytesIO(), + encoding="utf-8", + errors="strict", + newline=newline) + # Issue #5645: make universal newlines semantics the same as in the + # C version, even under Windows. + if newline is None: + self._writetranslate = False + if initial_value is not None: + if not isinstance(initial_value, str): + raise TypeError("initial_value must be str or None, not {0}" + .format(type(initial_value).__name__)) + initial_value = str(initial_value) + self.write(initial_value) + self.seek(0) + + def getvalue(self): + self.flush() + return self.buffer.getvalue().decode(self._encoding, self._errors) + + def __repr__(self): + # TextIOWrapper tells the encoding in its repr. In StringIO, + # that's a implementation detail. + return object.__repr__(self) + + @property + def errors(self): + return None + + @property + def encoding(self): + return None + + def detach(self): + # This doesn't make sense on StringIO. + self._unsupported("detach") diff --git a/lib/assets/Lib/_markupbase.py b/lib/assets/Lib/_markupbase.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_markupbase.py @@ -0,0 +1,395 @@ +"""Shared support for scanning document type declarations in HTML and XHTML. + +This module is used as a foundation for the html.parser module. It has no +documented public API and should not be used directly. 
+ +""" + +import re + +_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match +_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match +_commentclose = re.compile(r'--\s*>') +_markedsectionclose = re.compile(r']\s*]\s*>') + +# An analysis of the MS-Word extensions is available at +# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf + +_msmarkedsectionclose = re.compile(r']\s*>') + +del re + + +class ParserBase: + """Parser base class which provides some common support methods used + by the SGML/HTML and XHTML parsers.""" + + def __init__(self): + if self.__class__ is ParserBase: + raise RuntimeError( + "_markupbase.ParserBase must be subclassed") + + def error(self, message): + raise NotImplementedError( + "subclasses of ParserBase must override error()") + + def reset(self): + self.lineno = 1 + self.offset = 0 + + def getpos(self): + """Return current line number and offset.""" + return self.lineno, self.offset + + # Internal -- update line number and offset. This should be + # called for each piece of data exactly once, in order -- in other + # words the concatenation of all the input strings to this + # function should be exactly the entire input. + def updatepos(self, i, j): + if i >= j: + return j + rawdata = self.rawdata + nlines = rawdata.count("\n", i, j) + if nlines: + self.lineno = self.lineno + nlines + pos = rawdata.rindex("\n", i, j) # Should not fail + self.offset = j-(pos+1) + else: + self.offset = self.offset + j-i + return j + + _decl_otherchars = '' + + # Internal -- parse declaration (for use by subclasses). + def parse_declaration(self, i): + # This is some sort of declaration; in "HTML as + # deployed," this should only be the document type + # declaration (""). + # ISO 8879:1986, however, has more complex + # declaration syntax for elements in , including: + # --comment-- + # [marked section] + # name in the following list: ENTITY, DOCTYPE, ELEMENT, + # ATTLIST, NOTATION, SHORTREF, USEMAP, + # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM + rawdata = self.rawdata + j = i + 2 + assert rawdata[i:j] == "": + # the empty comment + return j + 1 + if rawdata[j:j+1] in ("-", ""): + # Start of comment followed by buffer boundary, + # or just a buffer boundary. + return -1 + # A simple, practical version could look like: ((name|stringlit) S*) + '>' + n = len(rawdata) + if rawdata[j:j+2] == '--': #comment + # Locate --.*-- as the body of the comment + return self.parse_comment(i) + elif rawdata[j] == '[': #marked section + # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section + # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA + # Note that this is extended by Microsoft Office "Save as Web" function + # to include [if...] and [endif]. + return self.parse_marked_section(i) + else: #all other declaration elements + decltype, j = self._scan_name(j, i) + if j < 0: + return j + if decltype == "doctype": + self._decl_otherchars = '' + while j < n: + c = rawdata[j] + if c == ">": + # end of declaration syntax + data = rawdata[i+2:j] + if decltype == "doctype": + self.handle_decl(data) + else: + # According to the HTML5 specs sections "8.2.4.44 Bogus + # comment state" and "8.2.4.45 Markup declaration open + # state", a comment token should be emitted. + # Calling unknown_decl provides more flexibility though. 
+ self.unknown_decl(data) + return j + 1 + if c in "\"'": + m = _declstringlit_match(rawdata, j) + if not m: + return -1 # incomplete + j = m.end() + elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ": + name, j = self._scan_name(j, i) + elif c in self._decl_otherchars: + j = j + 1 + elif c == "[": + # this could be handled in a separate doctype parser + if decltype == "doctype": + j = self._parse_doctype_subset(j + 1, i) + elif decltype in {"attlist", "linktype", "link", "element"}: + # must tolerate []'d groups in a content model in an element declaration + # also in data attribute specifications of attlist declaration + # also link type declaration subsets in linktype declarations + # also link attribute specification lists in link declarations + self.error("unsupported '[' char in %s declaration" % decltype) + else: + self.error("unexpected '[' char in declaration") + else: + self.error( + "unexpected %r char in declaration" % rawdata[j]) + if j < 0: + return j + return -1 # incomplete + + # Internal -- parse a marked section + # Override this to handle MS-word extension syntax content + def parse_marked_section(self, i, report=1): + rawdata= self.rawdata + assert rawdata[i:i+3] == ' ending + match= _markedsectionclose.search(rawdata, i+3) + elif sectName in {"if", "else", "endif"}: + # look for MS Office ]> ending + match= _msmarkedsectionclose.search(rawdata, i+3) + else: + self.error('unknown status keyword %r in marked section' % rawdata[i+3:j]) + if not match: + return -1 + if report: + j = match.start(0) + self.unknown_decl(rawdata[i+3: j]) + return match.end(0) + + # Internal -- parse comment, return length or -1 if not terminated + def parse_comment(self, i, report=1): + rawdata = self.rawdata + if rawdata[i:i+4] != ' (host, port) + Get host and port for a sockaddr.""" + pass + +def getprotobyname(*args,**kw): + """getprotobyname(name) -> integer + Return the protocol number for the named protocol. (Rarely used.)""" + pass + +def getservbyname(*args,**kw): + """getservbyname(servicename[, protocolname]) -> integer + Return a port number from a service name and protocol name. + The optional protocol name, if given, should be 'tcp' or 'udp', + otherwise any protocol will match.""" + pass + +def getservbyport(*args,**kw): + """getservbyport(port[, protocolname]) -> string + Return the service name from a port number and protocol name. 
+ The optional protocol name, if given, should be 'tcp' or 'udp', + otherwise any protocol will match.""" + pass + +has_ipv6 = True + +class herror: + pass + +def htonl(*args,**kw): + """htonl(integer) -> integer + Convert a 32-bit integer from host to network byte order.""" + pass + +def htons(*args,**kw): + """htons(integer) -> integer + Convert a 16-bit integer from host to network byte order.""" + pass + +def inet_aton(*args,**kw): + """inet_aton(string) -> bytes giving packed 32-bit IP representation + Convert an IP address in string format (123.45.67.89) to the 32-bit packed + binary format used in low-level network functions.""" + pass + +def inet_ntoa(*args,**kw): + """inet_ntoa(packed_ip) -> ip_address_string + Convert an IP address from 32-bit packed binary format to string format""" + pass + +def ntohl(*args,**kw): + """ntohl(integer) -> integer + Convert a 32-bit integer from network to host byte order.""" + pass + +def ntohs(*args,**kw): + """ntohs(integer) -> integer + Convert a 16-bit integer from network to host byte order.""" + pass + +def setdefaulttimeout(*args,**kw): + """setdefaulttimeout(timeout) + Set the default timeout in seconds (float) for new socket objects. + A value of None indicates that new socket objects have no timeout. + When the socket module is first imported, the default is None.""" + pass + +class socket: + def __init__(self,*args,**kw): + pass + def bind(self,*args,**kw): + pass + def close(self): + pass + +class timeout: + pass diff --git a/lib/assets/Lib/_sre.py b/lib/assets/Lib/_sre.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_sre.py @@ -0,0 +1,1354 @@ +# NOT_RPYTHON +""" +A pure Python reimplementation of the _sre module from CPython 2.4 +Copyright 2005 Nik Haldimann, licensed under the MIT license + +This code is based on material licensed under CNRI's Python 1.6 license and +copyrighted by: Copyright (c) 1997-2001 by Secret Labs AB +""" + +MAXREPEAT = 2147483648 + +#import array +import operator, sys +from sre_constants import ATCODES, OPCODES, CHCODES +from sre_constants import SRE_INFO_PREFIX, SRE_INFO_LITERAL +from sre_constants import SRE_FLAG_UNICODE, SRE_FLAG_LOCALE + + +import sys + +# Identifying as _sre from Python 2.3 or 2.4 +#if sys.version_info[:2] >= (2, 4): +MAGIC = 20031017 +#else: +# MAGIC = 20030419 + +# In _sre.c this is bytesize of the code word type of the C implementation. +# There it's 2 for normal Python builds and more for wide unicode builds (large +# enough to hold a 32-bit UCS-4 encoded character). Since here in pure Python +# we only see re bytecodes as Python longs, we shouldn't have to care about the +# codesize. But sre_compile will compile some stuff differently depending on the +# codesize (e.g., charsets). +# starting with python 3.3 CODESIZE is 4 +#if sys.maxunicode == 65535: +# CODESIZE = 2 +#else: +CODESIZE = 4 + +copyright = "_sre.py 2.4c Copyright 2005 by Nik Haldimann" + + +def getcodesize(): + return CODESIZE + +def compile(pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]): + """Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern + object. 
Actual compilation to opcodes happens in sre_compile.""" + return SRE_Pattern(pattern, flags, code, groups, groupindex, indexgroup) + +def getlower(char_ord, flags): + if (char_ord < 128) or (flags & SRE_FLAG_UNICODE) \ + or (flags & SRE_FLAG_LOCALE and char_ord < 256): + #return ord(unichr(char_ord).lower()) + return ord(chr(char_ord).lower()) + else: + return char_ord + + +class SRE_Pattern: + + def __init__(self, pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]): + self.pattern = pattern + self.flags = flags + self.groups = groups + self.groupindex = groupindex # Maps group names to group indices + self._indexgroup = indexgroup # Maps indices to group names + self._code = code + + def match(self, string, pos=0, endpos=sys.maxsize): + """If zero or more characters at the beginning of string match this + regular expression, return a corresponding MatchObject instance. Return + None if the string does not match the pattern.""" + state = _State(string, pos, endpos, self.flags) + if state.match(self._code): + return SRE_Match(self, state) + return None + + def search(self, string, pos=0, endpos=sys.maxsize): + """Scan through string looking for a location where this regular + expression produces a match, and return a corresponding MatchObject + instance. Return None if no position in the string matches the + pattern.""" + state = _State(string, pos, endpos, self.flags) + if state.search(self._code): + return SRE_Match(self, state) + else: + return None + + def findall(self, string, pos=0, endpos=sys.maxsize): + """Return a list of all non-overlapping matches of pattern in string.""" + matchlist = [] + state = _State(string, pos, endpos, self.flags) + while state.start <= state.end: + state.reset() + state.string_position = state.start + if not state.search(self._code): + break + match = SRE_Match(self, state) + if self.groups == 0 or self.groups == 1: + item = match.group(self.groups) + else: + item = match.groups("") + matchlist.append(item) + if state.string_position == state.start: + state.start += 1 + else: + state.start = state.string_position + return matchlist + + def _subx(self, template, string, count=0, subn=False): + filter = template + if not callable(template) and "\\" in template: + # handle non-literal strings ; hand it over to the template compiler + #import sre #sre was renamed to re + #fix me brython + #print("possible issue at _sre.py line 116") + import re as sre + filter = sre._subx(self, template) + state = _State(string, 0, sys.maxsize, self.flags) + sublist = [] + + n = last_pos = 0 + while not count or n < count: + state.reset() + state.string_position = state.start + if not state.search(self._code): + break + if last_pos < state.start: + sublist.append(string[last_pos:state.start]) + if not (last_pos == state.start and + last_pos == state.string_position and n > 0): + # the above ignores empty matches on latest position + if callable(filter): + sublist.append(filter(SRE_Match(self, state))) + else: + sublist.append(filter) + last_pos = state.string_position + n += 1 + if state.string_position == state.start: + state.start += 1 + else: + state.start = state.string_position + + if last_pos < state.end: + sublist.append(string[last_pos:state.end]) + item = "".join(sublist) + if subn: + return item, n + else: + return item + + def sub(self, repl, string, count=0): + """Return the string obtained by replacing the leftmost non-overlapping + occurrences of pattern in string by the replacement repl.""" + return self._subx(repl, string, count, False) + + def 
subn(self, repl, string, count=0): + """Return the tuple (new_string, number_of_subs_made) found by replacing + the leftmost non-overlapping occurrences of pattern with the replacement + repl.""" + return self._subx(repl, string, count, True) + + def split(self, string, maxsplit=0): + """Split string by the occurrences of pattern.""" + splitlist = [] + state = _State(string, 0, sys.maxsize, self.flags) + n = 0 + last = state.start + while not maxsplit or n < maxsplit: + state.reset() + state.string_position = state.start + if not state.search(self._code): + break + if state.start == state.string_position: # zero-width match + if last == state.end: # or end of string + break + state.start += 1 + continue + splitlist.append(string[last:state.start]) + # add groups (if any) + if self.groups: + match = SRE_Match(self, state) + splitlist.extend(list(match.groups(None))) + n += 1 + last = state.start = state.string_position + splitlist.append(string[last:state.end]) + return splitlist + + def finditer(self, string, pos=0, endpos=sys.maxsize): + """Return a list of all non-overlapping matches of pattern in string.""" + #scanner = self.scanner(string, pos, endpos) + _list=[] + _m=self.scanner(string, pos, endpos) + _re=SRE_Scanner(self, string, pos, endpos) + _m=_re.search() + while _m: + _list.append(_m) + _m=_re.search() + return _list + #return iter(scanner.search, None) + + def scanner(self, string, start=0, end=sys.maxsize): + return SRE_Scanner(self, string, start, end) + + def __copy__(self): + raise TypeError("cannot copy this pattern object") + + def __deepcopy__(self): + raise TypeError("cannot copy this pattern object") + +class SRE_Scanner: + """Undocumented scanner interface of sre.""" + + def __init__(self, pattern, string, start, end): + self.pattern = pattern + self._state = _State(string, start, end, self.pattern.flags) + + def _match_search(self, matcher): + state = self._state + state.reset() + state.string_position = state.start + match = None + if matcher(self.pattern._code): + match = SRE_Match(self.pattern, state) + if match is None or state.string_position == state.start: + state.start += 1 + else: + state.start = state.string_position + return match + + def match(self): + return self._match_search(self._state.match) + + def search(self): + return self._match_search(self._state.search) + +class SRE_Match: + + def __init__(self, pattern, state): + self.re = pattern + self.string = state.string + self.pos = state.pos + self.endpos = state.end + self.lastindex = state.lastindex + if self.lastindex < 0: + self.lastindex = None + self.regs = self._create_regs(state) + + #statement below is not valid under python3 ( 0 <= None) + #if pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup): + if self.lastindex is not None and pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup): + # The above upper-bound check should not be necessary, as the re + # compiler is supposed to always provide an _indexgroup list long + # enough. But the re.Scanner class seems to screw up something + # there, test_scanner in test_re won't work without upper-bound + # checking. XXX investigate this and report bug to CPython. 
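+            # Editor's note (illustrative, not part of the original patch):
+            # for a pattern compiled from r"(?P<a>x)|(?P<b>y)" matched
+            # against "y", lastindex is 2 and _indexgroup[2] is "b", so
+            # lastgroup becomes "b", matching CPython's behaviour.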
+ self.lastgroup = pattern._indexgroup[self.lastindex] + else: + self.lastgroup = None + + def _create_regs(self, state): + """Creates a tuple of index pairs representing matched groups.""" + regs = [(state.start, state.string_position)] + for group in range(self.re.groups): + mark_index = 2 * group + if mark_index + 1 < len(state.marks) \ + and state.marks[mark_index] is not None \ + and state.marks[mark_index + 1] is not None: + regs.append((state.marks[mark_index], state.marks[mark_index + 1])) + else: + regs.append((-1, -1)) + return tuple(regs) + + def _get_index(self, group): + if isinstance(group, int): + if group >= 0 and group <= self.re.groups: + return group + else: + if group in self.re.groupindex: + return self.re.groupindex[group] + raise IndexError("no such group") + + def _get_slice(self, group, default): + group_indices = self.regs[group] + if group_indices[0] >= 0: + return self.string[group_indices[0]:group_indices[1]] + else: + return default + + def start(self, group=0): + """Returns the indices of the start of the substring matched by group; + group defaults to zero (meaning the whole matched substring). Returns -1 + if group exists but did not contribute to the match.""" + return self.regs[self._get_index(group)][0] + + def end(self, group=0): + """Returns the indices of the end of the substring matched by group; + group defaults to zero (meaning the whole matched substring). Returns -1 + if group exists but did not contribute to the match.""" + return self.regs[self._get_index(group)][1] + + def span(self, group=0): + """Returns the 2-tuple (m.start(group), m.end(group)).""" + return self.start(group), self.end(group) + + def expand(self, template): + """Return the string obtained by doing backslash substitution and + resolving group references on template.""" + import sre + return sre._expand(self.re, self, template) + + def groups(self, default=None): + """Returns a tuple containing all the subgroups of the match. The + default argument is used for groups that did not participate in the + match (defaults to None).""" + groups = [] + for indices in self.regs[1:]: + if indices[0] >= 0: + groups.append(self.string[indices[0]:indices[1]]) + else: + groups.append(default) + return tuple(groups) + + def groupdict(self, default=None): + """Return a dictionary containing all the named subgroups of the match. + The default argument is used for groups that did not participate in the + match (defaults to None).""" + groupdict = {} + for key, value in self.re.groupindex.items(): + groupdict[key] = self._get_slice(value, default) + return groupdict + + def group(self, *args): + """Returns one or more subgroups of the match. 
Each argument is either a + group index or a group name.""" + if len(args) == 0: + args = (0,) + grouplist = [] + for group in args: + grouplist.append(self._get_slice(self._get_index(group), None)) + if len(grouplist) == 1: + return grouplist[0] + else: + return tuple(grouplist) + + def __copy__(): + raise TypeError("cannot copy this pattern object") + + def __deepcopy__(): + raise TypeError("cannot copy this pattern object") + + +class _State: + + def __init__(self, string, start, end, flags): + self.string = string + if start < 0: + start = 0 + if end > len(string): + end = len(string) + self.start = start + self.string_position = self.start + self.end = end + self.pos = start + self.flags = flags + self.reset() + + def reset(self): + self.marks = [] + self.lastindex = -1 + self.marks_stack = [] + self.context_stack = [] + self.repeat = None + + def match(self, pattern_codes): + # Optimization: Check string length. pattern_codes[3] contains the + # minimum length for a string to possibly match. + # brython.. the optimization doesn't work + #if pattern_codes[0] == OPCODES["info"] and pattern_codes[3]: + # if self.end - self.string_position < pattern_codes[3]: + # #_log("reject (got %d chars, need %d)" + # # % (self.end - self.string_position, pattern_codes[3])) + # return False + + dispatcher = _OpcodeDispatcher() + self.context_stack.append(_MatchContext(self, pattern_codes)) + has_matched = None + while len(self.context_stack) > 0: + context = self.context_stack[-1] + has_matched = dispatcher.match(context) + if has_matched is not None: # don't pop if context isn't done + self.context_stack.pop() + return has_matched + + def search(self, pattern_codes): + flags = 0 + if pattern_codes[0] == OPCODES["info"]: + # optimization info block + # <1=skip> <2=flags> <3=min> <4=max> <5=prefix info> + if pattern_codes[2] & SRE_INFO_PREFIX and pattern_codes[5] > 1: + return self.fast_search(pattern_codes) + flags = pattern_codes[2] + pattern_codes = pattern_codes[pattern_codes[1] + 1:] + + string_position = self.start + if pattern_codes[0] == OPCODES["literal"]: + # Special case: Pattern starts with a literal character. 
This is + # used for short prefixes + character = pattern_codes[1] + while True: + while string_position < self.end \ + and ord(self.string[string_position]) != character: + string_position += 1 + if string_position >= self.end: + return False + self.start = string_position + string_position += 1 + self.string_position = string_position + if flags & SRE_INFO_LITERAL: + return True + if self.match(pattern_codes[2:]): + return True + return False + + # General case + while string_position <= self.end: + self.reset() + self.start = self.string_position = string_position + if self.match(pattern_codes): + return True + string_position += 1 + return False + + def fast_search(self, pattern_codes): + """Skips forward in a string as fast as possible using information from + an optimization info block.""" + # pattern starts with a known prefix + # <5=length> <6=skip> <7=prefix data> + flags = pattern_codes[2] + prefix_len = pattern_codes[5] + prefix_skip = pattern_codes[6] # don't really know what this is good for + prefix = pattern_codes[7:7 + prefix_len] + overlap = pattern_codes[7 + prefix_len - 1:pattern_codes[1] + 1] + pattern_codes = pattern_codes[pattern_codes[1] + 1:] + i = 0 + string_position = self.string_position + while string_position < self.end: + while True: + if ord(self.string[string_position]) != prefix[i]: + if i == 0: + break + else: + i = overlap[i] + else: + i += 1 + if i == prefix_len: + # found a potential match + self.start = string_position + 1 - prefix_len + self.string_position = string_position + 1 \ + - prefix_len + prefix_skip + if flags & SRE_INFO_LITERAL: + return True # matched all of pure literal pattern + if self.match(pattern_codes[2 * prefix_skip:]): + return True + i = overlap[i] + break + string_position += 1 + return False + + def set_mark(self, mark_nr, position): + if mark_nr & 1: + # This id marks the end of a group. + # fix python 3 division incompatability + #self.lastindex = mark_nr / 2 + 1 + self.lastindex = mark_nr // 2 + 1 + if mark_nr >= len(self.marks): + self.marks.extend([None] * (mark_nr - len(self.marks) + 1)) + self.marks[mark_nr] = position + + def get_marks(self, group_index): + marks_index = 2 * group_index + if len(self.marks) > marks_index + 1: + return self.marks[marks_index], self.marks[marks_index + 1] + else: + return None, None + + def marks_push(self): + self.marks_stack.append((self.marks[:], self.lastindex)) + + def marks_pop(self): + self.marks, self.lastindex = self.marks_stack.pop() + + def marks_pop_keep(self): + self.marks, self.lastindex = self.marks_stack[-1] + + def marks_pop_discard(self): + self.marks_stack.pop() + + def lower(self, char_ord): + return getlower(char_ord, self.flags) + + +class _MatchContext: + + def __init__(self, state, pattern_codes): + self.state = state + self.pattern_codes = pattern_codes + self.string_position = state.string_position + self.code_position = 0 + self.has_matched = None + + def push_new_context(self, pattern_offset): + """Creates a new child context of this context and pushes it on the + stack. 
pattern_offset is the offset off the current code position to + start interpreting from.""" + child_context = _MatchContext(self.state, + self.pattern_codes[self.code_position + pattern_offset:]) + #print("_sre.py:517:pushing new context") #, child_context.has_matched) + #print(self.state.string_position) + #print(self.pattern_codes[self.code_position + pattern_offset:]) + #print(pattern_offset) + self.state.context_stack.append(child_context) + return child_context + + def peek_char(self, peek=0): + return self.state.string[self.string_position + peek] + + def skip_char(self, skip_count): + self.string_position += skip_count + + def remaining_chars(self): + return self.state.end - self.string_position + + def peek_code(self, peek=0): + return self.pattern_codes[self.code_position + peek] + + def skip_code(self, skip_count): + self.code_position += skip_count + + def remaining_codes(self): + return len(self.pattern_codes) - self.code_position + + def at_beginning(self): + return self.string_position == 0 + + def at_end(self): + return self.string_position == self.state.end + + def at_linebreak(self): + return not self.at_end() and _is_linebreak(self.peek_char()) + + def at_boundary(self, word_checker): + if self.at_beginning() and self.at_end(): + return False + that = not self.at_beginning() and word_checker(self.peek_char(-1)) + this = not self.at_end() and word_checker(self.peek_char()) + return this != that + + +class _RepeatContext(_MatchContext): + + def __init__(self, context): + _MatchContext.__init__(self, context.state, + context.pattern_codes[context.code_position:]) + self.count = -1 + #print('569:repeat', context.state.repeat) + self.previous = context.state.repeat + self.last_position = None + + +class _Dispatcher: + + DISPATCH_TABLE = None + + def dispatch(self, code, context): + method = self.DISPATCH_TABLE.get(code, self.__class__.unknown) + return method(self, context) + + def unknown(self, code, ctx): + raise NotImplementedError() + + def build_dispatch_table(cls, code_dict, method_prefix): + if cls.DISPATCH_TABLE is not None: + return + table = {} + for key, value in code_dict.items(): + if hasattr(cls, "%s%s" % (method_prefix, key)): + table[value] = getattr(cls, "%s%s" % (method_prefix, key)) + cls.DISPATCH_TABLE = table + + build_dispatch_table = classmethod(build_dispatch_table) + + +class _OpcodeDispatcher(_Dispatcher): + + def __init__(self): + self.executing_contexts = {} + self.at_dispatcher = _AtcodeDispatcher() + self.ch_dispatcher = _ChcodeDispatcher() + self.set_dispatcher = _CharsetDispatcher() + + def match(self, context): + """Returns True if the current context matches, False if it doesn't and + None if matching is not finished, ie must be resumed after child + contexts have been matched.""" + while context.remaining_codes() > 0 and context.has_matched is None: + opcode = context.peek_code() + if not self.dispatch(opcode, context): + return None + if context.has_matched is None: + context.has_matched = False + return context.has_matched + + def dispatch(self, opcode, context): + """Dispatches a context on a given opcode. 
Returns True if the context + is done matching, False if it must be resumed when next encountered.""" + #if self.executing_contexts.has_key(id(context)): + if id(context) in self.executing_contexts: + generator = self.executing_contexts[id(context)] + del self.executing_contexts[id(context)] + has_finished = next(generator) + else: + method = self.DISPATCH_TABLE.get(opcode, _OpcodeDispatcher.unknown) + has_finished = method(self, context) + if hasattr(has_finished, "__next__"): # avoid using the types module + generator = has_finished + has_finished = next(generator) + if not has_finished: + self.executing_contexts[id(context)] = generator + return has_finished + + def op_success(self, ctx): + # end of pattern + #self._log(ctx, "SUCCESS") + ctx.state.string_position = ctx.string_position + ctx.has_matched = True + return True + + def op_failure(self, ctx): + # immediate failure + #self._log(ctx, "FAILURE") + ctx.has_matched = False + return True + + def general_op_literal(self, ctx, compare, decorate=lambda x: x): + #print(ctx.peek_char()) + if ctx.at_end() or not compare(decorate(ord(ctx.peek_char())), + decorate(ctx.peek_code(1))): + ctx.has_matched = False + ctx.skip_code(2) + ctx.skip_char(1) + + def op_literal(self, ctx): + # match literal string + # + #self._log(ctx, "LITERAL", ctx.peek_code(1)) + self.general_op_literal(ctx, operator.eq) + return True + + def op_not_literal(self, ctx): + # match anything that is not the given literal character + # + #self._log(ctx, "NOT_LITERAL", ctx.peek_code(1)) + self.general_op_literal(ctx, operator.ne) + return True + + def op_literal_ignore(self, ctx): + # match literal regardless of case + # + #self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1)) + self.general_op_literal(ctx, operator.eq, ctx.state.lower) + return True + + def op_not_literal_ignore(self, ctx): + # match literal regardless of case + # + #self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1)) + self.general_op_literal(ctx, operator.ne, ctx.state.lower) + return True + + def op_at(self, ctx): + # match at given position + # + #self._log(ctx, "AT", ctx.peek_code(1)) + if not self.at_dispatcher.dispatch(ctx.peek_code(1), ctx): + ctx.has_matched = False + #print('_sre.py:line693, update context.has_matched variable') + return True + ctx.skip_code(2) + return True + + def op_category(self, ctx): + # match at given category + # + #self._log(ctx, "CATEGORY", ctx.peek_code(1)) + if ctx.at_end() or not self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx): + ctx.has_matched = False + #print('_sre.py:line703, update context.has_matched variable') + return True + ctx.skip_code(2) + ctx.skip_char(1) + return True + + def op_any(self, ctx): + # match anything (except a newline) + # + #self._log(ctx, "ANY") + if ctx.at_end() or ctx.at_linebreak(): + ctx.has_matched = False + #print('_sre.py:line714, update context.has_matched variable') + return True + ctx.skip_code(1) + ctx.skip_char(1) + return True + + def op_any_all(self, ctx): + # match anything + # + #self._log(ctx, "ANY_ALL") + if ctx.at_end(): + ctx.has_matched = False + #print('_sre.py:line725, update context.has_matched variable') + return True + ctx.skip_code(1) + ctx.skip_char(1) + return True + + def general_op_in(self, ctx, decorate=lambda x: x): + #self._log(ctx, "OP_IN") + #print('general_op_in') + if ctx.at_end(): + ctx.has_matched = False + #print('_sre.py:line734, update context.has_matched variable') + return + skip = ctx.peek_code(1) + ctx.skip_code(2) # set op pointer to the set code + #print(ctx.peek_char(), 
ord(ctx.peek_char()), + # decorate(ord(ctx.peek_char()))) + if not self.check_charset(ctx, decorate(ord(ctx.peek_char()))): + #print('_sre.py:line738, update context.has_matched variable') + ctx.has_matched = False + return + ctx.skip_code(skip - 1) + ctx.skip_char(1) + #print('end:general_op_in') + + def op_in(self, ctx): + # match set member (or non_member) + # + #self._log(ctx, "OP_IN") + self.general_op_in(ctx) + return True + + def op_in_ignore(self, ctx): + # match set member (or non_member), disregarding case of current char + # + #self._log(ctx, "OP_IN_IGNORE") + self.general_op_in(ctx, ctx.state.lower) + return True + + def op_jump(self, ctx): + # jump forward + # + #self._log(ctx, "JUMP", ctx.peek_code(1)) + ctx.skip_code(ctx.peek_code(1) + 1) + return True + + # skip info + # + op_info = op_jump + + def op_mark(self, ctx): + # set mark + # + #self._log(ctx, "OP_MARK", ctx.peek_code(1)) + ctx.state.set_mark(ctx.peek_code(1), ctx.string_position) + ctx.skip_code(2) + return True + + def op_branch(self, ctx): + # alternation + # <0=skip> code ... + #self._log(ctx, "BRANCH") + ctx.state.marks_push() + ctx.skip_code(1) + current_branch_length = ctx.peek_code(0) + while current_branch_length: + # The following tries to shortcut branches starting with a + # (unmatched) literal. _sre.c also shortcuts charsets here. + if not (ctx.peek_code(1) == OPCODES["literal"] and \ + (ctx.at_end() or ctx.peek_code(2) != ord(ctx.peek_char()))): + ctx.state.string_position = ctx.string_position + child_context = ctx.push_new_context(1) + #print("_sre.py:803:op_branch") + yield False + if child_context.has_matched: + ctx.has_matched = True + yield True + ctx.state.marks_pop_keep() + ctx.skip_code(current_branch_length) + current_branch_length = ctx.peek_code(0) + ctx.state.marks_pop_discard() + ctx.has_matched = False + #print('_sre.py:line805, update context.has_matched variable') + yield True + + def op_repeat_one(self, ctx): + # match repeated sequence (maximizing). + # this operator only works if the repeated item is exactly one character + # wide, and we're not already collecting backtracking points. + # <1=min> <2=max> item tail + mincount = ctx.peek_code(2) + maxcount = ctx.peek_code(3) + #print("repeat one", mincount, maxcount) + #self._log(ctx, "REPEAT_ONE", mincount, maxcount) + + if ctx.remaining_chars() < mincount: + ctx.has_matched = False + yield True + ctx.state.string_position = ctx.string_position + count = self.count_repetitions(ctx, maxcount) + ctx.skip_char(count) + if count < mincount: + ctx.has_matched = False + yield True + if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]: + # tail is empty. we're finished + ctx.state.string_position = ctx.string_position + ctx.has_matched = True + yield True + + ctx.state.marks_push() + if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["literal"]: + # Special case: Tail starts with a literal. Skip positions where + # the rest of the pattern cannot possibly match. 
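+            # Editor's note (illustrative, not part of the original patch):
+            # for a pattern like a+b applied to "aaab" the greedy count is 3;
+            # the loop below only tries tail positions whose next character
+            # is the literal "b", so the first attempt already succeeds
+            # instead of backtracking one position at a time.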
+ char = ctx.peek_code(ctx.peek_code(1) + 2) + while True: + while count >= mincount and \ + (ctx.at_end() or ord(ctx.peek_char()) != char): + ctx.skip_char(-1) + count -= 1 + if count < mincount: + break + ctx.state.string_position = ctx.string_position + child_context = ctx.push_new_context(ctx.peek_code(1) + 1) + #print("_sre.py:856:push_new_context") + yield False + if child_context.has_matched: + ctx.has_matched = True + yield True + ctx.skip_char(-1) + count -= 1 + ctx.state.marks_pop_keep() + + else: + # General case: backtracking + while count >= mincount: + ctx.state.string_position = ctx.string_position + child_context = ctx.push_new_context(ctx.peek_code(1) + 1) + yield False + if child_context.has_matched: + ctx.has_matched = True + yield True + ctx.skip_char(-1) + count -= 1 + ctx.state.marks_pop_keep() + + ctx.state.marks_pop_discard() + ctx.has_matched = False + #ctx.has_matched = True # <== this should be True (so match object gets returned to program) + yield True + + def op_min_repeat_one(self, ctx): + # match repeated sequence (minimizing) + # <1=min> <2=max> item tail + mincount = ctx.peek_code(2) + maxcount = ctx.peek_code(3) + #self._log(ctx, "MIN_REPEAT_ONE", mincount, maxcount) + + if ctx.remaining_chars() < mincount: + ctx.has_matched = False + yield True + ctx.state.string_position = ctx.string_position + if mincount == 0: + count = 0 + else: + count = self.count_repetitions(ctx, mincount) + if count < mincount: + ctx.has_matched = False + #print('_sre.py:line891, update context.has_matched variable') + yield True + ctx.skip_char(count) + if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]: + # tail is empty. we're finished + ctx.state.string_position = ctx.string_position + ctx.has_matched = True + yield True + + ctx.state.marks_push() + while maxcount == MAXREPEAT or count <= maxcount: + ctx.state.string_position = ctx.string_position + child_context = ctx.push_new_context(ctx.peek_code(1) + 1) + #print('_sre.py:916:push new context') + yield False + if child_context.has_matched: + ctx.has_matched = True + yield True + ctx.state.string_position = ctx.string_position + if self.count_repetitions(ctx, 1) == 0: + break + ctx.skip_char(1) + count += 1 + ctx.state.marks_pop_keep() + + ctx.state.marks_pop_discard() + ctx.has_matched = False + yield True + + def op_repeat(self, ctx): + # create repeat context. all the hard work is done by the UNTIL + # operator (MAX_UNTIL, MIN_UNTIL) + # <1=min> <2=max> item tail + #self._log(ctx, "REPEAT", ctx.peek_code(2), ctx.peek_code(3)) + + #if ctx.state.repeat is None: + # print("951:ctx.state.repeat is None") + # #ctx.state.repeat=_RepeatContext(ctx) + + repeat = _RepeatContext(ctx) + ctx.state.repeat = repeat + ctx.state.string_position = ctx.string_position + child_context = ctx.push_new_context(ctx.peek_code(1) + 1) + #print("_sre.py:941:push new context", id(child_context)) + #print(child_context.state.repeat) + #print(ctx.state.repeat) + # are these two yields causing the issue? 
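The two single-character repeat operators implemented above differ only in search order; a minimal sketch at the pattern level, using the standard re module for illustration:

    import re

    # REPEAT_ONE (maximizing): take as many characters as possible, then back off.
    assert re.match(r"a*a", "aaaa").group() == "aaaa"
    # MIN_REPEAT_ONE (minimizing): take as few characters as the tail allows.
    assert re.match(r"a*?a", "aaaa").group() == "a"
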
+ yield False + ctx.state.repeat = repeat.previous + ctx.has_matched = child_context.has_matched + yield True + + def op_max_until(self, ctx): + # maximizing repeat + # <1=min> <2=max> item tail + repeat = ctx.state.repeat + #print("op_max_until") #, id(ctx.state.repeat)) + if repeat is None: + #print(id(ctx), id(ctx.state)) + raise RuntimeError("Internal re error: MAX_UNTIL without REPEAT.") + mincount = repeat.peek_code(2) + maxcount = repeat.peek_code(3) + ctx.state.string_position = ctx.string_position + count = repeat.count + 1 + #self._log(ctx, "MAX_UNTIL", count) + + if count < mincount: + # not enough matches + repeat.count = count + child_context = repeat.push_new_context(4) + yield False + ctx.has_matched = child_context.has_matched + if not ctx.has_matched: + repeat.count = count - 1 + ctx.state.string_position = ctx.string_position + yield True + + if (count < maxcount or maxcount == MAXREPEAT) \ + and ctx.state.string_position != repeat.last_position: + # we may have enough matches, if we can match another item, do so + repeat.count = count + ctx.state.marks_push() + save_last_position = repeat.last_position # zero-width match protection + repeat.last_position = ctx.state.string_position + child_context = repeat.push_new_context(4) + yield False + repeat.last_position = save_last_position + if child_context.has_matched: + ctx.state.marks_pop_discard() + ctx.has_matched = True + yield True + ctx.state.marks_pop() + repeat.count = count - 1 + ctx.state.string_position = ctx.string_position + + # cannot match more repeated items here. make sure the tail matches + ctx.state.repeat = repeat.previous + child_context = ctx.push_new_context(1) + #print("_sre.py:987:op_max_until") + yield False + ctx.has_matched = child_context.has_matched + if not ctx.has_matched: + ctx.state.repeat = repeat + ctx.state.string_position = ctx.string_position + yield True + + def op_min_until(self, ctx): + # minimizing repeat + # <1=min> <2=max> item tail + repeat = ctx.state.repeat + if repeat is None: + raise RuntimeError("Internal re error: MIN_UNTIL without REPEAT.") + mincount = repeat.peek_code(2) + maxcount = repeat.peek_code(3) + ctx.state.string_position = ctx.string_position + count = repeat.count + 1 + #self._log(ctx, "MIN_UNTIL", count) + + if count < mincount: + # not enough matches + repeat.count = count + child_context = repeat.push_new_context(4) + yield False + ctx.has_matched = child_context.has_matched + if not ctx.has_matched: + repeat.count = count - 1 + ctx.state.string_position = ctx.string_position + yield True + + # see if the tail matches + ctx.state.marks_push() + ctx.state.repeat = repeat.previous + child_context = ctx.push_new_context(1) + #print('_sre.py:1022:push new context') + yield False + if child_context.has_matched: + ctx.has_matched = True + yield True + ctx.state.repeat = repeat + ctx.state.string_position = ctx.string_position + ctx.state.marks_pop() + + # match more until tail matches + if count >= maxcount and maxcount != MAXREPEAT: + ctx.has_matched = False + #print('_sre.py:line1022, update context.has_matched variable') + yield True + repeat.count = count + child_context = repeat.push_new_context(4) + yield False + ctx.has_matched = child_context.has_matched + if not ctx.has_matched: + repeat.count = count - 1 + ctx.state.string_position = ctx.string_position + yield True + + def general_op_groupref(self, ctx, decorate=lambda x: x): + group_start, group_end = ctx.state.get_marks(ctx.peek_code(1)) + if group_start is None or group_end is None or group_end < 
group_start: + ctx.has_matched = False + return True + while group_start < group_end: + if ctx.at_end() or decorate(ord(ctx.peek_char())) \ + != decorate(ord(ctx.state.string[group_start])): + ctx.has_matched = False + #print('_sre.py:line1042, update context.has_matched variable') + return True + group_start += 1 + ctx.skip_char(1) + ctx.skip_code(2) + return True + + def op_groupref(self, ctx): + # match backreference + # + #self._log(ctx, "GROUPREF", ctx.peek_code(1)) + return self.general_op_groupref(ctx) + + def op_groupref_ignore(self, ctx): + # match backreference case-insensitive + # + #self._log(ctx, "GROUPREF_IGNORE", ctx.peek_code(1)) + return self.general_op_groupref(ctx, ctx.state.lower) + + def op_groupref_exists(self, ctx): + # codeyes codeno ... + #self._log(ctx, "GROUPREF_EXISTS", ctx.peek_code(1)) + group_start, group_end = ctx.state.get_marks(ctx.peek_code(1)) + if group_start is None or group_end is None or group_end < group_start: + ctx.skip_code(ctx.peek_code(2) + 1) + else: + ctx.skip_code(3) + return True + + def op_assert(self, ctx): + # assert subpattern + # + #self._log(ctx, "ASSERT", ctx.peek_code(2)) + ctx.state.string_position = ctx.string_position - ctx.peek_code(2) + if ctx.state.string_position < 0: + ctx.has_matched = False + yield True + child_context = ctx.push_new_context(3) + yield False + if child_context.has_matched: + ctx.skip_code(ctx.peek_code(1) + 1) + else: + ctx.has_matched = False + yield True + + def op_assert_not(self, ctx): + # assert not subpattern + # + #self._log(ctx, "ASSERT_NOT", ctx.peek_code(2)) + ctx.state.string_position = ctx.string_position - ctx.peek_code(2) + if ctx.state.string_position >= 0: + child_context = ctx.push_new_context(3) + yield False + if child_context.has_matched: + ctx.has_matched = False + yield True + ctx.skip_code(ctx.peek_code(1) + 1) + yield True + + def unknown(self, ctx): + #self._log(ctx, "UNKNOWN", ctx.peek_code()) + raise RuntimeError("Internal re error. Unknown opcode: %s" % ctx.peek_code()) + + def check_charset(self, ctx, char): + """Checks whether a character matches set of arbitrary length. Assumes + the code pointer is at the first member of the set.""" + self.set_dispatcher.reset(char) + save_position = ctx.code_position + result = None + while result is None: + result = self.set_dispatcher.dispatch(ctx.peek_code(), ctx) + ctx.code_position = save_position + #print("_sre.py:1123:check_charset", result) + return result + + def count_repetitions(self, ctx, maxcount): + """Returns the number of repetitions of a single item, starting from the + current string position. The code pointer is expected to point to a + REPEAT_ONE operation (with the repeated 4 ahead).""" + count = 0 + real_maxcount = ctx.state.end - ctx.string_position + if maxcount < real_maxcount and maxcount != MAXREPEAT: + real_maxcount = maxcount + # XXX could special case every single character pattern here, as in C. + # This is a general solution, a bit hackisch, but works and should be + # efficient. 
+ code_position = ctx.code_position + string_position = ctx.string_position + ctx.skip_code(4) + reset_position = ctx.code_position + while count < real_maxcount: + # this works because the single character pattern is followed by + # a success opcode + ctx.code_position = reset_position + self.dispatch(ctx.peek_code(), ctx) + #print("count_repetitions", ctx.has_matched, count) + if ctx.has_matched is False: # could be None as well + break + count += 1 + ctx.has_matched = None + ctx.code_position = code_position + ctx.string_position = string_position + return count + + def _log(self, context, opname, *args): + arg_string = ("%s " * len(args)) % args + _log("|%s|%s|%s %s" % (context.pattern_codes, + context.string_position, opname, arg_string)) + +_OpcodeDispatcher.build_dispatch_table(OPCODES, "op_") + + +class _CharsetDispatcher(_Dispatcher): + + def __init__(self): + self.ch_dispatcher = _ChcodeDispatcher() + + def reset(self, char): + self.char = char + self.ok = True + + def set_failure(self, ctx): + return not self.ok + def set_literal(self, ctx): + # + if ctx.peek_code(1) == self.char: + return self.ok + else: + ctx.skip_code(2) + def set_category(self, ctx): + # + if self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx): + return self.ok + else: + ctx.skip_code(2) + def set_charset(self, ctx): + # (16 bits per code word) + char_code = self.char + ctx.skip_code(1) # point to beginning of bitmap + if CODESIZE == 2: + if char_code < 256 and ctx.peek_code(char_code >> 4) \ + & (1 << (char_code & 15)): + return self.ok + ctx.skip_code(16) # skip bitmap + else: + if char_code < 256 and ctx.peek_code(char_code >> 5) \ + & (1 << (char_code & 31)): + return self.ok + ctx.skip_code(8) # skip bitmap + def set_range(self, ctx): + # + if ctx.peek_code(1) <= self.char <= ctx.peek_code(2): + return self.ok + ctx.skip_code(3) + def set_negate(self, ctx): + self.ok = not self.ok + ctx.skip_code(1) + + #fixme brython. 
array module doesn't exist + def set_bigcharset(self, ctx): + raise NotImplementationError("_sre.py: set_bigcharset, array not implemented") + # <256 blockindices> + char_code = self.char + count = ctx.peek_code(1) + ctx.skip_code(2) + if char_code < 65536: + block_index = char_code >> 8 + # NB: there are CODESIZE block indices per bytecode + a = array.array("B") + a.fromstring(array.array(CODESIZE == 2 and "H" or "I", + [ctx.peek_code(block_index // CODESIZE)]).tostring()) + block = a[block_index % CODESIZE] + ctx.skip_code(256 // CODESIZE) # skip block indices + block_value = ctx.peek_code(block * (32 // CODESIZE) + + ((char_code & 255) >> (CODESIZE == 2 and 4 or 5))) + if block_value & (1 << (char_code & ((8 * CODESIZE) - 1))): + return self.ok + else: + ctx.skip_code(256 // CODESIZE) # skip block indices + ctx.skip_code(count * (32 // CODESIZE)) # skip blocks + + def unknown(self, ctx): + return False + +_CharsetDispatcher.build_dispatch_table(OPCODES, "set_") + + +class _AtcodeDispatcher(_Dispatcher): + + def at_beginning(self, ctx): + return ctx.at_beginning() + at_beginning_string = at_beginning + def at_beginning_line(self, ctx): + return ctx.at_beginning() or _is_linebreak(ctx.peek_char(-1)) + def at_end(self, ctx): + return (ctx.remaining_chars() == 1 and ctx.at_linebreak()) or ctx.at_end() + def at_end_line(self, ctx): + return ctx.at_linebreak() or ctx.at_end() + def at_end_string(self, ctx): + return ctx.at_end() + def at_boundary(self, ctx): + return ctx.at_boundary(_is_word) + def at_non_boundary(self, ctx): + return not ctx.at_boundary(_is_word) + def at_loc_boundary(self, ctx): + return ctx.at_boundary(_is_loc_word) + def at_loc_non_boundary(self, ctx): + return not ctx.at_boundary(_is_loc_word) + def at_uni_boundary(self, ctx): + return ctx.at_boundary(_is_uni_word) + def at_uni_non_boundary(self, ctx): + return not ctx.at_boundary(_is_uni_word) + def unknown(self, ctx): + return False + +_AtcodeDispatcher.build_dispatch_table(ATCODES, "") + + +class _ChcodeDispatcher(_Dispatcher): + + def category_digit(self, ctx): + return _is_digit(ctx.peek_char()) + def category_not_digit(self, ctx): + return not _is_digit(ctx.peek_char()) + def category_space(self, ctx): + return _is_space(ctx.peek_char()) + def category_not_space(self, ctx): + return not _is_space(ctx.peek_char()) + def category_word(self, ctx): + return _is_word(ctx.peek_char()) + def category_not_word(self, ctx): + return not _is_word(ctx.peek_char()) + def category_linebreak(self, ctx): + return _is_linebreak(ctx.peek_char()) + def category_not_linebreak(self, ctx): + return not _is_linebreak(ctx.peek_char()) + def category_loc_word(self, ctx): + return _is_loc_word(ctx.peek_char()) + def category_loc_not_word(self, ctx): + return not _is_loc_word(ctx.peek_char()) + def category_uni_digit(self, ctx): + return ctx.peek_char().isdigit() + def category_uni_not_digit(self, ctx): + return not ctx.peek_char().isdigit() + def category_uni_space(self, ctx): + return ctx.peek_char().isspace() + def category_uni_not_space(self, ctx): + return not ctx.peek_char().isspace() + def category_uni_word(self, ctx): + return _is_uni_word(ctx.peek_char()) + def category_uni_not_word(self, ctx): + return not _is_uni_word(ctx.peek_char()) + def category_uni_linebreak(self, ctx): + return ord(ctx.peek_char()) in _uni_linebreaks + def category_uni_not_linebreak(self, ctx): + return ord(ctx.peek_char()) not in _uni_linebreaks + def unknown(self, ctx): + return False + +_ChcodeDispatcher.build_dispatch_table(CHCODES, "") + + 
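The plain ASCII categories above consult the _ascii_char_info bit table that follows (bit 1 marks digits, bit 2 whitespace, bit 16 word characters), while the uni_* categories defer to str methods. The difference is visible at the pattern level; this snippet uses CPython's re module for illustration:

    import re

    # CATEGORY_UNI_WORD: a non-ASCII letter counts as a word character.
    assert re.match(r"\w", "é")
    # CATEGORY_WORD (ASCII flag): code points >= 128 are never word characters.
    assert re.match(r"(?a)\w", "é") is None
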
+_ascii_char_info = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 6, 2, +2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 25, 25, 25, 25, 25, 25, 25, +25, 25, 0, 0, 0, 0, 0, 0, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, +24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, +0, 0, 16, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, +24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 0 ] + +def _is_digit(char): + code = ord(char) + return code < 128 and _ascii_char_info[code] & 1 + +def _is_space(char): + code = ord(char) + return code < 128 and _ascii_char_info[code] & 2 + +def _is_word(char): + # NB: non-ASCII chars aren't words according to _sre.c + code = ord(char) + return code < 128 and _ascii_char_info[code] & 16 + +def _is_loc_word(char): + return (not (ord(char) & ~255) and char.isalnum()) or char == '_' + +def _is_uni_word(char): + # not valid in python 3 + #return unichr(ord(char)).isalnum() or char == '_' + return chr(ord(char)).isalnum() or char == '_' + +def _is_linebreak(char): + return char == "\n" + +# Static list of all unicode codepoints reported by Py_UNICODE_ISLINEBREAK. +_uni_linebreaks = [10, 13, 28, 29, 30, 133, 8232, 8233] + +def _log(message): + if 0: + print(message) diff --git a/lib/assets/Lib/_string.py b/lib/assets/Lib/_string.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_string.py @@ -0,0 +1,42 @@ +"""string helper module""" + +import re + +class __loader__(object): + pass + +def formatter_field_name_split(fieldname): + """split the argument as a field name""" + _list=[] + for _name in fieldname: + _parts = _name.split('.') + for _item in _parts: + is_attr=False #fix me + if re.match('\d+', _item): + _list.append((int(_item), is_attr)) + else: + _list.append((_item, is_attr)) + + return _list[0][0], iter(_list[1:]) + +def formatter_parser(*args,**kw): + """parse the argument as a format string""" + + assert len(args)==1 + assert isinstance(args[0], str) + + _result=[] + for _match in re.finditer("([^{]*)?(\{[^}]*\})?", args[0]): + _pre, _fmt = _match.groups() + if _fmt is None: + _result.append((_pre, None, None, None)) + elif _fmt == '{}': + _result.append((_pre, '', '', None)) + else: + _m=re.match("\{([^!]*)!?(.*)?\}", _fmt) + _name=_m.groups(0) + _flags=_m.groups(1) + + _result.append((_pre, _name, _flags, None)) + + return _result diff --git a/lib/assets/Lib/_strptime.py b/lib/assets/Lib/_strptime.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_strptime.py @@ -0,0 +1,510 @@ +"""Strptime-related classes and functions. + +CLASSES: + LocaleTime -- Discovers and stores locale-specific time information + TimeRE -- Creates regexes for pattern matching a string of text containing + time information + +FUNCTIONS: + _getlang -- Figure out what language is being used for the locale + strptime -- Calculates the time struct represented by the passed-in string + +""" +import time +import locale +import calendar +from re import compile as re_compile +from re import IGNORECASE +from re import escape as re_escape +from datetime import (date as datetime_date, + timedelta as datetime_timedelta, + timezone as datetime_timezone) +try: + from _thread import allocate_lock as _thread_allocate_lock +except ImportError: + from _dummy_thread import allocate_lock as _thread_allocate_lock + +__all__ = [] + +def _getlang(): + # Figure out what the current language is set to. 
+ return locale.getlocale(locale.LC_TIME) + +class LocaleTime(object): + """Stores and handles locale-specific information related to time. + + ATTRIBUTES: + f_weekday -- full weekday names (7-item list) + a_weekday -- abbreviated weekday names (7-item list) + f_month -- full month names (13-item list; dummy value in [0], which + is added by code) + a_month -- abbreviated month names (13-item list, dummy value in + [0], which is added by code) + am_pm -- AM/PM representation (2-item list) + LC_date_time -- format string for date/time representation (string) + LC_date -- format string for date representation (string) + LC_time -- format string for time representation (string) + timezone -- daylight- and non-daylight-savings timezone representation + (2-item list of sets) + lang -- Language used by instance (2-item tuple) + """ + + def __init__(self): + """Set all attributes. + + Order of methods called matters for dependency reasons. + + The locale language is set at the offset and then checked again before + exiting. This is to make sure that the attributes were not set with a + mix of information from more than one locale. This would most likely + happen when using threads where one thread calls a locale-dependent + function while another thread changes the locale while the function in + the other thread is still running. Proper coding would call for + locks to prevent changing the locale while locale-dependent code is + running. The check here is done in case someone does not think about + doing this. + + Only other possible issue is if someone changed the timezone and did + not call tz.tzset . That is an issue for the programmer, though, + since changing the timezone is worthless without that call. + + """ + self.lang = _getlang() + self.__calc_weekday() + self.__calc_month() + self.__calc_am_pm() + self.__calc_timezone() + self.__calc_date_time() + if _getlang() != self.lang: + raise ValueError("locale changed during initialization") + + def __pad(self, seq, front): + # Add '' to seq to either the front (is True), else the back. + seq = list(seq) + if front: + seq.insert(0, '') + else: + seq.append('') + return seq + + def __calc_weekday(self): + # Set self.a_weekday and self.f_weekday using the calendar + # module. + a_weekday = [calendar.day_abbr[i].lower() for i in range(7)] + f_weekday = [calendar.day_name[i].lower() for i in range(7)] + self.a_weekday = a_weekday + self.f_weekday = f_weekday + + def __calc_month(self): + # Set self.f_month and self.a_month using the calendar module. + a_month = [calendar.month_abbr[i].lower() for i in range(13)] + f_month = [calendar.month_name[i].lower() for i in range(13)] + self.a_month = a_month + self.f_month = f_month + + def __calc_am_pm(self): + # Set self.am_pm by using time.strftime(). + + # The magic date (1999,3,17,hour,44,55,2,76,0) is not really that + # magical; just happened to have used it everywhere else where a + # static date was needed. + am_pm = [] + for hour in (1, 22): + time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0)) + am_pm.append(time.strftime("%p", time_tuple).lower()) + self.am_pm = am_pm + + def __calc_date_time(self): + # Set self.date_time, self.date, & self.time by using + # time.strftime(). + + # Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of + # overloaded numbers is minimized. The order in which searches for + # values within the format string is very important; it eliminates + # possible ambiguity for what something represents. 
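LocaleTime also exists under the same name in CPython's _strptime, so its attributes can be inspected directly; the values sketched in the comments assume an English/C locale and will differ elsewhere:

    from _strptime import LocaleTime

    lt = LocaleTime()
    # Under the C locale, roughly:
    #   lt.a_weekday     -> ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
    #   lt.am_pm         -> ['am', 'pm']
    #   lt.LC_date_time  -> '%a %b %d %H:%M:%S %Y'  (the "%c" layout, rebuilt)
    print(lt.a_weekday, lt.am_pm, lt.LC_date_time)
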
+ time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0)) + date_time = [None, None, None] + date_time[0] = time.strftime("%c", time_tuple).lower() + date_time[1] = time.strftime("%x", time_tuple).lower() + date_time[2] = time.strftime("%X", time_tuple).lower() + replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'), + (self.f_month[3], '%B'), (self.a_weekday[2], '%a'), + (self.a_month[3], '%b'), (self.am_pm[1], '%p'), + ('1999', '%Y'), ('99', '%y'), ('22', '%H'), + ('44', '%M'), ('55', '%S'), ('76', '%j'), + ('17', '%d'), ('03', '%m'), ('3', '%m'), + # '3' needed for when no leading zero. + ('2', '%w'), ('10', '%I')] + replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone + for tz in tz_values]) + for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')): + current_format = date_time[offset] + for old, new in replacement_pairs: + # Must deal with possible lack of locale info + # manifesting itself as the empty string (e.g., Swedish's + # lack of AM/PM info) or a platform returning a tuple of empty + # strings (e.g., MacOS 9 having timezone as ('','')). + if old: + current_format = current_format.replace(old, new) + # If %W is used, then Sunday, 2005-01-03 will fall on week 0 since + # 2005-01-03 occurs before the first Monday of the year. Otherwise + # %U is used. + time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0)) + if '00' in time.strftime(directive, time_tuple): + U_W = '%W' + else: + U_W = '%U' + date_time[offset] = current_format.replace('11', U_W) + self.LC_date_time = date_time[0] + self.LC_date = date_time[1] + self.LC_time = date_time[2] + + def __calc_timezone(self): + # Set self.timezone by using time.tzname. + # Do not worry about possibility of time.tzname[0] == timetzname[1] + # and time.daylight; handle that in strptime . + #try: + #time.tzset() + #except AttributeError: + #pass + no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()]) + if time.daylight: + has_saving = frozenset([time.tzname[1].lower()]) + else: + has_saving = frozenset() + self.timezone = (no_saving, has_saving) + + +class TimeRE(dict): + """Handle conversion from format directives to regexes.""" + + def __init__(self, locale_time=None): + """Create keys/values. + + Order of execution is important for dependency reasons. + + """ + if locale_time: + self.locale_time = locale_time + else: + self.locale_time = LocaleTime() + base = super() + base.__init__({ + # The " \d" part of the regex is to make %c from ANSI C work + 'd': r"(?P3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])", + 'f': r"(?P[0-9]{1,6})", + 'H': r"(?P2[0-3]|[0-1]\d|\d)", + 'I': r"(?P1[0-2]|0[1-9]|[1-9])", + 'j': r"(?P36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])", + 'm': r"(?P1[0-2]|0[1-9]|[1-9])", + 'M': r"(?P[0-5]\d|\d)", + 'S': r"(?P6[0-1]|[0-5]\d|\d)", + 'U': r"(?P5[0-3]|[0-4]\d|\d)", + 'w': r"(?P[0-6])", + # W is set below by using 'U' + 'y': r"(?P\d\d)", + #XXX: Does 'Y' need to worry about having less or more than + # 4 digits? 
+ 'Y': r"(?P\d\d\d\d)", + 'z': r"(?P[+-]\d\d[0-5]\d)", + 'A': self.__seqToRE(self.locale_time.f_weekday, 'A'), + 'a': self.__seqToRE(self.locale_time.a_weekday, 'a'), + 'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'), + 'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'), + 'p': self.__seqToRE(self.locale_time.am_pm, 'p'), + 'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone + for tz in tz_names), + 'Z'), + '%': '%'}) + base.__setitem__('W', base.__getitem__('U').replace('U', 'W')) + base.__setitem__('c', self.pattern(self.locale_time.LC_date_time)) + base.__setitem__('x', self.pattern(self.locale_time.LC_date)) + base.__setitem__('X', self.pattern(self.locale_time.LC_time)) + + def __seqToRE(self, to_convert, directive): + """Convert a list to a regex string for matching a directive. + + Want possible matching values to be from longest to shortest. This + prevents the possibility of a match occurring for a value that also + a substring of a larger value that should have matched (e.g., 'abc' + matching when 'abcdef' should have been the match). + + """ + to_convert = sorted(to_convert, key=len, reverse=True) + for value in to_convert: + if value != '': + break + else: + return '' + regex = '|'.join(re_escape(stuff) for stuff in to_convert) + regex = '(?P<%s>%s' % (directive, regex) + return '%s)' % regex + + def pattern(self, format): + """Return regex pattern for the format string. + + Need to make sure that any characters that might be interpreted as + regex syntax are escaped. + + """ + processed_format = '' + # The sub() call escapes all characters that might be misconstrued + # as regex syntax. Cannot use re.escape since we have to deal with + # format directives (%m, etc.). + regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])") + format = regex_chars.sub(r"\\\1", format) + whitespace_replacement = re_compile('\s+') + format = whitespace_replacement.sub('\s+', format) + while '%' in format: + directive_index = format.index('%')+1 + processed_format = "%s%s%s" % (processed_format, + format[:directive_index-1], + self[format[directive_index]]) + format = format[directive_index+1:] + return "%s%s" % (processed_format, format) + + def compile(self, format): + """Return a compiled re object for the format string.""" + return re_compile(self.pattern(format), IGNORECASE) + +_cache_lock = _thread_allocate_lock() +# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock +# first! +_TimeRE_cache = TimeRE() +_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache +_regex_cache = {} + +def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon): + """Calculate the Julian day based on the year, week of the year, and day of + the week, with week_start_day representing whether the week of the year + assumes the week starts on Sunday or Monday (6 or 0).""" + first_weekday = datetime_date(year, 1, 1).weekday() + # If we are dealing with the %U directive (week starts on Sunday), it's + # easier to just shift the view to Sunday being the first day of the + # week. + if not week_starts_Mon: + first_weekday = (first_weekday + 1) % 7 + day_of_week = (day_of_week + 1) % 7 + # Need to watch out for a week 0 (when the first day of the year is not + # the same as that specified by %U or %W). 
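The week-0 situation described above is easiest to see with a concrete date; this sketch goes through time.strptime (which calls into _strptime) and is illustrative only:

    import time

    # Jan 3 1999 was a Sunday: with %U (weeks start on Sunday) it opens week 1,
    # while with %W (weeks start on Monday) it still belongs to week 0.
    assert time.strptime("1999 01 0", "%Y %U %w").tm_yday == 3
    assert time.strptime("1999 00 0", "%Y %W %w").tm_yday == 3
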
+ week_0_length = (7 - first_weekday) % 7 + if week_of_year == 0: + return 1 + day_of_week - first_weekday + else: + days_to_week = week_0_length + (7 * (week_of_year - 1)) + return 1 + days_to_week + day_of_week + + +def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"): + """Return a 2-tuple consisting of a time struct and an int containing + the number of microseconds based on the input string and the + format string.""" + + for index, arg in enumerate([data_string, format]): + if not isinstance(arg, str): + msg = "strptime() argument {} must be str, not {}" + raise TypeError(msg.format(index, type(arg))) + + global _TimeRE_cache, _regex_cache + with _cache_lock: + + if _getlang() != _TimeRE_cache.locale_time.lang: + _TimeRE_cache = TimeRE() + _regex_cache.clear() + if len(_regex_cache) > _CACHE_MAX_SIZE: + _regex_cache.clear() + locale_time = _TimeRE_cache.locale_time + format_regex = _regex_cache.get(format) + if not format_regex: + try: + format_regex = _TimeRE_cache.compile(format) + # KeyError raised when a bad format is found; can be specified as + # \\, in which case it was a stray % but with a space after it + except KeyError as err: + bad_directive = err.args[0] + if bad_directive == "\\": + bad_directive = "%" + del err + raise ValueError("'%s' is a bad directive in format '%s'" % + (bad_directive, format)) from None + # IndexError only occurs when the format string is "%" + except IndexError: + raise ValueError("stray %% in format '%s'" % format) from None + _regex_cache[format] = format_regex + found = format_regex.match(data_string) + if not found: + raise ValueError("time data %r does not match format %r" % + (data_string, format)) + if len(data_string) != found.end(): + raise ValueError("unconverted data remains: %s" % + data_string[found.end():]) + + year = None + month = day = 1 + hour = minute = second = fraction = 0 + tz = -1 + tzoffset = None + # Default to -1 to signify that values not known; not critical to have, + # though + week_of_year = -1 + week_of_year_start = -1 + # weekday and julian defaulted to -1 so as to signal need to calculate + # values + weekday = julian = -1 + found_dict = found.groupdict() + for group_key in found_dict.keys(): + # Directives not explicitly handled below: + # c, x, X + # handled by making out of other directives + # U, W + # worthless without day of the week + if group_key == 'y': + year = int(found_dict['y']) + # Open Group specification for strptime() states that a %y + #value in the range of [00, 68] is in the century 2000, while + #[69,99] is in the century 1900 + if year <= 68: + year += 2000 + else: + year += 1900 + elif group_key == 'Y': + year = int(found_dict['Y']) + elif group_key == 'm': + month = int(found_dict['m']) + elif group_key == 'B': + month = locale_time.f_month.index(found_dict['B'].lower()) + elif group_key == 'b': + month = locale_time.a_month.index(found_dict['b'].lower()) + elif group_key == 'd': + day = int(found_dict['d']) + elif group_key == 'H': + hour = int(found_dict['H']) + elif group_key == 'I': + hour = int(found_dict['I']) + ampm = found_dict.get('p', '').lower() + # If there was no AM/PM indicator, we'll treat this like AM + if ampm in ('', locale_time.am_pm[0]): + # We're in AM so the hour is correct unless we're + # looking at 12 midnight. + # 12 midnight == 12 AM == hour 0 + if hour == 12: + hour = 0 + elif ampm == locale_time.am_pm[1]: + # We're in PM so we need to add 12 to the hour unless + # we're looking at 12 noon. 
+ # 12 noon == 12 PM == hour 12 + if hour != 12: + hour += 12 + elif group_key == 'M': + minute = int(found_dict['M']) + elif group_key == 'S': + second = int(found_dict['S']) + elif group_key == 'f': + s = found_dict['f'] + # Pad to always return microseconds. + s += "0" * (6 - len(s)) + fraction = int(s) + elif group_key == 'A': + weekday = locale_time.f_weekday.index(found_dict['A'].lower()) + elif group_key == 'a': + weekday = locale_time.a_weekday.index(found_dict['a'].lower()) + elif group_key == 'w': + weekday = int(found_dict['w']) + if weekday == 0: + weekday = 6 + else: + weekday -= 1 + elif group_key == 'j': + julian = int(found_dict['j']) + elif group_key in ('U', 'W'): + week_of_year = int(found_dict[group_key]) + if group_key == 'U': + # U starts week on Sunday. + week_of_year_start = 6 + else: + # W starts week on Monday. + week_of_year_start = 0 + elif group_key == 'z': + z = found_dict['z'] + tzoffset = int(z[1:3]) * 60 + int(z[3:5]) + if z.startswith("-"): + tzoffset = -tzoffset + elif group_key == 'Z': + # Since -1 is default value only need to worry about setting tz if + # it can be something other than -1. + found_zone = found_dict['Z'].lower() + for value, tz_values in enumerate(locale_time.timezone): + if found_zone in tz_values: + # Deal with bad locale setup where timezone names are the + # same and yet time.daylight is true; too ambiguous to + # be able to tell what timezone has daylight savings + if (time.tzname[0] == time.tzname[1] and + time.daylight and found_zone not in ("utc", "gmt")): + break + else: + tz = value + break + leap_year_fix = False + if year is None and month == 2 and day == 29: + year = 1904 # 1904 is first leap year of 20th century + leap_year_fix = True + elif year is None: + year = 1900 + # If we know the week of the year and what day of that week, we can figure + # out the Julian day of the year. + if julian == -1 and week_of_year != -1 and weekday != -1: + week_starts_Mon = True if week_of_year_start == 0 else False + julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, + week_starts_Mon) + # Cannot pre-calculate datetime_date() since can change in Julian + # calculation and thus could have different value for the day of the week + # calculation. + if julian == -1: + # Need to add 1 to result since first day of the year is 1, not 0. + julian = datetime_date(year, month, day).toordinal() - \ + datetime_date(year, 1, 1).toordinal() + 1 + else: # Assume that if they bothered to include Julian day it will + # be accurate. + datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal()) + year = datetime_result.year + month = datetime_result.month + day = datetime_result.day + if weekday == -1: + weekday = datetime_date(year, month, day).weekday() + # Add timezone info + tzname = found_dict.get("Z") + if tzoffset is not None: + gmtoff = tzoffset * 60 + else: + gmtoff = None + + if leap_year_fix: + # the caller didn't supply a year but asked for Feb 29th. We couldn't + # use the default of 1900 for computations. We set it back to ensure + # that February 29th is smaller than March 1st. 
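Two of the conventions handled in this loop, shown through time.strptime for illustration (the century rule for %y and the Feb 29 default-year fix prepared just below):

    import time

    # %y century rule: 00-68 map to 2000-2068, 69-99 map to 1969-1999.
    assert time.strptime("68", "%y").tm_year == 2068
    assert time.strptime("69", "%y").tm_year == 1969
    # "Feb 29" with no year: 1900 is not a leap year, so the computation is done
    # with 1904 and the year is restored to 1900 afterwards.
    assert time.strptime("02 29", "%m %d")[:3] == (1900, 2, 29)
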
+ year = 1900 + + return (year, month, day, + hour, minute, second, + weekday, julian, tz, tzname, gmtoff), fraction + +def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"): + """Return a time struct based on the input string and the + format string.""" + tt = _strptime(data_string, format)[0] + return time.struct_time(tt[:time._STRUCT_TM_ITEMS]) + +def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"): + """Return a class cls instance based on the input string and the + format string.""" + tt, fraction = _strptime(data_string, format) + tzname, gmtoff = tt[-2:] + args = tt[:6] + (fraction,) + if gmtoff is not None: + tzdelta = datetime_timedelta(seconds=gmtoff) + if tzname: + tz = datetime_timezone(tzdelta, tzname) + else: + tz = datetime_timezone(tzdelta) + args += (tz,) + return cls(*args) diff --git a/lib/assets/Lib/_struct.py b/lib/assets/Lib/_struct.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_struct.py @@ -0,0 +1,448 @@ +# +# This module is a pure Python version of pypy.module.struct. +# It is only imported if the vastly faster pypy.module.struct is not +# compiled in. For now we keep this version for reference and +# because pypy.module.struct is not ootype-backend-friendly yet. +# + +# this module 'borrowed' from +# https://bitbucket.org/pypy/pypy/src/18626459a9b2/lib_pypy/_struct.py?at=py3k-listview_str +# with many bug fixes + +"""Functions to convert between Python values and C structs. +Python strings are used to hold the data representing the C struct +and also as format strings to describe the layout of data in the C struct. + +The optional first format char indicates byte order, size and alignment: + @: native order, size & alignment (default) + =: native order, std. size & alignment + <: little-endian, std. size & alignment + >: big-endian, std. size & alignment + !: same as > + +The remaining chars indicate types of args and must match exactly; +these can be preceded by a decimal repeat count: + x: pad byte (no data); + c:char; + b:signed byte; + B:unsigned byte; + h:short; + H:unsigned short; + i:int; + I:unsigned int; + l:long; + L:unsigned long; + f:float; + d:double. +Special cases (preceding decimal count indicates length): + s:string (array of char); p: pascal string (with count byte). +Special case (only available in native format): + P:an integer type that is wide enough to hold a pointer. +Special case (not in native mode unless 'long long' in platform C): + q:long long; + Q:unsigned long long +Whitespace between formats is ignored. 
+ +The variable struct.error is an exception raised on errors.""" + +import math, sys + +# TODO: XXX Find a way to get information on native sizes and alignments +class StructError(Exception): + pass +error = StructError +def unpack_int(data,index,size,le): + bytes = [b for b in data[index:index+size]] + if le == 'little': + bytes.reverse() + number = 0 + for b in bytes: + number = number << 8 | b + return int(number) + +def unpack_signed_int(data,index,size,le): + number = unpack_int(data,index,size,le) + max = 2**(size*8) + if number > 2**(size*8 - 1) - 1: + number = int(-1*(max - number)) + return number + +INFINITY = 1e200 * 1e200 +NAN = INFINITY / INFINITY + +def unpack_char(data,index,size,le): + return data[index:index+size] + +def pack_int(number,size,le): + x=number + res=[] + for i in range(size): + res.append(x&0xff) + x >>= 8 + if le == 'big': + res.reverse() + return bytes(res) + +def pack_signed_int(number,size,le): + if not isinstance(number, int): + raise StructError("argument for i,I,l,L,q,Q,h,H must be integer") + if number > 2**(8*size-1)-1 or number < -1*2**(8*size-1): + raise OverflowError("Number:%i too large to convert" % number) + return pack_int(number,size,le) + +def pack_unsigned_int(number,size,le): + if not isinstance(number, int): + raise StructError("argument for i,I,l,L,q,Q,h,H must be integer") + if number < 0: + raise TypeError("can't convert negative long to unsigned") + if number > 2**(8*size)-1: + raise OverflowError("Number:%i too large to convert" % number) + return pack_int(number,size,le) + +def pack_char(char,size,le): + return bytes(char) + +def isinf(x): + return x != 0.0 and x / 2 == x +def isnan(v): + return v != v*1.0 or (v == 1.0 and v == 2.0) + +def pack_float(x, size, le): + unsigned = float_pack(x, size) + result = [] + for i in range(size): + result.append((unsigned >> (i * 8)) & 0xFF) + if le == "big": + result.reverse() + return bytes(result) + +def unpack_float(data, index, size, le): + binary = [data[i] for i in range(index, index + size)] + if le == "big": + binary.reverse() + unsigned = 0 + for i in range(size): + unsigned |= binary[i] << (i * 8) + return float_unpack(unsigned, size, le) + +def round_to_nearest(x): + """Python 3 style round: round a float x to the nearest int, but + unlike the builtin Python 2.x round function: + + - return an int, not a float + - do round-half-to-even, not round-half-away-from-zero. + + We assume that x is finite and nonnegative; except wrong results + if you use this for negative x. 
+ + """ + int_part = int(x) + frac_part = x - int_part + if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1: + int_part += 1 + return int_part + +def float_unpack(Q, size, le): + """Convert a 32-bit or 64-bit integer created + by float_pack into a Python float.""" + + if size == 8: + MIN_EXP = -1021 # = sys.float_info.min_exp + MAX_EXP = 1024 # = sys.float_info.max_exp + MANT_DIG = 53 # = sys.float_info.mant_dig + BITS = 64 + elif size == 4: + MIN_EXP = -125 # C's FLT_MIN_EXP + MAX_EXP = 128 # FLT_MAX_EXP + MANT_DIG = 24 # FLT_MANT_DIG + BITS = 32 + else: + raise ValueError("invalid size value") + + if Q >> BITS: + raise ValueError("input out of range") + + # extract pieces + sign = Q >> BITS - 1 + exp = (Q & ((1 << BITS - 1) - (1 << MANT_DIG - 1))) >> MANT_DIG - 1 + mant = Q & ((1 << MANT_DIG - 1) - 1) + + if exp == MAX_EXP - MIN_EXP + 2: + # nan or infinity + result = float('nan') if mant else float('inf') + elif exp == 0: + # subnormal or zero + result = math.ldexp(float(mant), MIN_EXP - MANT_DIG) + else: + # normal + mant += 1 << MANT_DIG - 1 + result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1) + return -result if sign else result + + +def float_pack(x, size): + """Convert a Python float x into a 64-bit unsigned integer + with the same byte representation.""" + + if size == 8: + MIN_EXP = -1021 # = sys.float_info.min_exp + MAX_EXP = 1024 # = sys.float_info.max_exp + MANT_DIG = 53 # = sys.float_info.mant_dig + BITS = 64 + elif size == 4: + MIN_EXP = -125 # C's FLT_MIN_EXP + MAX_EXP = 128 # FLT_MAX_EXP + MANT_DIG = 24 # FLT_MANT_DIG + BITS = 32 + else: + raise ValueError("invalid size value") + + sign = math.copysign(1.0, x) < 0.0 + if math.isinf(x): + mant = 0 + exp = MAX_EXP - MIN_EXP + 2 + elif math.isnan(x): + mant = 1 << (MANT_DIG-2) # other values possible + exp = MAX_EXP - MIN_EXP + 2 + elif x == 0.0: + mant = 0 + exp = 0 + else: + m, e = math.frexp(abs(x)) # abs(x) == m * 2**e + exp = e - (MIN_EXP - 1) + if exp > 0: + # Normal case. + mant = round_to_nearest(m * (1 << MANT_DIG)) + mant -= 1 << MANT_DIG - 1 + else: + # Subnormal case. + if exp + MANT_DIG - 1 >= 0: + mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1)) + else: + mant = 0 + exp = 0 + + # Special case: rounding produced a MANT_DIG-bit mantissa. + assert 0 <= mant <= 1 << MANT_DIG - 1 + if mant == 1 << MANT_DIG - 1: + mant = 0 + exp += 1 + + # Raise on overflow (in some circumstances, may want to return + # infinity instead). 
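A concrete data point for the packing logic above; CPython's C-backed struct module is used only to show the expected IEEE-754 bit pattern, and float_pack(1.0, 8) is expected to produce the same integer:

    import struct

    # 1.0 has sign 0, biased exponent 1023 and an all-zero stored mantissa.
    bits = struct.unpack(">Q", struct.pack(">d", 1.0))[0]
    assert bits == 0x3FF0000000000000
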
+ if exp >= MAX_EXP - MIN_EXP + 2: + raise OverflowError("float too large to pack in this format") + + # check constraints + assert 0 <= mant < 1 << MANT_DIG - 1 + assert 0 <= exp <= MAX_EXP - MIN_EXP + 2 + assert 0 <= sign <= 1 + return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant + + +big_endian_format = { + 'x':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, + 'b':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, + 'B':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, + 'c':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_char, 'unpack' : unpack_char}, + 's':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, + 'p':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None}, + 'h':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, + 'H':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, + 'i':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, + 'I':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, + 'l':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, + 'L':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, + 'q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int}, + 'Q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int}, + 'f':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, + 'd':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float}, + } +default = big_endian_format +formatmode={ '<' : (default, 'little'), + '>' : (default, 'big'), + '!' : (default, 'big'), + '=' : (default, sys.byteorder), + '@' : (default, sys.byteorder) + } + +def getmode(fmt): + try: + formatdef,endianness = formatmode[fmt[0]] + alignment = fmt[0] not in formatmode or fmt[0]=='@' + index = 1 + except (IndexError, KeyError): + formatdef,endianness = formatmode['@'] + alignment = True + index = 0 + return formatdef,endianness,index,alignment + +def getNum(fmt,i): + num=None + cur = fmt[i] + while ('0'<= cur ) and ( cur <= '9'): + if num == None: + num = int(cur) + else: + num = 10*num + int(cur) + i += 1 + cur = fmt[i] + return num,i + +def calcsize(fmt): + """calcsize(fmt) -> int + Return size of C struct described by format string fmt. + See struct.__doc__ for more on format strings.""" + + formatdef,endianness,i,alignment = getmode(fmt) + num = 0 + result = 0 + while i string + Return string containing values v1, v2, ... packed according to fmt. 
+ See struct.__doc__ for more on format strings.""" + formatdef,endianness,i,alignment = getmode(fmt) + args = list(args) + n_args = len(args) + result = [] + while i 0: + result += [bytes([len(args[0])]) + args[0][:num-1] + b'\0'*padding] + else: + if num<255: + result += [bytes([num-1]) + args[0][:num-1]] + else: + result += [bytes([255]) + args[0][:num-1]] + args.pop(0) + else: + raise StructError("arg for string format not a string") + + else: + if len(args) < num: + raise StructError("insufficient arguments to pack") + for var in args[:num]: + # pad with 0 until position is a multiple of size + if len(result) and alignment: + padding = format['size'] - len(result) % format['size'] + result += [bytes([0])]*padding + result += [format['pack'](var,format['size'],endianness)] + args=args[num:] + num = None + i += 1 + if len(args) != 0: + raise StructError("too many arguments for pack format") + return b''.join(result) + +def unpack(fmt,data): + """unpack(fmt, string) -> (v1, v2, ...) + Unpack the string, containing packed C structure data, according + to fmt. Requires len(string)==calcsize(fmt). + See struct.__doc__ for more on format strings.""" + formatdef,endianness,i,alignment = getmode(fmt) + j = 0 + num = 0 + result = [] + length= calcsize(fmt) + if length != len (data): + raise StructError("unpack str size does not match format") + while i= num: + n = num-1 + result.append(data[j+1:j+n+1]) + j += num + else: + # skip padding bytes until we get at a multiple of size + if j>0 and alignment: + padding = format['size'] - j % format['size'] + j += padding + for n in range(num): + result += [format['unpack'](data,j,format['size'],endianness)] + j += format['size'] + + return tuple(result) + +def pack_into(fmt, buf, offset, *args): + data = pack(fmt, *args) + buf[offset:offset+len(data)] = data + +def unpack_from(fmt, buf, offset=0): + size = calcsize(fmt) + data = buf[offset:offset+size] + if len(data) != size: + raise error("unpack_from requires a buffer of at least %d bytes" + % (size,)) + return unpack(fmt, data) + +def _clearcache(): + "Clear the internal cache." 
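The format-string conventions from the module docstring in action; the sketch below uses CPython's C-backed struct module, whose behaviour this pure-Python fallback is meant to mirror:

    import struct

    # ">HH": big-endian, two unsigned shorts, no padding.
    assert struct.calcsize(">HH") == 4
    assert struct.pack(">HH", 1, 2) == b"\x00\x01\x00\x02"
    assert struct.unpack(">HH", b"\x00\x01\x00\x02") == (1, 2)
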
+ # No cache in this implementation + +if __name__=='__main__': + t = pack('Bf',1,2) + print(t, len(t)) + print(unpack('Bf', t)) + print(calcsize('Bf')) + + \ No newline at end of file diff --git a/lib/assets/Lib/_sysconfigdata.py b/lib/assets/Lib/_sysconfigdata.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_sysconfigdata.py @@ -0,0 +1,2 @@ +build_time_vars={'HAVE_SYS_WAIT_H': 1, 'HAVE_UTIL_H': 0, 'HAVE_SYMLINKAT': 1, 'HAVE_LIBSENDFILE': 0, 'SRCDIRS': 'Parser Grammar Objects Python Modules Mac', 'SIZEOF_OFF_T': 8, 'BASECFLAGS': '-Wno-unused-result', 'HAVE_UTIME_H': 1, 'EXTRAMACHDEPPATH': '', 'HAVE_SYS_TIME_H': 1, 'CFLAGSFORSHARED': '-fPIC', 'HAVE_HYPOT': 1, 'PGSRCS': '\\', 'HAVE_LIBUTIL_H': 0, 'HAVE_COMPUTED_GOTOS': 1, 'HAVE_LUTIMES': 1, 'HAVE_MAKEDEV': 1, 'HAVE_REALPATH': 1, 'HAVE_LINUX_TIPC_H': 1, 'MULTIARCH': 'i386-linux-gnu', 'HAVE_GETWD': 1, 'HAVE_GCC_ASM_FOR_X64': 0, 'HAVE_INET_PTON': 1, 'HAVE_GETHOSTBYNAME_R_6_ARG': 1, 'SIZEOF__BOOL': 1, 'HAVE_ZLIB_COPY': 1, 'ASDLGEN': 'python3.3 ../Parser/asdl_c.py', 'GRAMMAR_INPUT': '../Grammar/Grammar', 'HOST_GNU_TYPE': 'i686-pc-linux-gnu', 'HAVE_SCHED_RR_GET_INTERVAL': 1, 'HAVE_BLUETOOTH_H': 0, 'HAVE_MKFIFO': 1, 'TIMEMODULE_LIB': 0, 'LIBM': '-lm', 'PGENOBJS': '\\ \\', 'PYTHONFRAMEWORK': '', 'GETPGRP_HAVE_ARG': 0, 'HAVE_MMAP': 1, 'SHLIB_SUFFIX': '.so', 'SIZEOF_FLOAT': 4, 'HAVE_RENAMEAT': 1, 'HAVE_LANGINFO_H': 1, 'HAVE_STDLIB_H': 1, 'PY_CORE_CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security -I. -IInclude -I../Include -D_FORTIFY_SOURCE=2 -fPIC -DPy_BUILD_CORE', 'HAVE_BROKEN_PIPE_BUF': 0, 'HAVE_CONFSTR': 1, 'HAVE_SIGTIMEDWAIT': 1, 'HAVE_FTELLO': 1, 'READELF': 'readelf', 'HAVE_SIGALTSTACK': 1, 'TESTTIMEOUT': 3600, 'PYTHONPATH': ':plat-i386-linux-gnu', 'SIZEOF_WCHAR_T': 4, 'LIBOBJS': '', 'HAVE_SYSCONF': 1, 'MAKESETUP': '../Modules/makesetup', 'HAVE_UTIMENSAT': 1, 'HAVE_FCHOWNAT': 1, 'HAVE_WORKING_TZSET': 1, 'HAVE_FINITE': 1, 'HAVE_ASINH': 1, 'HAVE_SETEUID': 1, 'CONFIGFILES': 'configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in', 'HAVE_SETGROUPS': 1, 'PARSER_OBJS': '\\ Parser/myreadline.o Parser/parsetok.o Parser/tokenizer.o', 'HAVE_MBRTOWC': 1, 'SIZEOF_INT': 4, 'HAVE_STDARG_PROTOTYPES': 1, 'TM_IN_SYS_TIME': 0, 'HAVE_SYS_TIMES_H': 1, 'HAVE_LCHOWN': 1, 'HAVE_SSIZE_T': 1, 'HAVE_PAUSE': 1, 'SYSLIBS': '-lm', 'POSIX_SEMAPHORES_NOT_ENABLED': 0, 'HAVE_DEVICE_MACROS': 1, 'BLDSHARED': 'i686-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'LIBSUBDIRS': 'tkinter tkinter/test tkinter/test/test_tkinter \\', 'HAVE_SYS_UN_H': 1, 'HAVE_SYS_STAT_H': 1, 'VPATH': '..', 'INCLDIRSTOMAKE': '/usr/include /usr/include /usr/include/python3.3m /usr/include/python3.3m', 'HAVE_BROKEN_SEM_GETVALUE': 0, 'HAVE_TIMEGM': 1, 'PACKAGE_VERSION': 0, 'MAJOR_IN_SYSMACROS': 0, 'HAVE_ATANH': 1, 'HAVE_GAI_STRERROR': 1, 'HAVE_SYS_POLL_H': 1, 'SIZEOF_PTHREAD_T': 4, 'SIZEOF_FPOS_T': 16, 'HAVE_CTERMID': 1, 'HAVE_TMPFILE': 1, 'HAVE_SETUID': 1, 'CXX': 'i686-linux-gnu-g++ -pthread', 'srcdir': '..', 'HAVE_UINT32_T': 1, 'HAVE_ADDRINFO': 1, 'HAVE_GETSPENT': 1, 'SIZEOF_DOUBLE': 8, 'HAVE_INT32_T': 1, 'LIBRARY_OBJS_OMIT_FROZEN': '\\', 'HAVE_FUTIMES': 1, 'CONFINCLUDEPY': '/usr/include/python3.3m', 'HAVE_RL_COMPLETION_APPEND_CHARACTER': 1, 'LIBFFI_INCLUDEDIR': '', 
'HAVE_SETGID': 1, 'HAVE_UINT64_T': 1, 'EXEMODE': 755, 'UNIVERSALSDK': '', 'HAVE_LIBDL': 1, 'HAVE_GETNAMEINFO': 1, 'HAVE_STDINT_H': 1, 'COREPYTHONPATH': ':plat-i386-linux-gnu', 'HAVE_SOCKADDR_STORAGE': 1, 'HAVE_WAITID': 1, 'EXTRAPLATDIR': '@EXTRAPLATDIR@', 'HAVE_ACCEPT4': 1, 'RUNSHARED': 'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared:', 'EXE': '', 'HAVE_SIGACTION': 1, 'HAVE_CHOWN': 1, 'HAVE_GETLOGIN': 1, 'HAVE_TZNAME': 0, 'PACKAGE_NAME': 0, 'HAVE_GETPGID': 1, 'HAVE_GLIBC_MEMMOVE_BUG': 0, 'BUILD_GNU_TYPE': 'i686-pc-linux-gnu', 'HAVE_LINUX_CAN_H': 1, 'DYNLOADFILE': 'dynload_shlib.o', 'HAVE_PWRITE': 1, 'BUILDEXE': '', 'HAVE_OPENPTY': 1, 'HAVE_LOCKF': 1, 'HAVE_COPYSIGN': 1, 'HAVE_PREAD': 1, 'HAVE_DLOPEN': 1, 'HAVE_SYS_KERN_CONTROL_H': 0, 'PY_FORMAT_LONG_LONG': '"ll"', 'HAVE_TCSETPGRP': 1, 'HAVE_SETSID': 1, 'HAVE_STRUCT_STAT_ST_BIRTHTIME': 0, 'HAVE_STRING_H': 1, 'LDLIBRARY': 'libpython3.3m.so', 'INSTALL_SCRIPT': '/usr/bin/install -c', 'HAVE_SYS_XATTR_H': 1, 'HAVE_CURSES_IS_TERM_RESIZED': 1, 'HAVE_TMPNAM_R': 1, 'STRICT_SYSV_CURSES': "/* Don't use ncurses extensions */", 'WANT_SIGFPE_HANDLER': 1, 'HAVE_INT64_T': 1, 'HAVE_STAT_TV_NSEC': 1, 'HAVE_SYS_MKDEV_H': 0, 'HAVE_BROKEN_POLL': 0, 'HAVE_IF_NAMEINDEX': 1, 'HAVE_GETPWENT': 1, 'PSRCS': '\\', 'RANLIB': 'ranlib', 'HAVE_WCSCOLL': 1, 'WITH_NEXT_FRAMEWORK': 0, 'ASDLGEN_FILES': '../Parser/asdl.py ../Parser/asdl_c.py', 'HAVE_RL_PRE_INPUT_HOOK': 1, 'PACKAGE_URL': 0, 'SHLIB_EXT': 0, 'HAVE_SYS_LOADAVG_H': 0, 'HAVE_LIBIEEE': 0, 'HAVE_SEM_OPEN': 1, 'HAVE_TERM_H': 1, 'IO_OBJS': '\\', 'IO_H': 'Modules/_io/_iomodule.h', 'HAVE_STATVFS': 1, 'VERSION': '3.3', 'HAVE_GETC_UNLOCKED': 1, 'MACHDEPS': 'plat-i386-linux-gnu @EXTRAPLATDIR@', 'SUBDIRSTOO': 'Include Lib Misc', 'HAVE_SETREUID': 1, 'HAVE_ERFC': 1, 'HAVE_SETRESUID': 1, 'LINKFORSHARED': '-Xlinker -export-dynamic -Wl,-O1 -Wl,-Bsymbolic-functions', 'HAVE_SYS_TYPES_H': 1, 'HAVE_GETPAGESIZE': 1, 'HAVE_SETEGID': 1, 'HAVE_PTY_H': 1, 'HAVE_STRUCT_STAT_ST_FLAGS': 0, 'HAVE_WCHAR_H': 1, 'HAVE_FSEEKO': 1, 'Py_ENABLE_SHARED': 1, 'HAVE_SIGRELSE': 1, 'HAVE_PTHREAD_INIT': 0, 'FILEMODE': 644, 'HAVE_SYS_RESOURCE_H': 1, 'HAVE_READLINKAT': 1, 'PYLONG_BITS_IN_DIGIT': 0, 'LINKCC': 'i686-linux-gnu-gcc -pthread', 'HAVE_SETLOCALE': 1, 'HAVE_CHROOT': 1, 'HAVE_OPENAT': 1, 'HAVE_FEXECVE': 1, 'LDCXXSHARED': 'i686-linux-gnu-g++ -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions', 'DIST': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in Include Lib Misc Ext-dummy', 'HAVE_MKNOD': 1, 'PY_LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'HAVE_BROKEN_MBSTOWCS': 0, 'LIBRARY_OBJS': '\\', 'HAVE_LOG1P': 1, 'SIZEOF_VOID_P': 4, 'HAVE_FCHOWN': 1, 'PYTHONFRAMEWORKPREFIX': '', 'HAVE_LIBDLD': 0, 'HAVE_TGAMMA': 1, 'HAVE_ERRNO_H': 1, 'HAVE_IO_H': 0, 'OTHER_LIBTOOL_OPT': '', 'HAVE_POLL_H': 1, 'PY_CPPFLAGS': '-I. 
-IInclude -I../Include -D_FORTIFY_SOURCE=2', 'XMLLIBSUBDIRS': 'xml xml/dom xml/etree xml/parsers xml/sax', 'GRAMMAR_H': 'Include/graminit.h', 'TANH_PRESERVES_ZERO_SIGN': 1, 'HAVE_GETLOADAVG': 1, 'UNICODE_DEPS': '\\ \\', 'HAVE_GETCWD': 1, 'MANDIR': '/usr/share/man', 'MACHDESTLIB': '/usr/lib/python3.3', 'GRAMMAR_C': 'Python/graminit.c', 'PGOBJS': '\\', 'HAVE_DEV_PTMX': 1, 'HAVE_UINTPTR_T': 1, 'HAVE_SCHED_SETAFFINITY': 1, 'PURIFY': '', 'HAVE_DECL_ISINF': 1, 'HAVE_RL_CALLBACK': 1, 'HAVE_WRITEV': 1, 'HAVE_GETHOSTBYNAME_R_5_ARG': 0, 'HAVE_SYS_AUDIOIO_H': 0, 'EXT_SUFFIX': '.cpython-33m.so', 'SIZEOF_LONG_LONG': 8, 'DLINCLDIR': '.', 'HAVE_PATHCONF': 1, 'HAVE_UNLINKAT': 1, 'MKDIR_P': '/bin/mkdir -p', 'HAVE_ALTZONE': 0, 'SCRIPTDIR': '/usr/lib', 'OPCODETARGETGEN_FILES': '\\', 'HAVE_GETSPNAM': 1, 'HAVE_SYS_TERMIO_H': 0, 'HAVE_ATTRIBUTE_FORMAT_PARSETUPLE': 0, 'HAVE_PTHREAD_H': 1, 'Py_DEBUG': 0, 'HAVE_STRUCT_STAT_ST_BLOCKS': 1, 'X87_DOUBLE_ROUNDING': 1, 'SIZEOF_TIME_T': 4, 'HAVE_DYNAMIC_LOADING': 1, 'HAVE_DIRECT_H': 0, 'SRC_GDB_HOOKS': '../Tools/gdb/libpython.py', 'HAVE_GETADDRINFO': 1, 'HAVE_BROKEN_NICE': 0, 'HAVE_DIRENT_H': 1, 'HAVE_WCSXFRM': 1, 'HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK': 1, 'HAVE_FSTATVFS': 1, 'PYTHON': 'python', 'HAVE_OSX105_SDK': 0, 'BINDIR': '/usr/bin', 'TESTPYTHON': 'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared: ./python', 'ARFLAGS': 'rc', 'PLATDIR': 'plat-i386-linux-gnu', 'HAVE_ASM_TYPES_H': 1, 'PY3LIBRARY': 'libpython3.so', 'HAVE_PLOCK': 0, 'FLOCK_NEEDS_LIBBSD': 0, 'WITH_TSC': 0, 'HAVE_LIBREADLINE': 1, 'MACHDEP': 'linux', 'HAVE_SELECT': 1, 'LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'HAVE_HSTRERROR': 1, 'SOABI': 'cpython-33m', 'HAVE_GETTIMEOFDAY': 1, 'HAVE_LIBRESOLV': 0, 'HAVE_UNSETENV': 1, 'HAVE_TM_ZONE': 1, 'HAVE_GETPGRP': 1, 'HAVE_FLOCK': 1, 'HAVE_SYS_BSDTTY_H': 0, 'SUBDIRS': '', 'PYTHONFRAMEWORKINSTALLDIR': '', 'PACKAGE_BUGREPORT': 0, 'HAVE_CLOCK': 1, 'HAVE_GETPEERNAME': 1, 'SIZEOF_PID_T': 4, 'HAVE_CONIO_H': 0, 'HAVE_FSTATAT': 1, 'HAVE_NETPACKET_PACKET_H': 1, 'HAVE_WAIT3': 1, 'DESTPATH': '', 'HAVE_STAT_TV_NSEC2': 0, 'HAVE_GETRESGID': 1, 'HAVE_UCS4_TCL': 0, 'SIGNED_RIGHT_SHIFT_ZERO_FILLS': 0, 'HAVE_TIMES': 1, 'HAVE_UNAME': 1, 'HAVE_ERF': 1, 'SIZEOF_SHORT': 2, 'HAVE_NCURSES_H': 1, 'HAVE_SYS_SENDFILE_H': 1, 'HAVE_CTERMID_R': 0, 'HAVE_TMPNAM': 1, 'prefix': '/usr', 'HAVE_NICE': 1, 'WITH_THREAD': 1, 'LN': 'ln', 'TESTRUNNER': 'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared: ./python ../Tools/scripts/run_tests.py', 'HAVE_SIGINTERRUPT': 1, 'HAVE_SETPGID': 1, 'RETSIGTYPE': 'void', 'HAVE_SCHED_GET_PRIORITY_MAX': 1, 'HAVE_SYS_SYS_DOMAIN_H': 0, 'HAVE_SYS_DIR_H': 0, 'HAVE__GETPTY': 0, 'HAVE_BLUETOOTH_BLUETOOTH_H': 1, 'HAVE_BIND_TEXTDOMAIN_CODESET': 1, 'HAVE_POLL': 1, 'PYTHON_OBJS': '\\', 'HAVE_WAITPID': 1, 'USE_INLINE': 1, 'HAVE_FUTIMENS': 1, 'USE_COMPUTED_GOTOS': 1, 'MAINCC': 'i686-linux-gnu-gcc -pthread', 'HAVE_SOCKETPAIR': 1, 'HAVE_PROCESS_H': 0, 'HAVE_SETVBUF': 1, 'HAVE_FDOPENDIR': 1, 'CONFINCLUDEDIR': '/usr/include', 'BINLIBDEST': '/usr/lib/python3.3', 'HAVE_SYS_IOCTL_H': 1, 'HAVE_SYSEXITS_H': 1, 'LDLAST': '', 'HAVE_SYS_FILE_H': 1, 'HAVE_RL_COMPLETION_SUPPRESS_APPEND': 1, 'HAVE_RL_COMPLETION_MATCHES': 1, 'HAVE_TCGETPGRP': 1, 'SIZEOF_SIZE_T': 4, 'HAVE_EPOLL_CREATE1': 1, 'HAVE_SYS_SELECT_H': 1, 'HAVE_CLOCK_GETTIME': 1, 'CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'HAVE_SNPRINTF': 1, 'BLDLIBRARY': '-lpython3.3m', 'PARSER_HEADERS': '\\', 
'SO': '.so', 'LIBRARY': 'libpython3.3m.a', 'HAVE_FPATHCONF': 1, 'HAVE_TERMIOS_H': 1, 'HAVE_BROKEN_PTHREAD_SIGMASK': 0, 'AST_H': 'Include/Python-ast.h', 'HAVE_GCC_UINT128_T': 0, 'HAVE_ACOSH': 1, 'MODOBJS': 'Modules/_threadmodule.o Modules/signalmodule.o Modules/arraymodule.o Modules/mathmodule.o Modules/_math.o Modules/_struct.o Modules/timemodule.o Modules/_randommodule.o Modules/atexitmodule.o Modules/_elementtree.o Modules/_pickle.o Modules/_datetimemodule.o Modules/_bisectmodule.o Modules/_heapqmodule.o Modules/unicodedata.o Modules/fcntlmodule.o Modules/spwdmodule.o Modules/grpmodule.o Modules/selectmodule.o Modules/socketmodule.o Modules/_posixsubprocess.o Modules/md5module.o Modules/sha1module.o Modules/sha256module.o Modules/sha512module.o Modules/syslogmodule.o Modules/binascii.o Modules/zlibmodule.o Modules/pyexpat.o Modules/posixmodule.o Modules/errnomodule.o Modules/pwdmodule.o Modules/_sre.o Modules/_codecsmodule.o Modules/_weakref.o Modules/_functoolsmodule.o Modules/operator.o Modules/_collectionsmodule.o Modules/itertoolsmodule.o Modules/_localemodule.o Modules/_iomodule.o Modules/iobase.o Modules/fileio.o Modules/bytesio.o Modules/bufferedio.o Modules/textio.o Modules/stringio.o Modules/zipimport.o Modules/faulthandler.o Modules/symtablemodule.o Modules/xxsubtype.o', 'AST_C': 'Python/Python-ast.c', 'HAVE_SYS_NDIR_H': 0, 'DESTDIRS': '/usr /usr/lib /usr/lib/python3.3 /usr/lib/python3.3/lib-dynload', 'HAVE_SIGNAL_H': 1, 'PACKAGE_TARNAME': 0, 'HAVE_GETPRIORITY': 1, 'INCLUDEDIR': '/usr/include', 'HAVE_INTTYPES_H': 1, 'SIGNAL_OBJS': '', 'HAVE_READV': 1, 'HAVE_SETHOSTNAME': 1, 'MODLIBS': '-lrt -lexpat -L/usr/lib -lz -lexpat', 'CC': 'i686-linux-gnu-gcc -pthread', 'HAVE_LCHMOD': 0, 'SIZEOF_UINTPTR_T': 4, 'LIBPC': '/usr/lib/i386-linux-gnu/pkgconfig', 'BYTESTR_DEPS': '\\', 'HAVE_MKDIRAT': 1, 'LIBPL': '/usr/lib/python3.3/config-3.3m-i386-linux-gnu', 'HAVE_SHADOW_H': 1, 'HAVE_SYS_EVENT_H': 0, 'INSTALL': '/usr/bin/install -c', 'HAVE_GCC_ASM_FOR_X87': 1, 'HAVE_BROKEN_UNSETENV': 0, 'BASECPPFLAGS': '', 'DOUBLE_IS_BIG_ENDIAN_IEEE754': 0, 'HAVE_STRUCT_STAT_ST_RDEV': 1, 'HAVE_SEM_UNLINK': 1, 'BUILDPYTHON': 'python', 'HAVE_RL_CATCH_SIGNAL': 1, 'HAVE_DECL_TZNAME': 0, 'RESSRCDIR': 'Mac/Resources/framework', 'HAVE_PTHREAD_SIGMASK': 1, 'HAVE_UTIMES': 1, 'DISTDIRS': 'Include Lib Misc Ext-dummy', 'HAVE_FDATASYNC': 1, 'HAVE_USABLE_WCHAR_T': 0, 'PY_FORMAT_SIZE_T': '"z"', 'HAVE_SCHED_SETSCHEDULER': 1, 'VA_LIST_IS_ARRAY': 0, 'HAVE_LINUX_NETLINK_H': 1, 'HAVE_SETREGID': 1, 'HAVE_STROPTS_H': 1, 'LDVERSION': '3.3m', 'abs_builddir': '/build/buildd/python3.3-3.3.1/build-shared', 'SITEPATH': '', 'HAVE_GETHOSTBYNAME': 0, 'HAVE_SIGPENDING': 1, 'HAVE_KQUEUE': 0, 'HAVE_SYNC': 1, 'HAVE_GETSID': 1, 'HAVE_ROUND': 1, 'HAVE_STRFTIME': 1, 'AST_H_DIR': 'Include', 'HAVE_PIPE2': 1, 'AST_C_DIR': 'Python', 'TESTPYTHONOPTS': '', 'HAVE_DEV_PTC': 0, 'GETTIMEOFDAY_NO_TZ': 0, 'HAVE_NET_IF_H': 1, 'HAVE_SENDFILE': 1, 'HAVE_SETPGRP': 1, 'HAVE_SEM_GETVALUE': 1, 'CONFIGURE_LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'DLLLIBRARY': '', 'PYTHON_FOR_BUILD': './python -E', 'SETPGRP_HAVE_ARG': 0, 'HAVE_INET_ATON': 1, 'INSTALL_SHARED': '/usr/bin/install -c -m 555', 'WITH_DOC_STRINGS': 1, 'OPCODETARGETS_H': '\\', 'HAVE_INITGROUPS': 1, 'HAVE_LINKAT': 1, 'BASEMODLIBS': '', 'SGI_ABI': '', 'HAVE_SCHED_SETPARAM': 1, 'OPT': '-DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes', 'HAVE_POSIX_FADVISE': 1, 'datarootdir': '/usr/share', 'HAVE_MEMRCHR': 1, 'HGTAG': '', 'HAVE_MEMMOVE': 1, 'HAVE_GETRESUID': 1, 'DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754': 0, 
'HAVE_LSTAT': 1, 'AR': 'ar', 'HAVE_WAIT4': 1, 'HAVE_SYS_MODEM_H': 0, 'INSTSONAME': 'libpython3.3m.so.1.0', 'HAVE_SYS_STATVFS_H': 1, 'HAVE_LGAMMA': 1, 'HAVE_PROTOTYPES': 1, 'HAVE_SYS_UIO_H': 1, 'MAJOR_IN_MKDEV': 0, 'QUICKTESTOPTS': '-x test_subprocess test_io test_lib2to3 \\', 'HAVE_SYS_DEVPOLL_H': 0, 'HAVE_CHFLAGS': 0, 'HAVE_FSYNC': 1, 'HAVE_FCHMOD': 1, 'INCLUDEPY': '/usr/include/python3.3m', 'HAVE_SEM_TIMEDWAIT': 1, 'LDLIBRARYDIR': '', 'HAVE_STRUCT_TM_TM_ZONE': 1, 'HAVE_CURSES_H': 1, 'TIME_WITH_SYS_TIME': 1, 'HAVE_DUP2': 1, 'ENABLE_IPV6': 1, 'WITH_VALGRIND': 0, 'HAVE_SETITIMER': 1, 'THREADOBJ': 'Python/thread.o', 'LOCALMODLIBS': '-lrt -lexpat -L/usr/lib -lz -lexpat', 'HAVE_MEMORY_H': 1, 'HAVE_GETITIMER': 1, 'HAVE_C99_BOOL': 1, 'INSTALL_DATA': '/usr/bin/install -c -m 644', 'PGEN': 'Parser/pgen', 'HAVE_GRP_H': 1, 'HAVE_WCSFTIME': 1, 'AIX_GENUINE_CPLUSPLUS': 0, 'HAVE_LIBINTL_H': 1, 'SHELL': '/bin/sh', 'HAVE_UNISTD_H': 1, 'EXTRATESTOPTS': '', 'HAVE_EXECV': 1, 'HAVE_FSEEK64': 0, 'MVWDELCH_IS_EXPRESSION': 1, 'DESTSHARED': '/usr/lib/python3.3/lib-dynload', 'OPCODETARGETGEN': '\\', 'LIBDEST': '/usr/lib/python3.3', 'CCSHARED': '-fPIC', 'HAVE_EXPM1': 1, 'HAVE_DLFCN_H': 1, 'exec_prefix': '/usr', 'HAVE_READLINK': 1, 'WINDOW_HAS_FLAGS': 1, 'HAVE_FTELL64': 0, 'HAVE_STRLCPY': 0, 'MACOSX_DEPLOYMENT_TARGET': '', 'HAVE_SYS_SYSCALL_H': 1, 'DESTLIB': '/usr/lib/python3.3', 'LDSHARED': 'i686-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'HGVERSION': '', 'PYTHON_HEADERS': '\\', 'HAVE_STRINGS_H': 1, 'DOUBLE_IS_LITTLE_ENDIAN_IEEE754': 1, 'HAVE_POSIX_FALLOCATE': 1, 'HAVE_DIRFD': 1, 'HAVE_LOG2': 1, 'HAVE_GETPID': 1, 'HAVE_ALARM': 1, 'MACHDEP_OBJS': '', 'HAVE_SPAWN_H': 1, 'HAVE_FORK': 1, 'HAVE_SETRESGID': 1, 'HAVE_FCHMODAT': 1, 'HAVE_CLOCK_GETRES': 1, 'MACHDEPPATH': ':plat-i386-linux-gnu', 'STDC_HEADERS': 1, 'HAVE_SETPRIORITY': 1, 'LIBC': '', 'HAVE_SYS_EPOLL_H': 1, 'HAVE_SYS_UTSNAME_H': 1, 'HAVE_PUTENV': 1, 'HAVE_CURSES_RESIZE_TERM': 1, 'HAVE_FUTIMESAT': 1, 'WITH_DYLD': 0, 'INSTALL_PROGRAM': '/usr/bin/install -c', 'LIBS': '-lpthread -ldl -lutil', 'HAVE_TRUNCATE': 1, 'TESTOPTS': '', 'PROFILE_TASK': '../Tools/pybench/pybench.py -n 2 --with-gc --with-syscheck', 'HAVE_CURSES_RESIZETERM': 1, 'ABIFLAGS': 'm', 'HAVE_GETGROUPLIST': 1, 'OBJECT_OBJS': '\\', 'HAVE_MKNODAT': 1, 'HAVE_ST_BLOCKS': 1, 'HAVE_STRUCT_STAT_ST_GEN': 0, 'SYS_SELECT_WITH_SYS_TIME': 1, 'SHLIBS': '-lpthread -ldl -lutil', 'HAVE_GETGROUPS': 1, 'MODULE_OBJS': '\\', 'PYTHONFRAMEWORKDIR': 'no-framework', 'HAVE_FCNTL_H': 1, 'HAVE_LINK': 1, 'HAVE_SIGWAIT': 1, 'HAVE_GAMMA': 1, 'HAVE_SYS_LOCK_H': 0, 'HAVE_FORKPTY': 1, 'HAVE_SOCKADDR_SA_LEN': 0, 'HAVE_TEMPNAM': 1, 'HAVE_STRUCT_STAT_ST_BLKSIZE': 1, 'HAVE_MKFIFOAT': 1, 'HAVE_SIGWAITINFO': 1, 'HAVE_FTIME': 1, 'HAVE_EPOLL': 1, 'HAVE_SYS_SOCKET_H': 1, 'HAVE_LARGEFILE_SUPPORT': 1, 'CONFIGURE_CFLAGS': '-g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security', 'HAVE_PTHREAD_DESTRUCTOR': 0, 'CONFIGURE_CPPFLAGS': '-D_FORTIFY_SOURCE=2', 'HAVE_SYMLINK': 1, 'HAVE_LONG_LONG': 1, 'HAVE_IEEEFP_H': 0, 'LIBDIR': '/usr/lib', 'HAVE_PTHREAD_KILL': 1, 'TESTPATH': '', 'HAVE_STRDUP': 1, 'POBJS': '\\', 'NO_AS_NEEDED': '-Wl,--no-as-needed', 'HAVE_LONG_DOUBLE': 1, 'HGBRANCH': '', 'DISTFILES': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in', 'PTHREAD_SYSTEM_SCHED_SUPPORTED': 1, 
'HAVE_FACCESSAT': 1, 'AST_ASDL': '../Parser/Python.asdl', 'CPPFLAGS': '-I. -IInclude -I../Include -D_FORTIFY_SOURCE=2', 'HAVE_MKTIME': 1, 'HAVE_NDIR_H': 0, 'PY_CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'LIBOBJDIR': 'Python/', 'HAVE_LINUX_CAN_RAW_H': 1, 'HAVE_GETHOSTBYNAME_R_3_ARG': 0, 'PACKAGE_STRING': 0, 'GNULD': 'yes', 'LOG1P_DROPS_ZERO_SIGN': 0, 'HAVE_FTRUNCATE': 1, 'WITH_LIBINTL': 0, 'HAVE_MREMAP': 1, 'HAVE_DECL_ISNAN': 1, 'HAVE_KILLPG': 1, 'SIZEOF_LONG': 4, 'HAVE_DECL_ISFINITE': 1, 'HAVE_IPA_PURE_CONST_BUG': 0, 'WITH_PYMALLOC': 1, 'abs_srcdir': '/build/buildd/python3.3-3.3.1/build-shared/..', 'HAVE_FCHDIR': 1, 'HAVE_BROKEN_POSIX_SEMAPHORES': 0, 'AC_APPLE_UNIVERSAL_BUILD': 0, 'PGENSRCS': '\\ \\', 'DIRMODE': 755, 'HAVE_GETHOSTBYNAME_R': 1, 'HAVE_LCHFLAGS': 0, 'HAVE_SYS_PARAM_H': 1, 'SIZEOF_LONG_DOUBLE': 12, 'CONFIG_ARGS': "'--enable-shared' '--prefix=/usr' '--enable-ipv6' '--enable-loadable-sqlite-extensions' '--with-dbmliborder=bdb:gdbm' '--with-computed-gotos' '--with-system-expat' '--with-system-ffi' '--with-fpectl' 'CC=i686-linux-gnu-gcc' 'CFLAGS=-g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ' 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro' 'CPPFLAGS=-D_FORTIFY_SOURCE=2'", 'HAVE_SCHED_H': 1, 'HAVE_KILL': 1} + diff --git a/lib/assets/Lib/_testcapi.py b/lib/assets/Lib/_testcapi.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_testcapi.py @@ -0,0 +1,290 @@ + +CHAR_MAX = 127 + +CHAR_MIN = -128 + +DBL_MAX = 1.7976931348623157e+308 + +DBL_MIN = 2.2250738585072014e-308 + +FLT_MAX = 3.4028234663852886e+38 + +FLT_MIN = 1.1754943508222875e-38 + +INT_MAX = 2147483647 + +INT_MIN = -2147483648 + +LLONG_MAX = 9223372036854775807 + +LLONG_MIN = -9223372036854775808 + +LONG_MAX = 2147483647 + +LONG_MIN = -2147483648 + +PY_SSIZE_T_MAX = 2147483647 + +PY_SSIZE_T_MIN = -2147483648 + +SHRT_MAX = 32767 + +SHRT_MIN = -32768 + +SIZEOF_PYGC_HEAD = 16 + +UCHAR_MAX = 255 + +UINT_MAX = 4294967295 + +ULLONG_MAX = 18446744073709551615 + +ULONG_MAX = 4294967295 + +USHRT_MAX = 65535 + +__loader__ = "<_frozen_importlib.ExtensionFileLoader object at 0x00C98DD0>" + +def _pending_threadfunc(*args,**kw): + pass + +class _test_structmembersType(object): + pass + +def _test_thread_state(*args,**kw): + pass + +def argparsing(*args,**kw): + pass + +def code_newempty(*args,**kw): + pass + +def codec_incrementaldecoder(*args,**kw): + pass + +def codec_incrementalencoder(*args,**kw): + pass + +def crash_no_current_thread(*args,**kw): + pass + +class error(Exception): + pass + +def exception_print(*args,**kw): + pass + +def getargs_B(*args,**kw): + pass + +def getargs_H(*args,**kw): + pass + +def getargs_I(*args,**kw): + pass + +def getargs_K(*args,**kw): + pass + +def getargs_L(*args,**kw): + pass + +def getargs_Z(*args,**kw): + pass + +def getargs_Z_hash(*args,**kw): + pass + +def getargs_b(*args,**kw): + pass + +def getargs_c(*args,**kw): + pass + +def getargs_h(*args,**kw): + pass + +def getargs_i(*args,**kw): + pass + +def getargs_k(*args,**kw): + pass + +def getargs_keyword_only(*args,**kw): + pass + +def getargs_keywords(*args,**kw): + pass + +def getargs_l(*args,**kw): + pass + +def getargs_n(*args,**kw): + pass + +def getargs_p(*args,**kw): + pass + +def getargs_s(*args,**kw): + pass + +def getargs_s_hash(*args,**kw): + pass + +def getargs_s_star(*args,**kw): + pass + +def getargs_tuple(*args,**kw): + pass + +def getargs_u(*args,**kw): + pass + +def 
getargs_u_hash(*args,**kw): + pass + +def getargs_w_star(*args,**kw): + pass + +def getargs_y(*args,**kw): + pass + +def getargs_y_hash(*args,**kw): + pass + +def getargs_y_star(*args,**kw): + pass + +def getargs_z(*args,**kw): + pass + +def getargs_z_hash(*args,**kw): + pass + +def getargs_z_star(*args,**kw): + pass + +class instancemethod(object): + pass + +def make_exception_with_doc(*args,**kw): + pass + +def make_memoryview_from_NULL_pointer(*args,**kw): + pass + +def parse_tuple_and_keywords(*args,**kw): + pass + +def pytime_object_to_time_t(*args,**kw): + pass + +def pytime_object_to_timespec(*args,**kw): + pass + +def pytime_object_to_timeval(*args,**kw): + pass + +def raise_exception(*args,**kw): + pass + +def raise_memoryerror(*args,**kw): + pass + +def run_in_subinterp(*args,**kw): + pass + +def set_exc_info(*args,**kw): + pass + +def test_L_code(*args,**kw): + pass + +def test_Z_code(*args,**kw): + pass + +def test_capsule(*args,**kw): + pass + +def test_config(*args,**kw): + pass + +def test_datetime_capi(*args,**kw): + pass + +def test_dict_iteration(*args,**kw): + pass + +def test_empty_argparse(*args,**kw): + pass + +def test_k_code(*args,**kw): + pass + +def test_lazy_hash_inheritance(*args,**kw): + pass + +def test_list_api(*args,**kw): + pass + +def test_long_and_overflow(*args,**kw): + pass + +def test_long_api(*args,**kw): + pass + +def test_long_as_double(*args,**kw): + pass + +def test_long_as_size_t(*args,**kw): + pass + +def test_long_long_and_overflow(*args,**kw): + pass + +def test_long_numbits(*args,**kw): + pass + +def test_longlong_api(*args,**kw): + pass + +def test_null_strings(*args,**kw): + pass + +def test_s_code(*args,**kw): + pass + +def test_string_from_format(*args,**kw): + pass + +def test_string_to_double(*args,**kw): + pass + +def test_u_code(*args,**kw): + pass + +def test_unicode_compare_with_ascii(*args,**kw): + pass + +def test_widechar(*args,**kw): + pass + +def test_with_docstring(*args,**kw): + """This is a pretty normal docstring.""" + pass + +def traceback_print(*args,**kw): + pass + +def unicode_aswidechar(*args,**kw): + pass + +def unicode_aswidecharstring(*args,**kw): + pass + +def unicode_encodedecimal(*args,**kw): + pass + +def unicode_transformdecimaltoascii(*args,**kw): + pass diff --git a/lib/assets/Lib/_thread.py b/lib/assets/Lib/_thread.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_thread.py @@ -0,0 +1,155 @@ +"""Drop-in replacement for the thread module. + +Meant to be used as a brain-dead substitute so that threaded code does +not need to be rewritten for when the thread module is not present. + +Suggested usage is:: + + try: + import _thread + except ImportError: + import _dummy_thread as _thread + +""" +# Exports only things specified by thread documentation; +# skipping obsolete synonyms allocate(), start_new(), exit_thread(). +__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock', + 'interrupt_main', 'LockType'] + +# A dummy value +TIMEOUT_MAX = 2**31 + +# NOTE: this module can be imported early in the extension building process, +# and so top level imports of other modules should be avoided. Instead, all +# imports are done when needed on a function-by-function basis. Since threads +# are disabled, the import lock should not be an issue anyway (??). + +error = RuntimeError + +def start_new_thread(function, args, kwargs={}): + """Dummy implementation of _thread.start_new_thread(). + + Compatibility is maintained by making sure that ``args`` is a + tuple and ``kwargs`` is a dictionary. 
If an exception is raised + and it is SystemExit (which can be done by _thread.exit()) it is + caught and nothing is done; all other exceptions are printed out + by using traceback.print_exc(). + + If the executed function calls interrupt_main the KeyboardInterrupt will be + raised when the function returns. + + """ + if type(args) != type(tuple()): + raise TypeError("2nd arg must be a tuple") + if type(kwargs) != type(dict()): + raise TypeError("3rd arg must be a dict") + global _main + _main = False + try: + function(*args, **kwargs) + except SystemExit: + pass + except: + import traceback + traceback.print_exc() + _main = True + global _interrupt + if _interrupt: + _interrupt = False + raise KeyboardInterrupt + +def exit(): + """Dummy implementation of _thread.exit().""" + raise SystemExit + +def get_ident(): + """Dummy implementation of _thread.get_ident(). + + Since this module should only be used when _threadmodule is not + available, it is safe to assume that the current process is the + only thread. Thus a constant can be safely returned. + """ + return -1 + +def allocate_lock(): + """Dummy implementation of _thread.allocate_lock().""" + return LockType() + +def stack_size(size=None): + """Dummy implementation of _thread.stack_size().""" + if size is not None: + raise error("setting thread stack size not supported") + return 0 + +class LockType(object): + """Class implementing dummy implementation of _thread.LockType. + + Compatibility is maintained by maintaining self.locked_status + which is a boolean that stores the state of the lock. Pickling of + the lock, though, should not be done since if the _thread module is + then used with an unpickled ``lock()`` from here problems could + occur from this class not having atomic methods. + + """ + + def __init__(self): + self.locked_status = False + + def acquire(self, waitflag=None, timeout=-1): + """Dummy implementation of acquire(). + + For blocking calls, self.locked_status is automatically set to + True and returned appropriately based on value of + ``waitflag``. If it is non-blocking, then the value is + actually checked and not set if it is already acquired. This + is all done so that threading.Condition's assert statements + aren't triggered and throw a little fit. + + """ + if waitflag is None or waitflag: + self.locked_status = True + return True + else: + if not self.locked_status: + self.locked_status = True + return True + else: + if timeout > 0: + import time + time.sleep(timeout) + return False + + __enter__ = acquire + + def __exit__(self, typ, val, tb): + self.release() + + def release(self): + """Release the dummy lock.""" + # XXX Perhaps shouldn't actually bother to test? Could lead + # to problems for complex, threaded code. 
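        # Mirrors real _thread.LockType behaviour: releasing a lock that is not
        # currently held raises `error` (aliased to RuntimeError at the top of
        # this module), so callers fail the same way as with the real module.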
+ if not self.locked_status: + raise error + self.locked_status = False + return True + + def locked(self): + return self.locked_status + +# Used to signal that interrupt_main was called in a "thread" +_interrupt = False +# True when not executing in a "thread" +_main = True + +def interrupt_main(): + """Set _interrupt flag to True to have start_new_thread raise + KeyboardInterrupt upon exiting.""" + if _main: + raise KeyboardInterrupt + else: + global _interrupt + _interrupt = True + +# Brython-specific to avoid circular references between threading and _threading_local +class _local: + pass \ No newline at end of file diff --git a/lib/assets/Lib/_threading_local.py b/lib/assets/Lib/_threading_local.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_threading_local.py @@ -0,0 +1,246 @@ +"""Thread-local objects. + +(Note that this module provides a Python version of the threading.local + class. Depending on the version of Python you're using, there may be a + faster one available. You should always import the `local` class from + `threading`.) + +Thread-local objects support the management of thread-local data. +If you have data that you want to be local to a thread, simply create +a thread-local object and use its attributes: + + >>> mydata = local() + >>> mydata.number = 42 + >>> mydata.number + 42 + +You can also access the local-object's dictionary: + + >>> mydata.__dict__ + {'number': 42} + >>> mydata.__dict__.setdefault('widgets', []) + [] + >>> mydata.widgets + [] + +What's important about thread-local objects is that their data are +local to a thread. If we access the data in a different thread: + + >>> log = [] + >>> def f(): + ... items = sorted(mydata.__dict__.items()) + ... log.append(items) + ... mydata.number = 11 + ... log.append(mydata.number) + + >>> import threading + >>> thread = threading.Thread(target=f) + >>> thread.start() + >>> thread.join() + >>> log + [[], 11] + +we get different data. Furthermore, changes made in the other thread +don't affect data seen in this thread: + + >>> mydata.number + 42 + +Of course, values you get from a local object, including a __dict__ +attribute, are for whatever thread was current at the time the +attribute was read. For that reason, you generally don't want to save +these values across threads, as they apply only to the thread they +came from. + +You can create custom local objects by subclassing the local class: + + >>> class MyLocal(local): + ... number = 2 + ... initialized = False + ... def __init__(self, **kw): + ... if self.initialized: + ... raise SystemError('__init__ called too many times') + ... self.initialized = True + ... self.__dict__.update(kw) + ... def squared(self): + ... return self.number ** 2 + +This can be useful to support default values, methods and +initialization. Note that if you define an __init__ method, it will be +called each time the local object is used in a separate thread. This +is necessary to initialize each thread's dictionary. 
+ +Now if we create a local object: + + >>> mydata = MyLocal(color='red') + +Now we have a default number: + + >>> mydata.number + 2 + +an initial color: + + >>> mydata.color + 'red' + >>> del mydata.color + +And a method that operates on the data: + + >>> mydata.squared() + 4 + +As before, we can access the data in a separate thread: + + >>> log = [] + >>> thread = threading.Thread(target=f) + >>> thread.start() + >>> thread.join() + >>> log + [[('color', 'red'), ('initialized', True)], 11] + +without affecting this thread's data: + + >>> mydata.number + 2 + >>> mydata.color + Traceback (most recent call last): + ... + AttributeError: 'MyLocal' object has no attribute 'color' + +Note that subclasses can define slots, but they are not thread +local. They are shared across threads: + + >>> class MyLocal(local): + ... __slots__ = 'number' + + >>> mydata = MyLocal() + >>> mydata.number = 42 + >>> mydata.color = 'red' + +So, the separate thread: + + >>> thread = threading.Thread(target=f) + >>> thread.start() + >>> thread.join() + +affects what we see: + + >>> mydata.number + 11 + +>>> del mydata +""" + +from weakref import ref +from contextlib import contextmanager + +__all__ = ["local"] + +# We need to use objects from the threading module, but the threading +# module may also want to use our `local` class, if support for locals +# isn't compiled in to the `thread` module. This creates potential problems +# with circular imports. For that reason, we don't import `threading` +# until the bottom of this file (a hack sufficient to worm around the +# potential problems). Note that all platforms on CPython do have support +# for locals in the `thread` module, and there is no circular import problem +# then, so problems introduced by fiddling the order of imports here won't +# manifest. + +class _localimpl: + """A class managing thread-local dicts""" + __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__' + + def __init__(self): + # The key used in the Thread objects' attribute dicts. + # We keep it a string for speed but make it unlikely to clash with + # a "real" attribute. + self.key = '_threading_local._localimpl.' + str(id(self)) + # { id(Thread) -> (ref(Thread), thread-local dict) } + self.dicts = {} + + def get_dict(self): + """Return the dict for the current thread. Raises KeyError if none + defined.""" + thread = current_thread() + return self.dicts[id(thread)][1] + + def create_dict(self): + """Create a new dict for the current thread, and return it.""" + localdict = {} + key = self.key + thread = current_thread() + idt = id(thread) + def local_deleted(_, key=key): + # When the localimpl is deleted, remove the thread attribute. + thread = wrthread() + if thread is not None: + del thread.__dict__[key] + def thread_deleted(_, idt=idt): + # When the thread is deleted, remove the local dict. + # Note that this is suboptimal if the thread object gets + # caught in a reference loop. We would like to be called + # as soon as the OS-level thread ends instead. 
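            # `wrlocal` and `wrthread` (bound right after these callbacks) are
            # weak references whose callbacks are local_deleted/thread_deleted,
            # so cleanup runs when either the local object or the Thread is
            # garbage collected, without one keeping the other alive.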
+ local = wrlocal() + if local is not None: + dct = local.dicts.pop(idt) + wrlocal = ref(self, local_deleted) + wrthread = ref(thread, thread_deleted) + thread.__dict__[key] = wrlocal + self.dicts[idt] = wrthread, localdict + return localdict + + +@contextmanager +def _patch(self): + impl = object.__getattribute__(self, '_local__impl') + try: + dct = impl.get_dict() + except KeyError: + dct = impl.create_dict() + args, kw = impl.localargs + self.__init__(*args, **kw) + with impl.locallock: + object.__setattr__(self, '__dict__', dct) + yield + + +class local: + __slots__ = '_local__impl', '__dict__' + + def __new__(cls, *args, **kw): + if (args or kw) and (cls.__init__ is object.__init__): + raise TypeError("Initialization arguments are not supported") + self = object.__new__(cls) + impl = _localimpl() + impl.localargs = (args, kw) + impl.locallock = RLock() + object.__setattr__(self, '_local__impl', impl) + # We need to create the thread dict in anticipation of + # __init__ being called, to make sure we don't call it + # again ourselves. + impl.create_dict() + return self + + def __getattribute__(self, name): + with _patch(self): + return object.__getattribute__(self, name) + + def __setattr__(self, name, value): + if name == '__dict__': + raise AttributeError( + "%r object attribute '__dict__' is read-only" + % self.__class__.__name__) + with _patch(self): + return object.__setattr__(self, name, value) + + def __delattr__(self, name): + if name == '__dict__': + raise AttributeError( + "%r object attribute '__dict__' is read-only" + % self.__class__.__name__) + with _patch(self): + return object.__delattr__(self, name) + + +from threading import current_thread, RLock diff --git a/lib/assets/Lib/_warnings.py b/lib/assets/Lib/_warnings.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_warnings.py @@ -0,0 +1,20 @@ +"""_warnings provides basic warning filtering support. +It is a helper module to speed up interpreter start-up.""" + + +default_action = """default""" + +filters = [('ignore', None, DeprecationWarning, None, 0), + ('ignore', None, PendingDeprecationWarning, None, 0), + ('ignore', None, ImportWarning, None, 0), + ('ignore', None, BytesWarning, None, 0)] + +once_registry = {} + +def warn(*args,**kw): + """Issue a warning, or maybe ignore it or raise an exception.""" + pass + +def warn_explicit(*args,**kw): + """Low-level inferface to warnings functionality.""" + pass diff --git a/lib/assets/Lib/_weakref.py b/lib/assets/Lib/_weakref.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_weakref.py @@ -0,0 +1,33 @@ +class ProxyType: + + def __init__(self,obj): + self.obj = obj + +CallableProxyType = ProxyType +ProxyTypes = [ProxyType,CallableProxyType] + +class ReferenceType: + + def __init__(self,obj,callback): + self.obj = obj + self.callback = callback + +class ref: + + def __init__(self,obj,callback=None): + self.obj = ReferenceType(obj,callback) + self.callback=callback + + def __call__(self): + return self.obj.obj + +def getweakrefcount(obj): + return 1 + +def getweakrefs(obj): + return obj + + +def proxy(obj,callback=None): + return ProxyType(obj) + diff --git a/lib/assets/Lib/_weakrefset.py b/lib/assets/Lib/_weakrefset.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/_weakrefset.py @@ -0,0 +1,194 @@ +# Access WeakSet through the weakref module. +# This code is separated-out because it is needed +# by abc.py to load everything else at startup. 
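# WeakSet keeps weak references (each carrying a removal callback) in a plain
# set, so entries vanish once nothing else references the member object; the
# _IterationGuard below postpones those removals while the set is iterated.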
+ +from _weakref import ref + +__all__ = ['WeakSet'] + + +class _IterationGuard: + # This context manager registers itself in the current iterators of the + # weak container, such as to delay all removals until the context manager + # exits. + # This technique should be relatively thread-safe (since sets are). + + def __init__(self, weakcontainer): + # Don't create cycles + self.weakcontainer = ref(weakcontainer) + + def __enter__(self): + w = self.weakcontainer() + if w is not None: + w._iterating.add(self) + return self + + def __exit__(self, e, t, b): + w = self.weakcontainer() + if w is not None: + s = w._iterating + s.remove(self) + if not s: + w._commit_removals() + + +class WeakSet: + def __init__(self, data=None): + self.data = set() + def _remove(item, selfref=ref(self)): + self = selfref() + if self is not None: + if self._iterating: + self._pending_removals.append(item) + else: + self.data.discard(item) + self._remove = _remove + # A list of keys to be removed + self._pending_removals = [] + self._iterating = set() + if data is not None: + self.update(data) + + def _commit_removals(self): + l = self._pending_removals + discard = self.data.discard + while l: + discard(l.pop()) + + def __iter__(self): + with _IterationGuard(self): + for itemref in self.data: + item = itemref() + if item is not None: + yield item + + def __len__(self): + return len(self.data) - len(self._pending_removals) + + def __contains__(self, item): + try: + wr = ref(item) + except TypeError: + return False + return wr in self.data + + def __reduce__(self): + return (self.__class__, (list(self),), + getattr(self, '__dict__', None)) + + def add(self, item): + if self._pending_removals: + self._commit_removals() + self.data.add(ref(item, self._remove)) + + def clear(self): + if self._pending_removals: + self._commit_removals() + self.data.clear() + + def copy(self): + return self.__class__(self) + + def pop(self): + if self._pending_removals: + self._commit_removals() + while True: + try: + itemref = self.data.pop() + except KeyError: + raise KeyError('pop from empty WeakSet') + item = itemref() + if item is not None: + return item + + def remove(self, item): + if self._pending_removals: + self._commit_removals() + self.data.remove(ref(item)) + + def discard(self, item): + if self._pending_removals: + self._commit_removals() + self.data.discard(ref(item)) + + def update(self, other): + if self._pending_removals: + self._commit_removals() + for element in other: + self.add(element) + + def __ior__(self, other): + self.update(other) + return self + + def difference(self, other): + newset = self.copy() + newset.difference_update(other) + return newset + __sub__ = difference + + def difference_update(self, other): + self.__isub__(other) + def __isub__(self, other): + if self._pending_removals: + self._commit_removals() + if self is other: + self.data.clear() + else: + self.data.difference_update(ref(item) for item in other) + return self + + def intersection(self, other): + return self.__class__(item for item in other if item in self) + __and__ = intersection + + def intersection_update(self, other): + self.__iand__(other) + def __iand__(self, other): + if self._pending_removals: + self._commit_removals() + self.data.intersection_update(ref(item) for item in other) + return self + + def issubset(self, other): + return self.data.issubset(ref(item) for item in other) + __le__ = issubset + + def __lt__(self, other): + return self.data < set(ref(item) for item in other) + + def issuperset(self, other): + return 
self.data.issuperset(ref(item) for item in other) + __ge__ = issuperset + + def __gt__(self, other): + return self.data > set(ref(item) for item in other) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.data == set(ref(item) for item in other) + + def symmetric_difference(self, other): + newset = self.copy() + newset.symmetric_difference_update(other) + return newset + __xor__ = symmetric_difference + + def symmetric_difference_update(self, other): + self.__ixor__(other) + def __ixor__(self, other): + if self._pending_removals: + self._commit_removals() + if self is other: + self.data.clear() + else: + self.data.symmetric_difference_update(ref(item, self._remove) for item in other) + return self + + def union(self, other): + return self.__class__(e for s in (self, other) for e in s) + __or__ = union + + def isdisjoint(self, other): + return len(self.intersection(other)) == 0 diff --git a/lib/assets/Lib/abc.py b/lib/assets/Lib/abc.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/abc.py @@ -0,0 +1,228 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) according to PEP 3119.""" + +from _weakrefset import WeakSet + +def abstractmethod(funcobj): + """A decorator indicating abstract methods. + + Requires that the metaclass is ABCMeta or derived from it. A + class that has a metaclass derived from ABCMeta cannot be + instantiated unless all of its abstract methods are overridden. + The abstract methods can be called using any of the normal + 'super' call mechanisms. + + Usage: + + class C(metaclass=ABCMeta): + @abstractmethod + def my_abstract_method(self, ...): + ... + """ + funcobj.__isabstractmethod__ = True + return funcobj + + +class abstractclassmethod(classmethod): + """ + A decorator indicating abstract classmethods. + + Similar to abstractmethod. + + Usage: + + class C(metaclass=ABCMeta): + @abstractclassmethod + def my_abstract_classmethod(cls, ...): + ... + + 'abstractclassmethod' is deprecated. Use 'classmethod' with + 'abstractmethod' instead. + """ + + __isabstractmethod__ = True + + def __init__(self, callable): + callable.__isabstractmethod__ = True + super().__init__(callable) + + +class abstractstaticmethod(staticmethod): + """ + A decorator indicating abstract staticmethods. + + Similar to abstractmethod. + + Usage: + + class C(metaclass=ABCMeta): + @abstractstaticmethod + def my_abstract_staticmethod(...): + ... + + 'abstractstaticmethod' is deprecated. Use 'staticmethod' with + 'abstractmethod' instead. + """ + + __isabstractmethod__ = True + + def __init__(self, callable): + callable.__isabstractmethod__ = True + super().__init__(callable) + + +class abstractproperty(property): + """ + A decorator indicating abstract properties. + + Requires that the metaclass is ABCMeta or derived from it. A + class that has a metaclass derived from ABCMeta cannot be + instantiated unless all of its abstract properties are overridden. + The abstract properties can be called using any of the normal + 'super' call mechanisms. + + Usage: + + class C(metaclass=ABCMeta): + @abstractproperty + def my_abstract_property(self): + ... + + This defines a read-only property; you can also define a read-write + abstract property using the 'long' form of property declaration: + + class C(metaclass=ABCMeta): + def getx(self): ... + def setx(self, value): ... + x = abstractproperty(getx, setx) + + 'abstractproperty' is deprecated. 
Use 'property' with 'abstractmethod' + instead. + """ + + __isabstractmethod__ = True + + +class ABCMeta(type): + + """Metaclass for defining Abstract Base Classes (ABCs). + + Use this metaclass to create an ABC. An ABC can be subclassed + directly, and then acts as a mix-in class. You can also register + unrelated concrete classes (even built-in classes) and unrelated + ABCs as 'virtual subclasses' -- these and their descendants will + be considered subclasses of the registering ABC by the built-in + issubclass() function, but the registering ABC won't show up in + their MRO (Method Resolution Order) nor will method + implementations defined by the registering ABC be callable (not + even via super()). + + """ + + # A global counter that is incremented each time a class is + # registered as a virtual subclass of anything. It forces the + # negative cache to be cleared before its next use. + _abc_invalidation_counter = 0 + + def __new__(mcls, name, bases, namespace): + cls = super().__new__(mcls, name, bases, namespace) + # Compute set of abstract method names + abstracts = {name + for name, value in namespace.items() + if getattr(value, "__isabstractmethod__", False)} + for base in bases: + for name in getattr(base, "__abstractmethods__", set()): + value = getattr(cls, name, None) + if getattr(value, "__isabstractmethod__", False): + abstracts.add(name) + cls.__abstractmethods__ = frozenset(abstracts) + # Set up inheritance registry + cls._abc_registry = WeakSet() + cls._abc_cache = WeakSet() + cls._abc_negative_cache = WeakSet() + cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter + return cls + + def register(cls, subclass): + """Register a virtual subclass of an ABC. + + Returns the subclass, to allow usage as a class decorator. + """ + if not isinstance(subclass, type): + raise TypeError("Can only register classes") + if issubclass(subclass, cls): + return subclass # Already a subclass + # Subtle: test for cycles *after* testing for "already a subclass"; + # this means we allow X.register(X) and interpret it as a no-op. + if issubclass(cls, subclass): + # This would create a cycle, which is bad for the algorithm below + raise RuntimeError("Refusing to create an inheritance cycle") + cls._abc_registry.add(subclass) + ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache + return subclass + + def _dump_registry(cls, file=None): + """Debug helper to print the ABC registry.""" + print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file) + print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file) + for name in sorted(cls.__dict__.keys()): + if name.startswith("_abc_"): + value = getattr(cls, name) + print("%s: %r" % (name, value), file=file) + + def __instancecheck__(cls, instance): + """Override for isinstance(instance, cls).""" + # Inline the cache checking + subclass = instance.__class__ + if subclass in cls._abc_cache: + return True + subtype = type(instance) + if subtype is subclass: + if (cls._abc_negative_cache_version == + ABCMeta._abc_invalidation_counter and + subclass in cls._abc_negative_cache): + return False + # Fall back to the subclass check. 
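                # Here type(instance) is instance.__class__, so one
                # __subclasscheck__ call (which also updates the caches)
                # settles the answer; the general path below has to try both.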
+ return cls.__subclasscheck__(subclass) + return any(cls.__subclasscheck__(c) for c in {subclass, subtype}) + + def __subclasscheck__(cls, subclass): + """Override for issubclass(subclass, cls).""" + # Check cache + if subclass in cls._abc_cache: + return True + # Check negative cache; may have to invalidate + if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter: + # Invalidate the negative cache + cls._abc_negative_cache = WeakSet() + cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter + elif subclass in cls._abc_negative_cache: + return False + # Check the subclass hook + ok = cls.__subclasshook__(subclass) + if ok is not NotImplemented: + assert isinstance(ok, bool) + if ok: + cls._abc_cache.add(subclass) + else: + cls._abc_negative_cache.add(subclass) + return ok + # Check if it's a direct subclass + if cls in getattr(subclass, '__mro__', ()): + cls._abc_cache.add(subclass) + return True + # Check if it's a subclass of a registered class (recursive) + for rcls in cls._abc_registry: + if issubclass(subclass, rcls): + cls._abc_cache.add(subclass) + return True + # Check if it's a subclass of a subclass (recursive) + for scls in cls.__subclasses__(): + if issubclass(subclass, scls): + cls._abc_cache.add(subclass) + return True + # No dice; update negative cache + cls._abc_negative_cache.add(subclass) + return False diff --git a/lib/assets/Lib/antigravity.py b/lib/assets/Lib/antigravity.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/antigravity.py @@ -0,0 +1,17 @@ + +import webbrowser +import hashlib + +webbrowser.open("http://xkcd.com/353/") + +def geohash(latitude, longitude, datedow): + '''Compute geohash() using the Munroe algorithm. + + >>> geohash(37.421542, -122.085589, b'2005-05-26-10458.68') + 37.857713 -122.544543 + + ''' + # http://xkcd.com/426/ + h = hashlib.md5(datedow).hexdigest() + p, q = [('%f' % float.fromhex('0.' + x)) for x in (h[:16], h[16:32])] + print('%d%s %d%s' % (latitude, p[1:], longitude, q[1:])) diff --git a/lib/assets/Lib/argparse.py b/lib/assets/Lib/argparse.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/argparse.py @@ -0,0 +1,2386 @@ +# Author: Steven J. Bethard . + +"""Command-line parsing library + +This module is an optparse-inspired command-line parsing library that: + + - handles both optional and positional arguments + - produces highly informative usage messages + - supports parsers that dispatch to sub-parsers + +The following is a simple usage example that sums integers from the +command-line and writes the result to a file:: + + parser = argparse.ArgumentParser( + description='sum the integers at the command line') + parser.add_argument( + 'integers', metavar='int', nargs='+', type=int, + help='an integer to be summed') + parser.add_argument( + '--log', default=sys.stdout, type=argparse.FileType('w'), + help='the file where the sum should be written') + args = parser.parse_args() + args.log.write('%s' % sum(args.integers)) + args.log.close() + +The module contains the following public classes: + + - ArgumentParser -- The main entry point for command-line parsing. As the + example above shows, the add_argument() method is used to populate + the parser with actions for optional and positional arguments. Then + the parse_args() method is invoked to convert the args at the + command-line into an object with attributes. + + - ArgumentError -- The exception raised by ArgumentParser objects when + there are errors with the parser's actions. 
Errors raised while + parsing the command-line are caught by ArgumentParser and emitted + as command-line messages. + + - FileType -- A factory for defining types of files to be created. As the + example above shows, instances of FileType are typically passed as + the type= argument of add_argument() calls. + + - Action -- The base class for parser actions. Typically actions are + selected by passing strings like 'store_true' or 'append_const' to + the action= argument of add_argument(). However, for greater + customization of ArgumentParser actions, subclasses of Action may + be defined and passed as the action= argument. + + - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter, + ArgumentDefaultsHelpFormatter -- Formatter classes which + may be passed as the formatter_class= argument to the + ArgumentParser constructor. HelpFormatter is the default, + RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser + not to change the formatting for help text, and + ArgumentDefaultsHelpFormatter adds information about argument defaults + to the help. + +All other classes in this module are considered implementation details. +(Also note that HelpFormatter and RawDescriptionHelpFormatter are only +considered public as object names -- the API of the formatter objects is +still considered an implementation detail.) +""" + +__version__ = '1.1' +__all__ = [ + 'ArgumentParser', + 'ArgumentError', + 'ArgumentTypeError', + 'FileType', + 'HelpFormatter', + 'ArgumentDefaultsHelpFormatter', + 'RawDescriptionHelpFormatter', + 'RawTextHelpFormatter', + 'MetavarTypeHelpFormatter', + 'Namespace', + 'Action', + 'ONE_OR_MORE', + 'OPTIONAL', + 'PARSER', + 'REMAINDER', + 'SUPPRESS', + 'ZERO_OR_MORE', +] + + +import collections as _collections +import copy as _copy +import os as _os +import re as _re +import sys as _sys +import textwrap as _textwrap + +from gettext import gettext as _, ngettext + + +SUPPRESS = '==SUPPRESS==' + +OPTIONAL = '?' +ZERO_OR_MORE = '*' +ONE_OR_MORE = '+' +PARSER = 'A...' +REMAINDER = '...' +_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args' + +# ============================= +# Utility functions and classes +# ============================= + +class _AttributeHolder(object): + """Abstract base class that provides __repr__. + + The __repr__ method returns a string in the format:: + ClassName(attr=name, attr=name, ...) + The attributes are determined either by a class-level attribute, + '_kwarg_names', or by inspecting the instance __dict__. + """ + + def __repr__(self): + type_name = type(self).__name__ + arg_strings = [] + for arg in self._get_args(): + arg_strings.append(repr(arg)) + for name, value in self._get_kwargs(): + arg_strings.append('%s=%r' % (name, value)) + return '%s(%s)' % (type_name, ', '.join(arg_strings)) + + def _get_kwargs(self): + return sorted(self.__dict__.items()) + + def _get_args(self): + return [] + + +def _ensure_value(namespace, name, value): + if getattr(namespace, name, None) is None: + setattr(namespace, name, value) + return getattr(namespace, name) + + +# =============== +# Formatting Help +# =============== + +class HelpFormatter(object): + """Formatter for generating usage messages and argument help strings. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. 
+ """ + + def __init__(self, + prog, + indent_increment=2, + max_help_position=24, + width=None): + + # default setting for width + if width is None: + try: + width = int(_os.environ['COLUMNS']) + except (KeyError, ValueError): + width = 80 + width -= 2 + + self._prog = prog + self._indent_increment = indent_increment + self._max_help_position = max_help_position + self._max_help_position = min(max_help_position, + max(width - 20, indent_increment * 2)) + self._width = width + + self._current_indent = 0 + self._level = 0 + self._action_max_length = 0 + + self._root_section = self._Section(self, None) + self._current_section = self._root_section + + self._whitespace_matcher = _re.compile(r'\s+') + self._long_break_matcher = _re.compile(r'\n\n\n+') + + # =============================== + # Section and indentation methods + # =============================== + def _indent(self): + self._current_indent += self._indent_increment + self._level += 1 + + def _dedent(self): + self._current_indent -= self._indent_increment + assert self._current_indent >= 0, 'Indent decreased below 0.' + self._level -= 1 + + class _Section(object): + + def __init__(self, formatter, parent, heading=None): + self.formatter = formatter + self.parent = parent + self.heading = heading + self.items = [] + + def format_help(self): + # format the indented section + if self.parent is not None: + self.formatter._indent() + join = self.formatter._join_parts + for func, args in self.items: + func(*args) + item_help = join([func(*args) for func, args in self.items]) + if self.parent is not None: + self.formatter._dedent() + + # return nothing if the section was empty + if not item_help: + return '' + + # add the heading if the section was non-empty + if self.heading is not SUPPRESS and self.heading is not None: + current_indent = self.formatter._current_indent + heading = '%*s%s:\n' % (current_indent, '', self.heading) + else: + heading = '' + + # join the section-initial newline, the heading and the help + return join(['\n', heading, item_help, '\n']) + + def _add_item(self, func, args): + self._current_section.items.append((func, args)) + + # ======================== + # Message building methods + # ======================== + def start_section(self, heading): + self._indent() + section = self._Section(self, self._current_section, heading) + self._add_item(section.format_help, []) + self._current_section = section + + def end_section(self): + self._current_section = self._current_section.parent + self._dedent() + + def add_text(self, text): + if text is not SUPPRESS and text is not None: + self._add_item(self._format_text, [text]) + + def add_usage(self, usage, actions, groups, prefix=None): + if usage is not SUPPRESS: + args = usage, actions, groups, prefix + self._add_item(self._format_usage, args) + + def add_argument(self, action): + if action.help is not SUPPRESS: + + # find all invocations + get_invocation = self._format_action_invocation + invocations = [get_invocation(action)] + for subaction in self._iter_indented_subactions(action): + invocations.append(get_invocation(subaction)) + + # update the maximum item length + invocation_length = max([len(s) for s in invocations]) + action_length = invocation_length + self._current_indent + self._action_max_length = max(self._action_max_length, + action_length) + + # add the item to the list + self._add_item(self._format_action, [action]) + + def add_arguments(self, actions): + for action in actions: + self.add_argument(action) + + # ======================= + # Help-formatting 
methods + # ======================= + def format_help(self): + help = self._root_section.format_help() + if help: + help = self._long_break_matcher.sub('\n\n', help) + help = help.strip('\n') + '\n' + return help + + def _join_parts(self, part_strings): + return ''.join([part + for part in part_strings + if part and part is not SUPPRESS]) + + def _format_usage(self, usage, actions, groups, prefix): + if prefix is None: + prefix = _('usage: ') + + # if usage is specified, use that + if usage is not None: + usage = usage % dict(prog=self._prog) + + # if no optionals or positionals are available, usage is just prog + elif usage is None and not actions: + usage = '%(prog)s' % dict(prog=self._prog) + + # if optionals and positionals are available, calculate usage + elif usage is None: + prog = '%(prog)s' % dict(prog=self._prog) + + # split optionals from positionals + optionals = [] + positionals = [] + for action in actions: + if action.option_strings: + optionals.append(action) + else: + positionals.append(action) + + # build full usage string + format = self._format_actions_usage + action_usage = format(optionals + positionals, groups) + usage = ' '.join([s for s in [prog, action_usage] if s]) + + # wrap the usage parts if it's too long + text_width = self._width - self._current_indent + if len(prefix) + len(usage) > text_width: + + # break usage into wrappable parts + part_regexp = r'\(.*?\)+|\[.*?\]+|\S+' + opt_usage = format(optionals, groups) + pos_usage = format(positionals, groups) + opt_parts = _re.findall(part_regexp, opt_usage) + pos_parts = _re.findall(part_regexp, pos_usage) + assert ' '.join(opt_parts) == opt_usage + assert ' '.join(pos_parts) == pos_usage + + # helper for wrapping lines + def get_lines(parts, indent, prefix=None): + lines = [] + line = [] + if prefix is not None: + line_len = len(prefix) - 1 + else: + line_len = len(indent) - 1 + for part in parts: + if line_len + 1 + len(part) > text_width and line: + lines.append(indent + ' '.join(line)) + line = [] + line_len = len(indent) - 1 + line.append(part) + line_len += len(part) + 1 + if line: + lines.append(indent + ' '.join(line)) + if prefix is not None: + lines[0] = lines[0][len(indent):] + return lines + + # if prog is short, follow it with optionals or positionals + if len(prefix) + len(prog) <= 0.75 * text_width: + indent = ' ' * (len(prefix) + len(prog) + 1) + if opt_parts: + lines = get_lines([prog] + opt_parts, indent, prefix) + lines.extend(get_lines(pos_parts, indent)) + elif pos_parts: + lines = get_lines([prog] + pos_parts, indent, prefix) + else: + lines = [prog] + + # if prog is long, put it on its own line + else: + indent = ' ' * len(prefix) + parts = opt_parts + pos_parts + lines = get_lines(parts, indent) + if len(lines) > 1: + lines = [] + lines.extend(get_lines(opt_parts, indent)) + lines.extend(get_lines(pos_parts, indent)) + lines = [prog] + lines + + # join lines into usage + usage = '\n'.join(lines) + + # prefix with 'usage:' + return '%s%s\n\n' % (prefix, usage) + + def _format_actions_usage(self, actions, groups): + # find group indices and identify actions in groups + group_actions = set() + inserts = {} + for group in groups: + try: + start = actions.index(group._group_actions[0]) + except ValueError: + continue + else: + end = start + len(group._group_actions) + if actions[start:end] == group._group_actions: + for action in group._group_actions: + group_actions.add(action) + if not group.required: + if start in inserts: + inserts[start] += ' [' + else: + inserts[start] = '[' + 
inserts[end] = ']' + else: + if start in inserts: + inserts[start] += ' (' + else: + inserts[start] = '(' + inserts[end] = ')' + for i in range(start + 1, end): + inserts[i] = '|' + + # collect all actions format strings + parts = [] + for i, action in enumerate(actions): + + # suppressed arguments are marked with None + # remove | separators for suppressed arguments + if action.help is SUPPRESS: + parts.append(None) + if inserts.get(i) == '|': + inserts.pop(i) + elif inserts.get(i + 1) == '|': + inserts.pop(i + 1) + + # produce all arg strings + elif not action.option_strings: + default = self._get_default_metavar_for_positional(action) + part = self._format_args(action, default) + + # if it's in a group, strip the outer [] + if action in group_actions: + if part[0] == '[' and part[-1] == ']': + part = part[1:-1] + + # add the action string to the list + parts.append(part) + + # produce the first way to invoke the option in brackets + else: + option_string = action.option_strings[0] + + # if the Optional doesn't take a value, format is: + # -s or --long + if action.nargs == 0: + part = '%s' % option_string + + # if the Optional takes a value, format is: + # -s ARGS or --long ARGS + else: + default = self._get_default_metavar_for_optional(action) + args_string = self._format_args(action, default) + part = '%s %s' % (option_string, args_string) + + # make it look optional if it's not required or in a group + if not action.required and action not in group_actions: + part = '[%s]' % part + + # add the action string to the list + parts.append(part) + + # insert things at the necessary indices + for i in sorted(inserts, reverse=True): + parts[i:i] = [inserts[i]] + + # join all the action items with spaces + text = ' '.join([item for item in parts if item is not None]) + + # clean up separators for mutually exclusive groups + open = r'[\[(]' + close = r'[\])]' + text = _re.sub(r'(%s) ' % open, r'\1', text) + text = _re.sub(r' (%s)' % close, r'\1', text) + text = _re.sub(r'%s *%s' % (open, close), r'', text) + text = _re.sub(r'\(([^|]*)\)', r'\1', text) + text = text.strip() + + # return the text + return text + + def _format_text(self, text): + if '%(prog)' in text: + text = text % dict(prog=self._prog) + text_width = max(self._width - self._current_indent, 11) + indent = ' ' * self._current_indent + return self._fill_text(text, text_width, indent) + '\n\n' + + def _format_action(self, action): + # determine the required width and the entry label + help_position = min(self._action_max_length + 2, + self._max_help_position) + help_width = max(self._width - help_position, 11) + action_width = help_position - self._current_indent - 2 + action_header = self._format_action_invocation(action) + + # no help; start on same line and add a final newline + if not action.help: + tup = self._current_indent, '', action_header + action_header = '%*s%s\n' % tup + + # short action name; start on the same line and pad two spaces + elif len(action_header) <= action_width: + tup = self._current_indent, '', action_width, action_header + action_header = '%*s%-*s ' % tup + indent_first = 0 + + # long action name; start on the next line + else: + tup = self._current_indent, '', action_header + action_header = '%*s%s\n' % tup + indent_first = help_position + + # collect the pieces of the action help + parts = [action_header] + + # if there was help for the action, add lines of help text + if action.help: + help_text = self._expand_help(action) + help_lines = self._split_lines(help_text, help_width) + 
parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) + for line in help_lines[1:]: + parts.append('%*s%s\n' % (help_position, '', line)) + + # or add a newline if the description doesn't end with one + elif not action_header.endswith('\n'): + parts.append('\n') + + # if there are any sub-actions, add their help as well + for subaction in self._iter_indented_subactions(action): + parts.append(self._format_action(subaction)) + + # return a single string + return self._join_parts(parts) + + def _format_action_invocation(self, action): + if not action.option_strings: + default = self._get_default_metavar_for_positional(action) + metavar, = self._metavar_formatter(action, default)(1) + return metavar + + else: + parts = [] + + # if the Optional doesn't take a value, format is: + # -s, --long + if action.nargs == 0: + parts.extend(action.option_strings) + + # if the Optional takes a value, format is: + # -s ARGS, --long ARGS + else: + default = self._get_default_metavar_for_optional(action) + args_string = self._format_args(action, default) + for option_string in action.option_strings: + parts.append('%s %s' % (option_string, args_string)) + + return ', '.join(parts) + + def _metavar_formatter(self, action, default_metavar): + if action.metavar is not None: + result = action.metavar + elif action.choices is not None: + choice_strs = [str(choice) for choice in action.choices] + result = '{%s}' % ','.join(choice_strs) + else: + result = default_metavar + + def format(tuple_size): + if isinstance(result, tuple): + return result + else: + return (result, ) * tuple_size + return format + + def _format_args(self, action, default_metavar): + get_metavar = self._metavar_formatter(action, default_metavar) + if action.nargs is None: + result = '%s' % get_metavar(1) + elif action.nargs == OPTIONAL: + result = '[%s]' % get_metavar(1) + elif action.nargs == ZERO_OR_MORE: + result = '[%s [%s ...]]' % get_metavar(2) + elif action.nargs == ONE_OR_MORE: + result = '%s [%s ...]' % get_metavar(2) + elif action.nargs == REMAINDER: + result = '...' + elif action.nargs == PARSER: + result = '%s ...' 
% get_metavar(1) + else: + formats = ['%s' for _ in range(action.nargs)] + result = ' '.join(formats) % get_metavar(action.nargs) + return result + + def _expand_help(self, action): + params = dict(vars(action), prog=self._prog) + for name in list(params): + if params[name] is SUPPRESS: + del params[name] + for name in list(params): + if hasattr(params[name], '__name__'): + params[name] = params[name].__name__ + if params.get('choices') is not None: + choices_str = ', '.join([str(c) for c in params['choices']]) + params['choices'] = choices_str + return self._get_help_string(action) % params + + def _iter_indented_subactions(self, action): + try: + get_subactions = action._get_subactions + except AttributeError: + pass + else: + self._indent() + yield from get_subactions() + self._dedent() + + def _split_lines(self, text, width): + text = self._whitespace_matcher.sub(' ', text).strip() + return _textwrap.wrap(text, width) + + def _fill_text(self, text, width, indent): + text = self._whitespace_matcher.sub(' ', text).strip() + return _textwrap.fill(text, width, initial_indent=indent, + subsequent_indent=indent) + + def _get_help_string(self, action): + return action.help + + def _get_default_metavar_for_optional(self, action): + return action.dest.upper() + + def _get_default_metavar_for_positional(self, action): + return action.dest + + +class RawDescriptionHelpFormatter(HelpFormatter): + """Help message formatter which retains any formatting in descriptions. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def _fill_text(self, text, width, indent): + return ''.join(indent + line for line in text.splitlines(keepends=True)) + + +class RawTextHelpFormatter(RawDescriptionHelpFormatter): + """Help message formatter which retains formatting of all help text. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def _split_lines(self, text, width): + return text.splitlines() + + +class ArgumentDefaultsHelpFormatter(HelpFormatter): + """Help message formatter which adds default values to argument help. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def _get_help_string(self, action): + help = action.help + if '%(default)' not in action.help: + if action.default is not SUPPRESS: + defaulting_nargs = [OPTIONAL, ZERO_OR_MORE] + if action.option_strings or action.nargs in defaulting_nargs: + help += ' (default: %(default)s)' + return help + + +class MetavarTypeHelpFormatter(HelpFormatter): + """Help message formatter which uses the argument 'type' as the default + metavar value (instead of the argument 'dest') + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. 
+ """ + + def _get_default_metavar_for_optional(self, action): + return action.type.__name__ + + def _get_default_metavar_for_positional(self, action): + return action.type.__name__ + + + +# ===================== +# Options and Arguments +# ===================== + +def _get_action_name(argument): + if argument is None: + return None + elif argument.option_strings: + return '/'.join(argument.option_strings) + elif argument.metavar not in (None, SUPPRESS): + return argument.metavar + elif argument.dest not in (None, SUPPRESS): + return argument.dest + else: + return None + + +class ArgumentError(Exception): + """An error from creating or using an argument (optional or positional). + + The string value of this exception is the message, augmented with + information about the argument that caused it. + """ + + def __init__(self, argument, message): + self.argument_name = _get_action_name(argument) + self.message = message + + def __str__(self): + if self.argument_name is None: + format = '%(message)s' + else: + format = 'argument %(argument_name)s: %(message)s' + return format % dict(message=self.message, + argument_name=self.argument_name) + + +class ArgumentTypeError(Exception): + """An error from trying to convert a command line string to a type.""" + pass + + +# ============== +# Action classes +# ============== + +class Action(_AttributeHolder): + """Information about how to convert command line strings to Python objects. + + Action objects are used by an ArgumentParser to represent the information + needed to parse a single argument from one or more strings from the + command line. The keyword arguments to the Action constructor are also + all attributes of Action instances. + + Keyword Arguments: + + - option_strings -- A list of command-line option strings which + should be associated with this action. + + - dest -- The name of the attribute to hold the created object(s) + + - nargs -- The number of command-line arguments that should be + consumed. By default, one argument will be consumed and a single + value will be produced. Other values include: + - N (an integer) consumes N arguments (and produces a list) + - '?' consumes zero or one arguments + - '*' consumes zero or more arguments (and produces a list) + - '+' consumes one or more arguments (and produces a list) + Note that the difference between the default and nargs=1 is that + with the default, a single value will be produced, while with + nargs=1, a list containing a single value will be produced. + + - const -- The value to be produced if the option is specified and the + option uses an action that takes no values. + + - default -- The value to be produced if the option is not specified. + + - type -- A callable that accepts a single string argument, and + returns the converted value. The standard Python types str, int, + float, and complex are useful examples of such callables. If None, + str is used. + + - choices -- A container of values that should be allowed. If not None, + after a command-line argument has been converted to the appropriate + type, an exception will be raised if it is not a member of this + collection. + + - required -- True if the action must always be specified at the + command line. This is only meaningful for optional command-line + arguments. + + - help -- The help string describing the argument. + + - metavar -- The name to be used for the option's argument with the + help string. If None, the 'dest' value will be used as the name. 
+ """ + + def __init__(self, + option_strings, + dest, + nargs=None, + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None): + self.option_strings = option_strings + self.dest = dest + self.nargs = nargs + self.const = const + self.default = default + self.type = type + self.choices = choices + self.required = required + self.help = help + self.metavar = metavar + + def _get_kwargs(self): + names = [ + 'option_strings', + 'dest', + 'nargs', + 'const', + 'default', + 'type', + 'choices', + 'help', + 'metavar', + ] + return [(name, getattr(self, name)) for name in names] + + def __call__(self, parser, namespace, values, option_string=None): + raise NotImplementedError(_('.__call__() not defined')) + + +class _StoreAction(Action): + + def __init__(self, + option_strings, + dest, + nargs=None, + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None): + if nargs == 0: + raise ValueError('nargs for store actions must be > 0; if you ' + 'have nothing to store, actions such as store ' + 'true or store const may be more appropriate') + if const is not None and nargs != OPTIONAL: + raise ValueError('nargs must be %r to supply const' % OPTIONAL) + super(_StoreAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=nargs, + const=const, + default=default, + type=type, + choices=choices, + required=required, + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, values) + + +class _StoreConstAction(Action): + + def __init__(self, + option_strings, + dest, + const, + default=None, + required=False, + help=None, + metavar=None): + super(_StoreConstAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + const=const, + default=default, + required=required, + help=help) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + +class _StoreTrueAction(_StoreConstAction): + + def __init__(self, + option_strings, + dest, + default=False, + required=False, + help=None): + super(_StoreTrueAction, self).__init__( + option_strings=option_strings, + dest=dest, + const=True, + default=default, + required=required, + help=help) + + +class _StoreFalseAction(_StoreConstAction): + + def __init__(self, + option_strings, + dest, + default=True, + required=False, + help=None): + super(_StoreFalseAction, self).__init__( + option_strings=option_strings, + dest=dest, + const=False, + default=default, + required=required, + help=help) + + +class _AppendAction(Action): + + def __init__(self, + option_strings, + dest, + nargs=None, + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None): + if nargs == 0: + raise ValueError('nargs for append actions must be > 0; if arg ' + 'strings are not supplying the value to append, ' + 'the append const action may be more appropriate') + if const is not None and nargs != OPTIONAL: + raise ValueError('nargs must be %r to supply const' % OPTIONAL) + super(_AppendAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=nargs, + const=const, + default=default, + type=type, + choices=choices, + required=required, + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + items = _copy.copy(_ensure_value(namespace, self.dest, [])) + items.append(values) + setattr(namespace, self.dest, items) + + 
+class _AppendConstAction(Action): + + def __init__(self, + option_strings, + dest, + const, + default=None, + required=False, + help=None, + metavar=None): + super(_AppendConstAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + const=const, + default=default, + required=required, + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + items = _copy.copy(_ensure_value(namespace, self.dest, [])) + items.append(self.const) + setattr(namespace, self.dest, items) + + +class _CountAction(Action): + + def __init__(self, + option_strings, + dest, + default=None, + required=False, + help=None): + super(_CountAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + default=default, + required=required, + help=help) + + def __call__(self, parser, namespace, values, option_string=None): + new_count = _ensure_value(namespace, self.dest, 0) + 1 + setattr(namespace, self.dest, new_count) + + +class _HelpAction(Action): + + def __init__(self, + option_strings, + dest=SUPPRESS, + default=SUPPRESS, + help=None): + super(_HelpAction, self).__init__( + option_strings=option_strings, + dest=dest, + default=default, + nargs=0, + help=help) + + def __call__(self, parser, namespace, values, option_string=None): + parser.print_help() + parser.exit() + + +class _VersionAction(Action): + + def __init__(self, + option_strings, + version=None, + dest=SUPPRESS, + default=SUPPRESS, + help="show program's version number and exit"): + super(_VersionAction, self).__init__( + option_strings=option_strings, + dest=dest, + default=default, + nargs=0, + help=help) + self.version = version + + def __call__(self, parser, namespace, values, option_string=None): + version = self.version + if version is None: + version = parser.version + formatter = parser._get_formatter() + formatter.add_text(version) + parser._print_message(formatter.format_help(), _sys.stdout) + parser.exit() + + +class _SubParsersAction(Action): + + class _ChoicesPseudoAction(Action): + + def __init__(self, name, aliases, help): + metavar = dest = name + if aliases: + metavar += ' (%s)' % ', '.join(aliases) + sup = super(_SubParsersAction._ChoicesPseudoAction, self) + sup.__init__(option_strings=[], dest=dest, help=help, + metavar=metavar) + + def __init__(self, + option_strings, + prog, + parser_class, + dest=SUPPRESS, + help=None, + metavar=None): + + self._prog_prefix = prog + self._parser_class = parser_class + self._name_parser_map = _collections.OrderedDict() + self._choices_actions = [] + + super(_SubParsersAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=PARSER, + choices=self._name_parser_map, + help=help, + metavar=metavar) + + def add_parser(self, name, **kwargs): + # set prog from the existing prefix + if kwargs.get('prog') is None: + kwargs['prog'] = '%s %s' % (self._prog_prefix, name) + + aliases = kwargs.pop('aliases', ()) + + # create a pseudo-action to hold the choice help + if 'help' in kwargs: + help = kwargs.pop('help') + choice_action = self._ChoicesPseudoAction(name, aliases, help) + self._choices_actions.append(choice_action) + + # create the parser and add it to the map + parser = self._parser_class(**kwargs) + self._name_parser_map[name] = parser + + # make parser available under aliases also + for alias in aliases: + self._name_parser_map[alias] = parser + + return parser + + def _get_subactions(self): + return self._choices_actions + + def __call__(self, parser, namespace, values, option_string=None): 
+ parser_name = values[0] + arg_strings = values[1:] + + # set the parser name if requested + if self.dest is not SUPPRESS: + setattr(namespace, self.dest, parser_name) + + # select the parser + try: + parser = self._name_parser_map[parser_name] + except KeyError: + args = {'parser_name': parser_name, + 'choices': ', '.join(self._name_parser_map)} + msg = _('unknown parser %(parser_name)r (choices: %(choices)s)') % args + raise ArgumentError(self, msg) + + # parse all the remaining options into the namespace + # store any unrecognized options on the object, so that the top + # level parser can decide what to do with them + + # In case this subparser defines new defaults, we parse them + # in a new namespace object and then update the original + # namespace for the relevant parts. + subnamespace, arg_strings = parser.parse_known_args(arg_strings, None) + for key, value in vars(subnamespace).items(): + setattr(namespace, key, value) + + if arg_strings: + vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, []) + getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings) + + +# ============== +# Type classes +# ============== + +class FileType(object): + """Factory for creating file object types + + Instances of FileType are typically passed as type= arguments to the + ArgumentParser add_argument() method. + + Keyword Arguments: + - mode -- A string indicating how the file is to be opened. Accepts the + same values as the builtin open() function. + - bufsize -- The file's desired buffer size. Accepts the same values as + the builtin open() function. + - encoding -- The file's encoding. Accepts the same values as the + builtin open() function. + - errors -- A string indicating how encoding and decoding errors are to + be handled. Accepts the same value as the builtin open() function. + """ + + def __init__(self, mode='r', bufsize=-1, encoding=None, errors=None): + self._mode = mode + self._bufsize = bufsize + self._encoding = encoding + self._errors = errors + + def __call__(self, string): + # the special argument "-" means sys.std{in,out} + if string == '-': + if 'r' in self._mode: + return _sys.stdin + elif 'w' in self._mode: + return _sys.stdout + else: + msg = _('argument "-" with mode %r') % self._mode + raise ValueError(msg) + + # all other arguments are used as file names + try: + return open(string, self._mode, self._bufsize, self._encoding, + self._errors) + except OSError as e: + message = _("can't open '%s': %s") + raise ArgumentTypeError(message % (string, e)) + + def __repr__(self): + args = self._mode, self._bufsize + kwargs = [('encoding', self._encoding), ('errors', self._errors)] + args_str = ', '.join([repr(arg) for arg in args if arg != -1] + + ['%s=%r' % (kw, arg) for kw, arg in kwargs + if arg is not None]) + return '%s(%s)' % (type(self).__name__, args_str) + +# =========================== +# Optional and Positional Parsing +# =========================== + +class Namespace(_AttributeHolder): + """Simple object for storing attributes. + + Implements equality by attribute names and values, and provides a simple + string representation. 
+ """ + + def __init__(self, **kwargs): + for name in kwargs: + setattr(self, name, kwargs[name]) + + def __eq__(self, other): + if not isinstance(other, Namespace): + return NotImplemented + return vars(self) == vars(other) + + def __ne__(self, other): + if not isinstance(other, Namespace): + return NotImplemented + return not (self == other) + + def __contains__(self, key): + return key in self.__dict__ + + +class _ActionsContainer(object): + + def __init__(self, + description, + prefix_chars, + argument_default, + conflict_handler): + super(_ActionsContainer, self).__init__() + + self.description = description + self.argument_default = argument_default + self.prefix_chars = prefix_chars + self.conflict_handler = conflict_handler + + # set up registries + self._registries = {} + + # register actions + self.register('action', None, _StoreAction) + self.register('action', 'store', _StoreAction) + self.register('action', 'store_const', _StoreConstAction) + self.register('action', 'store_true', _StoreTrueAction) + self.register('action', 'store_false', _StoreFalseAction) + self.register('action', 'append', _AppendAction) + self.register('action', 'append_const', _AppendConstAction) + self.register('action', 'count', _CountAction) + self.register('action', 'help', _HelpAction) + self.register('action', 'version', _VersionAction) + self.register('action', 'parsers', _SubParsersAction) + + # raise an exception if the conflict handler is invalid + self._get_handler() + + # action storage + self._actions = [] + self._option_string_actions = {} + + # groups + self._action_groups = [] + self._mutually_exclusive_groups = [] + + # defaults storage + self._defaults = {} + + # determines whether an "option" looks like a negative number + self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$') + + # whether or not there are any optionals that look like negative + # numbers -- uses a list so it can be shared and edited + self._has_negative_number_optionals = [] + + # ==================== + # Registration methods + # ==================== + def register(self, registry_name, value, object): + registry = self._registries.setdefault(registry_name, {}) + registry[value] = object + + def _registry_get(self, registry_name, value, default=None): + return self._registries[registry_name].get(value, default) + + # ================================== + # Namespace default accessor methods + # ================================== + def set_defaults(self, **kwargs): + self._defaults.update(kwargs) + + # if these defaults match any existing arguments, replace + # the previous default on the object with the new one + for action in self._actions: + if action.dest in kwargs: + action.default = kwargs[action.dest] + + def get_default(self, dest): + for action in self._actions: + if action.dest == dest and action.default is not None: + return action.default + return self._defaults.get(dest, None) + + + # ======================= + # Adding argument actions + # ======================= + def add_argument(self, *args, **kwargs): + """ + add_argument(dest, ..., name=value, ...) + add_argument(option_string, option_string, ..., name=value, ...) 
+ """ + + # if no positional args are supplied or only one is supplied and + # it doesn't look like an option string, parse a positional + # argument + chars = self.prefix_chars + if not args or len(args) == 1 and args[0][0] not in chars: + if args and 'dest' in kwargs: + raise ValueError('dest supplied twice for positional argument') + kwargs = self._get_positional_kwargs(*args, **kwargs) + + # otherwise, we're adding an optional argument + else: + kwargs = self._get_optional_kwargs(*args, **kwargs) + + # if no default was supplied, use the parser-level default + if 'default' not in kwargs: + dest = kwargs['dest'] + if dest in self._defaults: + kwargs['default'] = self._defaults[dest] + elif self.argument_default is not None: + kwargs['default'] = self.argument_default + + # create the action object, and add it to the parser + action_class = self._pop_action_class(kwargs) + if not callable(action_class): + raise ValueError('unknown action "%s"' % (action_class,)) + action = action_class(**kwargs) + + # raise an error if the action type is not callable + type_func = self._registry_get('type', action.type, action.type) + if not callable(type_func): + raise ValueError('%r is not callable' % (type_func,)) + + # raise an error if the metavar does not match the type + if hasattr(self, "_get_formatter"): + try: + self._get_formatter()._format_args(action, None) + except TypeError: + raise ValueError("length of metavar tuple does not match nargs") + + return self._add_action(action) + + def add_argument_group(self, *args, **kwargs): + group = _ArgumentGroup(self, *args, **kwargs) + self._action_groups.append(group) + return group + + def add_mutually_exclusive_group(self, **kwargs): + group = _MutuallyExclusiveGroup(self, **kwargs) + self._mutually_exclusive_groups.append(group) + return group + + def _add_action(self, action): + # resolve any conflicts + self._check_conflict(action) + + # add to actions list + self._actions.append(action) + action.container = self + + # index the action by any option strings it has + for option_string in action.option_strings: + self._option_string_actions[option_string] = action + + # set the flag if any option strings look like negative numbers + for option_string in action.option_strings: + if self._negative_number_matcher.match(option_string): + if not self._has_negative_number_optionals: + self._has_negative_number_optionals.append(True) + + # return the created action + return action + + def _remove_action(self, action): + self._actions.remove(action) + + def _add_container_actions(self, container): + # collect groups by titles + title_group_map = {} + for group in self._action_groups: + if group.title in title_group_map: + msg = _('cannot merge actions - two groups are named %r') + raise ValueError(msg % (group.title)) + title_group_map[group.title] = group + + # map each action to its group + group_map = {} + for group in container._action_groups: + + # if a group with the title exists, use that, otherwise + # create a new group matching the container's group + if group.title not in title_group_map: + title_group_map[group.title] = self.add_argument_group( + title=group.title, + description=group.description, + conflict_handler=group.conflict_handler) + + # map the actions to their new group + for action in group._group_actions: + group_map[action] = title_group_map[group.title] + + # add container's mutually exclusive groups + # NOTE: if add_mutually_exclusive_group ever gains title= and + # description= then this code will need to be expanded as above 
+ for group in container._mutually_exclusive_groups: + mutex_group = self.add_mutually_exclusive_group( + required=group.required) + + # map the actions to their new mutex group + for action in group._group_actions: + group_map[action] = mutex_group + + # add all actions to this container or their group + for action in container._actions: + group_map.get(action, self)._add_action(action) + + def _get_positional_kwargs(self, dest, **kwargs): + # make sure required is not specified + if 'required' in kwargs: + msg = _("'required' is an invalid argument for positionals") + raise TypeError(msg) + + # mark positional arguments as required if at least one is + # always required + if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]: + kwargs['required'] = True + if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs: + kwargs['required'] = True + + # return the keyword arguments with no option strings + return dict(kwargs, dest=dest, option_strings=[]) + + def _get_optional_kwargs(self, *args, **kwargs): + # determine short and long option strings + option_strings = [] + long_option_strings = [] + for option_string in args: + # error on strings that don't start with an appropriate prefix + if not option_string[0] in self.prefix_chars: + args = {'option': option_string, + 'prefix_chars': self.prefix_chars} + msg = _('invalid option string %(option)r: ' + 'must start with a character %(prefix_chars)r') + raise ValueError(msg % args) + + # strings starting with two prefix characters are long options + option_strings.append(option_string) + if option_string[0] in self.prefix_chars: + if len(option_string) > 1: + if option_string[1] in self.prefix_chars: + long_option_strings.append(option_string) + + # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x' + dest = kwargs.pop('dest', None) + if dest is None: + if long_option_strings: + dest_option_string = long_option_strings[0] + else: + dest_option_string = option_strings[0] + dest = dest_option_string.lstrip(self.prefix_chars) + if not dest: + msg = _('dest= is required for options like %r') + raise ValueError(msg % option_string) + dest = dest.replace('-', '_') + + # return the updated keyword arguments + return dict(kwargs, dest=dest, option_strings=option_strings) + + def _pop_action_class(self, kwargs, default=None): + action = kwargs.pop('action', default) + return self._registry_get('action', action, action) + + def _get_handler(self): + # determine function from conflict handler string + handler_func_name = '_handle_conflict_%s' % self.conflict_handler + try: + return getattr(self, handler_func_name) + except AttributeError: + msg = _('invalid conflict_resolution value: %r') + raise ValueError(msg % self.conflict_handler) + + def _check_conflict(self, action): + + # find all options that conflict with this option + confl_optionals = [] + for option_string in action.option_strings: + if option_string in self._option_string_actions: + confl_optional = self._option_string_actions[option_string] + confl_optionals.append((option_string, confl_optional)) + + # resolve any conflicts + if confl_optionals: + conflict_handler = self._get_handler() + conflict_handler(action, confl_optionals) + + def _handle_conflict_error(self, action, conflicting_actions): + message = ngettext('conflicting option string: %s', + 'conflicting option strings: %s', + len(conflicting_actions)) + conflict_string = ', '.join([option_string + for option_string, action + in conflicting_actions]) + raise ArgumentError(action, message % conflict_string) + + 
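A small sketch, with made-up parser and option names, of how the two conflict handlers around this point behave from the caller's side: the 'error' handler above raises ArgumentError, while the 'resolve' handler that follows drops the older, conflicting option string.

import argparse

# conflict_handler='error' (the default)
parser = argparse.ArgumentParser(prog='demo')
parser.add_argument('-f', '--foo')
try:
    parser.add_argument('-f', '--force')
except argparse.ArgumentError as exc:
    print(exc)   # argument -f/--force: conflicting option string: -f

# conflict_handler='resolve': '-f' is removed from the first action, '--foo' remains usable
parser = argparse.ArgumentParser(prog='demo', conflict_handler='resolve')
parser.add_argument('-f', '--foo')
parser.add_argument('-f', '--force')
print(parser.parse_args(['-f', 'x', '--foo', 'y']))   # Namespace(foo='y', force='x')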
def _handle_conflict_resolve(self, action, conflicting_actions): + + # remove all conflicting options + for option_string, action in conflicting_actions: + + # remove the conflicting option + action.option_strings.remove(option_string) + self._option_string_actions.pop(option_string, None) + + # if the option now has no option string, remove it from the + # container holding it + if not action.option_strings: + action.container._remove_action(action) + + +class _ArgumentGroup(_ActionsContainer): + + def __init__(self, container, title=None, description=None, **kwargs): + # add any missing keyword arguments by checking the container + update = kwargs.setdefault + update('conflict_handler', container.conflict_handler) + update('prefix_chars', container.prefix_chars) + update('argument_default', container.argument_default) + super_init = super(_ArgumentGroup, self).__init__ + super_init(description=description, **kwargs) + + # group attributes + self.title = title + self._group_actions = [] + + # share most attributes with the container + self._registries = container._registries + self._actions = container._actions + self._option_string_actions = container._option_string_actions + self._defaults = container._defaults + self._has_negative_number_optionals = \ + container._has_negative_number_optionals + self._mutually_exclusive_groups = container._mutually_exclusive_groups + + def _add_action(self, action): + action = super(_ArgumentGroup, self)._add_action(action) + self._group_actions.append(action) + return action + + def _remove_action(self, action): + super(_ArgumentGroup, self)._remove_action(action) + self._group_actions.remove(action) + + +class _MutuallyExclusiveGroup(_ArgumentGroup): + + def __init__(self, container, required=False): + super(_MutuallyExclusiveGroup, self).__init__(container) + self.required = required + self._container = container + + def _add_action(self, action): + if action.required: + msg = _('mutually exclusive arguments must be optional') + raise ValueError(msg) + action = self._container._add_action(action) + self._group_actions.append(action) + return action + + def _remove_action(self, action): + self._container._remove_action(action) + self._group_actions.remove(action) + + +class ArgumentParser(_AttributeHolder, _ActionsContainer): + """Object for parsing command line strings into Python objects. 
+ + Keyword Arguments: + - prog -- The name of the program (default: sys.argv[0]) + - usage -- A usage message (default: auto-generated from arguments) + - description -- A description of what the program does + - epilog -- Text following the argument descriptions + - parents -- Parsers whose arguments should be copied into this one + - formatter_class -- HelpFormatter class for printing help messages + - prefix_chars -- Characters that prefix optional arguments + - fromfile_prefix_chars -- Characters that prefix files containing + additional arguments + - argument_default -- The default value for all arguments + - conflict_handler -- String indicating how to handle conflicts + - add_help -- Add a -h/-help option + """ + + def __init__(self, + prog=None, + usage=None, + description=None, + epilog=None, + parents=[], + formatter_class=HelpFormatter, + prefix_chars='-', + fromfile_prefix_chars=None, + argument_default=None, + conflict_handler='error', + add_help=True): + + superinit = super(ArgumentParser, self).__init__ + superinit(description=description, + prefix_chars=prefix_chars, + argument_default=argument_default, + conflict_handler=conflict_handler) + + # default setting for prog + if prog is None: + prog = _os.path.basename(_sys.argv[0]) + + self.prog = prog + self.usage = usage + self.epilog = epilog + self.formatter_class = formatter_class + self.fromfile_prefix_chars = fromfile_prefix_chars + self.add_help = add_help + + add_group = self.add_argument_group + self._positionals = add_group(_('positional arguments')) + self._optionals = add_group(_('optional arguments')) + self._subparsers = None + + # register types + def identity(string): + return string + self.register('type', None, identity) + + # add help argument if necessary + # (using explicit default to override global argument_default) + default_prefix = '-' if '-' in prefix_chars else prefix_chars[0] + if self.add_help: + self.add_argument( + default_prefix+'h', default_prefix*2+'help', + action='help', default=SUPPRESS, + help=_('show this help message and exit')) + + # add parent arguments and defaults + for parent in parents: + self._add_container_actions(parent) + try: + defaults = parent._defaults + except AttributeError: + pass + else: + self._defaults.update(defaults) + + # ======================= + # Pretty __repr__ methods + # ======================= + def _get_kwargs(self): + names = [ + 'prog', + 'usage', + 'description', + 'formatter_class', + 'conflict_handler', + 'add_help', + ] + return [(name, getattr(self, name)) for name in names] + + # ================================== + # Optional/Positional adding methods + # ================================== + def add_subparsers(self, **kwargs): + if self._subparsers is not None: + self.error(_('cannot have multiple subparser arguments')) + + # add the parser class to the arguments if it's not present + kwargs.setdefault('parser_class', type(self)) + + if 'title' in kwargs or 'description' in kwargs: + title = _(kwargs.pop('title', 'subcommands')) + description = _(kwargs.pop('description', None)) + self._subparsers = self.add_argument_group(title, description) + else: + self._subparsers = self._positionals + + # prog defaults to the usage message of this parser, skipping + # optional arguments and with no "usage:" prefix + if kwargs.get('prog') is None: + formatter = self._get_formatter() + positionals = self._get_positional_actions() + groups = self._mutually_exclusive_groups + formatter.add_usage(self.usage, positionals, groups, '') + kwargs['prog'] = 
formatter.format_help().strip() + + # create the parsers action and add it to the positionals list + parsers_class = self._pop_action_class(kwargs, 'parsers') + action = parsers_class(option_strings=[], **kwargs) + self._subparsers._add_action(action) + + # return the created parsers action + return action + + def _add_action(self, action): + if action.option_strings: + self._optionals._add_action(action) + else: + self._positionals._add_action(action) + return action + + def _get_optional_actions(self): + return [action + for action in self._actions + if action.option_strings] + + def _get_positional_actions(self): + return [action + for action in self._actions + if not action.option_strings] + + # ===================================== + # Command line argument parsing methods + # ===================================== + def parse_args(self, args=None, namespace=None): + args, argv = self.parse_known_args(args, namespace) + if argv: + msg = _('unrecognized arguments: %s') + self.error(msg % ' '.join(argv)) + return args + + def parse_known_args(self, args=None, namespace=None): + if args is None: + # args default to the system args + args = _sys.argv[1:] + else: + # make sure that args are mutable + args = list(args) + + # default Namespace built from parser defaults + if namespace is None: + namespace = Namespace() + + # add any action defaults that aren't present + for action in self._actions: + if action.dest is not SUPPRESS: + if not hasattr(namespace, action.dest): + if action.default is not SUPPRESS: + setattr(namespace, action.dest, action.default) + + # add any parser defaults that aren't present + for dest in self._defaults: + if not hasattr(namespace, dest): + setattr(namespace, dest, self._defaults[dest]) + + # parse the arguments and exit if there are any errors + try: + namespace, args = self._parse_known_args(args, namespace) + if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR): + args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR)) + delattr(namespace, _UNRECOGNIZED_ARGS_ATTR) + return namespace, args + except ArgumentError: + err = _sys.exc_info()[1] + self.error(str(err)) + + def _parse_known_args(self, arg_strings, namespace): + # replace arg strings that are file references + if self.fromfile_prefix_chars is not None: + arg_strings = self._read_args_from_files(arg_strings) + + # map all mutually exclusive arguments to the other arguments + # they can't occur with + action_conflicts = {} + for mutex_group in self._mutually_exclusive_groups: + group_actions = mutex_group._group_actions + for i, mutex_action in enumerate(mutex_group._group_actions): + conflicts = action_conflicts.setdefault(mutex_action, []) + conflicts.extend(group_actions[:i]) + conflicts.extend(group_actions[i + 1:]) + + # find all option indices, and determine the arg_string_pattern + # which has an 'O' if there is an option at an index, + # an 'A' if there is an argument, or a '-' if there is a '--' + option_string_indices = {} + arg_string_pattern_parts = [] + arg_strings_iter = iter(arg_strings) + for i, arg_string in enumerate(arg_strings_iter): + + # all args after -- are non-options + if arg_string == '--': + arg_string_pattern_parts.append('-') + for arg_string in arg_strings_iter: + arg_string_pattern_parts.append('A') + + # otherwise, add the arg to the arg strings + # and note the index if it was an option + else: + option_tuple = self._parse_optional(arg_string) + if option_tuple is None: + pattern = 'A' + else: + option_string_indices[i] = option_tuple + pattern = 'O' + 
arg_string_pattern_parts.append(pattern) + + # join the pieces together to form the pattern + arg_strings_pattern = ''.join(arg_string_pattern_parts) + + # converts arg strings to the appropriate and then takes the action + seen_actions = set() + seen_non_default_actions = set() + + def take_action(action, argument_strings, option_string=None): + seen_actions.add(action) + argument_values = self._get_values(action, argument_strings) + + # error if this argument is not allowed with other previously + # seen arguments, assuming that actions that use the default + # value don't really count as "present" + if argument_values is not action.default: + seen_non_default_actions.add(action) + for conflict_action in action_conflicts.get(action, []): + if conflict_action in seen_non_default_actions: + msg = _('not allowed with argument %s') + action_name = _get_action_name(conflict_action) + raise ArgumentError(action, msg % action_name) + + # take the action if we didn't receive a SUPPRESS value + # (e.g. from a default) + if argument_values is not SUPPRESS: + action(self, namespace, argument_values, option_string) + + # function to convert arg_strings into an optional action + def consume_optional(start_index): + + # get the optional identified at this index + option_tuple = option_string_indices[start_index] + action, option_string, explicit_arg = option_tuple + + # identify additional optionals in the same arg string + # (e.g. -xyz is the same as -x -y -z if no args are required) + match_argument = self._match_argument + action_tuples = [] + while True: + + # if we found no optional action, skip it + if action is None: + extras.append(arg_strings[start_index]) + return start_index + 1 + + # if there is an explicit argument, try to match the + # optional's string arguments to only this + if explicit_arg is not None: + arg_count = match_argument(action, 'A') + + # if the action is a single-dash option and takes no + # arguments, try to parse more single-dash options out + # of the tail of the option string + chars = self.prefix_chars + if arg_count == 0 and option_string[1] not in chars: + action_tuples.append((action, [], option_string)) + char = option_string[0] + option_string = char + explicit_arg[0] + new_explicit_arg = explicit_arg[1:] or None + optionals_map = self._option_string_actions + if option_string in optionals_map: + action = optionals_map[option_string] + explicit_arg = new_explicit_arg + else: + msg = _('ignored explicit argument %r') + raise ArgumentError(action, msg % explicit_arg) + + # if the action expect exactly one argument, we've + # successfully matched the option; exit the loop + elif arg_count == 1: + stop = start_index + 1 + args = [explicit_arg] + action_tuples.append((action, args, option_string)) + break + + # error if a double-dash option did not use the + # explicit argument + else: + msg = _('ignored explicit argument %r') + raise ArgumentError(action, msg % explicit_arg) + + # if there is no explicit argument, try to match the + # optional's string arguments with the following strings + # if successful, exit the loop + else: + start = start_index + 1 + selected_patterns = arg_strings_pattern[start:] + arg_count = match_argument(action, selected_patterns) + stop = start + arg_count + args = arg_strings[start:stop] + action_tuples.append((action, args, option_string)) + break + + # add the Optional to the list and return the index at which + # the Optional's string args stopped + assert action_tuples + for action, args, option_string in action_tuples: + 
take_action(action, args, option_string) + return stop + + # the list of Positionals left to be parsed; this is modified + # by consume_positionals() + positionals = self._get_positional_actions() + + # function to convert arg_strings into positional actions + def consume_positionals(start_index): + # match as many Positionals as possible + match_partial = self._match_arguments_partial + selected_pattern = arg_strings_pattern[start_index:] + arg_counts = match_partial(positionals, selected_pattern) + + # slice off the appropriate arg strings for each Positional + # and add the Positional and its args to the list + for action, arg_count in zip(positionals, arg_counts): + args = arg_strings[start_index: start_index + arg_count] + start_index += arg_count + take_action(action, args) + + # slice off the Positionals that we just parsed and return the + # index at which the Positionals' string args stopped + positionals[:] = positionals[len(arg_counts):] + return start_index + + # consume Positionals and Optionals alternately, until we have + # passed the last option string + extras = [] + start_index = 0 + if option_string_indices: + max_option_string_index = max(option_string_indices) + else: + max_option_string_index = -1 + while start_index <= max_option_string_index: + + # consume any Positionals preceding the next option + next_option_string_index = min([ + index + for index in option_string_indices + if index >= start_index]) + if start_index != next_option_string_index: + positionals_end_index = consume_positionals(start_index) + + # only try to parse the next optional if we didn't consume + # the option string during the positionals parsing + if positionals_end_index > start_index: + start_index = positionals_end_index + continue + else: + start_index = positionals_end_index + + # if we consumed all the positionals we could and we're not + # at the index of an option string, there were extra arguments + if start_index not in option_string_indices: + strings = arg_strings[start_index:next_option_string_index] + extras.extend(strings) + start_index = next_option_string_index + + # consume the next optional and any arguments for it + start_index = consume_optional(start_index) + + # consume any positionals following the last Optional + stop_index = consume_positionals(start_index) + + # if we didn't consume all the argument strings, there were extras + extras.extend(arg_strings[stop_index:]) + + # make sure all required actions were present and also convert + # action defaults which were not given as arguments + required_actions = [] + for action in self._actions: + if action not in seen_actions: + if action.required: + required_actions.append(_get_action_name(action)) + else: + # Convert action default now instead of doing it before + # parsing arguments to avoid calling convert functions + # twice (which may fail) if the argument was given, but + # only if it was defined already in the namespace + if (action.default is not None and + isinstance(action.default, str) and + hasattr(namespace, action.dest) and + action.default is getattr(namespace, action.dest)): + setattr(namespace, action.dest, + self._get_value(action, action.default)) + + if required_actions: + self.error(_('the following arguments are required: %s') % + ', '.join(required_actions)) + + # make sure all required groups had one option present + for group in self._mutually_exclusive_groups: + if group.required: + for action in group._group_actions: + if action in seen_non_default_actions: + break + + # if no actions were 
used, report the error + else: + names = [_get_action_name(action) + for action in group._group_actions + if action.help is not SUPPRESS] + msg = _('one of the arguments %s is required') + self.error(msg % ' '.join(names)) + + # return the updated namespace and the extra arguments + return namespace, extras + + def _read_args_from_files(self, arg_strings): + # expand arguments referencing files + new_arg_strings = [] + for arg_string in arg_strings: + + # for regular arguments, just add them back into the list + if not arg_string or arg_string[0] not in self.fromfile_prefix_chars: + new_arg_strings.append(arg_string) + + # replace arguments referencing files with the file content + else: + try: + with open(arg_string[1:]) as args_file: + arg_strings = [] + for arg_line in args_file.read().splitlines(): + for arg in self.convert_arg_line_to_args(arg_line): + arg_strings.append(arg) + arg_strings = self._read_args_from_files(arg_strings) + new_arg_strings.extend(arg_strings) + except OSError: + err = _sys.exc_info()[1] + self.error(str(err)) + + # return the modified argument list + return new_arg_strings + + def convert_arg_line_to_args(self, arg_line): + return [arg_line] + + def _match_argument(self, action, arg_strings_pattern): + # match the pattern for this action to the arg strings + nargs_pattern = self._get_nargs_pattern(action) + match = _re.match(nargs_pattern, arg_strings_pattern) + + # raise an exception if we weren't able to find a match + if match is None: + nargs_errors = { + None: _('expected one argument'), + OPTIONAL: _('expected at most one argument'), + ONE_OR_MORE: _('expected at least one argument'), + } + default = ngettext('expected %s argument', + 'expected %s arguments', + action.nargs) % action.nargs + msg = nargs_errors.get(action.nargs, default) + raise ArgumentError(action, msg) + + # return the number of arguments matched + return len(match.group(1)) + + def _match_arguments_partial(self, actions, arg_strings_pattern): + # progressively shorten the actions list by slicing off the + # final actions until we find a match + result = [] + for i in range(len(actions), 0, -1): + actions_slice = actions[:i] + pattern = ''.join([self._get_nargs_pattern(action) + for action in actions_slice]) + match = _re.match(pattern, arg_strings_pattern) + if match is not None: + result.extend([len(string) for string in match.groups()]) + break + + # return the list of arg string counts + return result + + def _parse_optional(self, arg_string): + # if it's an empty string, it was meant to be a positional + if not arg_string: + return None + + # if it doesn't start with a prefix, it was meant to be positional + if not arg_string[0] in self.prefix_chars: + return None + + # if the option string is present in the parser, return the action + if arg_string in self._option_string_actions: + action = self._option_string_actions[arg_string] + return action, arg_string, None + + # if it's just a single character, it was meant to be positional + if len(arg_string) == 1: + return None + + # if the option string before the "=" is present, return the action + if '=' in arg_string: + option_string, explicit_arg = arg_string.split('=', 1) + if option_string in self._option_string_actions: + action = self._option_string_actions[option_string] + return action, option_string, explicit_arg + + # search through all possible prefixes of the option string + # and all actions in the parser for possible interpretations + option_tuples = self._get_option_tuples(arg_string) + + # if multiple actions match, 
the option string was ambiguous + if len(option_tuples) > 1: + options = ', '.join([option_string + for action, option_string, explicit_arg in option_tuples]) + args = {'option': arg_string, 'matches': options} + msg = _('ambiguous option: %(option)s could match %(matches)s') + self.error(msg % args) + + # if exactly one action matched, this segmentation is good, + # so return the parsed action + elif len(option_tuples) == 1: + option_tuple, = option_tuples + return option_tuple + + # if it was not found as an option, but it looks like a negative + # number, it was meant to be positional + # unless there are negative-number-like options + if self._negative_number_matcher.match(arg_string): + if not self._has_negative_number_optionals: + return None + + # if it contains a space, it was meant to be a positional + if ' ' in arg_string: + return None + + # it was meant to be an optional but there is no such option + # in this parser (though it might be a valid option in a subparser) + return None, arg_string, None + + def _get_option_tuples(self, option_string): + result = [] + + # option strings starting with two prefix characters are only + # split at the '=' + chars = self.prefix_chars + if option_string[0] in chars and option_string[1] in chars: + if '=' in option_string: + option_prefix, explicit_arg = option_string.split('=', 1) + else: + option_prefix = option_string + explicit_arg = None + for option_string in self._option_string_actions: + if option_string.startswith(option_prefix): + action = self._option_string_actions[option_string] + tup = action, option_string, explicit_arg + result.append(tup) + + # single character options can be concatenated with their arguments + # but multiple character options always have to have their argument + # separate + elif option_string[0] in chars and option_string[1] not in chars: + option_prefix = option_string + explicit_arg = None + short_option_prefix = option_string[:2] + short_explicit_arg = option_string[2:] + + for option_string in self._option_string_actions: + if option_string == short_option_prefix: + action = self._option_string_actions[option_string] + tup = action, option_string, short_explicit_arg + result.append(tup) + elif option_string.startswith(option_prefix): + action = self._option_string_actions[option_string] + tup = action, option_string, explicit_arg + result.append(tup) + + # shouldn't ever get here + else: + self.error(_('unexpected option string: %s') % option_string) + + # return the collected option tuples + return result + + def _get_nargs_pattern(self, action): + # in all examples below, we have to allow for '--' args + # which are represented as '-' in the pattern + nargs = action.nargs + + # the default (None) is assumed to be a single argument + if nargs is None: + nargs_pattern = '(-*A-*)' + + # allow zero or one arguments + elif nargs == OPTIONAL: + nargs_pattern = '(-*A?-*)' + + # allow zero or more arguments + elif nargs == ZERO_OR_MORE: + nargs_pattern = '(-*[A-]*)' + + # allow one or more arguments + elif nargs == ONE_OR_MORE: + nargs_pattern = '(-*A[A-]*)' + + # allow any number of options or arguments + elif nargs == REMAINDER: + nargs_pattern = '([-AO]*)' + + # allow one argument followed by any number of options or arguments + elif nargs == PARSER: + nargs_pattern = '(-*A[-AO]*)' + + # all others should be integers + else: + nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) + + # if this is an optional action, -- is not allowed + if action.option_strings: + nargs_pattern = nargs_pattern.replace('-*', 
'') + nargs_pattern = nargs_pattern.replace('-', '') + + # return the pattern + return nargs_pattern + + # ======================== + # Value conversion methods + # ======================== + def _get_values(self, action, arg_strings): + # for everything but PARSER, REMAINDER args, strip out first '--' + if action.nargs not in [PARSER, REMAINDER]: + try: + arg_strings.remove('--') + except ValueError: + pass + + # optional argument produces a default when not present + if not arg_strings and action.nargs == OPTIONAL: + if action.option_strings: + value = action.const + else: + value = action.default + if isinstance(value, str): + value = self._get_value(action, value) + self._check_value(action, value) + + # when nargs='*' on a positional, if there were no command-line + # args, use the default if it is anything other than None + elif (not arg_strings and action.nargs == ZERO_OR_MORE and + not action.option_strings): + if action.default is not None: + value = action.default + else: + value = arg_strings + self._check_value(action, value) + + # single argument or optional argument produces a single value + elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: + arg_string, = arg_strings + value = self._get_value(action, arg_string) + self._check_value(action, value) + + # REMAINDER arguments convert all values, checking none + elif action.nargs == REMAINDER: + value = [self._get_value(action, v) for v in arg_strings] + + # PARSER arguments convert all values, but check only the first + elif action.nargs == PARSER: + value = [self._get_value(action, v) for v in arg_strings] + self._check_value(action, value[0]) + + # all other types of nargs produce a list + else: + value = [self._get_value(action, v) for v in arg_strings] + for v in value: + self._check_value(action, v) + + # return the converted value + return value + + def _get_value(self, action, arg_string): + type_func = self._registry_get('type', action.type, action.type) + if not callable(type_func): + msg = _('%r is not callable') + raise ArgumentError(action, msg % type_func) + + # convert the value to the appropriate type + try: + result = type_func(arg_string) + + # ArgumentTypeErrors indicate errors + except ArgumentTypeError: + name = getattr(action.type, '__name__', repr(action.type)) + msg = str(_sys.exc_info()[1]) + raise ArgumentError(action, msg) + + # TypeErrors or ValueErrors also indicate errors + except (TypeError, ValueError): + name = getattr(action.type, '__name__', repr(action.type)) + args = {'type': name, 'value': arg_string} + msg = _('invalid %(type)s value: %(value)r') + raise ArgumentError(action, msg % args) + + # return the converted value + return result + + def _check_value(self, action, value): + # converted value must be one of the choices (if specified) + if action.choices is not None and value not in action.choices: + args = {'value': value, + 'choices': ', '.join(map(repr, action.choices))} + msg = _('invalid choice: %(value)r (choose from %(choices)s)') + raise ArgumentError(action, msg % args) + + # ======================= + # Help-formatting methods + # ======================= + def format_usage(self): + formatter = self._get_formatter() + formatter.add_usage(self.usage, self._actions, + self._mutually_exclusive_groups) + return formatter.format_help() + + def format_help(self): + formatter = self._get_formatter() + + # usage + formatter.add_usage(self.usage, self._actions, + self._mutually_exclusive_groups) + + # description + formatter.add_text(self.description) + + # positionals, 
optionals and user-defined groups + for action_group in self._action_groups: + formatter.start_section(action_group.title) + formatter.add_text(action_group.description) + formatter.add_arguments(action_group._group_actions) + formatter.end_section() + + # epilog + formatter.add_text(self.epilog) + + # determine help from format above + return formatter.format_help() + + def _get_formatter(self): + return self.formatter_class(prog=self.prog) + + # ===================== + # Help-printing methods + # ===================== + def print_usage(self, file=None): + if file is None: + file = _sys.stdout + self._print_message(self.format_usage(), file) + + def print_help(self, file=None): + if file is None: + file = _sys.stdout + self._print_message(self.format_help(), file) + + def _print_message(self, message, file=None): + if message: + if file is None: + file = _sys.stderr + file.write(message) + + # =============== + # Exiting methods + # =============== + def exit(self, status=0, message=None): + if message: + self._print_message(message, _sys.stderr) + _sys.exit(status) + + def error(self, message): + """error(message: string) + + Prints a usage message incorporating the message to stderr and + exits. + + If you override this in a subclass, it should not return -- it + should either exit or raise an exception. + """ + self.print_usage(_sys.stderr) + args = {'prog': self.prog, 'message': message} + self.exit(2, _('%(prog)s: error: %(message)s\n') % args) diff --git a/lib/assets/Lib/atexit.py b/lib/assets/Lib/atexit.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/atexit.py @@ -0,0 +1,37 @@ +"""allow programmer to define multiple exit functions to be executedupon normal program termination. + +Two public functions, register and unregister, are defined. +""" + + +class __loader__(object): + pass + +def _clear(*args,**kw): + """_clear() -> None + Clear the list of previously registered exit functions.""" + pass + +def _run_exitfuncs(*args,**kw): + """_run_exitfuncs() -> None + Run all registered exit functions.""" + pass + +def register(*args,**kw): + """register(func, *args, **kwargs) -> func + Register a function to be executed upon normal program termination + + func - function to be called at exit + args - optional arguments to pass to func + kwargs - optional keyword arguments to pass to func + + func is returned to facilitate usage as a decorator.""" + pass + +def unregister(*args,**kw): + """unregister(func) -> None + Unregister a exit function which was previously registered using + atexit.register + + func - function to be unregistered""" + pass diff --git a/lib/assets/Lib/base64.py b/lib/assets/Lib/base64.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/base64.py @@ -0,0 +1,406 @@ +#! /usr/bin/env python3 + +"""RFC 3548: Base16, Base32, Base64 Data Encodings""" + +# Modified 04-Oct-1995 by Jack Jansen to use binascii module +# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support +# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere + +#import re +#import struct +#import binascii + +import _base64 # Javascript module in libs + +__all__ = [ + # Legacy interface exports traditional RFC 1521 Base64 encodings + 'encode', 'decode', 'encodebytes', 'decodebytes', + # Generalized interface for other encodings + 'b64encode', 'b64decode', 'b32encode', 'b32decode', + 'b16encode', 'b16decode', + # Standard Base64 encoding + 'standard_b64encode', 'standard_b64decode', + # Some common Base64 alternatives. 
As referenced by RFC 3458, see thread + # starting at: + # + # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html + 'urlsafe_b64encode', 'urlsafe_b64decode', + ] + + +bytes_types = (bytes, bytearray) # Types acceptable as binary data + +def _bytes_from_decode_data(s): + if isinstance(s, str): + try: + return s.encode('ascii') + except UnicodeEncodeError: + raise ValueError('string argument should contain only ASCII characters') + elif isinstance(s, bytes_types): + return s + else: + raise TypeError("argument should be bytes or ASCII string, not %s" % s.__class__.__name__) + + + +# Base64 encoding/decoding uses binascii + +def b64encode(s, altchars=None): + """Encode a byte string using Base64. + + s is the byte string to encode. Optional altchars must be a byte + string of length 2 which specifies an alternative alphabet for the + '+' and '/' characters. This allows an application to + e.g. generate url or filesystem safe Base64 strings. + + The encoded byte string is returned. + """ + if not isinstance(s, bytes_types): + raise TypeError("expected bytes, not %s" % s.__class__.__name__) + if altchars is not None: + if not isinstance(altchars, bytes_types): + print('wrong altchars') + raise TypeError("expected bytes, not %s" + % altchars.__class__.__name__) + assert len(altchars) >= 2, repr(altchars) + return _base64.Base64.encode(s, altchars) + + +def b64decode(s, altchars=None, validate=False): + """Decode a Base64 encoded byte string. + + s is the byte string to decode. Optional altchars must be a + string of length 2 which specifies the alternative alphabet used + instead of the '+' and '/' characters. + + The decoded string is returned. A binascii.Error is raised if s is + incorrectly padded. + + If validate is False (the default), non-base64-alphabet characters are + discarded prior to the padding check. If validate is True, + non-base64-alphabet characters in the input result in a binascii.Error. + """ + if altchars is not None: + altchars = _bytes_from_decode_data(altchars) + assert len(altchars) == 2, repr(altchars) + s = s.translate(bytes.maketrans(altchars, b'+/')) + return _base64.Base64.decode(s, altchars, validate) + + +def standard_b64encode(s): + """Encode a byte string using the standard Base64 alphabet. + + s is the byte string to encode. The encoded byte string is returned. + """ + return b64encode(s) + +def standard_b64decode(s): + """Decode a byte string encoded with the standard Base64 alphabet. + + s is the byte string to decode. The decoded byte string is + returned. binascii.Error is raised if the input is incorrectly + padded or if there are non-alphabet characters present in the + input. + """ + return b64decode(s) + + +_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_') +_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/') + +def urlsafe_b64encode(s): + """Encode a byte string using a url-safe Base64 alphabet. + + s is the byte string to encode. The encoded byte string is + returned. The alphabet uses '-' instead of '+' and '_' instead of + '/'. + """ + return b64encode(s).translate(_urlsafe_encode_translation) + +def urlsafe_b64decode(s): + """Decode a byte string encoded with the standard Base64 alphabet. + + s is the byte string to decode. The decoded byte string is + returned. binascii.Error is raised if the input is incorrectly + padded or if there are non-alphabet characters present in the + input. + + The alphabet uses '-' instead of '+' and '_' instead of '/'. 
+ """ + s = _bytes_from_decode_data(s) + s = s.translate(_urlsafe_decode_translation) + return b64decode(s) + + + +# Base32 encoding/decoding must be done in Python +_b32alphabet = { + 0: b'A', 9: b'J', 18: b'S', 27: b'3', + 1: b'B', 10: b'K', 19: b'T', 28: b'4', + 2: b'C', 11: b'L', 20: b'U', 29: b'5', + 3: b'D', 12: b'M', 21: b'V', 30: b'6', + 4: b'E', 13: b'N', 22: b'W', 31: b'7', + 5: b'F', 14: b'O', 23: b'X', + 6: b'G', 15: b'P', 24: b'Y', + 7: b'H', 16: b'Q', 25: b'Z', + 8: b'I', 17: b'R', 26: b'2', + } + +_b32tab = [v[0] for k, v in sorted(_b32alphabet.items())] +_b32rev = dict([(v[0], k) for k, v in _b32alphabet.items()]) + + +def b32encode(s): + """Encode a byte string using Base32. + + s is the byte string to encode. The encoded byte string is returned. + """ + if not isinstance(s, bytes_types): + raise TypeError("expected bytes, not %s" % s.__class__.__name__) + quanta, leftover = divmod(len(s), 5) + # Pad the last quantum with zero bits if necessary + if leftover: + s = s + bytes(5 - leftover) # Don't use += ! + quanta += 1 + encoded = bytearray() + for i in range(quanta): + # c1 and c2 are 16 bits wide, c3 is 8 bits wide. The intent of this + # code is to process the 40 bits in units of 5 bits. So we take the 1 + # leftover bit of c1 and tack it onto c2. Then we take the 2 leftover + # bits of c2 and tack them onto c3. The shifts and masks are intended + # to give us values of exactly 5 bits in width. + c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5]) + c2 += (c1 & 1) << 16 # 17 bits wide + c3 += (c2 & 3) << 8 # 10 bits wide + encoded += bytes([_b32tab[c1 >> 11], # bits 1 - 5 + _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10 + _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15 + _b32tab[c2 >> 12], # bits 16 - 20 (1 - 5) + _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10) + _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15) + _b32tab[c3 >> 5], # bits 31 - 35 (1 - 5) + _b32tab[c3 & 0x1f], # bits 36 - 40 (1 - 5) + ]) + # Adjust for any leftover partial quanta + if leftover == 1: + encoded[-6:] = b'======' + elif leftover == 2: + encoded[-4:] = b'====' + elif leftover == 3: + encoded[-3:] = b'===' + elif leftover == 4: + encoded[-1:] = b'=' + return bytes(encoded) + + +def b32decode(s, casefold=False, map01=None): + """Decode a Base32 encoded byte string. + + s is the byte string to decode. Optional casefold is a flag + specifying whether a lowercase alphabet is acceptable as input. + For security purposes, the default is False. + + RFC 3548 allows for optional mapping of the digit 0 (zero) to the + letter O (oh), and for optional mapping of the digit 1 (one) to + either the letter I (eye) or letter L (el). The optional argument + map01 when not None, specifies which letter the digit 1 should be + mapped to (when map01 is not None, the digit 0 is always mapped to + the letter O). For security purposes the default is None, so that + 0 and 1 are not allowed in the input. + + The decoded byte string is returned. binascii.Error is raised if + the input is incorrectly padded or if there are non-alphabet + characters present in the input. + """ + s = _bytes_from_decode_data(s) + quanta, leftover = divmod(len(s), 8) + if leftover: + raise binascii.Error('Incorrect padding') + # Handle section 2.4 zero and one mapping. The flag map01 will be either + # False, or the character to map the digit 1 (one) to. It should be + # either L (el) or I (eye). 
+ if map01 is not None: + map01 = _bytes_from_decode_data(map01) + assert len(map01) == 1, repr(map01) + s = s.translate(bytes.maketrans(b'01', b'O' + map01)) + if casefold: + s = s.upper() + # Strip off pad characters from the right. We need to count the pad + # characters because this will tell us how many null bytes to remove from + # the end of the decoded string. + padchars = 0 + mo = re.search(b'(?P[=]*)$', s) + if mo: + padchars = len(mo.group('pad')) + if padchars > 0: + s = s[:-padchars] + # Now decode the full quanta + parts = [] + acc = 0 + shift = 35 + for c in s: + val = _b32rev.get(c) + if val is None: + raise binascii.Error('Non-base32 digit found') + acc += _b32rev[c] << shift + shift -= 5 + if shift < 0: + parts.append(binascii.unhexlify(bytes('%010x' % acc, "ascii"))) + acc = 0 + shift = 35 + # Process the last, partial quanta + last = binascii.unhexlify(bytes('%010x' % acc, "ascii")) + if padchars == 0: + last = b'' # No characters + elif padchars == 1: + last = last[:-1] + elif padchars == 3: + last = last[:-2] + elif padchars == 4: + last = last[:-3] + elif padchars == 6: + last = last[:-4] + else: + raise binascii.Error('Incorrect padding') + parts.append(last) + return b''.join(parts) + + + +# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns +# lowercase. The RFC also recommends against accepting input case +# insensitively. +def b16encode(s): + """Encode a byte string using Base16. + + s is the byte string to encode. The encoded byte string is returned. + """ + if not isinstance(s, bytes_types): + raise TypeError("expected bytes, not %s" % s.__class__.__name__) + return binascii.hexlify(s).upper() + + +def b16decode(s, casefold=False): + """Decode a Base16 encoded byte string. + + s is the byte string to decode. Optional casefold is a flag + specifying whether a lowercase alphabet is acceptable as input. + For security purposes, the default is False. + + The decoded byte string is returned. binascii.Error is raised if + s were incorrectly padded or if there are non-alphabet characters + present in the string. + """ + s = _bytes_from_decode_data(s) + if casefold: + s = s.upper() + if re.search(b'[^0-9A-F]', s): + raise binascii.Error('Non-base16 digit found') + return binascii.unhexlify(s) + + + +# Legacy interface. This code could be cleaned up since I don't believe +# binascii has any line length limitations. It just doesn't seem worth it +# though. The files should be opened in binary mode. 
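+#
+# A rough usage sketch of the file-to-file helpers defined just below
+# (assumes a working binascii module; note that its import at the top of
+# this file is commented out):
+#
+#     import io, base64
+#     src = io.BytesIO(b"hello world")
+#     dst = io.BytesIO()
+#     base64.encode(src, dst)       # legacy file-to-file interface
+#     dst.getvalue()                # b'aGVsbG8gd29ybGQ=\n'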
+ +MAXLINESIZE = 76 # Excluding the CRLF +MAXBINSIZE = (MAXLINESIZE//4)*3 + +def encode(input, output): + """Encode a file; input and output are binary files.""" + while True: + s = input.read(MAXBINSIZE) + if not s: + break + while len(s) < MAXBINSIZE: + ns = input.read(MAXBINSIZE-len(s)) + if not ns: + break + s += ns + line = binascii.b2a_base64(s) + output.write(line) + + +def decode(input, output): + """Decode a file; input and output are binary files.""" + while True: + line = input.readline() + if not line: + break + s = binascii.a2b_base64(line) + output.write(s) + + +def encodebytes(s): + """Encode a bytestring into a bytestring containing multiple lines + of base-64 data.""" + if not isinstance(s, bytes_types): + raise TypeError("expected bytes, not %s" % s.__class__.__name__) + pieces = [] + for i in range(0, len(s), MAXBINSIZE): + chunk = s[i : i + MAXBINSIZE] + pieces.append(binascii.b2a_base64(chunk)) + return b"".join(pieces) + +def encodestring(s): + """Legacy alias of encodebytes().""" + import warnings + warnings.warn("encodestring() is a deprecated alias, use encodebytes()", + DeprecationWarning, 2) + return encodebytes(s) + + +def decodebytes(s): + """Decode a bytestring of base-64 data into a bytestring.""" + if not isinstance(s, bytes_types): + raise TypeError("expected bytes, not %s" % s.__class__.__name__) + return binascii.a2b_base64(s) + +def decodestring(s): + """Legacy alias of decodebytes().""" + import warnings + warnings.warn("decodestring() is a deprecated alias, use decodebytes()", + DeprecationWarning, 2) + return decodebytes(s) + + +# Usable as a script... +def main(): + """Small main program""" + import sys, getopt + try: + opts, args = getopt.getopt(sys.argv[1:], 'deut') + except getopt.error as msg: + sys.stdout = sys.stderr + print(msg) + print("""usage: %s [-d|-e|-u|-t] [file|-] + -d, -u: decode + -e: encode (default) + -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]) + sys.exit(2) + func = encode + for o, a in opts: + if o == '-e': func = encode + if o == '-d': func = decode + if o == '-u': func = decode + if o == '-t': test(); return + if args and args[0] != '-': + with open(args[0], 'rb') as f: + func(f, sys.stdout.buffer) + else: + func(sys.stdin.buffer, sys.stdout.buffer) + + +def test(): + s0 = b"Aladdin:open sesame" + print(repr(s0)) + s1 = encodebytes(s0) + print(repr(s1)) + s2 = decodebytes(s1) + print(repr(s2)) + assert s0 == s2 + + +if __name__ == '__main__': + main() diff --git a/lib/assets/Lib/bdb.py b/lib/assets/Lib/bdb.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/bdb.py @@ -0,0 +1,673 @@ +"""Debugger basics""" + +import fnmatch +import sys +import os +from inspect import CO_GENERATOR + +__all__ = ["BdbQuit", "Bdb", "Breakpoint"] + +class BdbQuit(Exception): + """Exception to give up completely.""" + + +class Bdb: + """Generic Python debugger base class. + + This class takes care of details of the trace facility; + a derived class should implement user interaction. + The standard debugger class (pdb.Pdb) is an example. 
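+
+    A minimal illustrative subclass (a sketch only, not a full debugger):
+
+        class Tracer(Bdb):
+            def user_line(self, frame):
+                print(frame.f_code.co_filename, frame.f_lineno)
+
+        Tracer().run('x = 1 + 1')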
+ """ + + def __init__(self, skip=None): + self.skip = set(skip) if skip else None + self.breaks = {} + self.fncache = {} + self.frame_returning = None + + def canonic(self, filename): + if filename == "<" + filename[1:-1] + ">": + return filename + canonic = self.fncache.get(filename) + if not canonic: + canonic = os.path.abspath(filename) + canonic = os.path.normcase(canonic) + self.fncache[filename] = canonic + return canonic + + def reset(self): + import linecache + linecache.checkcache() + self.botframe = None + self._set_stopinfo(None, None) + + def trace_dispatch(self, frame, event, arg): + if self.quitting: + return # None + if event == 'line': + return self.dispatch_line(frame) + if event == 'call': + return self.dispatch_call(frame, arg) + if event == 'return': + return self.dispatch_return(frame, arg) + if event == 'exception': + return self.dispatch_exception(frame, arg) + if event == 'c_call': + return self.trace_dispatch + if event == 'c_exception': + return self.trace_dispatch + if event == 'c_return': + return self.trace_dispatch + print('bdb.Bdb.dispatch: unknown debugging event:', repr(event)) + return self.trace_dispatch + + def dispatch_line(self, frame): + if self.stop_here(frame) or self.break_here(frame): + self.user_line(frame) + if self.quitting: raise BdbQuit + return self.trace_dispatch + + def dispatch_call(self, frame, arg): + # XXX 'arg' is no longer used + if self.botframe is None: + # First call of dispatch since reset() + self.botframe = frame.f_back # (CT) Note that this may also be None! + return self.trace_dispatch + if not (self.stop_here(frame) or self.break_anywhere(frame)): + # No need to trace this function + return # None + # Ignore call events in generator except when stepping. + if self.stopframe and frame.f_code.co_flags & CO_GENERATOR: + return self.trace_dispatch + self.user_call(frame, arg) + if self.quitting: raise BdbQuit + return self.trace_dispatch + + def dispatch_return(self, frame, arg): + if self.stop_here(frame) or frame == self.returnframe: + # Ignore return events in generator except when stepping. + if self.stopframe and frame.f_code.co_flags & CO_GENERATOR: + return self.trace_dispatch + try: + self.frame_returning = frame + self.user_return(frame, arg) + finally: + self.frame_returning = None + if self.quitting: raise BdbQuit + # The user issued a 'next' or 'until' command. + if self.stopframe is frame and self.stoplineno != -1: + self._set_stopinfo(None, None) + return self.trace_dispatch + + def dispatch_exception(self, frame, arg): + if self.stop_here(frame): + # When stepping with next/until/return in a generator frame, skip + # the internal StopIteration exception (with no traceback) + # triggered by a subiterator run with the 'yield from' statement. + if not (frame.f_code.co_flags & CO_GENERATOR + and arg[0] is StopIteration and arg[2] is None): + self.user_exception(frame, arg) + if self.quitting: raise BdbQuit + # Stop at the StopIteration or GeneratorExit exception when the user + # has set stopframe in a generator by issuing a return command, or a + # next/until command at the last statement in the generator before the + # exception. 
+ elif (self.stopframe and frame is not self.stopframe + and self.stopframe.f_code.co_flags & CO_GENERATOR + and arg[0] in (StopIteration, GeneratorExit)): + self.user_exception(frame, arg) + if self.quitting: raise BdbQuit + + return self.trace_dispatch + + # Normally derived classes don't override the following + # methods, but they may if they want to redefine the + # definition of stopping and breakpoints. + + def is_skipped_module(self, module_name): + for pattern in self.skip: + if fnmatch.fnmatch(module_name, pattern): + return True + return False + + def stop_here(self, frame): + # (CT) stopframe may now also be None, see dispatch_call. + # (CT) the former test for None is therefore removed from here. + if self.skip and \ + self.is_skipped_module(frame.f_globals.get('__name__')): + return False + if frame is self.stopframe: + if self.stoplineno == -1: + return False + return frame.f_lineno >= self.stoplineno + if not self.stopframe: + return True + return False + + def break_here(self, frame): + filename = self.canonic(frame.f_code.co_filename) + if filename not in self.breaks: + return False + lineno = frame.f_lineno + if lineno not in self.breaks[filename]: + # The line itself has no breakpoint, but maybe the line is the + # first line of a function with breakpoint set by function name. + lineno = frame.f_code.co_firstlineno + if lineno not in self.breaks[filename]: + return False + + # flag says ok to delete temp. bp + (bp, flag) = effective(filename, lineno, frame) + if bp: + self.currentbp = bp.number + if (flag and bp.temporary): + self.do_clear(str(bp.number)) + return True + else: + return False + + def do_clear(self, arg): + raise NotImplementedError("subclass of bdb must implement do_clear()") + + def break_anywhere(self, frame): + return self.canonic(frame.f_code.co_filename) in self.breaks + + # Derived classes should override the user_* methods + # to gain control. + + def user_call(self, frame, argument_list): + """This method is called when there is the remote possibility + that we ever need to stop in this function.""" + pass + + def user_line(self, frame): + """This method is called when we stop or break at this line.""" + pass + + def user_return(self, frame, return_value): + """This method is called when a return trap is set here.""" + pass + + def user_exception(self, frame, exc_info): + """This method is called if an exception occurs, + but only if we are to stop at or just below this level.""" + pass + + def _set_stopinfo(self, stopframe, returnframe, stoplineno=0): + self.stopframe = stopframe + self.returnframe = returnframe + self.quitting = False + # stoplineno >= 0 means: stop at line >= the stoplineno + # stoplineno -1 means: don't stop at all + self.stoplineno = stoplineno + + # Derived classes and clients can call the following methods + # to affect the stepping state. + + def set_until(self, frame, lineno=None): + """Stop when the line with the line no greater than the current one is + reached or when returning from current frame""" + # the name "until" is borrowed from gdb + if lineno is None: + lineno = frame.f_lineno + 1 + self._set_stopinfo(frame, frame, lineno) + + def set_step(self): + """Stop after one line of code.""" + # Issue #13183: pdb skips frames after hitting a breakpoint and running + # step commands. + # Restore the trace function in the caller (that may not have been set + # for performance reasons) when returning from the current frame. 
+ if self.frame_returning: + caller_frame = self.frame_returning.f_back + if caller_frame and not caller_frame.f_trace: + caller_frame.f_trace = self.trace_dispatch + self._set_stopinfo(None, None) + + def set_next(self, frame): + """Stop on the next line in or below the given frame.""" + self._set_stopinfo(frame, None) + + def set_return(self, frame): + """Stop when returning from the given frame.""" + if frame.f_code.co_flags & CO_GENERATOR: + self._set_stopinfo(frame, None, -1) + else: + self._set_stopinfo(frame.f_back, frame) + + def set_trace(self, frame=None): + """Start debugging from `frame`. + + If frame is not specified, debugging starts from caller's frame. + """ + if frame is None: + frame = sys._getframe().f_back + self.reset() + while frame: + frame.f_trace = self.trace_dispatch + self.botframe = frame + frame = frame.f_back + self.set_step() + sys.settrace(self.trace_dispatch) + + def set_continue(self): + # Don't stop except at breakpoints or when finished + self._set_stopinfo(self.botframe, None, -1) + if not self.breaks: + # no breakpoints; run without debugger overhead + sys.settrace(None) + frame = sys._getframe().f_back + while frame and frame is not self.botframe: + del frame.f_trace + frame = frame.f_back + + def set_quit(self): + self.stopframe = self.botframe + self.returnframe = None + self.quitting = True + sys.settrace(None) + + # Derived classes and clients can call the following methods + # to manipulate breakpoints. These methods return an + # error message is something went wrong, None if all is well. + # Set_break prints out the breakpoint line and file:lineno. + # Call self.get_*break*() to see the breakpoints or better + # for bp in Breakpoint.bpbynumber: if bp: bp.bpprint(). + + def set_break(self, filename, lineno, temporary=False, cond=None, + funcname=None): + filename = self.canonic(filename) + import linecache # Import as late as possible + line = linecache.getline(filename, lineno) + if not line: + return 'Line %s:%d does not exist' % (filename, lineno) + list = self.breaks.setdefault(filename, []) + if lineno not in list: + list.append(lineno) + bp = Breakpoint(filename, lineno, temporary, cond, funcname) + + def _prune_breaks(self, filename, lineno): + if (filename, lineno) not in Breakpoint.bplist: + self.breaks[filename].remove(lineno) + if not self.breaks[filename]: + del self.breaks[filename] + + def clear_break(self, filename, lineno): + filename = self.canonic(filename) + if filename not in self.breaks: + return 'There are no breakpoints in %s' % filename + if lineno not in self.breaks[filename]: + return 'There is no breakpoint at %s:%d' % (filename, lineno) + # If there's only one bp in the list for that file,line + # pair, then remove the breaks entry + for bp in Breakpoint.bplist[filename, lineno][:]: + bp.deleteMe() + self._prune_breaks(filename, lineno) + + def clear_bpbynumber(self, arg): + try: + bp = self.get_bpbynumber(arg) + except ValueError as err: + return str(err) + bp.deleteMe() + self._prune_breaks(bp.file, bp.line) + + def clear_all_file_breaks(self, filename): + filename = self.canonic(filename) + if filename not in self.breaks: + return 'There are no breakpoints in %s' % filename + for line in self.breaks[filename]: + blist = Breakpoint.bplist[filename, line] + for bp in blist: + bp.deleteMe() + del self.breaks[filename] + + def clear_all_breaks(self): + if not self.breaks: + return 'There are no breakpoints' + for bp in Breakpoint.bpbynumber: + if bp: + bp.deleteMe() + self.breaks = {} + + def get_bpbynumber(self, arg): 
+ if not arg: + raise ValueError('Breakpoint number expected') + try: + number = int(arg) + except ValueError: + raise ValueError('Non-numeric breakpoint number %s' % arg) + try: + bp = Breakpoint.bpbynumber[number] + except IndexError: + raise ValueError('Breakpoint number %d out of range' % number) + if bp is None: + raise ValueError('Breakpoint %d already deleted' % number) + return bp + + def get_break(self, filename, lineno): + filename = self.canonic(filename) + return filename in self.breaks and \ + lineno in self.breaks[filename] + + def get_breaks(self, filename, lineno): + filename = self.canonic(filename) + return filename in self.breaks and \ + lineno in self.breaks[filename] and \ + Breakpoint.bplist[filename, lineno] or [] + + def get_file_breaks(self, filename): + filename = self.canonic(filename) + if filename in self.breaks: + return self.breaks[filename] + else: + return [] + + def get_all_breaks(self): + return self.breaks + + # Derived classes and clients can call the following method + # to get a data structure representing a stack trace. + + def get_stack(self, f, t): + stack = [] + if t and t.tb_frame is f: + t = t.tb_next + while f is not None: + stack.append((f, f.f_lineno)) + if f is self.botframe: + break + f = f.f_back + stack.reverse() + i = max(0, len(stack) - 1) + while t is not None: + stack.append((t.tb_frame, t.tb_lineno)) + t = t.tb_next + if f is None: + i = max(0, len(stack) - 1) + return stack, i + + def format_stack_entry(self, frame_lineno, lprefix=': '): + import linecache, reprlib + frame, lineno = frame_lineno + filename = self.canonic(frame.f_code.co_filename) + s = '%s(%r)' % (filename, lineno) + if frame.f_code.co_name: + s += frame.f_code.co_name + else: + s += "" + if '__args__' in frame.f_locals: + args = frame.f_locals['__args__'] + else: + args = None + if args: + s += reprlib.repr(args) + else: + s += '()' + if '__return__' in frame.f_locals: + rv = frame.f_locals['__return__'] + s += '->' + s += reprlib.repr(rv) + line = linecache.getline(filename, lineno, frame.f_globals) + if line: + s += lprefix + line.strip() + return s + + # The following methods can be called by clients to use + # a debugger to debug a statement or an expression. + # Both can be given as a string, or a code object. + + def run(self, cmd, globals=None, locals=None): + if globals is None: + import __main__ + globals = __main__.__dict__ + if locals is None: + locals = globals + self.reset() + if isinstance(cmd, str): + cmd = compile(cmd, "", "exec") + sys.settrace(self.trace_dispatch) + try: + exec(cmd, globals, locals) + except BdbQuit: + pass + finally: + self.quitting = True + sys.settrace(None) + + def runeval(self, expr, globals=None, locals=None): + if globals is None: + import __main__ + globals = __main__.__dict__ + if locals is None: + locals = globals + self.reset() + sys.settrace(self.trace_dispatch) + try: + return eval(expr, globals, locals) + except BdbQuit: + pass + finally: + self.quitting = True + sys.settrace(None) + + def runctx(self, cmd, globals, locals): + # B/W compatibility + self.run(cmd, globals, locals) + + # This method is more useful to debug a single function call. + + def runcall(self, func, *args, **kwds): + self.reset() + sys.settrace(self.trace_dispatch) + res = None + try: + res = func(*args, **kwds) + except BdbQuit: + pass + finally: + self.quitting = True + sys.settrace(None) + return res + + +def set_trace(): + Bdb().set_trace() + + +class Breakpoint: + """Breakpoint class. 
+ + Implements temporary breakpoints, ignore counts, disabling and + (re)-enabling, and conditionals. + + Breakpoints are indexed by number through bpbynumber and by + the file,line tuple using bplist. The former points to a + single instance of class Breakpoint. The latter points to a + list of such instances since there may be more than one + breakpoint per line. + + """ + + # XXX Keeping state in the class is a mistake -- this means + # you cannot have more than one active Bdb instance. + + next = 1 # Next bp to be assigned + bplist = {} # indexed by (file, lineno) tuple + bpbynumber = [None] # Each entry is None or an instance of Bpt + # index 0 is unused, except for marking an + # effective break .... see effective() + + def __init__(self, file, line, temporary=False, cond=None, funcname=None): + self.funcname = funcname + # Needed if funcname is not None. + self.func_first_executable_line = None + self.file = file # This better be in canonical form! + self.line = line + self.temporary = temporary + self.cond = cond + self.enabled = True + self.ignore = 0 + self.hits = 0 + self.number = Breakpoint.next + Breakpoint.next += 1 + # Build the two lists + self.bpbynumber.append(self) + if (file, line) in self.bplist: + self.bplist[file, line].append(self) + else: + self.bplist[file, line] = [self] + + def deleteMe(self): + index = (self.file, self.line) + self.bpbynumber[self.number] = None # No longer in list + self.bplist[index].remove(self) + if not self.bplist[index]: + # No more bp for this f:l combo + del self.bplist[index] + + def enable(self): + self.enabled = True + + def disable(self): + self.enabled = False + + def bpprint(self, out=None): + if out is None: + out = sys.stdout + print(self.bpformat(), file=out) + + def bpformat(self): + if self.temporary: + disp = 'del ' + else: + disp = 'keep ' + if self.enabled: + disp = disp + 'yes ' + else: + disp = disp + 'no ' + ret = '%-4dbreakpoint %s at %s:%d' % (self.number, disp, + self.file, self.line) + if self.cond: + ret += '\n\tstop only if %s' % (self.cond,) + if self.ignore: + ret += '\n\tignore next %d hits' % (self.ignore,) + if self.hits: + if self.hits > 1: + ss = 's' + else: + ss = '' + ret += '\n\tbreakpoint already hit %d time%s' % (self.hits, ss) + return ret + + def __str__(self): + return 'breakpoint %s at %s:%s' % (self.number, self.file, self.line) + +# -----------end of Breakpoint class---------- + +def checkfuncname(b, frame): + """Check whether we should break here because of `b.funcname`.""" + if not b.funcname: + # Breakpoint was set via line number. + if b.line != frame.f_lineno: + # Breakpoint was set at a line with a def statement and the function + # defined is called: don't break. + return False + return True + + # Breakpoint set via function name. + + if frame.f_code.co_name != b.funcname: + # It's not a function call, but rather execution of def statement. + return False + + # We are in the right frame. + if not b.func_first_executable_line: + # The function is entered for the 1st time. + b.func_first_executable_line = frame.f_lineno + + if b.func_first_executable_line != frame.f_lineno: + # But we are not at the first line number: don't break. + return False + return True + +# Determines if there is an effective (active) breakpoint at this +# line of code. Returns breakpoint number or 0 if none +def effective(file, line, frame): + """Determine which breakpoint for this file:line is to be acted upon. + + Called only if we know there is a bpt at this + location. 
Returns breakpoint that was triggered and a flag + that indicates if it is ok to delete a temporary bp. + + """ + possibles = Breakpoint.bplist[file, line] + for b in possibles: + if not b.enabled: + continue + if not checkfuncname(b, frame): + continue + # Count every hit when bp is enabled + b.hits += 1 + if not b.cond: + # If unconditional, and ignoring go on to next, else break + if b.ignore > 0: + b.ignore -= 1 + continue + else: + # breakpoint and marker that it's ok to delete if temporary + return (b, True) + else: + # Conditional bp. + # Ignore count applies only to those bpt hits where the + # condition evaluates to true. + try: + val = eval(b.cond, frame.f_globals, frame.f_locals) + if val: + if b.ignore > 0: + b.ignore -= 1 + # continue + else: + return (b, True) + # else: + # continue + except: + # if eval fails, most conservative thing is to stop on + # breakpoint regardless of ignore count. Don't delete + # temporary, as another hint to user. + return (b, False) + return (None, None) + + +# -------------------- testing -------------------- + +class Tdb(Bdb): + def user_call(self, frame, args): + name = frame.f_code.co_name + if not name: name = '???' + print('+++ call', name, args) + def user_line(self, frame): + import linecache + name = frame.f_code.co_name + if not name: name = '???' + fn = self.canonic(frame.f_code.co_filename) + line = linecache.getline(fn, frame.f_lineno, frame.f_globals) + print('+++', fn, frame.f_lineno, name, ':', line.strip()) + def user_return(self, frame, retval): + print('+++ return', retval) + def user_exception(self, frame, exc_stuff): + print('+++ exception', exc_stuff) + self.set_continue() + +def foo(n): + print('foo(', n, ')') + x = bar(n*10) + print('bar returned', x) + +def bar(a): + print('bar(', a, ')') + return a/2 + +def test(): + t = Tdb() + t.run('import bdb; bdb.foo(10)') diff --git a/lib/assets/Lib/binascii.py b/lib/assets/Lib/binascii.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/binascii.py @@ -0,0 +1,727 @@ +"""A pure Python implementation of binascii. + +Rather slow and buggy in corner cases. +PyPy provides an RPython version too. 
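+
+Illustrative example (note that this pure Python version returns str
+where CPython's binascii returns bytes):
+
+    >>> b2a_hex('abc')
+    '616263'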
+""" + +# borrowed from https://bitbucket.org/pypy/pypy/src/f2bf94943a41/lib_pypy/binascii.py + +class Error(Exception): + pass + +class Done(Exception): + pass + +class Incomplete(Exception): + pass + +def a2b_uu(s): + if not s: + return '' + + length = (ord(s[0]) - 0x20) % 64 + + def quadruplets_gen(s): + while s: + try: + yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) + except IndexError: + s += ' ' + yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3]) + return + s = s[4:] + + try: + result = [''.join( + [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)), + chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)), + chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f)) + ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())] + except ValueError: + raise Error('Illegal char') + result = ''.join(result) + trailingdata = result[length:] + if trailingdata.strip('\x00'): + raise Error('Trailing garbage') + result = result[:length] + if len(result) < length: + result += ((length - len(result)) * '\x00') + return bytes(result, __BRYTHON__.charset) + + +def b2a_uu(s): + length = len(s) + if length > 45: + raise Error('At most 45 bytes at once') + + def triples_gen(s): + while s: + try: + yield ord(s[0]), ord(s[1]), ord(s[2]) + except IndexError: + s += '\0\0' + yield ord(s[0]), ord(s[1]), ord(s[2]) + return + s = s[3:] + + result = [''.join( + [chr(0x20 + (( A >> 2 ) & 0x3F)), + chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)), + chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)), + chr(0x20 + (( C ) & 0x3F))]) + for A, B, C in triples_gen(s)] + return chr(ord(' ') + (length & 0o77)) + ''.join(result) + '\n' + + +table_a2b_base64 = { + 'A': 0, + 'B': 1, + 'C': 2, + 'D': 3, + 'E': 4, + 'F': 5, + 'G': 6, + 'H': 7, + 'I': 8, + 'J': 9, + 'K': 10, + 'L': 11, + 'M': 12, + 'N': 13, + 'O': 14, + 'P': 15, + 'Q': 16, + 'R': 17, + 'S': 18, + 'T': 19, + 'U': 20, + 'V': 21, + 'W': 22, + 'X': 23, + 'Y': 24, + 'Z': 25, + 'a': 26, + 'b': 27, + 'c': 28, + 'd': 29, + 'e': 30, + 'f': 31, + 'g': 32, + 'h': 33, + 'i': 34, + 'j': 35, + 'k': 36, + 'l': 37, + 'm': 38, + 'n': 39, + 'o': 40, + 'p': 41, + 'q': 42, + 'r': 43, + 's': 44, + 't': 45, + 'u': 46, + 'v': 47, + 'w': 48, + 'x': 49, + 'y': 50, + 'z': 51, + '0': 52, + '1': 53, + '2': 54, + '3': 55, + '4': 56, + '5': 57, + '6': 58, + '7': 59, + '8': 60, + '9': 61, + '+': 62, + '/': 63, + '=': 0, +} + + +def a2b_base64(s): + if not isinstance(s, (str, bytes)): + raise TypeError("expected string, got %r" % (s,)) + s = s.rstrip() + # clean out all invalid characters, this also strips the final '=' padding + # check for correct padding + + def next_valid_char(s, pos): + for i in range(pos + 1, len(s)): + c = s[i] + if c < 0x7f: + try: + table_a2b_base64[chr(c)] + return chr(c) + except KeyError: + pass + return None + + quad_pos = 0 + leftbits = 0 + leftchar = 0 + res = [] + for i, c in enumerate(s): + if isinstance(c, int): + c = chr(c) + if c > '\x7f' or c == '\n' or c == '\r' or c == ' ': + continue + if c == '=': + if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='): + continue + else: + leftbits = 0 + break + try: + next_c = table_a2b_base64[c] + except KeyError: + continue + quad_pos = (quad_pos + 1) & 0x03 + leftchar = (leftchar << 6) | next_c + leftbits += 6 + if leftbits >= 8: + leftbits -= 8 + res.append((leftchar >> leftbits & 0xff)) + leftchar &= ((1 << leftbits) - 1) + if leftbits != 0: + raise Error('Incorrect padding') + + return bytes(''.join([chr(i) for i in res]),__BRYTHON__.charset) + +table_b2a_base64 = 
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"\ + "0123456789+/" + +def b2a_base64(s): + length = len(s) + final_length = length % 3 + + def triples_gen(s): + while s: + try: + yield s[0], s[1], s[2] + except IndexError: + s += b'\0\0' + yield s[0], s[1], s[2] + return + s = s[3:] + + a = triples_gen(s[ :length - final_length]) + + result = [''.join( + [table_b2a_base64[( A >> 2 ) & 0x3F], + table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F], + table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F], + table_b2a_base64[( C ) & 0x3F]]) + for A, B, C in a] + + final = s[length - final_length:] + if final_length == 0: + snippet = '' + elif final_length == 1: + a = final[0] + snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \ + table_b2a_base64[(a << 4 ) & 0x3F] + '==' + else: + a = final[0] + b = final[1] + snippet = table_b2a_base64[(a >> 2) & 0x3F] + \ + table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \ + table_b2a_base64[(b << 2) & 0x3F] + '=' + return bytes(''.join(result) + snippet + '\n', __BRYTHON__.charset) + +def a2b_qp(s, header=False): + inp = 0 + odata = [] + while inp < len(s): + if s[inp] == '=': + inp += 1 + if inp >= len(s): + break + # Soft line breaks + if (s[inp] == '\n') or (s[inp] == '\r'): + if s[inp] != '\n': + while inp < len(s) and s[inp] != '\n': + inp += 1 + if inp < len(s): + inp += 1 + elif s[inp] == '=': + # broken case from broken python qp + odata.append('=') + inp += 1 + elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers: + ch = chr(int(s[inp:inp+2], 16)) + inp += 2 + odata.append(ch) + else: + odata.append('=') + elif header and s[inp] == '_': + odata.append(' ') + inp += 1 + else: + odata.append(s[inp]) + inp += 1 + return bytes(''.join(odata), __BRYTHON__.charset) + +def b2a_qp(data, quotetabs=False, istext=True, header=False): + """quotetabs=True means that tab and space characters are always + quoted. + istext=False means that \r and \n are treated as regular characters + header=True encodes space characters with '_' and requires + real '_' characters to be quoted. + """ + MAXLINESIZE = 76 + + # See if this string is using CRLF line ends + lf = data.find('\n') + crlf = lf > 0 and data[lf-1] == '\r' + + inp = 0 + linelen = 0 + odata = [] + while inp < len(data): + c = data[inp] + if (c > '~' or + c == '=' or + (header and c == '_') or + (c == '.' 
and linelen == 0 and (inp+1 == len(data) or + data[inp+1] == '\n' or + data[inp+1] == '\r')) or + (not istext and (c == '\r' or c == '\n')) or + ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or + (c <= ' ' and c != '\r' and c != '\n' and + (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))): + linelen += 3 + if linelen >= MAXLINESIZE: + odata.append('=') + if crlf: odata.append('\r') + odata.append('\n') + linelen = 3 + odata.append('=' + two_hex_digits(ord(c))) + inp += 1 + else: + if (istext and + (c == '\n' or (inp+1 < len(data) and c == '\r' and + data[inp+1] == '\n'))): + linelen = 0 + # Protect against whitespace on end of line + if (len(odata) > 0 and + (odata[-1] == ' ' or odata[-1] == '\t')): + ch = ord(odata[-1]) + odata[-1] = '=' + odata.append(two_hex_digits(ch)) + + if crlf: odata.append('\r') + odata.append('\n') + if c == '\r': + inp += 2 + else: + inp += 1 + else: + if (inp + 1 < len(data) and + data[inp+1] != '\n' and + (linelen + 1) >= MAXLINESIZE): + odata.append('=') + if crlf: odata.append('\r') + odata.append('\n') + linelen = 0 + + linelen += 1 + if header and c == ' ': + c = '_' + odata.append(c) + inp += 1 + return ''.join(odata) + +hex_numbers = '0123456789ABCDEF' +def hex(n): + if n == 0: + return '0' + + if n < 0: + n = -n + sign = '-' + else: + sign = '' + arr = [] + + def hex_gen(n): + """ Yield a nibble at a time. """ + while n: + yield n % 0x10 + n = n / 0x10 + + for nibble in hex_gen(n): + arr = [hex_numbers[nibble]] + arr + return sign + ''.join(arr) + +def two_hex_digits(n): + return hex_numbers[n / 0x10] + hex_numbers[n % 0x10] + + +def strhex_to_int(s): + i = 0 + for c in s: + i = i * 0x10 + hex_numbers.index(c) + return i + +hqx_encoding = '!"#$%&\'()*+,-012345689@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr' + +DONE = 0x7f +SKIP = 0x7e +FAIL = 0x7d + +table_a2b_hqx = [ + #^@ ^A ^B ^C ^D ^E ^F ^G + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + #\b \t \n ^K ^L \r ^N ^O + FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL, + #^P ^Q ^R ^S ^T ^U ^V ^W + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + #^X ^Y ^Z ^[ ^\ ^] ^^ ^_ + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + # ! " # $ % & ' + FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + #( ) * + , - . / + 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL, + #0 1 2 3 4 5 6 7 + 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL, + #8 9 : ; < = > ? + 0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL, + #@ A B C D E F G + 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, + #H I J K L M N O + 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL, + #P Q R S T U V W + 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL, + #X Y Z [ \ ] ^ _ + 0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL, + #` a b c d e f g + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL, + #h i j k l m n o + 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL, + #p q r s t u v w + 0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL, + #x y z { | } ~ ^? 
+ FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, + FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, +] + +def a2b_hqx(s): + result = [] + + def quadruples_gen(s): + t = [] + for c in s: + res = table_a2b_hqx[ord(c)] + if res == SKIP: + continue + elif res == FAIL: + raise Error('Illegal character') + elif res == DONE: + yield t + raise Done + else: + t.append(res) + if len(t) == 4: + yield t + t = [] + yield t + + done = 0 + try: + for snippet in quadruples_gen(s): + length = len(snippet) + if length == 4: + result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) + result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) + result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3]))) + elif length == 3: + result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) + result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) + elif length == 2: + result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) + except Done: + done = 1 + except Error: + raise + return (''.join(result), done) + # should this return a bytes object? 
+ #return (bytes(''.join(result), __BRYTHON__.charset), done) + +def b2a_hqx(s): + result =[] + + def triples_gen(s): + while s: + try: + yield ord(s[0]), ord(s[1]), ord(s[2]) + except IndexError: + yield tuple([ord(c) for c in s]) + s = s[3:] + + for snippet in triples_gen(s): + length = len(snippet) + if length == 3: + result.append( + hqx_encoding[(snippet[0] & 0xfc) >> 2]) + result.append(hqx_encoding[ + ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) + result.append(hqx_encoding[ + (snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)]) + result.append(hqx_encoding[snippet[2] & 0x3f]) + elif length == 2: + result.append( + hqx_encoding[(snippet[0] & 0xfc) >> 2]) + result.append(hqx_encoding[ + ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)]) + result.append(hqx_encoding[ + (snippet[1] & 0x0f) << 2]) + elif length == 1: + result.append( + hqx_encoding[(snippet[0] & 0xfc) >> 2]) + result.append(hqx_encoding[ + ((snippet[0] & 0x03) << 4)]) + return ''.join(result) + +crctab_hqx = [ + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, +] + +def crc_hqx(s, crc): + for c in s: + crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)] + + return crc + +def rlecode_hqx(s): + """ + Run length encoding for binhex4. + The CPython implementation does not do run length encoding + of \x90 characters. This implementation does. 
+ """ + if not s: + return '' + result = [] + prev = s[0] + count = 1 + # Add a dummy character to get the loop to go one extra round. + # The dummy must be different from the last character of s. + # In the same step we remove the first character, which has + # already been stored in prev. + if s[-1] == '!': + s = s[1:] + '?' + else: + s = s[1:] + '!' + + for c in s: + if c == prev and count < 255: + count += 1 + else: + if count == 1: + if prev != '\x90': + result.append(prev) + else: + result.extend(['\x90', '\x00']) + elif count < 4: + if prev != '\x90': + result.extend([prev] * count) + else: + result.extend(['\x90', '\x00'] * count) + else: + if prev != '\x90': + result.extend([prev, '\x90', chr(count)]) + else: + result.extend(['\x90', '\x00', '\x90', chr(count)]) + count = 1 + prev = c + + return ''.join(result) + +def rledecode_hqx(s): + s = s.split('\x90') + result = [s[0]] + prev = s[0] + for snippet in s[1:]: + count = ord(snippet[0]) + if count > 0: + result.append(prev[-1] * (count-1)) + prev = snippet + else: + result.append('\x90') + prev = '\x90' + result.append(snippet[1:]) + + return ''.join(result) + +crc_32_tab = [ + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, + 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, + 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, + 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, + 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, + 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, + 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, + 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, + 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, + 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, + 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, + 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, + 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, + 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, + 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, + 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, + 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, + 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, + 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, + 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, + 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, + 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010, + 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, + 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, + 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, + 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, + 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344, + 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, + 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, + 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, + 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, + 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c, + 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, + 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, + 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, + 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, + 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 
0xec63f226, 0x756aa39c, + 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, + 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, + 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, + 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, + 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, + 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278, + 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, + 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66, + 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, + 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, + 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, + 0x2d02ef8d +] + +def crc32(s, crc=0): + result = 0 + crc = ~int(crc) & 0xffffffff + #crc = ~long(crc) & 0xffffffffL + for c in s: + crc = crc_32_tab[(crc ^ int(ord(c))) & 0xff] ^ (crc >> 8) + #crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8) + #/* Note: (crc >> 8) MUST zero fill on left + + result = crc ^ 0xffffffff + + if result > 2**31: + result = ((result + 2**31) % 2**32) - 2**31 + + return result + +def b2a_hex(s): + result = [] + for char in s: + c = (ord(char) >> 4) & 0xf + if c > 9: + c = c + ord('a') - 10 + else: + c = c + ord('0') + result.append(chr(c)) + c = ord(char) & 0xf + if c > 9: + c = c + ord('a') - 10 + else: + c = c + ord('0') + result.append(chr(c)) + return ''.join(result) + +hexlify = b2a_hex + +table_hex = [ + -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, + -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, + -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1, + -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, + -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, + -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1, + -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1 +] + + +def a2b_hex(t): + result = [] + + def pairs_gen(s): + while s: + try: + yield table_hex[ord(s[0])], table_hex[ord(s[1])] + except IndexError: + if len(s): + raise TypeError('Odd-length string') + return + s = s[2:] + + for a, b in pairs_gen(t): + if a < 0 or b < 0: + raise TypeError('Non-hexadecimal digit found') + result.append(chr((a << 4) + b)) + return bytes(''.join(result), __BRYTHON__.charset) + + +unhexlify = a2b_hex diff --git a/lib/assets/Lib/bisect.py b/lib/assets/Lib/bisect.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/bisect.py @@ -0,0 +1,92 @@ +"""Bisection algorithms.""" + +def insort_right(a, x, lo=0, hi=None): + """Insert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the right of the rightmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if x < a[mid]: hi = mid + else: lo = mid+1 + a.insert(lo, x) + +insort = insort_right # backward compatibility + +def bisect_right(a, x, lo=0, hi=None): + """Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e <= x, and all e in + a[i:] have e > x. So if x already appears in the list, a.insert(x) will + insert just after the rightmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. 
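+
+    A short illustration of the boundary behaviour:
+
+        >>> bisect_right([1, 2, 2, 4], 2)
+        3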
+ """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if x < a[mid]: hi = mid + else: lo = mid+1 + return lo + +bisect = bisect_right # backward compatibility + +def insort_left(a, x, lo=0, hi=None): + """Insert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the left of the leftmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if a[mid] < x: lo = mid+1 + else: hi = mid + a.insert(lo, x) + + +def bisect_left(a, x, lo=0, hi=None): + """Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e < x, and all e in + a[i:] have e >= x. So if x already appears in the list, a.insert(x) will + insert just before the leftmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if a[mid] < x: lo = mid+1 + else: hi = mid + return lo + +# Overwrite above definitions with a fast C implementation +try: + from _bisect import * +except ImportError: + pass diff --git a/lib/assets/Lib/browser/__init__.py b/lib/assets/Lib/browser/__init__.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/browser/__init__.py @@ -0,0 +1,9 @@ +import javascript + +from _browser import * + +from .local_storage import LocalStorage +from .session_storage import SessionStorage +from .object_storage import ObjectStorage + +WebSocket = javascript.JSConstructor(window.WebSocket) \ No newline at end of file diff --git a/lib/assets/Lib/browser/ajax.py b/lib/assets/Lib/browser/ajax.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/browser/ajax.py @@ -0,0 +1,1 @@ +from _ajax import * diff --git a/lib/assets/Lib/browser/html.py b/lib/assets/Lib/browser/html.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/browser/html.py @@ -0,0 +1,1 @@ +from _html import * \ No newline at end of file diff --git a/lib/assets/Lib/browser/indexed_db.py b/lib/assets/Lib/browser/indexed_db.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/browser/indexed_db.py @@ -0,0 +1,106 @@ +class EventListener: + def __init__(self, events=[]): + self._events=events + + def append(self, event): + self._events.append(event) + + def fire(self, e): + for _event in self._events: + _event(e) + +class IndexedDB: + def __init__(self): + if not __BRYTHON__.has_indexedDB: + raise NotImplementedError("Your browser doesn't support indexedDB") + return + + self._indexedDB=__BRYTHON__.indexedDB() + self._db=None + self._version=None + + def _onsuccess(self, event): + self._db=event.target.result + + def open(self, name, onsuccess, version=1.0, onerror=None, + onupgradeneeded=None): + self._version=version + _result=self._indexedDB.open(name, version) + _success=EventListener([self._onsuccess, onsuccess]) + _result.onsuccess=_success.fire + _result.onupgradeneeded=onupgradeneeded + + #if onerror is None: + def onerror(e): + print("onerror: %s:%s" % (e.type, e.target.result)) + + def onblocked(e): + print("blocked: %s:%s" % (e.type, e.result)) + + _result.onerror=onerror + _result.onblocked=onblocked + + def transaction(self, entities, mode='read'): + return 
Transaction(self._db.transaction(entities, mode)) + +class Transaction: + + def __init__(self, transaction): + self._transaction=transaction + + def objectStore(self, name): + return ObjectStore(self._transaction.objectStore(name)) + +class ObjectStore: + + def __init__(self, objectStore): + self._objectStore=objectStore + self._data=[] + + def clear(self, onsuccess=None, onerror=None): + _result=self._objectStore.clear() + + if onsuccess is not None: + _result.onsuccess=onsuccess + + if onerror is not None: + _result.onerror=onerror + + def _helper(self, func, object, onsuccess=None, onerror=None): + _result=func(object) + + if onsuccess is not None: + _result.onsuccess=onsuccess + + if onerror is not None: + _result.onerror=onerror + + def put(self, obj, key=None, onsuccess=None, onerror=None): + _r = self._objectStore.put(obj, key) + _r.onsuccess = onsuccess + _r.onerror = onerror + + def add(self, obj, key, onsuccess=None, onerror=None): + _r = self._objectStore.add(obj, key) + _r.onsuccess = onsuccess + _r.onerror = onerror + #self._helper(self._objectStore.add, object, onsuccess, onerror) + + def delete(self, index, onsuccess=None, onerror=None): + self._helper(self._objectStore.delete, index, onsuccess, onerror) + + def query(self, *args): + self._data=[] + def onsuccess(event): + cursor=event.target.result + if cursor is not None: + self._data.append(cursor.value) + getattr(cursor,"continue")() # cursor.continue() is illegal + + self._objectStore.openCursor(args).onsuccess=onsuccess + + def fetchall(self): + yield self._data + + def get(self, key, onsuccess=None, onerror=None): + self._helper(self._objectStore.get, key, onsuccess, onerror) diff --git a/lib/assets/Lib/browser/local_storage.py b/lib/assets/Lib/browser/local_storage.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/browser/local_storage.py @@ -0,0 +1,92 @@ +# local storage in browser +import sys +from javascript import JSObject +from browser import window, console + +has_local_storage = hasattr(window, 'localStorage') + +class __UnProvided(): + pass + +class LocalStorage(): + storage_type = "local_storage" + + def __init__(self): + if not has_local_storage: + raise EnvironmentError("LocalStorage not available") + self.store = JSObject(window.localStorage) + + def __delitem__(self, key): + if (not isinstance(key, str)): + raise TypeError("key must be string") + if key not in self: + raise KeyError(key) + self.store.removeItem(key) + + def __getitem__(self, key): + if (not isinstance(key, str)): + raise TypeError("key must be string") + res = __BRYTHON__.JSObject(self.store.getItem(key)) + if res is not None: + return res + raise KeyError(key) + + def __setitem__(self, key, value): + if (not isinstance(key, str)): + raise TypeError("key must be string") + if (not isinstance(value, str)): + raise TypeError("value must be string") + self.store.setItem(key, value) + + # implement "in" functionality + def __contains__(self, key): + if (not isinstance(key, str)): + raise TypeError("key must be string") + res = __BRYTHON__.JSObject(self.store.getItem(key)) + if res is None: + return False + return True + + def __iter__(self): + keys = self.keys() + return keys.__iter__() + + def get(self, key, default=None): + if (not isinstance(key, str)): + raise TypeError("key must be string") + return __BRYTHON__.JSObject(self.store.getItem(key)) or default + + def pop(self, key, default=__UnProvided()): + if (not isinstance(key, str)): + raise TypeError("key must be string") + if type(default) is __UnProvided: + ret = 
self.get(key) + del self[key] # will throw key error if doesn't exist + return ret + else: + if key in self: + ret = self.get(key) + del self[key] + return ret + else: + return default + + # while a real dict provides a view, returning a generator would less helpful than simply returning a list + # and creating a custom iterator is overkill and would likely result in slower performance + def keys(self): + return [__BRYTHON__.JSObject(self.store.key(i)) for i in range(self.store.length)] + + def values(self): + return [__BRYTHON__.JSObject(self.__getitem__(k)) for k in self.keys()] + + def items(self): + return list(zip(self.keys(), self.values())) + + def clear(self): + self.store.clear() + + def __len__(self): + return self.store.length + +if has_local_storage: + storage = LocalStorage() diff --git a/lib/assets/Lib/browser/markdown.py b/lib/assets/Lib/browser/markdown.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/browser/markdown.py @@ -0,0 +1,423 @@ +# -*- coding: utf-8 -*- + +try: + import _jsre as re +except: + import re + +import random +import time + +letters = 'abcdefghijklmnopqrstuvwxyz' +letters += letters.upper()+'0123456789' + +class URL: + def __init__(self,src): + elts = src.split(maxsplit=1) + self.href = elts[0] + self.alt = '' + if len(elts)==2: + alt = elts[1] + if alt[0]=='"' and alt[-1]=='"':self.alt=alt[1:-1] + elif alt[0]=="'" and alt[-1]=="'":self.alt=alt[1:-1] + elif alt[0]=="(" and alt[-1]==")":self.alt=alt[1:-1] + +class CodeBlock: + def __init__(self,line): + self.lines = [line] + if line.startswith("```") and len(line)>3: + self.info = line[3:] + else: + self.info = None + + def to_html(self): + if self.lines[0].startswith("`"): + self.lines.pop(0) + res = escape('\n'.join(self.lines)) + res = unmark(res) + _class = self.info or "marked" + res = '
<pre class="%s">%s</pre>
\n' %(_class, res) + return res,[] + +class HtmlBlock: + + def __init__(self, src): + self.src = src + + def to_html(self): + return self.src + +class Marked: + def __init__(self, line=''): + self.line = line + self.children = [] + + def to_html(self): + return apply_markdown(self.line) + +# get references +refs = {} +ref_pattern = r"^\[(.*)\]:\s+(.*)" + +def mark(src): + + global refs + t0 = time.time() + refs = {} + # split source in sections + # sections can be : + # - a block-level HTML element (markdown syntax will not be processed) + # - a script + # - a span-level HTML tag (markdown syntax will be processed) + # - a code block + + # normalise line feeds + src = src.replace('\r\n','\n') + + # lines followed by dashes + src = re.sub(r'(.*?)\n=+\n', '\n# \\1\n', src) + src = re.sub(r'(.*?)\n-+\n', '\n## \\1\n', src) + + lines = src.split('\n')+[''] + + i = bq = 0 + ul = ol = 0 + + while i in a blockquote + if lines[i].startswith('>'): + nb = 1 + while nb': + nb += 1 + lines[i] = lines[i][nb:] + if nb>bq: + lines.insert(i,'
<blockquote>'*(nb-bq)) + i += 1 + bq = nb + elif nb<bq: + lines.insert(i,'</blockquote>'*(bq-nb)) + i += 1 + bq = nb + elif bq>0: + lines.insert(i,'</blockquote>
'*bq) + i += 1 + bq = 0 + + # unordered lists + if lines[i].strip() and lines[i].lstrip()[0] in '-+*' \ + and len(lines[i].lstrip())>1 \ + and lines[i].lstrip()[1]==' ' \ + and (i==0 or ul or not lines[i-1].strip()): + # line indentation indicates nesting level + nb = 1+len(lines[i])-len(lines[i].lstrip()) + lines[i] = '
  • '+lines[i][nb:] + if nb>ul: + lines.insert(i,'
      '*(nb-ul)) + i += 1 + elif nb'*(ul-nb)) + i += 1 + ul = nb + elif ul and not lines[i].strip(): + if i1 and nline[1]==' ': + pass + else: + lines.insert(i,'
    '*ul) + i += 1 + ul = 0 + + # ordered lists + mo = re.search(r'^(\d+\.)',lines[i]) + if mo: + if not ol: + lines.insert(i,'
      ') + i += 1 + lines[i] = '
    1. '+lines[i][len(mo.groups()[0]):] + ol = 1 + elif ol and not lines[i].strip() and i') + i += 1 + ol = 0 + + i += 1 + + if ul: + lines.append(''*ul) + if ol: + lines.append('
    '*ol) + if bq: + lines.append(''*bq) + + t1 = time.time() + #print('part 1', t1-t0) + sections = [] + scripts = [] + section = Marked() + + i = 0 + while i'): + scripts.append('\n'.join(lines[i+1:j])) + for k in range(i,j+1): + lines[k] = '' + break + j += 1 + i = j + continue + + # atext header + elif line.startswith('#'): + level = 1 + line = lines[i] + while level','>') + czone = czone.replace('_','_') + czone = czone.replace('*','*') + return czone + +def s_escape(mo): + # used in re.sub + czone = mo.string[mo.start():mo.end()] + return escape(czone) + +def unmark(code_zone): + # convert _ to _ inside inline code + code_zone = code_zone.replace('_','_') + return code_zone + +def s_unmark(mo): + # convert _ to _ inside inline code + code_zone = mo.string[mo.start():mo.end()] + code_zone = code_zone.replace('_','_') + return code_zone + +def apply_markdown(src): + + scripts = [] + key = None + + t0 = time.time() + i = 0 + while i-1 and src[start_a:end_a].find('\n')==-1: + link = src[start_a:end_a] + rest = src[end_a+1:].lstrip() + if rest and rest[0]=='(': + j = 0 + while True: + end_href = rest.find(')',j) + if end_href == -1: + break + if rest[end_href-1]=='\\': + j = end_href+1 + else: + break + if end_href>-1 and rest[:end_href].find('\n')==-1: + tag = ''+link+'' + src = src[:start_a-1]+tag+rest[end_href+1:] + i = start_a+len(tag) + elif rest and rest[0]=='[': + j = 0 + while True: + end_key = rest.find(']',j) + if end_key == -1: + break + if rest[end_key-1]=='\\': + j = end_key+1 + else: + break + if end_key>-1 and rest[:end_key].find('\n')==-1: + if not key: + key = link + if key.lower() not in refs: + raise KeyError('unknown reference %s' %key) + url = refs[key.lower()] + tag = ''+link+'' + src = src[:start_a-1]+tag+rest[end_key+1:] + i = start_a+len(tag) + + i += 1 + + t1 = time.time() + #print('apply markdown 1', t1-t0) + # before applying the markup with _ and *, isolate HTML tags because + # they can contain these characters + + # We replace them temporarily by a random string + rstr = ''.join(random.choice(letters) for i in range(16)) + + i = 0 + state = None + start = -1 + data = '' + tags = [] + while i' and state is None: + tags.append(src[i:j+1]) + src = src[:i]+rstr+src[j+1:] + i += len(rstr) + break + elif state=='"' or state=="'": + data += src[j] + elif src[j]=='\n': + # if a sign < is not followed by > in the same ligne, it + # is the sign "lesser than" + src = src[:i]+'<'+src[i+1:] + j=i+4 + break + j += 1 + elif src[i]=='`' and i>0 and src[i-1]!='\\': + # ignore the content of inline code + j = i+1 + while j", "&" and "_" in inline code + code_pattern = r'\`(.*?)\`' + src = re.sub(code_pattern,s_escape,src) + + # replace escaped ` _ * by HTML characters + src = src.replace(r'\\`','`') + src = src.replace(r'\_','_') + src = src.replace(r'\*','*') + + # emphasis + strong_patterns = [('STRONG',r'\*\*(.*?)\*\*'),('B',r'__(.*?)__')] + for tag,strong_pattern in strong_patterns: + src = re.sub(strong_pattern,r'<%s>\1' %(tag,tag),src) + + em_patterns = [('EM',r'\*(.*?)\*'),('I',r'\_(.*?)\_')] + for tag,em_pattern in em_patterns: + src = re.sub(em_pattern,r'<%s>\1' %(tag,tag),src) + + # inline code + code_pattern = r'\`(.*?)\`' + src = re.sub(code_pattern,r'\1',src) + + # restore tags + while True: + pos = src.rfind(rstr) + if pos==-1: + break + repl = tags.pop() + src = src[:pos]+repl+src[pos+len(rstr):] + + src = '

    '+src+'

    ' + + t3 = time.time() + #print('apply markdown 3', t3-t2) + + return src,scripts diff --git a/lib/assets/Lib/browser/object_storage.py b/lib/assets/Lib/browser/object_storage.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/browser/object_storage.py @@ -0,0 +1,51 @@ +import pickle + +class __UnProvided(): + pass + + +class ObjectStorage(): + + def __init__(self, storage): + self.storage = storage + + def __delitem__(self, key): + del self.storage[pickle.dumps(key)] + + def __getitem__(self, key): + return pickle.loads(self.storage[pickle.dumps(key)]) + + def __setitem__(self, key, value): + self.storage[pickle.dumps(key)] = pickle.dumps(value) + + def __contains__(self, key): + return pickle.dumps(key) in self.storage + + def get(self, key, default=None): + if pickle.dumps(key) in self.storage: + return self.storage[pickle.dumps(key)] + return default + + def pop(self, key, default=__UnProvided()): + if type(default) is __UnProvided or pickle.dumps(key) in self.storage: + return pickle.loads(self.storage.pop(pickle.dumps(key))) + return default + + def __iter__(self): + keys = self.keys() + return keys.__iter__() + + def keys(self): + return [pickle.loads(key) for key in self.storage.keys()] + + def values(self): + return [pickle.loads(val) for val in self.storage.values()] + + def items(self): + return list(zip(self.keys(), self.values())) + + def clear(self): + self.storage.clear() + + def __len__(self): + return len(self.storage) diff --git a/lib/assets/Lib/browser/session_storage.py b/lib/assets/Lib/browser/session_storage.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/browser/session_storage.py @@ -0,0 +1,19 @@ +# session storage in browser +import sys +from javascript import JSObject +from browser import window +from .local_storage import LocalStorage + +has_session_storage = hasattr(window, 'sessionStorage') + +class SessionStorage(LocalStorage): + + storage_type = "session_storage" + + def __init__(self): + if not has_session_storage: + raise EnvironmentError("SessionStorage not available") + self.store = window.sessionStorage + +if has_session_storage: + storage = SessionStorage() diff --git a/lib/assets/Lib/browser/svg.py b/lib/assets/Lib/browser/svg.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/browser/svg.py @@ -0,0 +1,1 @@ +from _svg import * \ No newline at end of file diff --git a/lib/assets/Lib/browser/timer.py b/lib/assets/Lib/browser/timer.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/browser/timer.py @@ -0,0 +1,34 @@ +from browser import window + +def wrap(func): + # Transforms a function f into another function that prints a + # traceback in case of exception + def f(*args, **kw): + try: + return func(*args, **kw) + except Exception as exc: + msg = '{0.info}\n{0.__name__}: {0.args[0]}'.format(exc) + import sys + sys.stderr.write(msg) + return f + +clear_interval = window.clearInterval + +clear_timeout = window.clearTimeout + +def set_interval(func,interval): + return window.setInterval(wrap(func),interval) + +def set_timeout(func,interval): + return int(window.setTimeout(wrap(func),interval)) + +def request_animation_frame(func): + return int(window.requestAnimationFrame(func)) + +def cancel_animation_frame(int_id): + window.cancelAnimationFrame(int_id) + +def set_loop_timeout(x): + # set a variable used to stop loops that last more than x seconds + assert isinstance(x, int) + __BRYTHON__.loop_timeout = x \ No newline at end of file diff --git a/lib/assets/Lib/browser/websocket.py b/lib/assets/Lib/browser/websocket.py 
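# --- Annotation (illustrative sketch, not part of the patch): minimal usage of the
# --- timer helpers defined in browser/timer.py above, assuming Brython page code.
# --- The callback name `tick` and the millisecond values are hypothetical.
# from browser import timer
#
# def tick():
#     print("tick")
#
# tid = timer.set_interval(tick, 1000)                        # run tick() every 1000 ms
# timer.set_timeout(lambda: timer.clear_interval(tid), 5000)  # cancel the interval after 5 s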
new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/browser/websocket.py @@ -0,0 +1,10 @@ +from browser import window +import javascript + +if hasattr(window, 'WebSocket'): + supported = True + WebSocket = javascript.JSConstructor(window.WebSocket) +else: + supported = False + def WebSocket(*args,**kw): + raise NotImplementedError \ No newline at end of file diff --git a/lib/assets/Lib/calendar.py b/lib/assets/Lib/calendar.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/calendar.py @@ -0,0 +1,703 @@ +"""Calendar printing functions + +Note when comparing these calendars to the ones printed by cal(1): By +default, these calendars have Monday as the first day of the week, and +Sunday as the last (the European convention). Use setfirstweekday() to +set the first day of the week (0=Monday, 6=Sunday).""" + +import sys +import datetime +import locale as _locale + +__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday", + "firstweekday", "isleap", "leapdays", "weekday", "monthrange", + "monthcalendar", "prmonth", "month", "prcal", "calendar", + "timegm", "month_name", "month_abbr", "day_name", "day_abbr"] + +# Exception raised for bad input (with string parameter for details) +error = ValueError + +# Exceptions raised for bad input +class IllegalMonthError(ValueError): + def __init__(self, month): + self.month = month + def __str__(self): + return "bad month number %r; must be 1-12" % self.month + + +class IllegalWeekdayError(ValueError): + def __init__(self, weekday): + self.weekday = weekday + def __str__(self): + return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday + + +# Constants for months referenced later +January = 1 +February = 2 + +# Number of days per month (except for February in leap years) +mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + +# This module used to have hard-coded lists of day and month names, as +# English strings. The classes following emulate a read-only version of +# that, but supply localized names. Note that the values are computed +# fresh on each call, in case the user changes locale between calls. + +class _localized_month: + + _months = [datetime.date(2001, i+1, 1).strftime for i in range(12)] + _months.insert(0, lambda x: "") + + def __init__(self, format): + self.format = format + + def __getitem__(self, i): + funcs = self._months[i] + if isinstance(i, slice): + return [f(self.format) for f in funcs] + else: + return funcs(self.format) + + def __len__(self): + return 13 + + +class _localized_day: + + # January 1, 2001, was a Monday. + _days = [datetime.date(2001, 1, i+1).strftime for i in range(7)] + + def __init__(self, format): + self.format = format + + def __getitem__(self, i): + funcs = self._days[i] + if isinstance(i, slice): + return [f(self.format) for f in funcs] + else: + return funcs(self.format) + + def __len__(self): + return 7 + + +# Full and abbreviated names of weekdays +day_name = _localized_day('%A') +day_abbr = _localized_day('%a') + +# Full and abbreviated names of months (1-based arrays!!!) +month_name = _localized_month('%B') +month_abbr = _localized_month('%b') + +# Constants for weekdays +(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7) + + +def isleap(year): + """Return True for leap years, False for non-leap years.""" + return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0) + + +def leapdays(y1, y2): + """Return number of leap years in range [y1, y2). 
+ Assume y1 <= y2.""" + y1 -= 1 + y2 -= 1 + return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400) + + +def weekday(year, month, day): + """Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12), + day (1-31).""" + return datetime.date(year, month, day).weekday() + + +def monthrange(year, month): + """Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for + year, month.""" + if not 1 <= month <= 12: + raise IllegalMonthError(month) + day1 = weekday(year, month, 1) + ndays = mdays[month] + (month == February and isleap(year)) + return day1, ndays + + +class Calendar(object): + """ + Base calendar class. This class doesn't do any formatting. It simply + provides data to subclasses. + """ + + def __init__(self, firstweekday=0): + self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday + + def getfirstweekday(self): + return self._firstweekday % 7 + + def setfirstweekday(self, firstweekday): + self._firstweekday = firstweekday + + firstweekday = property(getfirstweekday, setfirstweekday) + + def iterweekdays(self): + """ + Return a iterator for one week of weekday numbers starting with the + configured first one. + """ + for i in range(self.firstweekday, self.firstweekday + 7): + yield i%7 + + def itermonthdates(self, year, month): + """ + Return an iterator for one month. The iterator will yield datetime.date + values and will always iterate through complete weeks, so it will yield + dates outside the specified month. + """ + date = datetime.date(year, month, 1) + # Go back to the beginning of the week + days = (date.weekday() - self.firstweekday) % 7 + date -= datetime.timedelta(days=days) + oneday = datetime.timedelta(days=1) + while True: + yield date + try: + date += oneday + except OverflowError: + # Adding one day could fail after datetime.MAXYEAR + break + if date.month != month and date.weekday() == self.firstweekday: + break + + def itermonthdays2(self, year, month): + """ + Like itermonthdates(), but will yield (day number, weekday number) + tuples. For days outside the specified month the day number is 0. + """ + for date in self.itermonthdates(year, month): + if date.month != month: + yield (0, date.weekday()) + else: + yield (date.day, date.weekday()) + + def itermonthdays(self, year, month): + """ + Like itermonthdates(), but will yield day numbers. For days outside + the specified month the day number is 0. + """ + for date in self.itermonthdates(year, month): + if date.month != month: + yield 0 + else: + yield date.day + + def monthdatescalendar(self, year, month): + """ + Return a matrix (list of lists) representing a month's calendar. + Each row represents a week; week entries are datetime.date values. + """ + dates = list(self.itermonthdates(year, month)) + return [ dates[i:i+7] for i in range(0, len(dates), 7) ] + + def monthdays2calendar(self, year, month): + """ + Return a matrix representing a month's calendar. + Each row represents a week; week entries are + (day number, weekday number) tuples. Day numbers outside this month + are zero. + """ + days = list(self.itermonthdays2(year, month)) + return [ days[i:i+7] for i in range(0, len(days), 7) ] + + def monthdayscalendar(self, year, month): + """ + Return a matrix representing a month's calendar. + Each row represents a week; days outside this month are zero. + """ + days = list(self.itermonthdays(year, month)) + return [ days[i:i+7] for i in range(0, len(days), 7) ] + + def yeardatescalendar(self, year, width=3): + """ + Return the data for the specified year ready for formatting. 
The return + value is a list of month rows. Each month row contains up to width months. + Each month contains between 4 and 6 weeks and each week contains 1-7 + days. Days are datetime.date objects. + """ + months = [ + self.monthdatescalendar(year, i) + for i in range(January, January+12) + ] + return [months[i:i+width] for i in range(0, len(months), width) ] + + def yeardays2calendar(self, year, width=3): + """ + Return the data for the specified year ready for formatting (similar to + yeardatescalendar()). Entries in the week lists are + (day number, weekday number) tuples. Day numbers outside this month are + zero. + """ + months = [ + self.monthdays2calendar(year, i) + for i in range(January, January+12) + ] + return [months[i:i+width] for i in range(0, len(months), width) ] + + def yeardayscalendar(self, year, width=3): + """ + Return the data for the specified year ready for formatting (similar to + yeardatescalendar()). Entries in the week lists are day numbers. + Day numbers outside this month are zero. + """ + months = [ + self.monthdayscalendar(year, i) + for i in range(January, January+12) + ] + return [months[i:i+width] for i in range(0, len(months), width) ] + + +class TextCalendar(Calendar): + """ + Subclass of Calendar that outputs a calendar as a simple plain text + similar to the UNIX program cal. + """ + + def prweek(self, theweek, width): + """ + Print a single week (no newline). + """ + print(self.formatweek(theweek, width), end=' ') + + def formatday(self, day, weekday, width): + """ + Returns a formatted day. + """ + if day == 0: + s = '' + else: + s = '%2i' % day # right-align single-digit days + return s.center(width) + + def formatweek(self, theweek, width): + """ + Returns a single week in a string (no newline). + """ + return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek) + + def formatweekday(self, day, width): + """ + Returns a formatted week day name. + """ + if width >= 9: + names = day_name + else: + names = day_abbr + return names[day][:width].center(width) + + def formatweekheader(self, width): + """ + Return a header for a week. + """ + return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays()) + + def formatmonthname(self, theyear, themonth, width, withyear=True): + """ + Return a formatted month name. + """ + s = month_name[themonth] + if withyear: + s = "%s %r" % (s, theyear) + return s.center(width) + + def prmonth(self, theyear, themonth, w=0, l=0): + """ + Print a month's calendar. + """ + print(self.formatmonth(theyear, themonth, w, l), end=' ') + + def formatmonth(self, theyear, themonth, w=0, l=0): + """ + Return a month's calendar string (multi-line). + """ + w = max(2, w) + l = max(1, l) + s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1) + s = s.rstrip() + s += '\n' * l + s += self.formatweekheader(w).rstrip() + s += '\n' * l + for week in self.monthdays2calendar(theyear, themonth): + s += self.formatweek(week, w).rstrip() + s += '\n' * l + return s + + def formatyear(self, theyear, w=2, l=1, c=6, m=3): + """ + Returns a year's calendar as a multi-line string. 
+ """ + w = max(2, w) + l = max(1, l) + c = max(2, c) + colwidth = (w + 1) * 7 - 1 + v = [] + a = v.append + a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip()) + a('\n'*l) + header = self.formatweekheader(w) + for (i, row) in enumerate(self.yeardays2calendar(theyear, m)): + # months in this row + months = range(m*i+1, min(m*(i+1)+1, 13)) + a('\n'*l) + names = (self.formatmonthname(theyear, k, colwidth, False) + for k in months) + a(formatstring(names, colwidth, c).rstrip()) + a('\n'*l) + headers = (header for k in months) + a(formatstring(headers, colwidth, c).rstrip()) + a('\n'*l) + # max number of weeks for this row + height = max(len(cal) for cal in row) + for j in range(height): + weeks = [] + for cal in row: + if j >= len(cal): + weeks.append('') + else: + weeks.append(self.formatweek(cal[j], w)) + a(formatstring(weeks, colwidth, c).rstrip()) + a('\n' * l) + return ''.join(v) + + def pryear(self, theyear, w=0, l=0, c=6, m=3): + """Print a year's calendar.""" + print(self.formatyear(theyear, w, l, c, m)) + + +class HTMLCalendar(Calendar): + """ + This calendar returns complete HTML pages. + """ + + # CSS classes for the day s + cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"] + + def formatday(self, day, weekday): + """ + Return a day as a table cell. + """ + if day == 0: + return ' ' # day outside month + else: + return '%d' % (self.cssclasses[weekday], day) + + def formatweek(self, theweek): + """ + Return a complete week as a table row. + """ + s = ''.join(self.formatday(d, wd) for (d, wd) in theweek) + return '%s' % s + + def formatweekday(self, day): + """ + Return a weekday name as a table header. + """ + return '%s' % (self.cssclasses[day], day_abbr[day]) + + def formatweekheader(self): + """ + Return a header for a week as a table row. + """ + s = ''.join(self.formatweekday(i) for i in self.iterweekdays()) + return '%s' % s + + def formatmonthname(self, theyear, themonth, withyear=True): + """ + Return a month name as a table row. + """ + if withyear: + s = '%s %s' % (month_name[themonth], theyear) + else: + s = '%s' % month_name[themonth] + return '%s' % s + + def formatmonth(self, theyear, themonth, withyear=True): + """ + Return a formatted month as a table. + """ + v = [] + a = v.append + a('') + a('\n') + a(self.formatmonthname(theyear, themonth, withyear=withyear)) + a('\n') + a(self.formatweekheader()) + a('\n') + for week in self.monthdays2calendar(theyear, themonth): + a(self.formatweek(week)) + a('\n') + a('
    ') + a('\n') + return ''.join(v) + + def formatyear(self, theyear, width=3): + """ + Return a formatted year as a table of tables. + """ + v = [] + a = v.append + width = max(width, 1) + a('') + a('\n') + a('' % (width, theyear)) + for i in range(January, January+12, width): + # months in this row + months = range(i, min(i+width, 13)) + a('') + for m in months: + a('') + a('') + a('
    %s
    ') + a(self.formatmonth(theyear, m, withyear=False)) + a('
    ') + return ''.join(v) + + def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None): + """ + Return a formatted year as a complete HTML page. + """ + if encoding is None: + encoding = sys.getdefaultencoding() + v = [] + a = v.append + a('\n' % encoding) + a('\n') + a('\n') + a('\n') + a('\n' % encoding) + if css is not None: + a('\n' % css) + a('Calendar for %d\n' % theyear) + a('\n') + a('\n') + a(self.formatyear(theyear, width)) + a('\n') + a('\n') + return ''.join(v).encode(encoding, "xmlcharrefreplace") + + +class different_locale: + def __init__(self, locale): + self.locale = locale + + def __enter__(self): + self.oldlocale = _locale.getlocale(_locale.LC_TIME) + _locale.setlocale(_locale.LC_TIME, self.locale) + + def __exit__(self, *args): + _locale.setlocale(_locale.LC_TIME, self.oldlocale) + + +class LocaleTextCalendar(TextCalendar): + """ + This class can be passed a locale name in the constructor and will return + month and weekday names in the specified locale. If this locale includes + an encoding all strings containing month and weekday names will be returned + as unicode. + """ + + def __init__(self, firstweekday=0, locale=None): + TextCalendar.__init__(self, firstweekday) + if locale is None: + locale = _locale.getdefaultlocale() + self.locale = locale + + def formatweekday(self, day, width): + with different_locale(self.locale): + if width >= 9: + names = day_name + else: + names = day_abbr + name = names[day] + return name[:width].center(width) + + def formatmonthname(self, theyear, themonth, width, withyear=True): + with different_locale(self.locale): + s = month_name[themonth] + if withyear: + s = "%s %r" % (s, theyear) + return s.center(width) + + +class LocaleHTMLCalendar(HTMLCalendar): + """ + This class can be passed a locale name in the constructor and will return + month and weekday names in the specified locale. If this locale includes + an encoding all strings containing month and weekday names will be returned + as unicode. 
+ """ + def __init__(self, firstweekday=0, locale=None): + HTMLCalendar.__init__(self, firstweekday) + if locale is None: + locale = _locale.getdefaultlocale() + self.locale = locale + + def formatweekday(self, day): + with different_locale(self.locale): + s = day_abbr[day] + return '%s' % (self.cssclasses[day], s) + + def formatmonthname(self, theyear, themonth, withyear=True): + with different_locale(self.locale): + s = month_name[themonth] + if withyear: + s = '%s %s' % (s, theyear) + return '%s' % s + + +# Support for old module level interface +c = TextCalendar() + +firstweekday = c.getfirstweekday + +def setfirstweekday(firstweekday): + if not MONDAY <= firstweekday <= SUNDAY: + raise IllegalWeekdayError(firstweekday) + c.firstweekday = firstweekday + +monthcalendar = c.monthdayscalendar +prweek = c.prweek +week = c.formatweek +weekheader = c.formatweekheader +prmonth = c.prmonth +month = c.formatmonth +calendar = c.formatyear +prcal = c.pryear + + +# Spacing of month columns for multi-column year calendar +_colwidth = 7*3 - 1 # Amount printed by prweek() +_spacing = 6 # Number of spaces between columns + + +def format(cols, colwidth=_colwidth, spacing=_spacing): + """Prints multi-column formatting for year calendars""" + print(formatstring(cols, colwidth, spacing)) + + +def formatstring(cols, colwidth=_colwidth, spacing=_spacing): + """Returns a string formatted from n strings, centered within n columns.""" + spacing *= ' ' + return spacing.join(c.center(colwidth) for c in cols) + + +EPOCH = 1970 +_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal() + + +def timegm(tuple): + """Unrelated but handy function to calculate Unix timestamp from GMT.""" + year, month, day, hour, minute, second = tuple[:6] + days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1 + hours = days*24 + hour + minutes = hours*60 + minute + seconds = minutes*60 + second + return seconds + + +def main(args): + import optparse + parser = optparse.OptionParser(usage="usage: %prog [options] [year [month]]") + parser.add_option( + "-w", "--width", + dest="width", type="int", default=2, + help="width of date column (default 2, text only)" + ) + parser.add_option( + "-l", "--lines", + dest="lines", type="int", default=1, + help="number of lines for each week (default 1, text only)" + ) + parser.add_option( + "-s", "--spacing", + dest="spacing", type="int", default=6, + help="spacing between months (default 6, text only)" + ) + parser.add_option( + "-m", "--months", + dest="months", type="int", default=3, + help="months per row (default 3, text only)" + ) + parser.add_option( + "-c", "--css", + dest="css", default="calendar.css", + help="CSS to use for page (html only)" + ) + parser.add_option( + "-L", "--locale", + dest="locale", default=None, + help="locale to be used from month and weekday names" + ) + parser.add_option( + "-e", "--encoding", + dest="encoding", default=None, + help="Encoding to use for output." 
+ ) + parser.add_option( + "-t", "--type", + dest="type", default="text", + choices=("text", "html"), + help="output type (text or html)" + ) + + (options, args) = parser.parse_args(args) + + if options.locale and not options.encoding: + parser.error("if --locale is specified --encoding is required") + sys.exit(1) + + locale = options.locale, options.encoding + + if options.type == "html": + if options.locale: + cal = LocaleHTMLCalendar(locale=locale) + else: + cal = HTMLCalendar() + encoding = options.encoding + if encoding is None: + encoding = sys.getdefaultencoding() + optdict = dict(encoding=encoding, css=options.css) + write = sys.stdout.buffer.write + if len(args) == 1: + write(cal.formatyearpage(datetime.date.today().year, **optdict)) + elif len(args) == 2: + write(cal.formatyearpage(int(args[1]), **optdict)) + else: + parser.error("incorrect number of arguments") + sys.exit(1) + else: + if options.locale: + cal = LocaleTextCalendar(locale=locale) + else: + cal = TextCalendar() + optdict = dict(w=options.width, l=options.lines) + if len(args) != 3: + optdict["c"] = options.spacing + optdict["m"] = options.months + if len(args) == 1: + result = cal.formatyear(datetime.date.today().year, **optdict) + elif len(args) == 2: + result = cal.formatyear(int(args[1]), **optdict) + elif len(args) == 3: + result = cal.formatmonth(int(args[1]), int(args[2]), **optdict) + else: + parser.error("incorrect number of arguments") + sys.exit(1) + write = sys.stdout.write + if options.encoding: + result = result.encode(options.encoding) + write = sys.stdout.buffer.write + write(result) + + +if __name__ == "__main__": + main(sys.argv) diff --git a/lib/assets/Lib/cmd.py b/lib/assets/Lib/cmd.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/cmd.py @@ -0,0 +1,401 @@ +"""A generic class to build line-oriented command interpreters. + +Interpreters constructed with this class obey the following conventions: + +1. End of file on input is processed as the command 'EOF'. +2. A command is parsed out of each line by collecting the prefix composed + of characters in the identchars member. +3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method + is passed a single argument consisting of the remainder of the line. +4. Typing an empty line repeats the last command. (Actually, it calls the + method `emptyline', which may be overridden in a subclass.) +5. There is a predefined `help' method. Given an argument `topic', it + calls the command `help_topic'. With no arguments, it lists all topics + with defined help_ functions, broken into up to three topics; documented + commands, miscellaneous help topics, and undocumented commands. +6. The command '?' is a synonym for `help'. The command '!' is a synonym + for `shell', if a do_shell method exists. +7. If completion is enabled, completing commands will be done automatically, + and completing of commands args is done by calling complete_foo() with + arguments text, line, begidx, endidx. text is string we are matching + against, all returned matches must begin with it. line is the current + input line (lstripped), begidx and endidx are the beginning and end + indexes of the text being matched, which could be used to provide + different completion depending upon which position the argument is in. + +The `default' method may be overridden to intercept commands for which there +is no do_ method. + +The `completedefault' method may be overridden to intercept completions for +commands that have no complete_ method. 
+ +The data member `self.ruler' sets the character used to draw separator lines +in the help messages. If empty, no ruler line is drawn. It defaults to "=". + +If the value of `self.intro' is nonempty when the cmdloop method is called, +it is printed out on interpreter startup. This value may be overridden +via an optional argument to the cmdloop() method. + +The data members `self.doc_header', `self.misc_header', and +`self.undoc_header' set the headers used for the help function's +listings of documented functions, miscellaneous topics, and undocumented +functions respectively. +""" + +import string, sys + +__all__ = ["Cmd"] + +PROMPT = '(Cmd) ' +IDENTCHARS = string.ascii_letters + string.digits + '_' + +class Cmd: + """A simple framework for writing line-oriented command interpreters. + + These are often useful for test harnesses, administrative tools, and + prototypes that will later be wrapped in a more sophisticated interface. + + A Cmd instance or subclass instance is a line-oriented interpreter + framework. There is no good reason to instantiate Cmd itself; rather, + it's useful as a superclass of an interpreter class you define yourself + in order to inherit Cmd's methods and encapsulate action methods. + + """ + prompt = PROMPT + identchars = IDENTCHARS + ruler = '=' + lastcmd = '' + intro = None + doc_leader = "" + doc_header = "Documented commands (type help ):" + misc_header = "Miscellaneous help topics:" + undoc_header = "Undocumented commands:" + nohelp = "*** No help on %s" + use_rawinput = 1 + + def __init__(self, completekey='tab', stdin=None, stdout=None): + """Instantiate a line-oriented interpreter framework. + + The optional argument 'completekey' is the readline name of a + completion key; it defaults to the Tab key. If completekey is + not None and the readline module is available, command completion + is done automatically. The optional arguments stdin and stdout + specify alternate input and output file objects; if not specified, + sys.stdin and sys.stdout are used. + + """ + if stdin is not None: + self.stdin = stdin + else: + self.stdin = sys.stdin + if stdout is not None: + self.stdout = stdout + else: + self.stdout = sys.stdout + self.cmdqueue = [] + self.completekey = completekey + + def cmdloop(self, intro=None): + """Repeatedly issue a prompt, accept input, parse an initial prefix + off the received input, and dispatch to action methods, passing them + the remainder of the line as argument. 
+ + """ + + self.preloop() + if self.use_rawinput and self.completekey: + try: + import readline + self.old_completer = readline.get_completer() + readline.set_completer(self.complete) + readline.parse_and_bind(self.completekey+": complete") + except ImportError: + pass + try: + if intro is not None: + self.intro = intro + if self.intro: + self.stdout.write(str(self.intro)+"\n") + stop = None + while not stop: + if self.cmdqueue: + line = self.cmdqueue.pop(0) + else: + if self.use_rawinput: + try: + line = input(self.prompt) + except EOFError: + line = 'EOF' + else: + self.stdout.write(self.prompt) + self.stdout.flush() + line = self.stdin.readline() + if not len(line): + line = 'EOF' + else: + line = line.rstrip('\r\n') + line = self.precmd(line) + stop = self.onecmd(line) + stop = self.postcmd(stop, line) + self.postloop() + finally: + if self.use_rawinput and self.completekey: + try: + import readline + readline.set_completer(self.old_completer) + except ImportError: + pass + + + def precmd(self, line): + """Hook method executed just before the command line is + interpreted, but after the input prompt is generated and issued. + + """ + return line + + def postcmd(self, stop, line): + """Hook method executed just after a command dispatch is finished.""" + return stop + + def preloop(self): + """Hook method executed once when the cmdloop() method is called.""" + pass + + def postloop(self): + """Hook method executed once when the cmdloop() method is about to + return. + + """ + pass + + def parseline(self, line): + """Parse the line into a command name and a string containing + the arguments. Returns a tuple containing (command, args, line). + 'command' and 'args' may be None if the line couldn't be parsed. + """ + line = line.strip() + if not line: + return None, None, line + elif line[0] == '?': + line = 'help ' + line[1:] + elif line[0] == '!': + if hasattr(self, 'do_shell'): + line = 'shell ' + line[1:] + else: + return None, None, line + i, n = 0, len(line) + while i < n and line[i] in self.identchars: i = i+1 + cmd, arg = line[:i], line[i:].strip() + return cmd, arg, line + + def onecmd(self, line): + """Interpret the argument as though it had been typed in response + to the prompt. + + This may be overridden, but should not normally need to be; + see the precmd() and postcmd() methods for useful execution hooks. + The return value is a flag indicating whether interpretation of + commands by the interpreter should stop. + + """ + cmd, arg, line = self.parseline(line) + if not line: + return self.emptyline() + if cmd is None: + return self.default(line) + self.lastcmd = line + if line == 'EOF' : + self.lastcmd = '' + if cmd == '': + return self.default(line) + else: + try: + func = getattr(self, 'do_' + cmd) + except AttributeError: + return self.default(line) + return func(arg) + + def emptyline(self): + """Called when an empty line is entered in response to the prompt. + + If this method is not overridden, it repeats the last nonempty + command entered. + + """ + if self.lastcmd: + return self.onecmd(self.lastcmd) + + def default(self, line): + """Called on an input line when the command prefix is not recognized. + + If this method is not overridden, it prints an error message and + returns. + + """ + self.stdout.write('*** Unknown syntax: %s\n'%line) + + def completedefault(self, *ignored): + """Method called to complete an input line when no command-specific + complete_*() method is available. + + By default, it returns an empty list. 
+ + """ + return [] + + def completenames(self, text, *ignored): + dotext = 'do_'+text + return [a[3:] for a in self.get_names() if a.startswith(dotext)] + + def complete(self, text, state): + """Return the next possible completion for 'text'. + + If a command has not been entered, then complete against command list. + Otherwise try to call complete_ to get list of completions. + """ + if state == 0: + import readline + origline = readline.get_line_buffer() + line = origline.lstrip() + stripped = len(origline) - len(line) + begidx = readline.get_begidx() - stripped + endidx = readline.get_endidx() - stripped + if begidx>0: + cmd, args, foo = self.parseline(line) + if cmd == '': + compfunc = self.completedefault + else: + try: + compfunc = getattr(self, 'complete_' + cmd) + except AttributeError: + compfunc = self.completedefault + else: + compfunc = self.completenames + self.completion_matches = compfunc(text, line, begidx, endidx) + try: + return self.completion_matches[state] + except IndexError: + return None + + def get_names(self): + # This method used to pull in base class attributes + # at a time dir() didn't do it yet. + return dir(self.__class__) + + def complete_help(self, *args): + commands = set(self.completenames(*args)) + topics = set(a[5:] for a in self.get_names() + if a.startswith('help_' + args[0])) + return list(commands | topics) + + def do_help(self, arg): + 'List available commands with "help" or detailed help with "help cmd".' + if arg: + # XXX check arg syntax + try: + func = getattr(self, 'help_' + arg) + except AttributeError: + try: + doc=getattr(self, 'do_' + arg).__doc__ + if doc: + self.stdout.write("%s\n"%str(doc)) + return + except AttributeError: + pass + self.stdout.write("%s\n"%str(self.nohelp % (arg,))) + return + func() + else: + names = self.get_names() + cmds_doc = [] + cmds_undoc = [] + help = {} + for name in names: + if name[:5] == 'help_': + help[name[5:]]=1 + names.sort() + # There can be duplicates if routines overridden + prevname = '' + for name in names: + if name[:3] == 'do_': + if name == prevname: + continue + prevname = name + cmd=name[3:] + if cmd in help: + cmds_doc.append(cmd) + del help[cmd] + elif getattr(self, name).__doc__: + cmds_doc.append(cmd) + else: + cmds_undoc.append(cmd) + self.stdout.write("%s\n"%str(self.doc_leader)) + self.print_topics(self.doc_header, cmds_doc, 15,80) + self.print_topics(self.misc_header, list(help.keys()),15,80) + self.print_topics(self.undoc_header, cmds_undoc, 15,80) + + def print_topics(self, header, cmds, cmdlen, maxcol): + if cmds: + self.stdout.write("%s\n"%str(header)) + if self.ruler: + self.stdout.write("%s\n"%str(self.ruler * len(header))) + self.columnize(cmds, maxcol-1) + self.stdout.write("\n") + + def columnize(self, list, displaywidth=80): + """Display a list of strings as a compact set of columns. + + Each column is only as wide as necessary. + Columns are separated by two spaces (one was not legible enough). 
+ """ + if not list: + self.stdout.write("\n") + return + + nonstrings = [i for i in range(len(list)) + if not isinstance(list[i], str)] + if nonstrings: + raise TypeError("list[i] not a string for i in %s" + % ", ".join(map(str, nonstrings))) + size = len(list) + if size == 1: + self.stdout.write('%s\n'%str(list[0])) + return + # Try every row count from 1 upwards + for nrows in range(1, len(list)): + ncols = (size+nrows-1) // nrows + colwidths = [] + totwidth = -2 + for col in range(ncols): + colwidth = 0 + for row in range(nrows): + i = row + nrows*col + if i >= size: + break + x = list[i] + colwidth = max(colwidth, len(x)) + colwidths.append(colwidth) + totwidth += colwidth + 2 + if totwidth > displaywidth: + break + if totwidth <= displaywidth: + break + else: + nrows = len(list) + ncols = 1 + colwidths = [0] + for row in range(nrows): + texts = [] + for col in range(ncols): + i = row + nrows*col + if i >= size: + x = "" + else: + x = list[i] + texts.append(x) + while texts and not texts[-1]: + del texts[-1] + for col in range(len(texts)): + texts[col] = texts[col].ljust(colwidths[col]) + self.stdout.write("%s\n"%str(" ".join(texts))) diff --git a/lib/assets/Lib/code.py b/lib/assets/Lib/code.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/code.py @@ -0,0 +1,302 @@ +"""Utilities needed to emulate Python's interactive interpreter. + +""" + +# Inspired by similar code by Jeff Epler and Fredrik Lundh. + + +import sys +import traceback +from codeop import CommandCompiler, compile_command + +__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact", + "compile_command"] + +class InteractiveInterpreter: + """Base class for InteractiveConsole. + + This class deals with parsing and interpreter state (the user's + namespace); it doesn't deal with input buffering or prompting or + input file naming (the filename is always passed in explicitly). + + """ + + def __init__(self, locals=None): + """Constructor. + + The optional 'locals' argument specifies the dictionary in + which code will be executed; it defaults to a newly created + dictionary with key "__name__" set to "__console__" and key + "__doc__" set to None. + + """ + if locals is None: + locals = {"__name__": "__console__", "__doc__": None} + self.locals = locals + self.compile = CommandCompiler() + + def runsource(self, source, filename="", symbol="single"): + """Compile and run some source in the interpreter. + + Arguments are as for compile_command(). + + One several things can happen: + + 1) The input is incorrect; compile_command() raised an + exception (SyntaxError or OverflowError). A syntax traceback + will be printed by calling the showsyntaxerror() method. + + 2) The input is incomplete, and more input is required; + compile_command() returned None. Nothing happens. + + 3) The input is complete; compile_command() returned a code + object. The code is executed by calling self.runcode() (which + also handles run-time exceptions, except for SystemExit). + + The return value is True in case 2, False in the other cases (unless + an exception is raised). The return value can be used to + decide whether to use sys.ps1 or sys.ps2 to prompt the next + line. + + """ + try: + code = self.compile(source, filename, symbol) + except (OverflowError, SyntaxError, ValueError): + # Case 1 + self.showsyntaxerror(filename) + return False + + if code is None: + # Case 2 + return True + + # Case 3 + self.runcode(code) + return False + + def runcode(self, code): + """Execute a code object. 
+ + When an exception occurs, self.showtraceback() is called to + display a traceback. All exceptions are caught except + SystemExit, which is reraised. + + A note about KeyboardInterrupt: this exception may occur + elsewhere in this code, and may not always be caught. The + caller should be prepared to deal with it. + + """ + try: + exec(code, self.locals) + except SystemExit: + raise + except: + self.showtraceback() + + def showsyntaxerror(self, filename=None): + """Display the syntax error that just occurred. + + This doesn't display a stack trace because there isn't one. + + If a filename is given, it is stuffed in the exception instead + of what was there before (because Python's parser always uses + "" when reading from a string). + + The output is written by self.write(), below. + + """ + type, value, tb = sys.exc_info() + sys.last_type = type + sys.last_value = value + sys.last_traceback = tb + if filename and type is SyntaxError: + # Work hard to stuff the correct filename in the exception + try: + msg, (dummy_filename, lineno, offset, line) = value.args + except ValueError: + # Not the format we expect; leave it alone + pass + else: + # Stuff in the right filename + value = SyntaxError(msg, (filename, lineno, offset, line)) + sys.last_value = value + if sys.excepthook is sys.__excepthook__: + lines = traceback.format_exception_only(type, value) + self.write(''.join(lines)) + else: + # If someone has set sys.excepthook, we let that take precedence + # over self.write + sys.excepthook(type, value, tb) + + def showtraceback(self): + """Display the exception that just occurred. + + We remove the first stack item because it is our own code. + + The output is written by self.write(), below. + + """ + try: + type, value, tb = sys.exc_info() + sys.last_type = type + sys.last_value = value + sys.last_traceback = tb + tblist = traceback.extract_tb(tb) + del tblist[:1] + lines = traceback.format_list(tblist) + if lines: + lines.insert(0, "Traceback (most recent call last):\n") + lines.extend(traceback.format_exception_only(type, value)) + finally: + tblist = tb = None + if sys.excepthook is sys.__excepthook__: + self.write(''.join(lines)) + else: + # If someone has set sys.excepthook, we let that take precedence + # over self.write + sys.excepthook(type, value, tb) + + def write(self, data): + """Write a string. + + The base implementation writes to sys.stderr; a subclass may + replace this with a different implementation. + + """ + sys.stderr.write(data) + + +class InteractiveConsole(InteractiveInterpreter): + """Closely emulate the behavior of the interactive Python interpreter. + + This class builds on InteractiveInterpreter and adds prompting + using the familiar sys.ps1 and sys.ps2, and input buffering. + + """ + + def __init__(self, locals=None, filename=""): + """Constructor. + + The optional locals argument will be passed to the + InteractiveInterpreter base class. + + The optional filename argument should specify the (file)name + of the input stream; it will show up in tracebacks. + + """ + InteractiveInterpreter.__init__(self, locals) + self.filename = filename + self.resetbuffer() + + def resetbuffer(self): + """Reset the input buffer.""" + self.buffer = [] + + def interact(self, banner=None): + """Closely emulate the interactive Python console. 
+ + The optional banner argument specifies the banner to print + before the first interaction; by default it prints a banner + similar to the one printed by the real Python interpreter, + followed by the current class name in parentheses (so as not + to confuse this with the real interpreter -- since it's so + close!). + + """ + try: + sys.ps1 + except AttributeError: + sys.ps1 = ">>> " + try: + sys.ps2 + except AttributeError: + sys.ps2 = "... " + cprt = 'Type "help", "copyright", "credits" or "license" for more information.' + if banner is None: + self.write("Python %s on %s\n%s\n(%s)\n" % + (sys.version, sys.platform, cprt, + self.__class__.__name__)) + elif banner: + self.write("%s\n" % str(banner)) + more = 0 + while 1: + try: + if more: + prompt = sys.ps2 + else: + prompt = sys.ps1 + try: + line = self.raw_input(prompt) + except EOFError: + self.write("\n") + break + else: + more = self.push(line) + except KeyboardInterrupt: + self.write("\nKeyboardInterrupt\n") + self.resetbuffer() + more = 0 + + def push(self, line): + """Push a line to the interpreter. + + The line should not have a trailing newline; it may have + internal newlines. The line is appended to a buffer and the + interpreter's runsource() method is called with the + concatenated contents of the buffer as source. If this + indicates that the command was executed or invalid, the buffer + is reset; otherwise, the command is incomplete, and the buffer + is left as it was after the line was appended. The return + value is 1 if more input is required, 0 if the line was dealt + with in some way (this is the same as runsource()). + + """ + self.buffer.append(line) + source = "\n".join(self.buffer) + more = self.runsource(source, self.filename) + if not more: + self.resetbuffer() + return more + + def raw_input(self, prompt=""): + """Write a prompt and read a line. + + The returned line does not include the trailing newline. + When the user enters the EOF key sequence, EOFError is raised. + + The base implementation uses the built-in function + input(); a subclass may replace this with a different + implementation. + + """ + return input(prompt) + + + +def interact(banner=None, readfunc=None, local=None): + """Closely emulate the interactive Python interpreter. + + This is a backwards compatible interface to the InteractiveConsole + class. When readfunc is not specified, it attempts to import the + readline module to enable GNU readline if it is available. + + Arguments (all optional, all default to None): + + banner -- passed to InteractiveConsole.interact() + readfunc -- if not None, replaces InteractiveConsole.raw_input() + local -- passed to InteractiveInterpreter.__init__() + + """ + console = InteractiveConsole(local) + if readfunc is not None: + console.raw_input = readfunc + else: + try: + import readline + except ImportError: + pass + console.interact(banner) + + +if __name__ == "__main__": + interact() diff --git a/lib/assets/Lib/codecs.py b/lib/assets/Lib/codecs.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/codecs.py @@ -0,0 +1,1099 @@ +""" codecs -- Python Codec Registry, API and helpers. + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. 
+ +"""#" + +import builtins, sys + +### Registry and builtin stateless codec functions + +try: + from _codecs import * +except ImportError as why: + raise SystemError('Failed to load the builtin codecs: %s' % why) + +__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE", + "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", + "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE", + "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE", + "strict_errors", "ignore_errors", "replace_errors", + "xmlcharrefreplace_errors", + "register_error", "lookup_error"] + +### Constants + +# +# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF) +# and its possible byte string values +# for UTF8/UTF16/UTF32 output and little/big endian machines +# + +# UTF-8 +BOM_UTF8 = b'\xef\xbb\xbf' + +# UTF-16, little endian +BOM_LE = BOM_UTF16_LE = b'\xff\xfe' + +# UTF-16, big endian +BOM_BE = BOM_UTF16_BE = b'\xfe\xff' + +# UTF-32, little endian +BOM_UTF32_LE = b'\xff\xfe\x00\x00' + +# UTF-32, big endian +BOM_UTF32_BE = b'\x00\x00\xfe\xff' + +if sys.byteorder == 'little': + + # UTF-16, native endianness + BOM = BOM_UTF16 = BOM_UTF16_LE + + # UTF-32, native endianness + BOM_UTF32 = BOM_UTF32_LE + +else: + + # UTF-16, native endianness + BOM = BOM_UTF16 = BOM_UTF16_BE + + # UTF-32, native endianness + BOM_UTF32 = BOM_UTF32_BE + +# Old broken names (don't use in new code) +BOM32_LE = BOM_UTF16_LE +BOM32_BE = BOM_UTF16_BE +BOM64_LE = BOM_UTF32_LE +BOM64_BE = BOM_UTF32_BE + + +### Codec base classes (defining the API) + +class CodecInfo(tuple): + + def __new__(cls, encode, decode, streamreader=None, streamwriter=None, + incrementalencoder=None, incrementaldecoder=None, name=None): + self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter)) + self.name = name + self.encode = encode + self.decode = decode + self.incrementalencoder = incrementalencoder + self.incrementaldecoder = incrementaldecoder + self.streamwriter = streamwriter + self.streamreader = streamreader + return self + + def __repr__(self): + return "<%s.%s object for encoding %s at 0x%x>" % \ + (self.__class__.__module__, self.__class__.__name__, + self.name, id(self)) + +class Codec: + + """ Defines the interface for stateless encoders/decoders. + + The .encode()/.decode() methods may use different error + handling schemes by providing the errors argument. These + string values are predefined: + + 'strict' - raise a ValueError error (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace' - replace with a suitable replacement character; + Python will use the official U+FFFD REPLACEMENT + CHARACTER for the builtin Unicode codecs on + decoding and '?' on encoding. + 'surrogateescape' - replace with private codepoints U+DCnn. + 'xmlcharrefreplace' - Replace with the appropriate XML + character reference (only for encoding). + 'backslashreplace' - Replace with backslashed escape sequences + (only for encoding). + + The set of allowed values can be extended via register_error. + + """ + def encode(self, input, errors='strict'): + + """ Encodes the object input and returns a tuple (output + object, length consumed). + + errors defines the error handling to apply. It defaults to + 'strict' handling. + + The method may not store state in the Codec instance. Use + StreamCodec for codecs which have to keep state in order to + make encoding/decoding efficient. + + The encoder must be able to handle zero length input and + return an empty object of the output object type in this + situation. 
+ + """ + raise NotImplementedError + + def decode(self, input, errors='strict'): + + """ Decodes the object input and returns a tuple (output + object, length consumed). + + input must be an object which provides the bf_getreadbuf + buffer slot. Python strings, buffer objects and memory + mapped files are examples of objects providing this slot. + + errors defines the error handling to apply. It defaults to + 'strict' handling. + + The method may not store state in the Codec instance. Use + StreamCodec for codecs which have to keep state in order to + make encoding/decoding efficient. + + The decoder must be able to handle zero length input and + return an empty object of the output object type in this + situation. + + """ + raise NotImplementedError + +class IncrementalEncoder(object): + """ + An IncrementalEncoder encodes an input in multiple steps. The input can + be passed piece by piece to the encode() method. The IncrementalEncoder + remembers the state of the encoding process between calls to encode(). + """ + def __init__(self, errors='strict'): + """ + Creates an IncrementalEncoder instance. + + The IncrementalEncoder may use different error handling schemes by + providing the errors keyword argument. See the module docstring + for a list of possible values. + """ + self.errors = errors + self.buffer = "" + + def encode(self, input, final=False): + """ + Encodes input and returns the resulting object. + """ + raise NotImplementedError + + def reset(self): + """ + Resets the encoder to the initial state. + """ + + def getstate(self): + """ + Return the current state of the encoder. + """ + return 0 + + def setstate(self, state): + """ + Set the current state of the encoder. state must have been + returned by getstate(). + """ + +class BufferedIncrementalEncoder(IncrementalEncoder): + """ + This subclass of IncrementalEncoder can be used as the baseclass for an + incremental encoder if the encoder must keep some of the output in a + buffer between calls to encode(). + """ + def __init__(self, errors='strict'): + IncrementalEncoder.__init__(self, errors) + # unencoded input that is kept between calls to encode() + self.buffer = "" + + def _buffer_encode(self, input, errors, final): + # Overwrite this method in subclasses: It must encode input + # and return an (output, length consumed) tuple + raise NotImplementedError + + def encode(self, input, final=False): + # encode input (taking the buffer into account) + data = self.buffer + input + (result, consumed) = self._buffer_encode(data, self.errors, final) + # keep unencoded input until the next call + self.buffer = data[consumed:] + return result + + def reset(self): + IncrementalEncoder.reset(self) + self.buffer = "" + + def getstate(self): + return self.buffer or 0 + + def setstate(self, state): + self.buffer = state or "" + +class IncrementalDecoder(object): + """ + An IncrementalDecoder decodes an input in multiple steps. The input can + be passed piece by piece to the decode() method. The IncrementalDecoder + remembers the state of the decoding process between calls to decode(). + """ + def __init__(self, errors='strict'): + """ + Create a IncrementalDecoder instance. + + The IncrementalDecoder may use different error handling schemes by + providing the errors keyword argument. See the module docstring + for a list of possible values. + """ + self.errors = errors + + def decode(self, input, final=False): + """ + Decode input and returns the resulting object. 
+ """ + raise NotImplementedError + + def reset(self): + """ + Reset the decoder to the initial state. + """ + + def getstate(self): + """ + Return the current state of the decoder. + + This must be a (buffered_input, additional_state_info) tuple. + buffered_input must be a bytes object containing bytes that + were passed to decode() that have not yet been converted. + additional_state_info must be a non-negative integer + representing the state of the decoder WITHOUT yet having + processed the contents of buffered_input. In the initial state + and after reset(), getstate() must return (b"", 0). + """ + return (b"", 0) + + def setstate(self, state): + """ + Set the current state of the decoder. + + state must have been returned by getstate(). The effect of + setstate((b"", 0)) must be equivalent to reset(). + """ + +class BufferedIncrementalDecoder(IncrementalDecoder): + """ + This subclass of IncrementalDecoder can be used as the baseclass for an + incremental decoder if the decoder must be able to handle incomplete + byte sequences. + """ + def __init__(self, errors='strict'): + IncrementalDecoder.__init__(self, errors) + # undecoded input that is kept between calls to decode() + self.buffer = b"" + + def _buffer_decode(self, input, errors, final): + # Overwrite this method in subclasses: It must decode input + # and return an (output, length consumed) tuple + raise NotImplementedError + + def decode(self, input, final=False): + # decode input (taking the buffer into account) + data = self.buffer + input + (result, consumed) = self._buffer_decode(data, self.errors, final) + # keep undecoded input until the next call + self.buffer = data[consumed:] + return result + + def reset(self): + IncrementalDecoder.reset(self) + self.buffer = b"" + + def getstate(self): + # additional state info is always 0 + return (self.buffer, 0) + + def setstate(self, state): + # ignore additional state info + self.buffer = state[0] + +# +# The StreamWriter and StreamReader class provide generic working +# interfaces which can be used to implement new encoding submodules +# very easily. See encodings/utf_8.py for an example on how this is +# done. +# + +class StreamWriter(Codec): + + def __init__(self, stream, errors='strict'): + + """ Creates a StreamWriter instance. + + stream must be a file-like object open for writing + (binary) data. + + The StreamWriter may use different error handling + schemes by providing the errors keyword argument. These + parameters are predefined: + + 'strict' - raise a ValueError (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace'- replace with a suitable replacement character + 'xmlcharrefreplace' - Replace with the appropriate XML + character reference. + 'backslashreplace' - Replace with backslashed escape + sequences (only for encoding). + + The set of allowed parameter values can be extended via + register_error. + """ + self.stream = stream + self.errors = errors + + def write(self, object): + + """ Writes the object's contents encoded to self.stream. + """ + data, consumed = self.encode(object, self.errors) + self.stream.write(data) + + def writelines(self, list): + + """ Writes the concatenated list of strings to the stream + using .write(). + """ + self.write(''.join(list)) + + def reset(self): + + """ Flushes and resets the codec buffers used for keeping state. 
+ + Calling this method should ensure that the data on the + output is put into a clean state, that allows appending + of new fresh data without having to rescan the whole + stream to recover state. + + """ + pass + + def seek(self, offset, whence=0): + self.stream.seek(offset, whence) + if whence == 0 and offset == 0: + self.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamReader(Codec): + + charbuffertype = str + + def __init__(self, stream, errors='strict'): + + """ Creates a StreamReader instance. + + stream must be a file-like object open for reading + (binary) data. + + The StreamReader may use different error handling + schemes by providing the errors keyword argument. These + parameters are predefined: + + 'strict' - raise a ValueError (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace'- replace with a suitable replacement character; + + The set of allowed parameter values can be extended via + register_error. + """ + self.stream = stream + self.errors = errors + self.bytebuffer = b"" + self._empty_charbuffer = self.charbuffertype() + self.charbuffer = self._empty_charbuffer + self.linebuffer = None + + def decode(self, input, errors='strict'): + raise NotImplementedError + + def read(self, size=-1, chars=-1, firstline=False): + + """ Decodes data from the stream self.stream and returns the + resulting object. + + chars indicates the number of characters to read from the + stream. read() will never return more than chars + characters, but it might return less, if there are not enough + characters available. + + size indicates the approximate maximum number of bytes to + read from the stream for decoding purposes. The decoder + can modify this setting as appropriate. The default value + -1 indicates to read and decode as much as possible. size + is intended to prevent having to decode huge files in one + step. + + If firstline is true, and a UnicodeDecodeError happens + after the first line terminator in the input only the first line + will be returned, the rest of the input will be kept until the + next call to read(). + + The method should use a greedy read strategy meaning that + it should read as much data as is allowed within the + definition of the encoding and the given size, e.g. if + optional encoding endings or state markers are available + on the stream, these should be read too. + """ + # If we have lines cached, first merge them back into characters + if self.linebuffer: + self.charbuffer = self._empty_charbuffer.join(self.linebuffer) + self.linebuffer = None + + # read until we get the required number of characters (if available) + while True: + # can the request be satisfied from the character buffer? 
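+            # Three cases: with no limits at all, any buffered characters
+            # are enough; with only a byte-size hint, stop once at least
+            # `size` characters are buffered; with an explicit character
+            # count, stop once at least `chars` characters are buffered.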
+ if chars < 0: + if size < 0: + if self.charbuffer: + break + elif len(self.charbuffer) >= size: + break + else: + if len(self.charbuffer) >= chars: + break + # we need more data + if size < 0: + newdata = self.stream.read() + else: + newdata = self.stream.read(size) + # decode bytes (those remaining from the last call included) + data = self.bytebuffer + newdata + try: + newchars, decodedbytes = self.decode(data, self.errors) + except UnicodeDecodeError as exc: + if firstline: + newchars, decodedbytes = \ + self.decode(data[:exc.start], self.errors) + lines = newchars.splitlines(keepends=True) + if len(lines)<=1: + raise + else: + raise + # keep undecoded bytes until the next call + self.bytebuffer = data[decodedbytes:] + # put new characters in the character buffer + self.charbuffer += newchars + # there was no data available + if not newdata: + break + if chars < 0: + # Return everything we've got + result = self.charbuffer + self.charbuffer = self._empty_charbuffer + else: + # Return the first chars characters + result = self.charbuffer[:chars] + self.charbuffer = self.charbuffer[chars:] + return result + + def readline(self, size=None, keepends=True): + + """ Read one line from the input stream and return the + decoded data. + + size, if given, is passed as size argument to the + read() method. + + """ + # If we have lines cached from an earlier read, return + # them unconditionally + if self.linebuffer: + line = self.linebuffer[0] + del self.linebuffer[0] + if len(self.linebuffer) == 1: + # revert to charbuffer mode; we might need more data + # next time + self.charbuffer = self.linebuffer[0] + self.linebuffer = None + if not keepends: + line = line.splitlines(keepends=False)[0] + return line + + readsize = size or 72 + line = self._empty_charbuffer + # If size is given, we call read() only once + while True: + data = self.read(readsize, firstline=True) + if data: + # If we're at a "\r" read one extra character (which might + # be a "\n") to get a proper line ending. If the stream is + # temporarily exhausted we return the wrong line ending. + if (isinstance(data, str) and data.endswith("\r")) or \ + (isinstance(data, bytes) and data.endswith(b"\r")): + data += self.read(size=1, chars=1) + + line += data + lines = line.splitlines(keepends=True) + if lines: + if len(lines) > 1: + # More than one line result; the first line is a full line + # to return + line = lines[0] + del lines[0] + if len(lines) > 1: + # cache the remaining lines + lines[-1] += self.charbuffer + self.linebuffer = lines + self.charbuffer = None + else: + # only one remaining line, put it back into charbuffer + self.charbuffer = lines[0] + self.charbuffer + if not keepends: + line = line.splitlines(keepends=False)[0] + break + line0withend = lines[0] + line0withoutend = lines[0].splitlines(keepends=False)[0] + if line0withend != line0withoutend: # We really have a line end + # Put the rest back together and keep it until the next call + self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \ + self.charbuffer + if keepends: + line = line0withend + else: + line = line0withoutend + break + # we didn't get anything or this was our only try + if not data or size is not None: + if line and not keepends: + line = line.splitlines(keepends=False)[0] + break + if readsize < 8000: + readsize *= 2 + return line + + def readlines(self, sizehint=None, keepends=True): + + """ Read all lines available on the input stream + and return them as list of lines. 
+ + Line breaks are implemented using the codec's decoder + method and are included in the list entries. + + sizehint, if given, is ignored since there is no efficient + way to finding the true end-of-line. + + """ + data = self.read() + return data.splitlines(keepends) + + def reset(self): + + """ Resets the codec buffers used for keeping state. + + Note that no stream repositioning should take place. + This method is primarily intended to be able to recover + from decoding errors. + + """ + self.bytebuffer = b"" + self.charbuffer = self._empty_charbuffer + self.linebuffer = None + + def seek(self, offset, whence=0): + """ Set the input stream's current position. + + Resets the codec buffers used for keeping state. + """ + self.stream.seek(offset, whence) + self.reset() + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + line = self.readline() + if line: + return line + raise StopIteration + + def __iter__(self): + return self + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamReaderWriter: + + """ StreamReaderWriter instances allow wrapping streams which + work in both read and write modes. + + The design is such that one can use the factory functions + returned by the codec.lookup() function to construct the + instance. + + """ + # Optional attributes set by the file wrappers below + encoding = 'unknown' + + def __init__(self, stream, Reader, Writer, errors='strict'): + + """ Creates a StreamReaderWriter instance. + + stream must be a Stream-like object. + + Reader, Writer must be factory functions or classes + providing the StreamReader, StreamWriter interface resp. + + Error handling is done in the same way as defined for the + StreamWriter/Readers. + + """ + self.stream = stream + self.reader = Reader(stream, errors) + self.writer = Writer(stream, errors) + self.errors = errors + + def read(self, size=-1): + + return self.reader.read(size) + + def readline(self, size=None): + + return self.reader.readline(size) + + def readlines(self, sizehint=None): + + return self.reader.readlines(sizehint) + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + return next(self.reader) + + def __iter__(self): + return self + + def write(self, data): + + return self.writer.write(data) + + def writelines(self, list): + + return self.writer.writelines(list) + + def reset(self): + + self.reader.reset() + self.writer.reset() + + def seek(self, offset, whence=0): + self.stream.seek(offset, whence) + self.reader.reset() + if whence == 0 and offset == 0: + self.writer.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + # these are needed to make "with codecs.open(...)" work properly + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamRecoder: + + """ StreamRecoder instances provide a frontend - backend + view of encoding data. + + They use the complete set of APIs returned by the + codecs.lookup() function to implement their task. 
+ + Data written to the stream is first decoded into an + intermediate format (which is dependent on the given codec + combination) and then written to the stream using an instance + of the provided Writer class. + + In the other direction, data is read from the stream using a + Reader instance and then return encoded data to the caller. + + """ + # Optional attributes set by the file wrappers below + data_encoding = 'unknown' + file_encoding = 'unknown' + + def __init__(self, stream, encode, decode, Reader, Writer, + errors='strict'): + + """ Creates a StreamRecoder instance which implements a two-way + conversion: encode and decode work on the frontend (the + input to .read() and output of .write()) while + Reader and Writer work on the backend (reading and + writing to the stream). + + You can use these objects to do transparent direct + recodings from e.g. latin-1 to utf-8 and back. + + stream must be a file-like object. + + encode, decode must adhere to the Codec interface, Reader, + Writer must be factory functions or classes providing the + StreamReader, StreamWriter interface resp. + + encode and decode are needed for the frontend translation, + Reader and Writer for the backend translation. Unicode is + used as intermediate encoding. + + Error handling is done in the same way as defined for the + StreamWriter/Readers. + + """ + self.stream = stream + self.encode = encode + self.decode = decode + self.reader = Reader(stream, errors) + self.writer = Writer(stream, errors) + self.errors = errors + + def read(self, size=-1): + + data = self.reader.read(size) + data, bytesencoded = self.encode(data, self.errors) + return data + + def readline(self, size=None): + + if size is None: + data = self.reader.readline() + else: + data = self.reader.readline(size) + data, bytesencoded = self.encode(data, self.errors) + return data + + def readlines(self, sizehint=None): + + data = self.reader.read() + data, bytesencoded = self.encode(data, self.errors) + return data.splitlines(keepends=True) + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + data = next(self.reader) + data, bytesencoded = self.encode(data, self.errors) + return data + + def __iter__(self): + return self + + def write(self, data): + + data, bytesdecoded = self.decode(data, self.errors) + return self.writer.write(data) + + def writelines(self, list): + + data = ''.join(list) + data, bytesdecoded = self.decode(data, self.errors) + return self.writer.write(data) + + def reset(self): + + self.reader.reset() + self.writer.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### Shortcuts + +def open(filename, mode='rb', encoding=None, errors='strict', buffering=1): + + """ Open an encoded file using the given mode and return + a wrapped version providing transparent encoding/decoding. + + Note: The wrapped version will only accept the object format + defined by the codecs, i.e. Unicode objects for most builtin + codecs. Output is also codec dependent and will usually be + Unicode as well. + + Files are always opened in binary mode, even if no binary mode + was specified. This is done to avoid data loss due to encodings + using 8-bit values. The default file mode is 'rb' meaning to + open the file in binary read mode. 
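+
+        A minimal usage sketch (the file name is purely illustrative):
+
+            with codecs.open('example.txt', 'w', encoding='utf-8') as f:
+                f.write('some text')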
+ + encoding specifies the encoding which is to be used for the + file. + + errors may be given to define the error handling. It defaults + to 'strict' which causes ValueErrors to be raised in case an + encoding error occurs. + + buffering has the same meaning as for the builtin open() API. + It defaults to line buffered. + + The returned wrapped file object provides an extra attribute + .encoding which allows querying the used encoding. This + attribute is only available if an encoding was specified as + parameter. + + """ + if encoding is not None and \ + 'b' not in mode: + # Force opening of the file in binary mode + mode = mode + 'b' + file = builtins.open(filename, mode, buffering) + if encoding is None: + return file + info = lookup(encoding) + srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors) + # Add attributes to simplify introspection + srw.encoding = encoding + return srw + +def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'): + + """ Return a wrapped version of file which provides transparent + encoding translation. + + Strings written to the wrapped file are interpreted according + to the given data_encoding and then written to the original + file as string using file_encoding. The intermediate encoding + will usually be Unicode but depends on the specified codecs. + + Strings are read from the file using file_encoding and then + passed back to the caller as string using data_encoding. + + If file_encoding is not given, it defaults to data_encoding. + + errors may be given to define the error handling. It defaults + to 'strict' which causes ValueErrors to be raised in case an + encoding error occurs. + + The returned wrapped file object provides two extra attributes + .data_encoding and .file_encoding which reflect the given + parameters of the same name. The attributes can be used for + introspection by Python programs. + + """ + if file_encoding is None: + file_encoding = data_encoding + data_info = lookup(data_encoding) + file_info = lookup(file_encoding) + sr = StreamRecoder(file, data_info.encode, data_info.decode, + file_info.streamreader, file_info.streamwriter, errors) + # Add attributes to simplify introspection + sr.data_encoding = data_encoding + sr.file_encoding = file_encoding + return sr + +### Helpers for codec lookup + +def getencoder(encoding): + + """ Lookup up the codec for the given encoding and return + its encoder function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).encode + +def getdecoder(encoding): + + """ Lookup up the codec for the given encoding and return + its decoder function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).decode + +def getincrementalencoder(encoding): + + """ Lookup up the codec for the given encoding and return + its IncrementalEncoder class or factory function. + + Raises a LookupError in case the encoding cannot be found + or the codecs doesn't provide an incremental encoder. + + """ + encoder = lookup(encoding).incrementalencoder + if encoder is None: + raise LookupError(encoding) + return encoder + +def getincrementaldecoder(encoding): + + """ Lookup up the codec for the given encoding and return + its IncrementalDecoder class or factory function. + + Raises a LookupError in case the encoding cannot be found + or the codecs doesn't provide an incremental decoder. 
+ + """ + decoder = lookup(encoding).incrementaldecoder + if decoder is None: + raise LookupError(encoding) + return decoder + +def getreader(encoding): + + """ Lookup up the codec for the given encoding and return + its StreamReader class or factory function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).streamreader + +def getwriter(encoding): + + """ Lookup up the codec for the given encoding and return + its StreamWriter class or factory function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).streamwriter + +def iterencode(iterator, encoding, errors='strict', **kwargs): + """ + Encoding iterator. + + Encodes the input strings from the iterator using a IncrementalEncoder. + + errors and kwargs are passed through to the IncrementalEncoder + constructor. + """ + encoder = getincrementalencoder(encoding)(errors, **kwargs) + for input in iterator: + output = encoder.encode(input) + if output: + yield output + output = encoder.encode("", True) + if output: + yield output + +def iterdecode(iterator, encoding, errors='strict', **kwargs): + """ + Decoding iterator. + + Decodes the input strings from the iterator using a IncrementalDecoder. + + errors and kwargs are passed through to the IncrementalDecoder + constructor. + """ + decoder = getincrementaldecoder(encoding)(errors, **kwargs) + for input in iterator: + output = decoder.decode(input) + if output: + yield output + output = decoder.decode(b"", True) + if output: + yield output + +### Helpers for charmap-based codecs + +def make_identity_dict(rng): + + """ make_identity_dict(rng) -> dict + + Return a dictionary where elements of the rng sequence are + mapped to themselves. + + """ + return {i:i for i in rng} + +def make_encoding_map(decoding_map): + + """ Creates an encoding map from a decoding map. + + If a target mapping in the decoding map occurs multiple + times, then that target is mapped to None (undefined mapping), + causing an exception when encountered by the charmap codec + during translation. + + One example where this happens is cp875.py which decodes + multiple character to \u001a. + + """ + m = {} + for k,v in decoding_map.items(): + if not v in m: + m[v] = k + else: + m[v] = None + return m + +### error handlers + +try: + strict_errors = lookup_error("strict") + ignore_errors = lookup_error("ignore") + replace_errors = lookup_error("replace") + xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace") + backslashreplace_errors = lookup_error("backslashreplace") +except LookupError: + # In --disable-unicode builds, these error handler are missing + strict_errors = None + ignore_errors = None + replace_errors = None + xmlcharrefreplace_errors = None + backslashreplace_errors = None + +# Tell modulefinder that using codecs probably needs the encodings +# package +_false = 0 +if _false: + import encodings + +### Tests + +if __name__ == '__main__': + + # Make stdout translate Latin-1 output into UTF-8 output + sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8') + + # Have stdin translate Latin-1 input into UTF-8 input + sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1') diff --git a/lib/assets/Lib/codeop.py b/lib/assets/Lib/codeop.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/codeop.py @@ -0,0 +1,168 @@ +r"""Utilities to compile possibly incomplete Python source code. 
+ +This module provides two interfaces, broadly similar to the builtin +function compile(), which take program text, a filename and a 'mode' +and: + +- Return code object if the command is complete and valid +- Return None if the command is incomplete +- Raise SyntaxError, ValueError or OverflowError if the command is a + syntax error (OverflowError and ValueError can be produced by + malformed literals). + +Approach: + +First, check if the source consists entirely of blank lines and +comments; if so, replace it with 'pass', because the built-in +parser doesn't always do the right thing for these. + +Compile three times: as is, with \n, and with \n\n appended. If it +compiles as is, it's complete. If it compiles with one \n appended, +we expect more. If it doesn't compile either way, we compare the +error we get when compiling with \n or \n\n appended. If the errors +are the same, the code is broken. But if the errors are different, we +expect more. Not intuitive; not even guaranteed to hold in future +releases; but this matches the compiler's behavior from Python 1.4 +through 2.2, at least. + +Caveat: + +It is possible (but not likely) that the parser stops parsing with a +successful outcome before reaching the end of the source; in this +case, trailing symbols may be ignored instead of causing an error. +For example, a backslash followed by two newlines may be followed by +arbitrary garbage. This will be fixed once the API for the parser is +better. + +The two interfaces are: + +compile_command(source, filename, symbol): + + Compiles a single command in the manner described above. + +CommandCompiler(): + + Instances of this class have __call__ methods identical in + signature to compile_command; the difference is that if the + instance compiles program text containing a __future__ statement, + the instance 'remembers' and compiles all subsequent program texts + with the statement in force. + +The module also provides another class: + +Compile(): + + Instances of this class act like the built-in function compile, + but with 'memory' in the sense described above. +""" + +import __future__ + +_features = [getattr(__future__, fname) + for fname in __future__.all_feature_names] + +__all__ = ["compile_command", "Compile", "CommandCompiler"] + +PyCF_DONT_IMPLY_DEDENT = 0x200 # Matches pythonrun.h + +def _maybe_compile(compiler, source, filename, symbol): + # Check for source consisting of only blank lines and comments + for line in source.split("\n"): + line = line.strip() + if line and line[0] != '#': + break # Leave it alone + else: + if symbol != "eval": + source = "pass" # Replace it with a 'pass' statement + + err = err1 = err2 = None + code = code1 = code2 = None + + try: + code = compiler(source, filename, symbol) + except SyntaxError as err: + pass + + try: + code1 = compiler(source + "\n", filename, symbol) + except SyntaxError as e: + err1 = e + + try: + code2 = compiler(source + "\n\n", filename, symbol) + except SyntaxError as e: + err2 = e + + if code: + return code + if not code1 and repr(err1) == repr(err2): + raise err1 + +def _compile(source, filename, symbol): + return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT) + +def compile_command(source, filename="", symbol="single"): + r"""Compile a command and determine whether it is incomplete. 
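+
+    A sketch of typical behaviour (symbol defaults to "single"; an open
+    compound statement is reported as incomplete by returning None):
+
+        >>> compile_command('if True: pass') is None
+        False
+        >>> compile_command('if True:') is None
+        True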
+ + Arguments: + + source -- the source string; may contain \n characters + filename -- optional filename from which source was read; default + "" + symbol -- optional grammar start symbol; "single" (default) or "eval" + + Return value / exceptions raised: + + - Return a code object if the command is complete and valid + - Return None if the command is incomplete + - Raise SyntaxError, ValueError or OverflowError if the command is a + syntax error (OverflowError and ValueError can be produced by + malformed literals). + """ + return _maybe_compile(_compile, source, filename, symbol) + +class Compile: + """Instances of this class behave much like the built-in compile + function, but if one is used to compile text containing a future + statement, it "remembers" and compiles all subsequent program texts + with the statement in force.""" + def __init__(self): + self.flags = PyCF_DONT_IMPLY_DEDENT + + def __call__(self, source, filename, symbol): + codeob = compile(source, filename, symbol, self.flags, 1) + for feature in _features: + if codeob.co_flags & feature.compiler_flag: + self.flags |= feature.compiler_flag + return codeob + +class CommandCompiler: + """Instances of this class have __call__ methods identical in + signature to compile_command; the difference is that if the + instance compiles program text containing a __future__ statement, + the instance 'remembers' and compiles all subsequent program texts + with the statement in force.""" + + def __init__(self,): + self.compiler = Compile() + + def __call__(self, source, filename="", symbol="single"): + r"""Compile a command and determine whether it is incomplete. + + Arguments: + + source -- the source string; may contain \n characters + filename -- optional filename from which source was read; + default "" + symbol -- optional grammar start symbol; "single" (default) or + "eval" + + Return value / exceptions raised: + + - Return a code object if the command is complete and valid + - Return None if the command is incomplete + - Raise SyntaxError, ValueError or OverflowError if the command is a + syntax error (OverflowError and ValueError can be produced by + malformed literals). + """ + return _maybe_compile(self.compiler, source, filename, symbol) diff --git a/lib/assets/Lib/collections/__init__.py b/lib/assets/Lib/collections/__init__.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/collections/__init__.py @@ -0,0 +1,932 @@ +#__all__ = ['deque', 'defaultdict', 'Counter'] + +from _collections import deque, defaultdict + +#from itertools import repeat as _repeat, chain as _chain, starmap as _starmap + +__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList', + 'UserString', 'Counter', 'OrderedDict'] +# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py. +# They should however be considered an integral part of collections.py. + +# fixme brython.. 
there is an issue with _abcoll +#from _abcoll import * +#from _abcoll import Set +from _abcoll import MutableMapping +#import _abcoll +#__all__ += _abcoll.__all__ + +from collections.abc import * +import collections.abc +__all__ += collections.abc.__all__ + +from _collections import deque, defaultdict, namedtuple +from operator import itemgetter as _itemgetter +from keyword import iskeyword as _iskeyword +import sys as _sys +import heapq as _heapq +#fixme brython +#from weakref import proxy as _proxy +from itertools import repeat as _repeat, chain as _chain, starmap as _starmap +from reprlib import recursive_repr as _recursive_repr + +class Set(set): + pass + +class Sequence(list): + pass + +def _proxy(obj): + return obj + +################################################################################ +### OrderedDict +################################################################################ + +class _Link(object): + __slots__ = 'prev', 'next', 'key', '__weakref__' + +class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # The sentinel is in self.__hardroot with a weakref proxy in self.__root. + # The prev links are weakref proxies (to prevent circular references). + # Individual links are kept alive by the hard reference in self.__map. + # Those hard references disappear when a key is deleted from an OrderedDict. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries, but keyword arguments are not recommended because + their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__hardroot = _Link() + self.__root = root = _proxy(self.__hardroot) + root.prev = root.next = root + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, + dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + self.__map[key] = link = Link() + root = self.__root + last = root.prev + link.prev, link.next, link.key = last, root, key + last.next = link + root.prev = proxy(link) + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link = self.__map.pop(key) + link_prev = link.prev + link_next = link.next + link_prev.next = link_next + link_next.prev = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. 
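+        # self.__root is the sentinel link; following the .next pointers
+        # from it visits every real link, and therefore every key, in
+        # insertion order.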
+ root = self.__root + curr = root.next + while curr is not root: + yield curr.key + curr = curr.next + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root.prev + while curr is not root: + yield curr.key + curr = curr.prev + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + root = self.__root + root.prev = root.next = root + self.__map.clear() + dict.clear(self) + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root.prev + link_prev = link.prev + link_prev.next = root + root.prev = link_prev + else: + link = root.next + link_next = link.next + root.next = link_next + link_next.prev = root + key = link.key + del self.__map[key] + value = dict.pop(self, key) + return key, value + + def move_to_end(self, key, last=True): + '''Move an existing element to the end (or beginning if last==False). + + Raises KeyError if the element does not exist. + When last=True, acts like a fast version of self[key]=self.pop(key). + + ''' + link = self.__map[key] + link_prev = link.prev + link_next = link.next + link_prev.next = link_next + link_next.prev = link_prev + root = self.__root + if last: + last = root.prev + link.prev = last + link.next = root + last.next = root.prev = link + else: + first = root.next + link.prev = root + link.next = first + root.next = first.prev = link + + def __sizeof__(self): + sizeof = _sys.getsizeof + n = len(self) + 1 # number of links including root + size = sizeof(self.__dict__) # instance dictionary + size += sizeof(self.__map) * 2 # internal dict and inherited dict + size += sizeof(self.__hardroot) * n # link objects + size += sizeof(self.__root) * n # proxy objects + return size + + #fixme brython.. Issue with _abcoll, which contains MutableMapping + update = __update = MutableMapping.update + keys = MutableMapping.keys + values = MutableMapping.values + items = MutableMapping.items + __ne__ = MutableMapping.__ne__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + #fixme, brython issue + #@_recursive_repr() + def __repr__(self): + 'od.__repr__() <==> repr(od)' + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, list(self.items())) + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. 
+ If not specified, the value defaults to None. + + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return len(self)==len(other) and \ + all(p==q for p, q in zip(self.items(), other.items())) + return dict.__eq__(self, other) + + +######################################################################## +### Counter +######################################################################## + + +def _count_elements(mapping, iterable): + 'Tally elements from the iterable.' + mapping_get = mapping.get + for elem in iterable: + mapping[elem] = mapping_get(elem, 0) + 1 + +#try: # Load C helper function if available +# from _collections import _count_elements +#except ImportError: +# pass + +class Counter(dict): + '''Dict subclass for counting hashable items. Sometimes called a bag + or multiset. Elements are stored as dictionary keys and their counts + are stored as dictionary values. + + >>> c = Counter('abcdeabcdabcaba') # count elements from a string + + >>> c.most_common(3) # three most common elements + [('a', 5), ('b', 4), ('c', 3)] + >>> sorted(c) # list all unique elements + ['a', 'b', 'c', 'd', 'e'] + >>> ''.join(sorted(c.elements())) # list elements with repetitions + 'aaaaabbbbcccdde' + >>> sum(c.values()) # total of all counts + 15 + + >>> c['a'] # count of letter 'a' + 5 + >>> for elem in 'shazam': # update counts from an iterable + ... c[elem] += 1 # by adding 1 to each element's count + >>> c['a'] # now there are seven 'a' + 7 + >>> del c['b'] # remove all 'b' + >>> c['b'] # now there are zero 'b' + 0 + + >>> d = Counter('simsalabim') # make another counter + >>> c.update(d) # add in the second counter + >>> c['a'] # now there are nine 'a' + 9 + + >>> c.clear() # empty the counter + >>> c + Counter() + + Note: If a count is set to zero or reduced to zero, it will remain + in the counter until the entry is deleted or the counter is cleared: + + >>> c = Counter('aaabbc') + >>> c['b'] -= 2 # reduce the count of 'b' by two + >>> c.most_common() # 'b' is still in, but its count is zero + [('a', 3), ('c', 1), ('b', 0)] + + ''' + # References: + # http://en.wikipedia.org/wiki/Multiset + # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html + # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm + # http://code.activestate.com/recipes/259174/ + # Knuth, TAOCP Vol. II section 4.6.3 + + def __init__(self, iterable=None, **kwds): + '''Create a new, empty Counter object. And if given, count elements + from an input iterable. Or, initialize the count from another mapping + of elements to their counts. + + >>> c = Counter() # a new, empty counter + >>> c = Counter('gallahad') # a new counter from an iterable + >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping + >>> c = Counter(a=4, b=2) # a new counter from keyword args + + ''' + #super().__init__() #BE modified since super not supported + dict.__init__(self) + self.update(iterable, **kwds) + + def __missing__(self, key): + 'The count of elements not in the Counter is zero.' + # Needed so that self[missing_item] does not raise KeyError + return 0 + + def most_common(self, n=None): + '''List the n most common elements and their counts from the most + common to the least. If n is None, then list all element counts. 
+ + >>> Counter('abcdeabcdabcaba').most_common(3) + [('a', 5), ('b', 4), ('c', 3)] + + ''' + # Emulate Bag.sortedByCount from Smalltalk + if n is None: + return sorted(self.items(), key=_itemgetter(1), reverse=True) + return _heapq.nlargest(n, self.items(), key=_itemgetter(1)) + + def elements(self): + '''Iterator over elements repeating each as many times as its count. + + >>> c = Counter('ABCABC') + >>> sorted(c.elements()) + ['A', 'A', 'B', 'B', 'C', 'C'] + + # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 + >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) + >>> product = 1 + >>> for factor in prime_factors.elements(): # loop over factors + ... product *= factor # and multiply them + >>> product + 1836 + + Note, if an element's count has been set to zero or is a negative + number, elements() will ignore it. + + ''' + # Emulate Bag.do from Smalltalk and Multiset.begin from C++. + return _chain.from_iterable(_starmap(_repeat, self.items())) + + # Override dict methods where necessary + + @classmethod + def fromkeys(cls, iterable, v=None): + # There is no equivalent method for counters because setting v=1 + # means that no element can have a count greater than one. + raise NotImplementedError( + 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') + + def update(self, iterable=None, **kwds): + '''Like dict.update() but add counts instead of replacing them. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.update('witch') # add elements from another iterable + >>> d = Counter('watch') + >>> c.update(d) # add elements from another counter + >>> c['h'] # four 'h' in which, witch, and watch + 4 + + ''' + # The regular dict.update() operation makes no sense here because the + # replace behavior results in the some of original untouched counts + # being mixed-in with all of the other counts for a mismash that + # doesn't have a straight-forward interpretation in most counting + # contexts. Instead, we implement straight-addition. Both the inputs + # and outputs are allowed to contain zero and negative counts. + + if iterable is not None: + if isinstance(iterable, Mapping): + if self: + self_get = self.get + for elem, count in iterable.items(): + self[elem] = count + self_get(elem, 0) + else: + super().update(iterable) # fast path when counter is empty + else: + _count_elements(self, iterable) + if kwds: + self.update(kwds) + + def subtract(self, iterable=None, **kwds): + '''Like dict.update() but subtracts counts instead of replacing them. + Counts can be reduced below zero. Both the inputs and outputs are + allowed to contain zero and negative counts. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.subtract('witch') # subtract elements from another iterable + >>> c.subtract(Counter('watch')) # subtract elements from another counter + >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch + 0 + >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch + -1 + + ''' + if iterable is not None: + self_get = self.get + if isinstance(iterable, Mapping): + for elem, count in iterable.items(): + self[elem] = self_get(elem, 0) - count + else: + for elem in iterable: + self[elem] = self_get(elem, 0) - 1 + if kwds: + self.subtract(kwds) + + def copy(self): + 'Return a shallow copy.' 
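+        # The constructor accepts a mapping, so Counter(self) reproduces
+        # both the elements and their counts in the new counter.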
+ return self.__class__(self) + + def __reduce__(self): + return self.__class__, (dict(self),) + + def __delitem__(self, elem): + 'Like dict.__delitem__() but does not raise KeyError for missing values.' + if elem in self: + super().__delitem__(elem) + + def __repr__(self): + if not self: + return '%s()' % self.__class__.__name__ + try: + items = ', '.join(map('%r: %r'.__mod__, self.most_common())) + return '%s({%s})' % (self.__class__.__name__, items) + except TypeError: + # handle case where values are not orderable + return '{0}({1!r})'.format(self.__class__.__name__, dict(self)) + + # Multiset-style mathematical operations discussed in: + # Knuth TAOCP Volume II section 4.6.3 exercise 19 + # and at http://en.wikipedia.org/wiki/Multiset + # + # Outputs guaranteed to only include positive counts. + # + # To strip negative and zero counts, add-in an empty counter: + # c += Counter() + + def __add__(self, other): + '''Add counts from two counters. + + >>> Counter('abbb') + Counter('bcc') + Counter({'b': 4, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count + other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __sub__(self, other): + ''' Subtract count, but keep only results with positive counts. + + >>> Counter('abbbc') - Counter('bccd') + Counter({'b': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count - other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count < 0: + result[elem] = 0 - count + return result + + def __or__(self, other): + '''Union is the maximum of value in either of the input counters. + + >>> Counter('abbb') | Counter('bcc') + Counter({'b': 3, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = other_count if count < other_count else count + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __and__(self, other): + ''' Intersection is the minimum of corresponding counts. + + >>> Counter('abbb') & Counter('bcc') + Counter({'b': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = count if count < other_count else other_count + if newcount > 0: + result[elem] = newcount + return result + + +######################################################################## +### ChainMap (helper for configparser) +######################################################################## + +class ChainMap(MutableMapping): + ''' A ChainMap groups multiple dicts (or other mappings) together + to create a single, updateable view. + + The underlying mappings are stored in a list. That list is public and can + accessed or updated using the *maps* attribute. There is no other state. + + Lookups search the underlying mappings successively until a key is found. + In contrast, writes, updates, and deletions only operate on the first + mapping. 
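+
+    A short illustration (the dictionaries used here are arbitrary):
+
+        >>> cm = ChainMap({'user': 'ada'}, {'user': 'default', 'color': 'blue'})
+        >>> cm['user']                 # found in the first mapping
+        'ada'
+        >>> cm['color']                # falls through to the second mapping
+        'blue'
+        >>> cm['color'] = 'red'        # writes only touch maps[0]
+        >>> cm.maps[0]
+        {'user': 'ada', 'color': 'red'}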
+ + ''' + + def __init__(self, *maps): + '''Initialize a ChainMap by setting *maps* to the given mappings. + If no mappings are provided, a single empty dictionary is used. + + ''' + self.maps = list(maps) or [{}] # always at least one map + + def __missing__(self, key): + raise KeyError(key) + + def __getitem__(self, key): + for mapping in self.maps: + try: + return mapping[key] # can't use 'key in mapping' with defaultdict + except KeyError: + pass + return self.__missing__(key) # support subclasses that define __missing__ + + def get(self, key, default=None): + return self[key] if key in self else default + + def __len__(self): + return len(set().union(*self.maps)) # reuses stored hash values if possible + + def __iter__(self): + return iter(set().union(*self.maps)) + + def __contains__(self, key): + return any(key in m for m in self.maps) + + def __bool__(self): + return any(self.maps) + + #fixme, brython + #@_recursive_repr() + def __repr__(self): + return '{0.__class__.__name__}({1})'.format( + self, ', '.join(map(repr, self.maps))) + + def __repr__(self): + return ','.join(str(_map) for _map in self.maps) + + @classmethod + def fromkeys(cls, iterable, *args): + 'Create a ChainMap with a single dict created from the iterable.' + return cls(dict.fromkeys(iterable, *args)) + + def copy(self): + 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' + return self.__class__(self.maps[0].copy(), *self.maps[1:]) + + __copy__ = copy + + def new_child(self): # like Django's Context.push() + 'New ChainMap with a new dict followed by all previous maps.' + return self.__class__({}, *self.maps) + + @property + def parents(self): # like Django's Context.pop() + 'New ChainMap from maps[1:].' + return self.__class__(*self.maps[1:]) + + def __setitem__(self, key, value): + self.maps[0][key] = value + + def __delitem__(self, key): + try: + del self.maps[0][key] + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def popitem(self): + 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' + try: + return self.maps[0].popitem() + except KeyError: + raise KeyError('No keys found in the first mapping.') + + def pop(self, key, *args): + 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' + try: + return self.maps[0].pop(key, *args) + except KeyError: + #raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + raise KeyError('Key not found in the first mapping: %s' % key) + + def clear(self): + 'Clear maps[0], leaving maps[1:] intact.' 
+ self.maps[0].clear() + + +################################################################################ +### UserDict +################################################################################ + +class UserDict(MutableMapping): + + # Start by filling-out the abstract methods + def __init__(self, dict=None, **kwargs): + self.data = {} + if dict is not None: + self.update(dict) + if len(kwargs): + self.update(kwargs) + def __len__(self): return len(self.data) + def __getitem__(self, key): + if key in self.data: + return self.data[key] + if hasattr(self.__class__, "__missing__"): + return self.__class__.__missing__(self, key) + raise KeyError(key) + def __setitem__(self, key, item): self.data[key] = item + def __delitem__(self, key): del self.data[key] + def __iter__(self): + return iter(self.data) + + # Modify __contains__ to work correctly when __missing__ is present + def __contains__(self, key): + return key in self.data + + # Now, add the methods in dicts but not in MutableMapping + def __repr__(self): return repr(self.data) + def copy(self): + if self.__class__ is UserDict: + return UserDict(self.data.copy()) + import copy + data = self.data + try: + self.data = {} + c = copy.copy(self) + finally: + self.data = data + c.update(self) + return c + @classmethod + def fromkeys(cls, iterable, value=None): + d = cls() + for key in iterable: + d[key] = value + return d + +################################################################################ +### UserList +################################################################################ + +class UserList(MutableSequence): + """A more or less complete user-defined wrapper around list objects.""" + def __init__(self, initlist=None): + self.data = [] + if initlist is not None: + # XXX should this accept an arbitrary sequence? 
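+            # Three cases: a plain list is slice-copied, another UserList
+            # is copied via its .data, and any other iterable is converted
+            # with list().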
+ if type(initlist) == type(self.data): + self.data[:] = initlist + elif isinstance(initlist, UserList): + self.data[:] = initlist.data[:] + else: + self.data = list(initlist) + def __repr__(self): return repr(self.data) + def __lt__(self, other): return self.data < self.__cast(other) + def __le__(self, other): return self.data <= self.__cast(other) + def __eq__(self, other): return self.data == self.__cast(other) + def __ne__(self, other): return self.data != self.__cast(other) + def __gt__(self, other): return self.data > self.__cast(other) + def __ge__(self, other): return self.data >= self.__cast(other) + def __cast(self, other): + return other.data if isinstance(other, UserList) else other + def __contains__(self, item): return item in self.data + def __len__(self): return len(self.data) + def __getitem__(self, i): return self.data[i] + def __setitem__(self, i, item): self.data[i] = item + def __delitem__(self, i): del self.data[i] + def __add__(self, other): + if isinstance(other, UserList): + return self.__class__(self.data + other.data) + elif isinstance(other, type(self.data)): + return self.__class__(self.data + other) + return self.__class__(self.data + list(other)) + def __radd__(self, other): + if isinstance(other, UserList): + return self.__class__(other.data + self.data) + elif isinstance(other, type(self.data)): + return self.__class__(other + self.data) + return self.__class__(list(other) + self.data) + def __iadd__(self, other): + if isinstance(other, UserList): + self.data += other.data + elif isinstance(other, type(self.data)): + self.data += other + else: + self.data += list(other) + return self + def __mul__(self, n): + return self.__class__(self.data*n) + __rmul__ = __mul__ + def __imul__(self, n): + self.data *= n + return self + def append(self, item): self.data.append(item) + def insert(self, i, item): self.data.insert(i, item) + def pop(self, i=-1): return self.data.pop(i) + def remove(self, item): self.data.remove(item) + def clear(self): self.data.clear() + def copy(self): return self.__class__(self) + def count(self, item): return self.data.count(item) + def index(self, item, *args): return self.data.index(item, *args) + def reverse(self): self.data.reverse() + def sort(self, *args, **kwds): self.data.sort(*args, **kwds) + def extend(self, other): + if isinstance(other, UserList): + self.data.extend(other.data) + else: + self.data.extend(other) + + + +################################################################################ +### UserString +################################################################################ + +class UserString(Sequence): + def __init__(self, seq): + if isinstance(seq, str): + self.data = seq + elif isinstance(seq, UserString): + self.data = seq.data[:] + else: + self.data = str(seq) + def __str__(self): return str(self.data) + def __repr__(self): return repr(self.data) + def __int__(self): return int(self.data) + def __float__(self): return float(self.data) + def __complex__(self): return complex(self.data) + def __hash__(self): return hash(self.data) + + def __eq__(self, string): + if isinstance(string, UserString): + return self.data == string.data + return self.data == string + def __ne__(self, string): + if isinstance(string, UserString): + return self.data != string.data + return self.data != string + def __lt__(self, string): + if isinstance(string, UserString): + return self.data < string.data + return self.data < string + def __le__(self, string): + if isinstance(string, UserString): + return self.data <= 
string.data + return self.data <= string + def __gt__(self, string): + if isinstance(string, UserString): + return self.data > string.data + return self.data > string + def __ge__(self, string): + if isinstance(string, UserString): + return self.data >= string.data + return self.data >= string + + def __contains__(self, char): + if isinstance(char, UserString): + char = char.data + return char in self.data + + def __len__(self): return len(self.data) + def __getitem__(self, index): return self.__class__(self.data[index]) + def __add__(self, other): + if isinstance(other, UserString): + return self.__class__(self.data + other.data) + elif isinstance(other, str): + return self.__class__(self.data + other) + return self.__class__(self.data + str(other)) + def __radd__(self, other): + if isinstance(other, str): + return self.__class__(other + self.data) + return self.__class__(str(other) + self.data) + def __mul__(self, n): + return self.__class__(self.data*n) + __rmul__ = __mul__ + def __mod__(self, args): + return self.__class__(self.data % args) + + # the following methods are defined in alphabetical order: + def capitalize(self): return self.__class__(self.data.capitalize()) + def center(self, width, *args): + return self.__class__(self.data.center(width, *args)) + def count(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.count(sub, start, end) + def encode(self, encoding=None, errors=None): # XXX improve this? + if encoding: + if errors: + return self.__class__(self.data.encode(encoding, errors)) + return self.__class__(self.data.encode(encoding)) + return self.__class__(self.data.encode()) + def endswith(self, suffix, start=0, end=_sys.maxsize): + return self.data.endswith(suffix, start, end) + def expandtabs(self, tabsize=8): + return self.__class__(self.data.expandtabs(tabsize)) + def find(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.find(sub, start, end) + def format(self, *args, **kwds): + return self.data.format(*args, **kwds) + def index(self, sub, start=0, end=_sys.maxsize): + return self.data.index(sub, start, end) + def isalpha(self): return self.data.isalpha() + def isalnum(self): return self.data.isalnum() + def isdecimal(self): return self.data.isdecimal() + def isdigit(self): return self.data.isdigit() + def isidentifier(self): return self.data.isidentifier() + def islower(self): return self.data.islower() + def isnumeric(self): return self.data.isnumeric() + def isspace(self): return self.data.isspace() + def istitle(self): return self.data.istitle() + def isupper(self): return self.data.isupper() + def join(self, seq): return self.data.join(seq) + def ljust(self, width, *args): + return self.__class__(self.data.ljust(width, *args)) + def lower(self): return self.__class__(self.data.lower()) + def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars)) + def partition(self, sep): + return self.data.partition(sep) + def replace(self, old, new, maxsplit=-1): + if isinstance(old, UserString): + old = old.data + if isinstance(new, UserString): + new = new.data + return self.__class__(self.data.replace(old, new, maxsplit)) + def rfind(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.rfind(sub, start, end) + def rindex(self, sub, start=0, end=_sys.maxsize): + return self.data.rindex(sub, start, end) + def rjust(self, width, *args): + return self.__class__(self.data.rjust(width, 
*args)) + def rpartition(self, sep): + return self.data.rpartition(sep) + def rstrip(self, chars=None): + return self.__class__(self.data.rstrip(chars)) + def split(self, sep=None, maxsplit=-1): + return self.data.split(sep, maxsplit) + def rsplit(self, sep=None, maxsplit=-1): + return self.data.rsplit(sep, maxsplit) + def splitlines(self, keepends=False): return self.data.splitlines(keepends) + def startswith(self, prefix, start=0, end=_sys.maxsize): + return self.data.startswith(prefix, start, end) + def strip(self, chars=None): return self.__class__(self.data.strip(chars)) + def swapcase(self): return self.__class__(self.data.swapcase()) + def title(self): return self.__class__(self.data.title()) + def translate(self, *args): + return self.__class__(self.data.translate(*args)) + def upper(self): return self.__class__(self.data.upper()) + def zfill(self, width): return self.__class__(self.data.zfill(width)) diff --git a/lib/assets/Lib/collections/abc.py b/lib/assets/Lib/collections/abc.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/collections/abc.py @@ -0,0 +1,660 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) for collections, according to PEP 3119. + +Unit tests are in test_collections. +""" + +from abc import ABCMeta, abstractmethod +import sys + +__all__ = ["Hashable", "Iterable", "Iterator", + "Sized", "Container", "Callable", + "Set", "MutableSet", + "Mapping", "MutableMapping", + "MappingView", "KeysView", "ItemsView", "ValuesView", + "Sequence", "MutableSequence", + "ByteString", + ] + +# Private list of types that we want to register with the various ABCs +# so that they will pass tests like: +# it = iter(somebytearray) +# assert isinstance(it, Iterable) +# Note: in other implementations, these types many not be distinct +# and they make have their own implementation specific types that +# are not included on this list. +bytes_iterator = type(iter(b'')) +bytearray_iterator = type(iter(bytearray())) +#callable_iterator = ??? 
+dict_keyiterator = type(iter({}.keys())) +dict_valueiterator = type(iter({}.values())) +dict_itemiterator = type(iter({}.items())) +list_iterator = type(iter([])) +list_reverseiterator = type(iter(reversed([]))) +range_iterator = type(iter(range(0))) +set_iterator = type(iter(set())) +str_iterator = type(iter("")) +tuple_iterator = type(iter(())) +zip_iterator = type(iter(zip())) +## views ## +dict_keys = type({}.keys()) +dict_values = type({}.values()) +dict_items = type({}.items()) +## misc ## +mappingproxy = type(type.__dict__) + + +### ONE-TRICK PONIES ### + +class Hashable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __hash__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Hashable: + for B in C.__mro__: + if "__hash__" in B.__dict__: + if B.__dict__["__hash__"]: + return True + break + return NotImplemented + + +class Iterable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __iter__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterable: + if any("__iter__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +class Iterator(Iterable): + + __slots__ = () + + @abstractmethod + def __next__(self): + raise StopIteration + + def __iter__(self): + return self + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterator: + if (any("__next__" in B.__dict__ for B in C.__mro__) and + any("__iter__" in B.__dict__ for B in C.__mro__)): + return True + return NotImplemented + +Iterator.register(bytes_iterator) +Iterator.register(bytearray_iterator) +#Iterator.register(callable_iterator) +Iterator.register(dict_keyiterator) +Iterator.register(dict_valueiterator) +Iterator.register(dict_itemiterator) +Iterator.register(list_iterator) +Iterator.register(list_reverseiterator) +Iterator.register(range_iterator) +Iterator.register(set_iterator) +Iterator.register(str_iterator) +Iterator.register(tuple_iterator) +Iterator.register(zip_iterator) + +class Sized(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __len__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Sized: + if any("__len__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +class Container(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __contains__(self, x): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Container: + if any("__contains__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +class Callable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __call__(self, *args, **kwds): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Callable: + if any("__call__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +### SETS ### + + +class Set(Sized, Iterable, Container): + + """A set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__ and __len__. + + To override the comparisons (presumably for speed, as the + semantics are fixed), all you have to do is redefine __le__ and + then the other operations will automatically follow suit. 
+ """ + + __slots__ = () + + def __le__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) > len(other): + return False + for elem in self: + if elem not in other: + return False + return True + + def __lt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) < len(other) and self.__le__(other) + + def __gt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return other < self + + def __ge__(self, other): + if not isinstance(other, Set): + return NotImplemented + return other <= self + + def __eq__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) == len(other) and self.__le__(other) + + def __ne__(self, other): + return not (self == other) + + @classmethod + def _from_iterable(cls, it): + '''Construct an instance of the class from any iterable input. + + Must override this method if the class constructor signature + does not accept an iterable for an input. + ''' + return cls(it) + + def __and__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + return self._from_iterable(value for value in other if value in self) + + def isdisjoint(self, other): + for value in other: + if value in self: + return False + return True + + def __or__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + chain = (e for s in (self, other) for e in s) + return self._from_iterable(chain) + + def __sub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in self + if value not in other) + + def __xor__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return (self - other) | (other - self) + + def _hash(self): + """Compute the hash value of a set. + + Note that we don't define __hash__: not all sets are hashable. + But if you define a hashable set type, its __hash__ should + call this function. + + This must be compatible __eq__. + + All sets ought to compare equal if they contain the same + elements, regardless of how they are implemented, and + regardless of the order of the elements; so there's not much + freedom for __eq__ or __hash__. We match the algorithm used + by the built-in frozenset type. + """ + MAX = sys.maxsize + MASK = 2 * MAX + 1 + n = len(self) + h = 1927868237 * (n + 1) + h &= MASK + for x in self: + hx = hash(x) + h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 + h &= MASK + h = h * 69069 + 907133923 + h &= MASK + if h > MAX: + h -= MASK + 1 + if h == -1: + h = 590923713 + return h + +Set.register(frozenset) + + +class MutableSet(Set): + + __slots__ = () + + @abstractmethod + def add(self, value): + """Add an element.""" + raise NotImplementedError + + @abstractmethod + def discard(self, value): + """Remove an element. Do not raise an exception if absent.""" + raise NotImplementedError + + def remove(self, value): + """Remove an element. If not a member, raise a KeyError.""" + if value not in self: + raise KeyError(value) + self.discard(value) + + def pop(self): + """Return the popped value. Raise KeyError if empty.""" + it = iter(self) + try: + value = next(it) + except StopIteration: + raise KeyError + self.discard(value) + return value + + def clear(self): + """This is slow (creates N new iterators!) 
but effective.""" + try: + while True: + self.pop() + except KeyError: + pass + + def __ior__(self, it): + for value in it: + self.add(value) + return self + + def __iand__(self, it): + for value in (self - it): + self.discard(value) + return self + + def __ixor__(self, it): + if it is self: + self.clear() + else: + if not isinstance(it, Set): + it = self._from_iterable(it) + for value in it: + if value in self: + self.discard(value) + else: + self.add(value) + return self + + def __isub__(self, it): + if it is self: + self.clear() + else: + for value in it: + self.discard(value) + return self + +MutableSet.register(set) + + +### MAPPINGS ### + + +class Mapping(Sized, Iterable, Container): + + __slots__ = () + + @abstractmethod + def __getitem__(self, key): + raise KeyError + + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + def __contains__(self, key): + try: + self[key] + except KeyError: + return False + else: + return True + + def keys(self): + return KeysView(self) + + def items(self): + return ItemsView(self) + + def values(self): + return ValuesView(self) + + def __eq__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + return dict(self.items()) == dict(other.items()) + + def __ne__(self, other): + return not (self == other) + +Mapping.register(mappingproxy) + + +class MappingView(Sized): + + def __init__(self, mapping): + self._mapping = mapping + + def __len__(self): + return len(self._mapping) + + def __repr__(self): + return '{0.__class__.__name__}({0._mapping!r})'.format(self) + + +class KeysView(MappingView, Set): + + @classmethod + def _from_iterable(self, it): + return set(it) + + def __contains__(self, key): + return key in self._mapping + + def __iter__(self): + for key in self._mapping: + yield key + +KeysView.register(dict_keys) + + +class ItemsView(MappingView, Set): + + @classmethod + def _from_iterable(self, it): + return set(it) + + def __contains__(self, item): + key, value = item + try: + v = self._mapping[key] + except KeyError: + return False + else: + return v == value + + def __iter__(self): + for key in self._mapping: + yield (key, self._mapping[key]) + +ItemsView.register(dict_items) + + +class ValuesView(MappingView): + + def __contains__(self, value): + for key in self._mapping: + if value == self._mapping[key]: + return True + return False + + def __iter__(self): + for key in self._mapping: + yield self._mapping[key] + +ValuesView.register(dict_values) + + +class MutableMapping(Mapping): + + __slots__ = () + + @abstractmethod + def __setitem__(self, key, value): + raise KeyError + + @abstractmethod + def __delitem__(self, key): + raise KeyError + + __marker = object() + + def pop(self, key, default=__marker): + try: + value = self[key] + except KeyError: + if default is self.__marker: + raise + return default + else: + del self[key] + return value + + def popitem(self): + try: + key = next(iter(self)) + except StopIteration: + raise KeyError + value = self[key] + del self[key] + return key, value + + def clear(self): + try: + while True: + self.popitem() + except KeyError: + pass + + def update(*args, **kwds): + if len(args) > 2: + raise TypeError("update() takes at most 2 positional " + "arguments ({} given)".format(len(args))) + elif not args: + raise TypeError("update() takes at least 1 argument (0 given)") + self = args[0] + other = args[1] if len(args) >= 2 else () + + if isinstance(other, Mapping): + for key in other: + self[key] = other[key] + elif hasattr(other, "keys"): 
+ for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + def setdefault(self, key, default=None): + try: + return self[key] + except KeyError: + self[key] = default + return default + +MutableMapping.register(dict) + + +### SEQUENCES ### + + +class Sequence(Sized, Iterable, Container): + + """All the operations on a read-only sequence. + + Concrete subclasses must override __new__ or __init__, + __getitem__, and __len__. + """ + + __slots__ = () + + @abstractmethod + def __getitem__(self, index): + raise IndexError + + def __iter__(self): + i = 0 + try: + while True: + v = self[i] + yield v + i += 1 + except IndexError: + return + + def __contains__(self, value): + for v in self: + if v == value: + return True + return False + + def __reversed__(self): + for i in reversed(range(len(self))): + yield self[i] + + def index(self, value): + for i, v in enumerate(self): + if v == value: + return i + raise ValueError + + def count(self, value): + return sum(1 for v in self if v == value) + +Sequence.register(tuple) +Sequence.register(str) +Sequence.register(range) + + +class ByteString(Sequence): + + """This unifies bytes and bytearray. + + XXX Should add all their methods. + """ + + __slots__ = () + +ByteString.register(bytes) +ByteString.register(bytearray) + + +class MutableSequence(Sequence): + + __slots__ = () + + @abstractmethod + def __setitem__(self, index, value): + raise IndexError + + @abstractmethod + def __delitem__(self, index): + raise IndexError + + @abstractmethod + def insert(self, index, value): + raise IndexError + + def append(self, value): + self.insert(len(self), value) + + def clear(self): + try: + while True: + self.pop() + except IndexError: + pass + + def reverse(self): + n = len(self) + for i in range(n//2): + self[i], self[n-i-1] = self[n-i-1], self[i] + + def extend(self, values): + for v in values: + self.append(v) + + def pop(self, index=-1): + v = self[index] + del self[index] + return v + + def remove(self, value): + del self[self.index(value)] + + def __iadd__(self, values): + self.extend(values) + return self + +MutableSequence.register(list) +MutableSequence.register(bytearray) # Multiply inheriting, see ByteString diff --git a/lib/assets/Lib/colorsys.py b/lib/assets/Lib/colorsys.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/colorsys.py @@ -0,0 +1,156 @@ +"""Conversion functions between RGB and other color systems. + +This modules provides two functions for each color system ABC: + + rgb_to_abc(r, g, b) --> a, b, c + abc_to_rgb(a, b, c) --> r, g, b + +All inputs and outputs are triples of floats in the range [0.0...1.0] +(with the exception of I and Q, which covers a slightly larger range). +Inputs outside the valid range may cause exceptions or invalid outputs. 
+ +Supported color systems: +RGB: Red, Green, Blue components +YIQ: Luminance, Chrominance (used by composite video signals) +HLS: Hue, Luminance, Saturation +HSV: Hue, Saturation, Value +""" + +# References: +# http://en.wikipedia.org/wiki/YIQ +# http://en.wikipedia.org/wiki/HLS_color_space +# http://en.wikipedia.org/wiki/HSV_color_space + +__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb", + "rgb_to_hsv","hsv_to_rgb"] + +# Some floating point constants + +ONE_THIRD = 1.0/3.0 +ONE_SIXTH = 1.0/6.0 +TWO_THIRD = 2.0/3.0 + +# YIQ: used by composite video signals (linear combinations of RGB) +# Y: perceived grey level (0.0 == black, 1.0 == white) +# I, Q: color components + +def rgb_to_yiq(r, g, b): + y = 0.30*r + 0.59*g + 0.11*b + i = 0.60*r - 0.28*g - 0.32*b + q = 0.21*r - 0.52*g + 0.31*b + return (y, i, q) + +def yiq_to_rgb(y, i, q): + r = y + 0.948262*i + 0.624013*q + g = y - 0.276066*i - 0.639810*q + b = y - 1.105450*i + 1.729860*q + if r < 0.0: + r = 0.0 + if g < 0.0: + g = 0.0 + if b < 0.0: + b = 0.0 + if r > 1.0: + r = 1.0 + if g > 1.0: + g = 1.0 + if b > 1.0: + b = 1.0 + return (r, g, b) + + +# HLS: Hue, Luminance, Saturation +# H: position in the spectrum +# L: color lightness +# S: color saturation + +def rgb_to_hls(r, g, b): + maxc = max(r, g, b) + minc = min(r, g, b) + # XXX Can optimize (maxc+minc) and (maxc-minc) + l = (minc+maxc)/2.0 + if minc == maxc: + return 0.0, l, 0.0 + if l <= 0.5: + s = (maxc-minc) / (maxc+minc) + else: + s = (maxc-minc) / (2.0-maxc-minc) + rc = (maxc-r) / (maxc-minc) + gc = (maxc-g) / (maxc-minc) + bc = (maxc-b) / (maxc-minc) + if r == maxc: + h = bc-gc + elif g == maxc: + h = 2.0+rc-bc + else: + h = 4.0+gc-rc + h = (h/6.0) % 1.0 + return h, l, s + +def hls_to_rgb(h, l, s): + if s == 0.0: + return l, l, l + if l <= 0.5: + m2 = l * (1.0+s) + else: + m2 = l+s-(l*s) + m1 = 2.0*l - m2 + return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD)) + +def _v(m1, m2, hue): + hue = hue % 1.0 + if hue < ONE_SIXTH: + return m1 + (m2-m1)*hue*6.0 + if hue < 0.5: + return m2 + if hue < TWO_THIRD: + return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0 + return m1 + + +# HSV: Hue, Saturation, Value +# H: position in the spectrum +# S: color saturation ("purity") +# V: color brightness + +def rgb_to_hsv(r, g, b): + maxc = max(r, g, b) + minc = min(r, g, b) + v = maxc + if minc == maxc: + return 0.0, 0.0, v + s = (maxc-minc) / maxc + rc = (maxc-r) / (maxc-minc) + gc = (maxc-g) / (maxc-minc) + bc = (maxc-b) / (maxc-minc) + if r == maxc: + h = bc-gc + elif g == maxc: + h = 2.0+rc-bc + else: + h = 4.0+gc-rc + h = (h/6.0) % 1.0 + return h, s, v + +def hsv_to_rgb(h, s, v): + if s == 0.0: + return v, v, v + i = int(h*6.0) # XXX assume int() truncates! + f = (h*6.0) - i + p = v*(1.0 - s) + q = v*(1.0 - s*f) + t = v*(1.0 - s*(1.0-f)) + i = i%6 + if i == 0: + return v, t, p + if i == 1: + return q, v, p + if i == 2: + return p, v, t + if i == 3: + return p, q, v + if i == 4: + return t, p, v + if i == 5: + return v, p, q + # Cannot get here diff --git a/lib/assets/Lib/configparser.py b/lib/assets/Lib/configparser.py new file mode 100644 --- /dev/null +++ b/lib/assets/Lib/configparser.py @@ -0,0 +1,1271 @@ +"""Configuration file parser. + +A configuration file consists of sections, lead by a "[section]" header, +and followed by "name: value" entries, with continuations and such in +the style of RFC 822. + +Intrinsic defaults can be specified by passing them into the +ConfigParser constructor as a dictionary. 
+ +class: + +ConfigParser -- responsible for parsing a list of + configuration files, and managing the parsed database. + + methods: + + __init__(defaults=None, dict_type=_default_dict, allow_no_value=False, + delimiters=('=', ':'), comment_prefixes=('#', ';'), + inline_comment_prefixes=None, strict=True, + empty_lines_in_values=True): + Create the parser. When `defaults' is given, it is initialized into the + dictionary or intrinsic defaults. The keys must be strings, the values + must be appropriate for %()s string interpolation. + + When `dict_type' is given, it will be used to create the dictionary + objects for the list of sections, for the options within a section, and + for the default values. + + When `delimiters' is given, it will be used as the set of substrings + that divide keys from values. + + When `comment_prefixes' is given, it will be used as the set of + substrings that prefix comments in empty lines. Comments can be + indented. + + When `inline_comment_prefixes' is given, it will be used as the set of + substrings that prefix comments in non-empty lines. + + When `strict` is True, the parser won't allow for any section or option + duplicates while reading from a single source (file, string or + dictionary). Default is True. + + When `empty_lines_in_values' is False (default: True), each empty line + marks the end of an option. Otherwise, internal empty lines of + a multiline option are kept as part of the value. + + When `allow_no_value' is True (default: False), options without + values are accepted; the value presented for these is None. + + sections() + Return all the configuration section names, sans DEFAULT. + + has_section(section) + Return whether the given section exists. + + has_option(section, option) + Return whether the given option exists in the given section. + + options(section) + Return list of configuration options for the named section. + + read(filenames, encoding=None) + Read and parse the list of named configuration files, given by + name. A single filename is also allowed. Non-existing files + are ignored. Return list of successfully read files. + + read_file(f, filename=None) + Read and parse one configuration file, given as a file object. + The filename defaults to f.name; it is only used in error + messages (if f has no `name' attribute, the string `' is used). + + read_string(string) + Read configuration from a given string. + + read_dict(dictionary) + Read configuration from a dictionary. Keys are section names, + values are dictionaries with keys and values that should be present + in the section. If the used dictionary type preserves order, sections + and their keys will be added in order. Values are automatically + converted to strings. + + get(section, option, raw=False, vars=None, fallback=_UNSET) + Return a string value for the named option. All % interpolations are + expanded in the return values, based on the defaults passed into the + constructor and the DEFAULT section. Additional substitutions may be + provided using the `vars' argument, which must be a dictionary whose + contents override any pre-existing defaults. If `option' is a key in + `vars', the value from `vars' is used. + + getint(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to an integer. + + getfloat(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to a float. 
+ + getboolean(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to a boolean (currently case + insensitively defined as 0, false, no, off for False, and 1, true, + yes, on for True). Returns False or True. + + items(section=_UNSET, raw=False, vars=None) + If section is given, return a list of tuples with (name, value) for + each option in the section. Otherwise, return a list of tuples with + (section_name, section_proxy) for each section, including DEFAULTSECT. + + remove_section(section) + Remove the given file section and all its options. + + remove_option(section, option) + Remove the given option from the given section. + + set(section, option, value) + Set the given option. + + write(fp, space_around_delimiters=True) + Write the configuration state in .ini format. If + `space_around_delimiters' is True (the default), delimiters + between keys and values are surrounded by spaces. +""" + +from collections.abc import MutableMapping +from collections import OrderedDict as _default_dict, ChainMap as _ChainMap +import functools +import io +import itertools +import re +import sys +import warnings + +__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError", + "NoOptionError", "InterpolationError", "InterpolationDepthError", + "InterpolationSyntaxError", "ParsingError", + "MissingSectionHeaderError", + "ConfigParser", "SafeConfigParser", "RawConfigParser", + "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] + +DEFAULTSECT = "DEFAULT" + +MAX_INTERPOLATION_DEPTH = 10 + + + +# exception classes +class Error(Exception): + """Base class for ConfigParser exceptions.""" + + def _get_message(self): + """Getter for 'message'; needed only to override deprecation in + BaseException. + """ + return self.__message + + def _set_message(self, value): + """Setter for 'message'; needed only to override deprecation in + BaseException. + """ + self.__message = value + + # BaseException.message has been deprecated since Python 2.6. To prevent + # DeprecationWarning from popping up over this pre-existing attribute, use + # a new property that takes lookup precedence. + message = property(_get_message, _set_message) + + def __init__(self, msg=''): + self.message = msg + Exception.__init__(self, msg) + + def __repr__(self): + return self.message + + __str__ = __repr__ + + +class NoSectionError(Error): + """Raised when no section matches a requested option.""" + + def __init__(self, section): + Error.__init__(self, 'No section: %r' % (section,)) + self.section = section + self.args = (section, ) + + +class DuplicateSectionError(Error): + """Raised when a section is repeated in an input source. + + Possible repetitions that raise this exception are: multiple creation + using the API or in strict parsers when a section is found more than once + in a single input file, string or dictionary. + """ + + def __init__(self, section, source=None, lineno=None): + msg = [repr(section), " already exists"] + if source is not None: + message = ["While reading from ", source] + if lineno is not None: + message.append(" [line {0:2d}]".format(lineno)) + message.append(": section ") + message.extend(msg) + msg = message + else: + msg.insert(0, "Section ") + Error.__init__(self, "".join(msg)) + self.section = section + self.source = source + self.lineno = lineno + self.args = (section, source, lineno) + + +class DuplicateOptionError(Error): + """Raised by strict parsers when an option is repeated in an input source. 
+ + Current implementation raises this exception only when an option is found + more than once in a single file, string or dictionary. + """ + + def __init__(self, section, option, source=None, lineno=None): + msg = [repr(option), " in section ", repr(section), + " already exists"] + if source is not None: + message = ["While reading from ", source] + if lineno is not None: + message.append(" [line {0:2d}]".format(lineno)) + message.append(": option ") + message.extend(msg) + msg = message + else: + msg.insert(0, "Option ") + Error.__init__(self, "".join(msg)) + self.section = section + self.option = option + self.source = source + self.lineno = lineno + self.args = (section, option, source, lineno) + + +class NoOptionError(Error): + """A requested option was not found.""" + + def __init__(self, option, section): + Error.__init__(self, "No option %r in section: %r" % + (option, section)) + self.option = option + self.section = section + self.args = (option, section) + + +class InterpolationError(Error): + """Base class for interpolation-related exceptions.""" + + def __init__(self, option, section, msg): + Error.__init__(self, msg) + self.option = option + self.section = section + self.args = (option, section, msg) + + +class InterpolationMissingOptionError(InterpolationError): + """A string substitution required a setting which was not available.""" + + def __init__(self, option, section, rawval, reference): + msg = ("Bad value substitution:\n" + "\tsection: [%s]\n" + "\toption : %s\n" + "\tkey : %s\n" + "\trawval : %s\n" + % (section, option, reference, rawval)) + InterpolationError.__init__(self, option, section, msg) + self.reference = reference + self.args = (option, section, rawval, reference) + + +class InterpolationSyntaxError(InterpolationError): + """Raised when the source text contains invalid syntax. + + Current implementation raises this exception when the source text into + which substitutions are made does not conform to the required syntax. + """ + + +class InterpolationDepthError(InterpolationError): + """Raised when substitutions are nested too deeply.""" + + def __init__(self, option, section, rawval): + msg = ("Value interpolation too deeply recursive:\n" + "\tsection: [%s]\n" + "\toption : %s\n" + "\trawval : %s\n" + % (section, option, rawval)) + InterpolationError.__init__(self, option, section, msg) + self.args = (option, section, rawval) + + +class ParsingError(Error): + """Raised when a configuration file does not follow legal syntax.""" + + def __init__(self, source=None, filename=None): + # Exactly one of `source'/`filename' arguments has to be given. + # `filename' kept for compatibility. + if filename and source: + raise ValueError("Cannot specify both `filename' and `source'. " + "Use `source'.") + elif not filename and not source: + raise ValueError("Required argument `source' not given.") + elif filename: + source = filename + Error.__init__(self, 'Source contains parsing errors: %s' % source) + self.source = source + self.errors = [] + self.args = (source, ) + + @property + def filename(self): + """Deprecated, use `source'.""" + warnings.warn( + "The 'filename' attribute will be removed in future versions. " + "Use 'source' instead.", + DeprecationWarning, stacklevel=2 + ) + return self.source + + @filename.setter + def filename(self, value): + """Deprecated, user `source'.""" + warnings.warn( + "The 'filename' attribute will be removed in future versions. 
" + "Use 'source' instead.", + DeprecationWarning, stacklevel=2 + ) + self.source = value + + def append(self, lineno, line): + self.errors.append((lineno, line)) + self.message += '\n\t[line %2d]: %s' % (lineno, line) + + +class MissingSectionHeaderError(ParsingError): + """Raised when a key-value pair is found before any section header.""" + + def __init__(self, filename, lineno, line): + Error.__init__( + self, + 'File contains no section headers.\nfile: %s, line: %d\n%r' % + (filename, lineno, line)) + self.source = filename + self.lineno = lineno + self.line = line + self.args = (filename, lineno, line) + + +# Used in parser getters to indicate the default behaviour when a specific +# option is not found it to raise an exception. Created to enable `None' as +# a valid fallback value. +_UNSET = object() + + +class Interpolation: + """Dummy interpolation that passes the value through with no changes.""" + + def before_get(self, parser, section, option, value, defaults): + return value + + def before_set(self, parser, section, option, value): + return value + + def before_read(self, parser, section, option, value): + return value + + def before_write(self, parser, section, option, value): + return value + + +class BasicInterpolation(Interpolation): + """Interpolation as implemented in the classic ConfigParser. + + The option values can contain format strings which refer to other values in + the same section, or values in the special default section. + + For example: + + something: %(dir)s/whatever + + would resolve the "%(dir)s" to the value of dir. All reference + expansions are done late, on demand. If a user needs to use a bare % in + a configuration file, she can escape it by writing %%. Other % usage + is considered a user error and raises `InterpolationSyntaxError'.""" + + _KEYCRE = re.compile(r"%\(([^)]+)\)s") + + def before_get(self, parser, section, option, value, defaults): + L = [] + self._interpolate_some(parser, option, L, value, section, defaults, 1) + return ''.join(L) + + def before_set(self, parser, section, option, value): + tmp_value = value.replace('%%', '') # escaped percent signs + tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax + if '%' in tmp_value: + raise ValueError("invalid interpolation syntax in %r at " + "position %d" % (value, tmp_value.find('%'))) + return value + + def _interpolate_some(self, parser, option, accum, rest, section, map, + depth): + if depth > MAX_INTERPOLATION_DEPTH: + raise InterpolationDepthError(option, section, rest) + while rest: + p = rest.find("%") + if p < 0: + accum.append(rest) + return + if p > 0: + accum.append(rest[:p]) + rest = rest[p:] + # p is no longer used + c = rest[1:2] + if c == "%": + accum.append("%") + rest = rest[2:] + elif c == "(": + m = self._KEYCRE.match(rest) + if m is None: + raise InterpolationSyntaxError(option, section, + "bad interpolation variable reference %r" % rest) + var = parser.optionxform(m.group(1)) + rest = rest[m.end():] + try: + v = map[var] + except KeyError: + raise InterpolationMissingOptionError( + option, section, rest, var) + if "%" in v: + self._interpolate_some(parser, option, accum, v, + section, map, depth + 1) + else: + accum.append(v) + else: + raise InterpolationSyntaxError( + option, section, + "'%%' must be followed by '%%' or '(', " + "found: %r" % (rest,)) + + +class ExtendedInterpolation(Interpolation): + """Advanced variant of interpolation, supports the syntax used by + `zc.buildout'. 
Enables interpolation between sections.""" + + _KEYCRE = re.compile(r"\$\{([^}]+)\}") + + def before_get(self, parser, section, option, value, defaults): + L = [] + self._interpolate_some(parser, option, L, value, section, defaults, 1) + return ''.join(L) + + def before_set(self, parser, section, option, value): + tmp_value = value.replace('$$', '') # escaped dollar signs + tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax + if '$' in tmp_value: + raise ValueError("invalid interpolation syntax in %r at " + "position %d" % (value, tmp_value.find('%'))) + return value + + def _interpolate_some(self, parser, option, accum, rest, section, map, + depth): + if depth > MAX_INTERPOLATION_DEPTH: + raise InterpolationDepthError(option, section, rest) + while rest: + p = rest.find("$") + if p < 0: + accum.append(rest) + return + if p > 0: + accum.append(rest[:p]) + rest = rest[p:] + # p is no longer used + c = rest[1:2] + if c == "$": + accum.append("$") + rest = rest[2:] + elif c == "{": + m = self._KEYCRE.match(rest) + if m is None: + raise InterpolationSyntaxError(option, section, + "bad interpolation variable reference %r" % rest) + path = m.group(1).split(':') + rest = rest[m.end():] + sect = section + opt = option + try: + if len(path) == 1: + opt = parser.optionxform(path[0]) + v = map[opt] + elif len(path) == 2: + sect = path[0] + opt = parser.optionxform(path[1]) + v = parser.get(sect, opt, raw=True) + else: + raise InterpolationSyntaxError( + option, section, + "More than one ':' found: %r" % (rest,)) + except (KeyError, NoSectionError, NoOptionError): + raise InterpolationMissingOptionError( + option, section, rest, ":".join(path)) + if "$" in v: + self._interpolate_some(parser, opt, accum, v, sect, + dict(parser.items(sect, raw=True)), + depth + 1) + else: + accum.append(v) + else: + raise InterpolationSyntaxError( + option, section, + "'$' must be followed by '$' or '{', " + "found: %r" % (rest,)) + + +class LegacyInterpolation(Interpolation): + """Deprecated interpolation used in old versions of ConfigParser. + Use BasicInterpolation or ExtendedInterpolation instead.""" + + _KEYCRE = re.compile(r"%\(([^)]*)\)s|.") + + def before_get(self, parser, section, option, value, vars): + rawval = value + depth = MAX_INTERPOLATION_DEPTH + while depth: # Loop through this until it's done + depth -= 1 + if value and "%(" in value: + replace = functools.partial(self._interpolation_replace, + parser=parser) + value = self._KEYCRE.sub(replace, value) + try: + value = value % vars + except KeyError as e: + raise InterpolationMissingOptionError( + option, section, rawval, e.args[0]) + else: + break + if value and "%(" in value: + raise InterpolationDepthError(option, section, rawval) + return value + + def before_set(self, parser, section, option, value): + return value + + @staticmethod + def _interpolation_replace(match, parser): + s = match.group(1) + if s is None: + return match.group() + else: + return "%%(%s)s" % parser.optionxform(s) + + +class RawConfigParser(MutableMapping): + """ConfigParser that does not do interpolation.""" + + # Regular expressions for parsing section headers and options + _SECT_TMPL = r""" + \[ # [ + (?P
    [^]]+) # very permissive! + \] # ] + """ + _OPT_TMPL = r""" + (?P