# HG changeset patch
# User Petter T.
# Date 1682361279 -7200
#      Mon Apr 24 20:34:39 2023 +0200
# Node ID d2de83a801656ada1836f718d4a95d533d5843ae
# Parent  edbe81ee00c536a72536cab1a30bd046f6431488
Experimental stack VM for GNU Octave

The patch adds a bytecode stack-based VM for executing Octave user
functions.  The user command '__enable_vm_eval__(bool)' disables or
enables the VM dynamically.

* libinterp/parse-tree/
  * pt-bytecode-vm.cc: The bytecode VM.
  * pt-bytecode-vm-walk.cc: 'walker' class that compiles 'tree' nodes
    to bytecode.
* libinterp/corefcn/compile.cc:
  * Auxiliary test and debug functions for the VM:
    __compile, __print_bytecode, __vm_clear_cache__, __ref_count,
    __dummy_mark_1, __dummy_mark_2, __enable_vm_eval__, __vm_profile
* test/compile/:
  * Tests for the VM.
* test/compile-bench/:
  * Microbenchmarks for VM development.
* build-aux/mk-octave-config-h.sh: Branch hint macros OCTAVE_LIKELY
  and OCTAVE_UNLIKELY using __builtin_expect.
* libinterp/corefcn/:
  * call-stack.cc: Handle dynamic bytecode stack frames.
  * interpreter.cc: Getter for global varrefs.
  * load-path.cc: Signal VM function caching when the path is cleared.
  * load-path.h: Counter for load path updates.
  * stack-frame.cc: Dynamic lazy stack frame for the VM,
    'bytecode_fcn_stack_frame'.
  * variables.cc: Clear the VM function cache on calls to "clear".
* libinterp/octave-value:
  * ov.h: Methods specific to the VM hidden as protected.
    simple_subsasgn() and simple_subsref(): Specializations that avoid
    std::list and std::string ctors.
  * ovl.h: Getter first_or_nil_ov () to get either the first element
    or a nil octave_value.
  * ov-inline.h: octave_value inline "factory".
  * ov-vm.h: octave_value-ish container to help GCC's poor optimizer
    in the VM.
  * ov-range.h: octave_trivial_range for fast ranges that can be
    stored as ints.
  * ov-usr-fcn.cc: Compile the user function on calls.
  * ov-usr-fcn.h: Field to store bytecode in.
  * Implementations of VM-specific virtual functions in various
    octave_base_value subclasses.
* libinterp/parse-tree/:
  * pt-binop.h: Marker for "braindead" short circuits.
  * pt-eval.cc: Some functions to access the call stack.
    execute_user_function(): Execute in the VM if compiled.
  * pt-tm-const.cc: Specializations to construct a matrix from a
    pointer range.
* liboctave/util/lo-array-errwarn.cc: Function to allocate a copy on
  the heap of index_exception subclasses.
* bootstrap: Import gethrxtime from gnulib for the profiler.
* liboctave/wrappers/time-wrappers.c: Wrapper for gethrxtime.

diff -r edbe81ee00c5 -r d2de83a80165 bootstrap.conf
--- a/bootstrap.conf	Sat Jun 03 20:28:49 2023 -0700
+++ b/bootstrap.conf	Mon Apr 24 20:34:39 2023 +0200
@@ -56,6 +56,7 @@
   getopt-gnu
   getrusage
   gettimeofday
+  gethrxtime
   glob
   intprops
   isatty
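Before the file-by-file changes, a quick illustration of the branch-hint
macros this patch adds to octave-config.h (see the
build-aux/mk-octave-config-h.sh hunk below).  The surrounding loop is
illustrative only, not code from the patch:

    // Hint the compiler about the hot path in a dispatch loop.
    while (OCTAVE_LIKELY (instructions_remaining))
      {
        if (OCTAVE_UNLIKELY (error_pending))
          handle_error ();                // cold path, moved off the hot trace
        else
          dispatch_next_instruction ();   // hot path
      }

With GCC the macros expand to __builtin_expect (!!(x), 1) or (..., 0),
which only affects generated code layout; on other compilers they degrade
to plain !!(x), so behavior is identical either way.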
diff -r edbe81ee00c5 -r d2de83a80165 build-aux/mk-octave-config-h.sh
--- a/build-aux/mk-octave-config-h.sh	Sat Jun 03 20:28:49 2023 -0700
+++ b/build-aux/mk-octave-config-h.sh	Mon Apr 24 20:34:39 2023 +0200
@@ -185,6 +185,20 @@
   cat << EOF

+/* Branch hint macros for use in if conditions.
+   Returns the logical value of x.  */
+#  if defined (__GNUC__)
+#    define OCTAVE_LIKELY(x) __builtin_expect (!!(x), 1)
+#  else
+#    define OCTAVE_LIKELY(x) !!(x)
+#  endif
+
+#  if defined (__GNUC__)
+#    define OCTAVE_UNLIKELY(x) __builtin_expect (!!(x), 0)
+#  else
+#    define OCTAVE_UNLIKELY(x) !!(x)
+#  endif
+
 /* Enable inline functions or typedefs that provide access to
    symbols that have been moved to the octave namespace so that
    users of Octave may continue to access symbols using the

diff -r edbe81ee00c5 -r d2de83a80165 libinterp/corefcn/call-stack.cc
--- a/libinterp/corefcn/call-stack.cc	Sat Jun 03 20:28:49 2023 -0700
+++ b/libinterp/corefcn/call-stack.cc	Mon Apr 24 20:34:39 2023 +0200
@@ -30,6 +30,7 @@
 #include "lo-regexp.h"
 #include "str-vec.h"

+#include "pt-bytecode-vm.h"
 #include "builtin-defun-decls.h"
 #include "call-stack.h"
 #include "defun.h"
@@ -448,6 +449,30 @@
   m_curr_frame = new_frame_idx;
 }

+void call_stack::push (vm &vm, octave_user_function *fcn, int nargout, int nargin)
+{
+  std::size_t new_frame_idx;
+  std::shared_ptr<stack_frame> parent_link;
+  std::shared_ptr<stack_frame> static_link;
+
+  get_new_frame_index_and_links (new_frame_idx, parent_link, static_link);
+
+  std::shared_ptr<stack_frame> new_frame =
+    stack_frame::create_bytecode (
+      m_evaluator,
+      fcn,
+      vm,
+      new_frame_idx, // ??? index
+      parent_link,
+      static_link,
+      nargout,
+      nargin);
+
+  m_cs.push_back (new_frame);
+
+  m_curr_frame = new_frame_idx;
+}
+
 void call_stack::push (octave_function *fcn)
 {
   std::size_t new_frame_idx;
@@ -493,7 +518,7 @@
   std::shared_ptr<stack_frame> frm = m_cs[user_frame];

   if (! (frm->is_user_fcn_frame () || frm->is_user_script_frame ()
-         || frm->is_scope_frame ()))
+         || frm->is_scope_frame () || frm->is_bytecode_fcn_frame ()))
     {
       frm = frm->static_link ();
@@ -533,7 +558,8 @@
   if (! (frm && (frm->is_user_fcn_frame ()
                  || frm->is_user_script_frame ()
-                 || frm->is_scope_frame ())))
+                 || frm->is_scope_frame ()
+                 || frm->is_bytecode_fcn_frame ())))
     error ("call_stack::dbupdown: invalid initial frame in call stack!");

   // Use index into the call stack to begin the search.  At this point
@@ -569,7 +595,7 @@
       frm = m_cs[xframe];

       if (frm->is_user_fcn_frame () || frm->is_user_script_frame ()
-          || frm->is_scope_frame ())
+          || frm->is_scope_frame () || frm->is_bytecode_fcn_frame ())
         {
           last_good_frame = xframe;
@@ -653,7 +679,7 @@
       std::shared_ptr<stack_frame> frm = m_cs[n];

       if (frm->is_user_script_frame () || frm->is_user_fcn_frame ()
-          || frm->is_scope_frame ())
+          || frm->is_scope_frame () || frm->is_bytecode_fcn_frame ())
         {
           if (frm->index () == curr_frame)
             curr_user_frame = frames.size ();
@@ -688,7 +714,7 @@
   for (const auto& frm : frames)
     {
       if (frm->is_user_script_frame () || frm->is_user_fcn_frame ()
-          || frm->is_scope_frame ())
+          || frm->is_scope_frame () || frm->is_bytecode_fcn_frame ())
         {
           retval.push_back (frame_info (frm->fcn_file_name (),
                                         frm->fcn_name (print_subfn),
@@ -726,7 +752,7 @@
   for (const auto& frm : frames)
     {
       if (frm->is_user_script_frame () || frm->is_user_fcn_frame ()
-          || frm->is_scope_frame ())
+          || frm->is_scope_frame () || frm->is_bytecode_fcn_frame ())
         {
           file(k) = frm->fcn_file_name ();
           name(k) = frm->fcn_name (print_subfn);
@@ -772,6 +798,24 @@
     }
 }

+std::shared_ptr<stack_frame> call_stack::pop_return ()
+{
+  if (! m_cs.empty ())
+    {
+      std::shared_ptr<stack_frame> elt = std::move (m_cs.back ());
+      m_cs.pop_back ();
+
+      m_curr_frame = elt->parent_frame_index ();
+
+      if (elt->is_closure_context ())
+        elt->break_closure_cycles (elt);
+
+      return elt;
+    }
+
+  return nullptr;
+}
+
 void call_stack::clear ()
 {
   while (! m_cs.empty ())
@@ -1131,6 +1175,24 @@
   m_cs[m_curr_frame]->set_auto_fcn_var (avt, val);
 }

+void
+call_stack::set_nargin (int nargin)
+{
+  m_cs[m_curr_frame]->set_nargin (nargin);
+}
+
+void
+call_stack::set_nargout (int nargout)
+{
+  m_cs[m_curr_frame]->set_nargout (nargout);
+}
+
+void
+call_stack::set_active_bytecode_ip (int ip)
+{
+  m_cs[m_curr_frame]->set_active_bytecode_ip (ip);
+}
+
 octave_value call_stack::get_auto_fcn_var (stack_frame::auto_var_type avt) const
 {
   return m_cs[m_curr_frame]->get_auto_fcn_var (avt);

diff -r edbe81ee00c5 -r d2de83a80165 libinterp/corefcn/call-stack.h
--- a/libinterp/corefcn/call-stack.h	Sat Jun 03 20:28:49 2023 -0700
+++ b/libinterp/corefcn/call-stack.h	Mon Apr 24 20:34:39 2023 +0200
@@ -49,6 +49,7 @@
 class tree_evaluator;
 class symbol_info_list;
 class unwind_protect;
+class vm;

 class
 OCTINTERP_API
@@ -166,6 +167,8 @@

   void push (octave_function *fcn);

+  void push (vm &vm, octave_user_function *fcn, int nargout, int nargin);
+
   void set_location (int l, int c)
   {
     if (! m_cs.empty ())
@@ -242,6 +245,8 @@

   void pop ();

+  std::shared_ptr<stack_frame> pop_return ();
+
   void clear ();

   symbol_info_list all_variables ();
@@ -299,8 +304,13 @@
   void set_auto_fcn_var (stack_frame::auto_var_type avt,
                          const octave_value& val);

+  void set_nargin (int nargin);
+  void set_nargout (int nargout);
+
   octave_value get_auto_fcn_var (stack_frame::auto_var_type avt) const;

+  void set_active_bytecode_ip (int ip);
+
 private:

   void get_new_frame_index_and_links
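The new call-stack entry points above are what the VM uses around each
bytecode call.  A minimal sketch of the intended sequence — `cs` is the
interpreter's call_stack, the surrounding loop is paraphrased, and only the
methods named here are from this patch:

    // Hypothetical VM call site:
    cs.push (the_vm, callee_fcn, nargout, nargin); // push bytecode_fcn_stack_frame
    cs.set_nargin (nargin);                        // auto variables live in the frame
    cs.set_nargout (nargout);
    // ... execute bytecode; before nested calls, record the return address:
    cs.set_active_bytecode_ip (ip);
    // ... on return, pop the frame and let it rescue its VM stack slots in
    // case the frame object outlives the call (e.g. a saved closure context):
    std::shared_ptr<stack_frame> frm = cs.pop_return ();
    frm->vm_unwinds ();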
diff -r edbe81ee00c5 -r d2de83a80165 libinterp/corefcn/compile.cc
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libinterp/corefcn/compile.cc	Mon Apr 24 20:34:39 2023 +0200
@@ -0,0 +1,453 @@
+////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 1996-2023 The Octave Project Developers
+//
+// See the file COPYRIGHT.md in the top-level directory of this
+// distribution or <https://octave.org/copyright/>.
+//
+// This file is part of Octave.
+//
+// Octave is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Octave is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Octave; see the file COPYING.  If not, see
+// <https://www.gnu.org/licenses/>.
+//
+////////////////////////////////////////////////////////////////////////
+
+#if defined (HAVE_CONFIG_H)
+#  include "config.h"
+#endif
+
+#include "ovl.h"
+#include "ov.h"
+#include "defun.h"
+#include "variables.h"
+#include "interpreter.h"
+
+#include "pt-bytecode-vm.h"
+#include "pt-bytecode-walk.h"
+
+OCTAVE_BEGIN_NAMESPACE(octave)
+
+// Cleverly hidden in pt-bytecode-vm.cc to prevent inlining here
+extern "C" void dummy_mark_1 (void);
+extern "C" void dummy_mark_2 (void);
+
+DEFUN (__dummy_mark_1, , ,
+       doc: /* -*- texinfo -*-
+@deftypefn {} {} __dummy_mark_1 ()
+
+Dummy function that calls the C function void dummy_mark_1 (void),
+which does nothing.
+
+Useful for e.g. marking start and end points for Callgrind analysis
+or as an entry point for gdb.
+
+@end deftypefn */)
+{
+  dummy_mark_1 ();
+
+  return {};
+}
+
+DEFUN (__dummy_mark_2, , ,
+       doc: /* -*- texinfo -*-
+@deftypefn {} {} __dummy_mark_2 ()
+
+Dummy function that calls the C function void dummy_mark_2 (void),
+which does nothing.
+
+Useful for e.g. marking start and end points for Callgrind analysis
+or as an entry point for gdb.
+
+@end deftypefn */)
+{
+  dummy_mark_2 ();
+
+  return {};
+}
+
+DEFUN (__vm_clear_cache__, , ,
+       doc: /* -*- texinfo -*-
+@deftypefn {} {@var{val} =} __vm_clear_cache__ ()
+
+Internal function.
+
+@end deftypefn */)
+{
+  octave::load_path::signal_clear_fcn_cache ();
+
+  return octave_value {true};
+}
+
+DEFUN (__vm_print_trace, , ,
+       doc: /* -*- texinfo -*-
+@deftypefn {} {@var{prints_trace} =} __vm_print_trace ()
+
+Internal function.
+
+Print a debug trace from the VM.  Toggles on or off each call.
+
+There has to be a breakpoint set in some file for the trace
+to actually print anything.
+
+Returns true if a trace will be printed from now on, false otherwise.
+
+@end deftypefn */)
+{
+  vm::m_trace_enabled = ! vm::m_trace_enabled;
+
+  return octave_value {vm::m_trace_enabled};
+}
+
+DEFUN (__ref_count, args, ,
+       doc: /* -*- texinfo -*-
+@deftypefn {} {@var{count} =} __ref_count (@var{obj})
+
+Internal function.
+
+Returns the reference count for an object.
+
+@end deftypefn */)
+{
+  int nargin = args.length ();
+
+  if (nargin != 1)
+    print_usage ();
+
+  octave_value ov = args (0);
+
+  return octave_value {ov.get_count ()};
+}
+
+DEFMETHOD (__vm_is_executing, interp, , ,
+           doc: /* -*- texinfo -*-
+@deftypefn {} {@var{is_executing} =} __vm_is_executing ()
+
+Internal function.
+
+Returns true if the VM is executing the function calling
+__vm_is_executing (), false otherwise.
+
+@end deftypefn */)
+{
+  bool bytecode_running = interp.get_evaluator ().get_current_stack_frame ()->is_bytecode_fcn_frame ();

+  return octave_value {bytecode_running};
+}
+
+DEFMETHOD (__vm_profile, interp, args, ,
+           doc: /* -*- texinfo -*-
+@deftypefn  {} {} __vm_profile on
+@deftypefnx {} {} __vm_profile off
+@deftypefnx {} {} __vm_profile resume
+@deftypefnx {} {} __vm_profile clear
+@deftypefnx {} {@var{T} =} __vm_profile ("info")
+@deftypefnx {} {} __vm_profile
+
+Internal function.
+
+Profile code running in the VM.
+
+@table @code
+@item profile on
+Start the profiler, clearing all previously collected data if there is
+any.
+
+@item profile off
+Stop profiling.  The collected data can later be retrieved and examined
+with @code{T = profile ("info")}.
+
+@item profile clear
+Clear all collected profiler data.
+
+@item profile resume
+Restart profiling without clearing the old data.  All newly collected
+statistics are added to the existing ones.
+
+@item profile
+Toggles between profiling and printing the result of the profiler.
+Clears the profiler on each print.
+
+@item info
+Prints the profiler data.
+
+Note that output to a variable is not implemented yet.
+
+@end table
+
+@end deftypefn */)
+{
+  int nargin = args.length ();
+
+  // Unless a "profiler enabled" flag is added to the evaluator,
+  // the VM profiler needs the debugger to be active for it to actually
+  // be able to profile.
+  if (! interp.get_evaluator ().debug_mode_active ())
+    warning ("As a workaround, at least one breakpoint has to be set"
+             " in any file (preferably not one being profiled) for the"
+             " profiler to actually profile anything.");
+
+  std::string arg0;
+
+  if (nargin >= 1)
+    arg0 = args (0).string_value ();
+
+  if (! arg0.size ())
+    {
+      if (! vm::m_vm_profiler)
+        {
+          vm::m_vm_profiler = std::make_shared<vm_profiler> ();
+
+          vm::m_profiler_enabled = true;
+        }
+      else
+        {
+          vm::m_profiler_enabled = false;
+          auto p = vm::m_vm_profiler;
+          vm::m_vm_profiler = nullptr;
+
+          auto cpy = *p;
+          cpy.print_to_stdout ();
+        }
+    }
+  else if (arg0 == "on")
+    {
+      vm::m_profiler_enabled = false;
+      vm::m_vm_profiler = std::make_shared<vm_profiler> ();
+      vm::m_profiler_enabled = true;
+    }
+  else if (arg0 == "resume")
+    {
+      if (! vm::m_vm_profiler)
+        vm::m_vm_profiler = std::make_shared<vm_profiler> ();
+
+      vm::m_profiler_enabled = true;
+    }
+  else if (arg0 == "off")
+    {
+      vm::m_profiler_enabled = false;
+    }
+  else if (arg0 == "clear")
+    {
+      vm::m_profiler_enabled = false;
+      vm::m_vm_profiler = nullptr;
+    }
+  else if (arg0 == "info")
+    {
+      auto p_vm_profiler = vm::m_vm_profiler;
+      if (p_vm_profiler)
+        {
+          auto cpy = *p_vm_profiler;
+          cpy.print_to_stdout ();
+        }
+      else
+        warning ("Nothing recorded.");
+    }
+  else
+    print_usage ();
+
+  return octave_value {true};
+}
+
+DEFMETHOD (__print_bytecode, interp, args, ,
+           doc: /* -*- texinfo -*-
+@deftypefn {} {@var{success} =} __print_bytecode (@var{fn_name})
+
+Internal function.
+
+Prints the bytecode of a function, if any.
+
+@end deftypefn */)
+{
+  int nargin = args.length ();
+
+  if (nargin != 1)
+    print_usage ();
+
+  std::string fn_name = args(0).string_value ();
+  symbol_table& symtab = interp.get_symbol_table ();
+
+  octave_value ov = symtab.find_function (fn_name);
+
+  if (! ov.is_defined ())
+    {
+      error ("Function not defined: %s", fn_name.c_str ());
+    }
+
+  octave_user_function *ufn = ov.user_function_value ();
+
+  if (! ufn || ! ufn->is_user_function ())
+    {
+      error ("Function not a user function: %s", fn_name.c_str ());
+    }
+
+  if (! ufn->is_compiled () && V__enable_vm_eval__)
+    compile_user_function (*ufn, 0);
+  else if (! ufn->is_compiled ())
+    error ("Function not compiled: %s", fn_name.c_str ());
+
+  if (! ufn->is_compiled ())
+    error ("Function can't be compiled: %s", fn_name.c_str ());
+
+  auto bc = ufn->get_bytecode ();
+
+  print_bytecode (bc);
+
+  return octave_value {true};
+}
+
+DEFMETHOD (__compile, interp, args, ,
+           doc: /* -*- texinfo -*-
+@deftypefn  {} {@var{success} =} __compile (@var{fn_name})
+@deftypefnx {} {@var{success} =} __compile (@var{fn_name}, "clear")
+@deftypefnx {} {@var{success} =} __compile (@var{fn_name}, "print")
+
+Compile the specified function to bytecode.
+
+The compiled function and its subfunctions will be executed
+by the VM when called.
+
+Returns true on success, otherwise false.
+
+The @qcode{"print"} option prints the bytecode after compilation.
+
+The @qcode{"clear"} option removes the bytecode from the function instead.
+
+@end deftypefn */)
+{
+  int nargin = args.length ();
+
+  if (! nargin)
+    print_usage ();
+
+  std::string fcn_to_compile;
+  bool do_clear = false;
+  bool do_print = false;
+
+  for (int i = 0; i < nargin; i++)
+    {
+      auto arg = args(i);
+
+      if (! arg.is_string ())
+        error ("Non-string argument");
+
+      std::string arg_s = arg.string_value ();
+
+      if (i == 0)
+        {
+          fcn_to_compile = arg_s;
+          continue;
+        }
+
+      if (arg_s == "clear")
+        do_clear = true;
+
+      if (arg_s == "print")
+        do_print = true;
+    }
+
+  if (do_clear)
+    {
+      std::string name = fcn_to_compile;
+      symbol_table& symtab = interp.get_symbol_table ();
+      octave_value ov = symtab.find_function (name);
+
+      if (! ov.is_defined ())
+        {
+          error ("Function not defined: %s", name.c_str ());
+        }
+
+      octave_user_function *ufn = ov.user_function_value ();
+
+      if (! ufn || ! ufn->is_user_function ())
+        {
+          error ("Function not a user function: %s", name.c_str ());
+        }
+
+      ufn->clear_bytecode ();
+
+      return octave_value {true};
+    }
+
+
+  {
+    std::string name = fcn_to_compile;
+    symbol_table& symtab = interp.get_symbol_table ();
+    octave_value ov = symtab.find_function (name);
+
+    if (! ov.is_defined ())
+      {
+        error ("Function not defined: %s", name.c_str ());
+      }
+
+    if (! ov.is_user_function ())
+      {
+        error ("Function is not a user function: %s", name.c_str ());
+      }
+
+    octave_user_function *ufn = ov.user_function_value ();
+
+    if (! ufn || ! ufn->is_user_function ())
+      {
+        error ("Function is not really a user function: %s", name.c_str ());
+      }
+
+    // Throws on errors
+    compile_user_function (*ufn, do_print);
+  }
+
+  return octave_value {true};
+}
+
+// If TRUE, use the VM evaluator rather than the tree walker.
+// FIXME: Use the OCTAVE_ENABLE_VM_EVALUATOR define to set it to true when
+// the VM has been tested properly.
+bool V__enable_vm_eval__ = false;
+
+DEFUN (__enable_vm_eval__, args, nargout,
+       doc: /* -*- texinfo -*-
+@deftypefn  {} {@var{val} =} __enable_vm_eval__ ()
+@deftypefnx {} {@var{old_val} =} __enable_vm_eval__ (@var{new_val})
+@deftypefnx {} {@var{old_val} =} __enable_vm_eval__ (@var{new_val}, "local")
+Query or set whether Octave automatically compiles functions to bytecode
+and executes them in a virtual machine (VM).
+
+Note that the virtual machine feature is experimental.
+
+The default value is currently false, while the VM is still experimental.
+Users need to explicitly call @code{__enable_vm_eval__ (1)} to enable it.
+In the future, this will be set to the value of the
+OCTAVE_ENABLE_VM_EVALUATOR flag that was set when building Octave.
+
+When false, Octave uses a traditional tree walker to evaluate statements
+parsed from m-code.  When true, Octave translates parsed statements to an
+intermediate representation that is then evaluated by a virtual machine.
+
+When called from inside a function with the @qcode{"local"} option, the
+setting is changed locally for the function and any subroutines it calls.
+The original setting is restored when exiting the function.
+
+Once compiled to bytecode, a function will always be evaluated by the
+VM no matter the state of @qcode{"__enable_vm_eval__"}, until the bytecode
+is cleared by e.g. @qcode{"clear all"} or a modification to the
+function's m-file.
+
+@seealso{__compile}
+@end deftypefn */)
+{
+  return set_internal_variable (V__enable_vm_eval__, args, nargout,
+                                "__enable_vm_eval__");
+}
+
+OCTAVE_END_NAMESPACE(octave)
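According to the commit message, ov-usr-fcn.cc compiles user functions on
call and pt-eval.cc's execute_user_function () runs the bytecode when it is
present.  A hedged sketch of that gating: V__enable_vm_eval__,
is_compiled () and compile_user_function () appear in this patch, while
vm_execute () is a hypothetical stand-in for the actual VM entry point:

    // Inside execute_user_function (), simplified:
    if (V__enable_vm_eval__ && ! user_function.is_compiled ())
      {
        try
          {
            compile_user_function (user_function, false);
          }
        catch (const octave::execution_exception&)
          {
            // Compilation failed; fall back to the tree walker.
          }
      }

    if (user_function.is_compiled ())
      return vm_execute (user_function, args, nargout); // hypothetical

    // ... otherwise evaluate with the tree walker as before ...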
diff -r edbe81ee00c5 -r d2de83a80165 libinterp/corefcn/interpreter.cc
--- a/libinterp/corefcn/interpreter.cc	Sat Jun 03 20:28:49 2023 -0700
+++ b/libinterp/corefcn/interpreter.cc	Mon Apr 24 20:34:39 2023 +0200
@@ -1623,6 +1623,11 @@
   return m_evaluator.global_varval (name);
 }

+octave_value & interpreter::global_varref (const std::string& name)
+{
+  return m_evaluator.global_varref (name);
+}
+
 void interpreter::global_assign (const std::string& name,
                                  const octave_value& val)
 {

diff -r edbe81ee00c5 -r d2de83a80165 libinterp/corefcn/interpreter.h
--- a/libinterp/corefcn/interpreter.h	Sat Jun 03 20:28:49 2023 -0700
+++ b/libinterp/corefcn/interpreter.h	Mon Apr 24 20:34:39 2023 +0200
@@ -402,6 +402,8 @@

   octave_value global_varval (const std::string& name) const;

+  octave_value & global_varref (const std::string& name);
+
   void global_assign (const std::string& name,
                       const octave_value& val = octave_value ());

diff -r edbe81ee00c5 -r d2de83a80165 libinterp/corefcn/load-path.cc
--- a/libinterp/corefcn/load-path.cc	Sat Jun 03 20:28:49 2023 -0700
+++ b/libinterp/corefcn/load-path.cc	Mon Apr 24 20:34:39 2023 +0200
@@ -251,6 +251,8 @@
     m_dir_info_list (), m_init_dirs (), m_command_line_path ()
 { }

+std::atomic<octave_idx_type> load_path::m_n_updated;
+
 void
 load_path::initialize (bool set_initial_path)
 {
@@ -292,6 +294,8 @@
 void
 load_path::clear ()
 {
+  signal_clear_fcn_cache ();
+
   m_dir_info_list.clear ();

   m_top_level_package.clear ();
@@ -415,6 +419,8 @@
   // preserve the correct directory ordering for new files that
   // have appeared.

+  signal_clear_fcn_cache ();
+
   m_top_level_package.clear ();

   m_package_map.clear ();

diff -r edbe81ee00c5 -r d2de83a80165 libinterp/corefcn/load-path.h
--- a/libinterp/corefcn/load-path.h	Sat Jun 03 20:28:49 2023 -0700
+++ b/libinterp/corefcn/load-path.h	Mon Apr 24 20:34:39 2023 +0200
@@ -215,7 +215,15 @@
   static const int OCT_FILE = 2;
   static const int MEX_FILE = 4;

+  static octave_idx_type get_weak_n_updated () { return m_n_updated; }
+
+  static void signal_clear_fcn_cache ()
+  {
+    m_n_updated++;
+  }
+
 private:
+  static std::atomic<octave_idx_type> m_n_updated;

   class dir_info
   {
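The atomic counter above acts as a generation stamp for cached bytecode:
every load-path change or explicit cache clear bumps it, and cache
consumers revalidate by comparing generations.  A minimal sketch of the
consumer side (the cache entry type is hypothetical; only
get_weak_n_updated () is from this patch):

    struct cached_bytecode_entry
    {
      octave_idx_type n_updated_when_compiled; // load_path generation at compile time
      // ... compiled bytecode ...
    };

    bool entry_still_valid (const cached_bytecode_entry& e)
    {
      // Any addpath/rmpath/"clear" since compilation invalidates the entry.
      return e.n_updated_when_compiled
             == octave::load_path::get_weak_n_updated ();
    }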
'eval ("e = 3;")' +// are stored in this frame object though. +class bytecode_fcn_stack_frame : public stack_frame +{ +public: + + enum class bytecode_offsets + { + DATA_NLOCALS = 2, + }; + + bytecode_fcn_stack_frame (void) = delete; + + bytecode_fcn_stack_frame (tree_evaluator& tw, + octave_user_function *fcn, + std::size_t index, + const std::shared_ptr& parent_link, + const std::shared_ptr& static_link, + vm &vm, + int nargout, int nargin) + : stack_frame (tw, index, parent_link, static_link, + nullptr), + m_fcn (fcn), + m_unwind_data (vm.m_unwind_data), + m_name_data (vm.m_name_data), + m_stack_start (vm.m_sp), + m_code (vm.m_code), + m_size (m_unwind_data->m_ids_size), + // The above fields in vm change during execution so we need to store them in the frame + m_vm (&vm), + m_nargin (nargin), + m_nargout (nargout) + { + // If the function scope has more variables now due to something adding e.g. + // a global too it after the compilation of the function was done we need + // to resize the bytecode frame size. + std::size_t n_syms = fcn->scope_num_symbols (); + + m_orig_size = m_unwind_data->m_external_frame_offset_to_internal.size (); + + if (n_syms > m_orig_size) + { + int n_to_add = m_orig_size - n_syms; + internal_resize (m_size + n_to_add); + } + } + + bytecode_fcn_stack_frame (const bytecode_fcn_stack_frame& elt) + : stack_frame(elt.m_evaluator, elt.m_index, elt.m_parent_link, + elt.m_static_link, elt.m_access_link) + { + // A copy of a bytecode frame has no pointers to the actual VM stack or VM itself. + m_nargin = elt.m_nargin; + m_nargout = elt.m_nargout; + m_name_data = elt.m_name_data; + m_size = elt.m_size; + m_orig_size = elt.m_orig_size; + m_ip = elt.m_ip; + m_unwind_data = elt.m_unwind_data; // TODO: Ownership? + m_size = m_unwind_data->m_ids_size; + + if (elt.m_lazy_data) + { + auto &lazy = lazy_data (); + + lazy.m_extra_slots = elt.m_lazy_data->m_extra_slots; + lazy.m_extra_flags = elt.m_lazy_data->m_extra_flags; + lazy.m_extra_names = elt.m_lazy_data->m_extra_names; + } + } + + bytecode_fcn_stack_frame& + operator = (const bytecode_fcn_stack_frame& elt) = delete; + + bytecode_fcn_stack_frame& + operator = (bytecode_fcn_stack_frame&& elt) + { + if (m_lazy_data) + { + if (m_lazy_data->m_stack_cpy) + { + // Note: int nargout at offset 0 + for (unsigned i = 1; i < m_size; i++) + m_lazy_data->m_stack_cpy[i].ov.~octave_value (); + delete m_lazy_data->m_stack_cpy; + } + delete m_lazy_data->m_unwind_protect_frame; + delete m_lazy_data; + } + + *this = std::move (elt); + elt.m_lazy_data = nullptr; + + return *this; + } + + // vm_clear_for_cache () and the dtor need to mirror eachother + // so they both call dispose() + void dispose () + { + if (m_lazy_data) + { + if (m_lazy_data->m_stack_cpy) + { + // Note: int nargout at offset 0 + for (unsigned i = 1; i < m_size; i++) + m_lazy_data->m_stack_cpy[i].ov.~octave_value (); + delete m_lazy_data->m_stack_cpy; + } + delete m_lazy_data->m_unwind_protect_frame; + delete m_lazy_data; + m_lazy_data = nullptr; + } + } + + ~bytecode_fcn_stack_frame () + { + // vm_clear_for_cache () need to mirror the dtor + dispose (); + } + + + size_t local_to_external_offset (size_t local_offset) const + { + for (auto it : m_unwind_data->m_external_frame_offset_to_internal) + { + size_t local_tmp = it.second; + if (local_tmp != local_offset) + continue; + + return it.first; // external offset + } + + if (local_offset < m_size) + error ("VM internal error: Invalid internal offset. 
+
+    return local_offset - m_size + m_orig_size;
+  }
+
+  std::size_t external_to_local_offset (std::size_t external_offset) const
+  {
+    auto it = m_unwind_data->m_external_frame_offset_to_internal.find (external_offset);
+    if (it == m_unwind_data->m_external_frame_offset_to_internal.end ())
+      {
+        if (external_offset < m_orig_size)
+          error ("VM internal error: Invalid external offset. "
+                 "Smaller than original size and not in table");
+        // The offsets that are not in the original translation table are in
+        // the extra slots added dynamically
+        return m_size + (external_offset - m_orig_size);
+      }
+
+    return it->second;
+  }
+
+  // Do an expensive check if the stack is in order.  Call only during debugging.
+  void vm_dbg_check_scope ()
+  {
+    symbol_scope scope = __get_interpreter__().get_current_scope ();
+    auto symbols = scope.symbol_list ();
+
+    for (symbol_record sym_scope : symbols)
+      {
+        if (sym_scope.frame_offset ()) // Don't check nested function stuff because it isn't working anyway
+          continue;
+
+        std::size_t scope_offset = sym_scope.data_offset ();
+        std::string scope_name = sym_scope.name ();
+
+        std::size_t internal_offset = external_to_local_offset (scope_offset);
+        if (internal_offset >= m_size)
+          continue; // We don't check the "extra" slots since these can change with eval and evalin:s etc.
+
+        symbol_record sym_frame = lookup_symbol (scope_name);
+
+        std::size_t frame_offset = sym_frame.data_offset ();
+        std::string frame_name = sym_frame.name ();
+
+        if (scope_name != frame_name && frame_name != "")
+          error ("VM stack check failed: %s != %s\n", scope_name.c_str (), frame_name.c_str ());
+        if (scope_offset != frame_offset && frame_name != "")
+          error ("VM stack check failed: %zu != %zu\n", scope_offset, frame_offset);
+
+        if (internal_offset >= internal_size ())
+          internal_resize (internal_offset + 1);
+
+        auto flag = get_scope_flag (scope_offset);
+        if (flag == PERSISTENT || flag == GLOBAL)
+          continue;
+
+        octave_value ov1 = varval (sym_scope);
+        octave_value ov2 = varval (scope_name);
+        octave_value ov3 = varval (scope_offset);
+        octave_value ov4 = varref (sym_scope);
+        octave_value ov5 = varref (scope_offset);
+
+        if (!ov1.same_rep (ov2))
+          error ("VM stack check failed: Same object differs 2\n");
+        if (!ov1.same_rep (ov3))
+          error ("VM stack check failed: Same object differs 3\n");
+        if (!ov1.same_rep (ov4))
+          error ("VM stack check failed: Same object differs 4\n");
+        if (!ov1.same_rep (ov5))
+          error ("VM stack check failed: Same object differs 5\n");
+      }
+
+  }
+
+  void vm_clear_for_cache ()
+  {
+    m_parent_link = nullptr;
+    m_static_link = nullptr;
+    m_access_link = nullptr;
+    m_dispatch_class.clear ();
+
+    dispose ();
+  }
+
+  // Since a reference to the stack frame can be saved somewhere,
+  // we need to check at stack unwind in the VM if that is the case
+  // and save the variables on the VM stack in this frame object so
+  // they can be accessed.
+  void vm_unwinds ()
+  {
+    //vm_dbg_check_scope ();
+    bool is_alone = m_weak_ptr_to_self.use_count () <= 2; // Two seems about right
+
+    if (m_lazy_data)
+      {
+        delete m_lazy_data->m_unwind_protect_frame;
+        m_lazy_data->m_unwind_protect_frame = nullptr;
+
+        // Restore warning states
+        if (m_fcn)
+          {
+            auto usr_fn_p = m_fcn->user_function_value ();
+            if (usr_fn_p)
+              usr_fn_p->restore_warning_states (); // TODO: octave_user_function::restore_warning_states() could be static.
+          }
+      }
+
+    if (is_alone)
+      {
+        if (m_lazy_data)
+          delete m_lazy_data;
+
+        // Zero these so it is easier to find a "use-after-unwind"
+        // error
+        m_lazy_data = nullptr;
+        m_stack_start = nullptr;
+        m_code = nullptr;
+        m_name_data = nullptr;
+        m_unwind_data = nullptr;
+        m_vm = nullptr;
+
+        return;
+      }
+
+    // These pointers might become invalid
+    m_vm = nullptr;
+    m_unwind_data = nullptr;
+
+    // Copy the stack to the frame
+    size_t stack_slots = m_size;
+
+    lazy_data ();
+
+    m_lazy_data->m_stack_cpy = new octave::stack_element[stack_slots];
+    for (unsigned i = 1; i < m_size; i++)
+      new (&m_lazy_data->m_stack_cpy[i].ov) octave_value {};
+
+    // Note: int nargout at offset 0
+    m_lazy_data->m_stack_cpy[0].i = m_stack_start[0].i;
+    for (unsigned i = 1; i < m_size; i++)
+      m_lazy_data->m_stack_cpy[i].ov = m_stack_start[i].ov;
+
+    m_stack_start = m_lazy_data->m_stack_cpy;
+  }
+
+  std::size_t size (void) const
+  {
+    return m_orig_size
+      + (m_lazy_data ? m_lazy_data->m_extra_slots.size () : 0);
+  }
+
+  std::size_t internal_size (void) const
+  {
+    return m_size
+      + (m_lazy_data ? m_lazy_data->m_extra_slots.size () : 0);
+  }
+
+  void resize (std::size_t arg)
+  {
+    int diff = static_cast<int> (arg) - static_cast<int> (size ());
+
+    if (diff > 0)
+      internal_resize (internal_size () + diff);
+  }
+
+  void internal_resize (std::size_t arg)
+  {
+    int diff = static_cast<int> (arg) - static_cast<int> (internal_size ());
+
+    if (diff > 0)
+      {
+        auto &lazy = lazy_data ();
+        lazy.m_extra_slots.resize (lazy.m_extra_slots.size () + diff);
+
+        lazy.m_extra_flags.resize (lazy.m_extra_flags.size () + diff);
+
+        lazy.m_extra_names.resize (lazy.m_extra_names.size () + diff);
+      }
+  }
+
+  bool slot_is_global (std::size_t local_offset) const
+  {
+    if (local_offset >= m_size)
+      {
+        if (!m_lazy_data)
+          panic ("bytecode_fcn_stack_frame::slot_is_global(%zu): Bad request", local_offset);
+        scope_flags flag = m_lazy_data->m_extra_flags.at (local_offset - m_size);
+        return flag == GLOBAL;
+      }
+
+    octave_value &ov = m_stack_start [local_offset].ov;
+    if (!ov.is_ref ())
+      return false;
+    if (ov.ref_rep ()->is_global_ref ())
+      return true;
+
+    return false;
+  }
+
+  bool slot_is_persistent (std::size_t local_offset) const
+  {
+    if (local_offset >= m_size)
+      {
+        if (!m_lazy_data)
+          panic ("bytecode_fcn_stack_frame::slot_is_persistent(%zu): Bad request", local_offset);
+        scope_flags flag = m_lazy_data->m_extra_flags.at (local_offset - m_size);
+        return flag == PERSISTENT;
+      }
+
+    octave_value &ov = m_stack_start [local_offset].ov;
+    if (!ov.is_ref ())
+      return false;
+    if (ov.ref_rep ()->is_persistent_ref ())
+      return true;
+    return false;
+  }
+
+  stack_frame::scope_flags get_scope_flag (std::size_t external_offset) const
+  {
+    std::size_t local_offset = external_to_local_offset (external_offset);
+    // Is the slot on the original bytecode stack frame?
+    if (local_offset < m_size)
+      {
+        octave_value &ov = m_stack_start [local_offset].ov;
+        if (!ov.is_ref ())
+          return LOCAL;
+        if (ov.ref_rep ()->is_global_ref ())
+          return GLOBAL;
+        if (ov.ref_rep ()->is_persistent_ref ())
+          return PERSISTENT;
+        return LOCAL;
+      }
+
+    size_t extra_offset = local_offset - m_size;
+    if (m_lazy_data && extra_offset < m_lazy_data->m_extra_flags.size ())
+      return m_lazy_data->m_extra_flags.at (local_offset - m_size);
+
+    return LOCAL;
+  }
+
+  void set_scope_flag (std::size_t external_offset, scope_flags flag)
+  {
+    std::size_t local_offset = external_to_local_offset (external_offset);
+    if (local_offset >= m_size)
+      {
+        if (!m_lazy_data)
+          error ("VM internal error: Trying to set scope flag on invalid offset");
+        m_lazy_data->m_extra_flags.at (local_offset - m_size) = flag;
+        return;
+      }
+
+    scope_flags current_flag = get_scope_flag (external_offset);
+
+    bool is_global = current_flag == GLOBAL;
+    bool is_pers = current_flag == PERSISTENT;
+
+    if (flag == GLOBAL)
+      {
+        if (is_global)
+          return;
+        if (is_pers)
+          error ("VM internal error: Trying to make persistent variable global");
+
+        octave_value &ov = m_stack_start [local_offset].ov;
+        ov = octave_value {new octave_value_ref_global {m_name_data [local_offset]}};
+
+        return;
+      }
+
+    if (flag == PERSISTENT)
+      {
+        if (is_pers)
+          return;
+        if (is_global)
+          error ("VM internal error: Trying to make global variable persistent");
+
+        octave_value &ov = m_stack_start [local_offset].ov;
+        ov = octave_value {new octave_value_ref_persistent {get_scope (), static_cast<int> (external_offset)}};
+
+        return;
+      }
+
+    if (flag == LOCAL)
+      {
+        if (!is_global && !is_pers)
+          return;
+
+        // Clear the global or persistent ref on the stack
+        if (is_global || is_pers)
+          {
+            // Clear the ref in its slot
+            octave_value& ov_ref = m_stack_start [local_offset].ov;
+            ov_ref = octave_value {};
+          }
+
+        return;
+      }
+
+    panic ("VM internal error: Strange state: %d", flag);
+  }
+
+  stack_frame::scope_flags scope_flag (const symbol_record& sym) const
+  {
+    std::size_t external_offset = sym.data_offset ();
+
+    if (sym.frame_offset ())
+      error ("TODO: Frame offset %d", __LINE__);
+
+    return get_scope_flag (external_offset);
+  }
+
+  virtual octave_value get_active_bytecode_call_arg_names ()
+  {
+    // Handle ARG_NAMES
+    if (!m_unwind_data || !m_vm)
+      return Cell {};
+
+    int best_match = -1;
+    int best_start = -1;
+
+    auto &entries = m_unwind_data->m_argname_entries;
+    for (unsigned i = 0; i < entries.size (); i++)
+      {
+        int start = entries[i].m_ip_start;
+        int end = entries[i].m_ip_end;
+
+        if (start > m_ip || end < m_ip)
+          continue;
+
+        if (best_match != -1)
+          {
+            if (best_start > start)
+              continue;
+          }
+
+        best_match = i;
+        best_start = start;
+      }
+
+    if (best_match == -1)
+      return Cell {};
+
+    Cell c = entries[best_match].m_arg_names;
+    return c;
+  }
+
+  virtual void set_active_bytecode_ip (int ip)
+  {
+    m_ip = ip;
+  }
+
+  octave_value get_auto_fcn_var (auto_var_type avt) const
+  {
+    switch (avt)
+      {
+      case stack_frame::NARGIN:
+        return octave_value {m_nargin};
+      case stack_frame::NARGOUT:
+        return octave_value {m_nargout};
+      case stack_frame::SAVED_WARNING_STATES:
+        if (!m_lazy_data)
+          return {};
+        else
+          return m_lazy_data->m_saved_warnings_states;
+      case stack_frame::IGNORED:
+        if (!m_lazy_data)
+          return {};
+        else
+          return m_lazy_data->m_ignored;
+      case stack_frame::ARG_NAMES:
+        {
+          // If the current bytecode stack frame is the root one in the VM,
+          // the caller sets ARG_NAMES in the root bytecode stack frame
+          if (m_lazy_data)
+            {
+              octave_value ov = m_lazy_data->m_arg_names;
+              if (ov.is_defined ())
+                return ov;
+            }
+          // In bytecode stack frames, the arg names are stored in the caller frame.
+          return m_parent_link->get_active_bytecode_call_arg_names ();
+        }
+      default:
+        panic ("bytecode_fcn_stack_frame::get_auto_fcn_var() : Invalid call idx=%d", static_cast<int> (avt));
+      }
+  }
+
+  void set_nargin (int nargin) { m_nargin = nargin; }
+  void set_nargout (int nargout) { m_nargout = nargout; }
+
+  void set_auto_fcn_var (auto_var_type avt, const octave_value& val)
+  {
+    switch (avt)
+      {
+      case stack_frame::NARGIN:
+        m_nargin = val.int_value ();
+        return;
+      case stack_frame::NARGOUT:
+        m_nargout = val.int_value ();
+        return;
+      case stack_frame::SAVED_WARNING_STATES:
+        lazy_data ().m_saved_warnings_states = val;
+        return;
+      case stack_frame::IGNORED:
+        lazy_data ().m_ignored = val;
+        return;
+      case stack_frame::ARG_NAMES:
+        lazy_data ().m_arg_names = val;
+        return;
+      default:
+        panic ("bytecode_fcn_stack_frame::set_auto_fcn_var() : Invalid call idx=%d", static_cast<int> (avt));
+      }
+  }
+
+  // We only need to override one of each of these functions.  The
+  // using declaration will avoid warnings about partially-overloaded
+  // virtual functions.
+  using stack_frame::varval;
+  using stack_frame::varref;
+
+  octave_value varval (std::size_t external_offset) const
+  {
+    size_t extra_size = (m_lazy_data ? m_lazy_data->m_extra_slots.size () : 0);
+    size_t stack_slots = m_size;
+
+    std::size_t local_offset = external_to_local_offset (external_offset);
+
+    if (local_offset == 0) // Handle native int %nargout specially
+      return octave_value {m_stack_start [0].i};
+    if (local_offset < stack_slots)
+      {
+        octave_value ov = m_stack_start [local_offset].ov;
+        if (ov.is_ref ())
+          return ov.ref_rep ()->deref ();
+        return ov;
+      }
+    else
+      {
+        std::size_t extra_offset = local_offset - stack_slots;
+        if (!m_lazy_data || extra_offset >= extra_size)
+          error ("VM internal error: Trying to access extra slot out of range, %zu", extra_offset);
+        return m_lazy_data->m_extra_slots.at (extra_offset);
+      }
+  }
+
+  octave_value varval (const symbol_record& sym) const
+  {
+    // We don't use frame offsets.  Just return nil
+    if (sym.frame_offset ())
+      return octave_value {};
+
+    std::size_t external_offset = sym.data_offset ();
+    std::size_t local_offset = external_to_local_offset (external_offset);
+
+    // If the offset is out of range we return a nil ov
+    if (local_offset >= internal_size ())
+      return {};
+    if (local_offset >= m_size)
+      {
+        std::string nm_src = sym.name ();
+        if (!m_lazy_data)
+          panic ("bytecode_fcn_stack_frame::varval() Invalid request");
+        std::string &nm = m_lazy_data->m_extra_names.at (local_offset - m_size);
+        if (nm == "")
+          nm = nm_src;
+        else if (nm != nm_src && nm_src != "")
+          error ("VM internal error: Trying to access extra slot with the wrong name. Current: %s, New: %s\n",
+                 nm.c_str (), nm_src.c_str ());
+      }
+
+    bool is_global = slot_is_global (local_offset);
+    bool is_pers = slot_is_persistent (local_offset);
+
+    if (is_global)
+      return m_evaluator.global_varval (sym.name ());
+    if (is_pers)
+      return get_scope ().persistent_varval (external_offset);
+
+    return varval (external_offset);
+  }
+
+  octave_value& varref (std::size_t external_offset)
+  {
+    static octave_value fake_dummy_nargout{0};
+
+    std::size_t extra_size = (m_lazy_data ? m_lazy_data->m_extra_slots.size () : 0);
+    std::size_t stack_slots = m_size;
+
+    std::size_t local_offset = external_to_local_offset (external_offset);
+
+    // Handle native int %nargout specially.  Note that changing
+    // the value of %nargout via this ref won't work.
Note that changing + // the value of %nargout via the this ref wont work. + if (local_offset == 0) + return fake_dummy_nargout = octave_value {m_stack_start [0].i}; + if (local_offset < stack_slots) + { + octave_value &ov = m_stack_start [local_offset].ov; + if (ov.is_ref ()) + return ov.ref_rep ()->ref (); + return ov; + } + else + { + std::size_t extra_offset = local_offset - stack_slots; + if (!m_lazy_data || extra_offset >= extra_size) + error ("VM internal error: Trying to access extra slot out of range, %zu", extra_offset); + return m_lazy_data->m_extra_slots.at (extra_offset); + } + } + + octave_value& varref (const symbol_record& sym) + { + std::size_t external_offset = sym.data_offset (); + std::size_t local_offset = external_to_local_offset (external_offset); + + if (sym.frame_offset()) + error ("TODO: Frame offset"); + + // If the offset is out of range we make room for it + if (local_offset >= internal_size ()) + internal_resize (local_offset + 1); + if (local_offset >= m_size) + { + std::string nm_src = sym.name (); + if (!m_lazy_data) + panic ("bytecode_fcn_stack_frame::varval() Invalid request"); + std::string &nm = m_lazy_data->m_extra_names.at (local_offset - m_size); + if (nm == "") + nm = nm_src; + else if (nm != nm_src && nm_src != "") + error ("VM internal error: Trying to access extra slot with wrong name. Current: %s, New: %s\n", + nm.c_str (), nm_src.c_str ()); + } + + bool is_global = slot_is_global (local_offset); + bool is_pers = slot_is_persistent (local_offset); + + if (is_global) + return m_evaluator.global_varref (sym.name ()); + if (is_pers) + return get_scope ().persistent_varref(external_offset); + + return varref (external_offset); + } + + void mark_scope (const symbol_record& sym, + scope_flags flag) + { + std::size_t external_offset = sym.data_offset (); + std::size_t local_offset = external_to_local_offset (external_offset); + + if (local_offset >= internal_size ()) + internal_resize (local_offset + 1); + + set_scope_flag (external_offset, flag); + } + + bool is_bytecode_fcn_frame (void) const { return true; } + + symbol_record lookup_symbol (const std::string& name) const + { + int local_offset = -1; + scope_flags flag = LOCAL; + + for (int i = 0; i < static_cast (m_size); i++) + { + if (m_name_data [i] == name) + { + local_offset = i; + + bool is_global = slot_is_global (local_offset); + bool is_pers = slot_is_persistent (local_offset); + + if (is_global) + flag = GLOBAL; + else if (is_pers) + flag = PERSISTENT; + + break; + } + } + + if (local_offset >= 0) + { + symbol_record ret (name, flag); + ret.set_data_offset (local_to_external_offset (static_cast (local_offset))); + // Check if the symbol is an argument or return symbol. Note: Negative count for vararg and varargin + int n_returns = abs (static_cast (m_code[0])); + int n_args = abs (static_cast (m_code[1])); + if (local_offset < n_returns + n_args) + ret.mark_formal (); + + return ret; + } + + if (m_lazy_data) + { + for (unsigned i = 0; i < m_lazy_data->m_extra_slots.size (); i++) + { + if (m_lazy_data->m_extra_names.at (i) == name) + { + symbol_record ret (name, m_lazy_data->m_extra_flags.at (i)); + ret.set_data_offset (m_orig_size + i); + return ret; + } + } + } + + // Search the "scope" object of this and any nested frame + // The scope object will have e.g. 
+    // variables added by scripts.
+    const stack_frame *frame = this;
+    while (frame)
+      {
+        symbol_scope scope = frame->get_scope ();
+
+        symbol_record sym = scope.lookup_symbol (name);
+
+        if (sym)
+          return sym;
+
+        std::shared_ptr<stack_frame> nxt = frame->access_link ();
+        frame = nxt.get ();
+      }
+
+    return symbol_record ();
+  }
+
+  symbol_record insert_symbol (const std::string& name)
+  {
+    // If the symbol is already in the immediate scope, there is
+    // nothing more to do.
+
+    symbol_scope scope = get_scope ();
+
+    symbol_record sym = scope.lookup_symbol (name);
+
+    if (sym)
+      return sym;
+
+    // If we have not created the extra slots, now is the time
+    auto &lazy = lazy_data ();
+    lazy.m_extra_names.push_back (name);
+    lazy.m_extra_slots.push_back ({});
+    lazy.m_extra_flags.push_back (LOCAL);
+
+    sym = scope.find_symbol (name);
+
+    panic_unless (sym.is_valid ());
+
+    return sym;
+  }
+
+  symbol_scope get_scope (void) const
+  {
+    return m_fcn->scope ();
+  }
+
+  octave_function * function () const { return m_fcn; }
+
+  void accept (stack_frame_walker& sfw);
+
+  void display (bool follow = true) const;
+
+  int line () const
+  {
+    if (! m_vm)
+      return -1;
+
+    loc_entry loc = vm::find_loc (m_ip, m_vm->m_unwind_data->m_loc_entry); // TODO: Does not work in nested bytecode stack frames
+    return loc.m_line;
+  }
+
+  int column () const
+  {
+    if (! m_vm)
+      return -1;
+
+    loc_entry loc = m_vm->find_loc (m_ip, m_vm->m_unwind_data->m_loc_entry);
+    return loc.m_col;
+  }
+
+  unwind_protect *unwind_protect_frame ()
+  {
+    if (! lazy_data ().m_unwind_protect_frame)
+      lazy_data ().m_unwind_protect_frame = new unwind_protect ();
+
+    return lazy_data ().m_unwind_protect_frame;
+  }
+
+  std::weak_ptr<stack_frame> m_weak_ptr_to_self;
+
+private:
+  // To keep down the footprint of the frame some seldom used
+  // variables are lazily initialized and stored in *m_lazy_data
+  struct lazy_data_struct
+  {
+    octave_value m_ignored;
+    octave_value m_arg_names;
+    octave_value m_saved_warnings_states;
+
+    std::vector<octave_value> m_extra_slots;
+    std::vector<std::string> m_extra_names;
+    std::vector<scope_flags> m_extra_flags;
+
+    unwind_protect *m_unwind_protect_frame = nullptr;
+    stack_element *m_stack_cpy = nullptr;
+  };
+
+  lazy_data_struct & lazy_data ()
+  {
+    if (!m_lazy_data)
+      m_lazy_data = new lazy_data_struct {};
+    return *m_lazy_data;
+  }
+
+  lazy_data_struct *m_lazy_data = nullptr;
+
+  octave_function *m_fcn;
+
+  unwind_data *m_unwind_data;
+
+  std::string *m_name_data;
+  stack_element *m_stack_start;
+
+  unsigned char *m_code;
+  unsigned m_size;
+  unsigned m_orig_size;
+  vm *m_vm;
+  int m_ip;
+
+  int m_nargin;
+  int m_nargout;
+};
+
 class compiled_fcn_stack_frame : public stack_frame
 {
 public:
@@ -331,6 +1212,10 @@
   octave_value get_auto_fcn_var (auto_var_type avt) const
   {
+    if (avt != stack_frame::auto_var_type::ARG_NAMES)
+      return m_auto_vars.at (avt);
+    if (m_parent_link->is_bytecode_fcn_frame ())
+      return m_parent_link->get_active_bytecode_call_arg_names ();
     return m_auto_vars.at (avt);
   }
@@ -438,6 +1323,7 @@
     delete m_unwind_protect_frame;
   }

+  bool is_user_fcn_frame () const { return true; }

   static std::shared_ptr<stack_frame>
@@ -607,6 +1493,9 @@

   virtual void
   visit_scope_stack_frame (scope_stack_frame&) = 0;
+
+  virtual void
+  visit_bytecode_fcn_stack_frame (bytecode_fcn_stack_frame&) = 0;
 };

 class symbol_cleaner : public stack_frame_walker
@@ -675,6 +1564,16 @@
       alink->accept (*this);
   }

+  void visit_bytecode_fcn_stack_frame (bytecode_fcn_stack_frame& frame)
+  {
+    clean_frame (frame);
+
+    std::shared_ptr<stack_frame> alink = frame.access_link ();
+
+    if (alink)
+      alink->accept (*this);
+  }
+
 private:
   void maybe_clear_symbol (stack_frame& frame, const symbol_record& sym)
@@ -915,6 +1814,16 @@
       alink->accept (*this);
   }

+  void visit_bytecode_fcn_stack_frame (bytecode_fcn_stack_frame& frame)
+  {
+    append_list (frame);
+
+    std::shared_ptr<stack_frame> alink = frame.access_link ();
+
+    if (alink)
+      alink->accept (*this);
+  }
+
 private:

   typedef std::pair<std::string, symbol_info_list> syminf_list_elt;
@@ -1077,6 +1986,46 @@
   return new scope_stack_frame (tw, scope, index, parent_link, static_link);
 }

+std::shared_ptr<stack_frame> stack_frame::create_bytecode (
+  tree_evaluator& tw,
+  octave_user_function *fcn,
+  vm &vm,
+  std::size_t index,
+  const std::shared_ptr<stack_frame>& parent_link,
+  const std::shared_ptr<stack_frame>& static_link,
+  int nargout, int nargin)
+{
+  // If we have any cached shared_ptr to empty bytecode_fcn_stack_frame
+  // objects, we use one of those
+  if (vm.m_frame_ptr_cache.size ())
+    {
+      std::shared_ptr<stack_frame> sp = std::move (vm.m_frame_ptr_cache.back ());
+      vm.m_frame_ptr_cache.pop_back ();
+
+      bytecode_fcn_stack_frame *p = static_cast<bytecode_fcn_stack_frame *> (sp.get ());
+      // Most objects were cleared when the shared_ptr was put into the
+      // cache, but call the dtor anyway to be sure.
+      p->~bytecode_fcn_stack_frame ();
+      // Placement new into the storage managed by the shared_ptr
+      new (p) bytecode_fcn_stack_frame (tw, fcn, index, parent_link, static_link, vm, nargout, nargin);
+
+      p->m_weak_ptr_to_self = sp;
+
+      return sp;
+    }
+
+  bytecode_fcn_stack_frame *new_frame_raw = new bytecode_fcn_stack_frame (tw, fcn,
+    index, parent_link, static_link, vm, nargout, nargin);
+  std::shared_ptr<stack_frame> new_frame (new_frame_raw);
+
+  // The bytecode stack frame needs to know whether it must save away
+  // all the stack variables, so it needs to keep track of whether it is
+  // saved somewhere outside the VM
+  new_frame_raw->m_weak_ptr_to_self = new_frame;
+
+  return new_frame;
+}
+
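create_bytecode () recycles shared_ptr control blocks: a cached pointer
whose frame was already disposed gets the next frame constructed in place,
saving an allocation per call.  A stripped-down version of the same pattern
in isolation (generic C++, not patch code; like the VM's m_frame_ptr_cache,
it relies on the cache holding the only reference to each entry):

    #include <memory>
    #include <new>
    #include <vector>

    struct frame { /* ... */ };

    std::vector<std::shared_ptr<frame>> cache; // holds sole references

    std::shared_ptr<frame> make_frame ()
    {
      if (! cache.empty ())
        {
          std::shared_ptr<frame> sp = std::move (cache.back ());
          cache.pop_back ();

          frame *p = sp.get ();
          p->~frame ();      // destroy the stale payload ...
          new (p) frame {};  // ... and construct in place, reusing the
                             // allocation and its control block
          return sp;
        }

      return std::make_shared<frame> ();
    }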
// This function is only implemented and should only be called for
// user_fcn stack frames.  Anything else indicates an error in the
// implementation, but we'll simply warn if that happens.
@@ -2343,6 +3292,18 @@
   set_scope_flag (data_offset, flag);
 }

+void bytecode_fcn_stack_frame::display (bool) const
+{
+  std::ostream& os = octave_stdout;
+
+  os << "-- [bytecode_fcn_stack_frame] (" << this << ") --" << std::endl;
+
+  os << "fcn: " << m_fcn->name ()
+     << " (" << m_fcn->type_name () << ")" << std::endl;
+
+  display_scope (os, get_scope ());
+}
+
 void user_fcn_stack_frame::display (bool follow) const
 {
   std::ostream& os = octave_stdout;
@@ -2483,4 +3444,10 @@
   sfw.visit_scope_stack_frame (*this);
 }

+void bytecode_fcn_stack_frame::accept (stack_frame_walker& sfw)
+{
+  sfw.visit_bytecode_fcn_stack_frame (*this);
+}
+
+
 OCTAVE_END_NAMESPACE(octave)

diff -r edbe81ee00c5 -r d2de83a80165 libinterp/corefcn/stack-frame.h
--- a/libinterp/corefcn/stack-frame.h	Sat Jun 03 20:28:49 2023 -0700
+++ b/libinterp/corefcn/stack-frame.h	Mon Apr 24 20:34:39 2023 +0200
@@ -104,6 +104,8 @@
 class unwind_protect;

 class stack_frame_walker;
+class vm;
+union stack_element;

 class stack_frame
 {
@@ -182,6 +184,16 @@
           const std::shared_ptr<stack_frame>& parent_link,
           const std::shared_ptr<stack_frame>& static_link);

+  // Bytecode function stack frame
+  static std::shared_ptr<stack_frame>
+  create_bytecode (tree_evaluator& tw,
+                   octave_user_function *fcn,
+                   vm &vm,
+                   std::size_t index,
+                   const std::shared_ptr<stack_frame>& parent_link,
+                   const std::shared_ptr<stack_frame>& static_link,
+                   int nargout, int nargin);
+
   stack_frame (const stack_frame& elt) = default;

   stack_frame& operator = (const stack_frame& elt) = delete;
@@ -196,16 +208,17 @@
   virtual bool is_user_script_frame () const { return false; }
   virtual bool is_user_fcn_frame () const { return false; }
   virtual bool is_scope_frame () const { return false; }
+  virtual bool is_bytecode_fcn_frame (void) const { return false; }

   virtual void clear_values ();

   std::size_t index () const { return m_index; }

   void line (int l) { m_line = l; }
-  int line () const { return m_line; }
+  virtual int line () const { return m_line; }

   void column (int c) { m_column = c; }
-  int column () const { return m_column; }
+  virtual int column () const { return m_column; }

   std::string fcn_file_name () const
   {
@@ -307,6 +320,9 @@
     mark_global (sym);
   }

+  std::size_t
+  parent_frame_index () const { return m_parent_link->index (); }
+
   std::shared_ptr<stack_frame> parent_link () const { return m_parent_link; }

@@ -423,10 +439,23 @@
     install_variable (sym, value, global);
   }

+  virtual octave_value get_active_bytecode_call_arg_names ()
+  {
+    panic_impossible (); // Only bytecode frames need to implement this
+  }
+
+  virtual void set_active_bytecode_ip (int)
+  {
+    panic_impossible (); // Only bytecode frames need to implement this
+  }
+
   virtual octave_value get_auto_fcn_var (auto_var_type) const = 0;

   virtual void set_auto_fcn_var (auto_var_type, const octave_value&) = 0;

+  virtual void set_nargin (int nargin) { set_auto_fcn_var (NARGIN, nargin); }
+  virtual void set_nargout (int nargout) { set_auto_fcn_var (NARGOUT, nargout); }
+
   virtual octave_value varval (const symbol_record& sym) const = 0;

   virtual octave_value varval (std::size_t data_offset) const;
@@ -557,8 +586,13 @@
   void mark_closure_context () { m_is_closure_context = true; }
   bool is_closure_context () const { return m_is_closure_context; }

+  // The VM needs to tell the bytecode stack frame that it unwinds so
+  // that it can check whether it needs to save the stack or not.
+  virtual void vm_unwinds () {}
+  virtual void vm_dbg_check_scope () {}
+  virtual void vm_clear_for_cache () {}
+
 protected:
-
   // Reference to the call stack that contains this frame.  Global
   // variables are stored in the call stack.  This link gives us
   // immediate access to them.

diff -r edbe81ee00c5 -r d2de83a80165 libinterp/corefcn/symscope.h
--- a/libinterp/corefcn/symscope.h	Sat Jun 03 20:28:49 2023 -0700
+++ b/libinterp/corefcn/symscope.h	Mon Apr 24 20:34:39 2023 +0200
@@ -66,15 +66,19 @@
     subfunctions_iterator;

   symbol_scope_rep (const std::string& name = "")
+    : symbol_scope_rep (name, true)
+  {}
+
+  symbol_scope_rep (const std::string& name, bool add_ans)
     : m_name (name), m_symbols (), m_subfunctions (),
       m_persistent_values (), m_code (nullptr), m_fcn_name (),
       m_fcn_file_name (), m_dir_name (), m_parent (),
       m_primary_parent (), m_children (), m_nesting_depth (0),
       m_is_static (false), m_is_primary_fcn_scope (false)
   {
-    // All scopes have ans as the first symbol, initially undefined.
-
-    insert_local ("ans");
+    // Most scopes have ans as the first symbol, initially undefined.
+    if (add_ans)
+      insert_local ("ans");
   }

   OCTAVE_DISABLE_COPY_MOVE (symbol_scope_rep)

diff -r edbe81ee00c5 -r d2de83a80165 libinterp/corefcn/variables.cc
--- a/libinterp/corefcn/variables.cc	Sat Jun 03 20:28:49 2023 -0700
+++ b/libinterp/corefcn/variables.cc	Mon Apr 24 20:34:39 2023 +0200
@@ -1235,6 +1235,9 @@

   string_vector argv = args.make_argv ("clear");

+  // Mark any function cache in use by the VM as invalid
+  octave::load_path::signal_clear_fcn_cache ();
+
   if (argc == 1)
     {
       do_clear_variables (interp, argv, argc, true);

diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/module.mk
--- a/libinterp/octave-value/module.mk	Sat Jun 03 20:28:49 2023 -0700
+++ b/libinterp/octave-value/module.mk	Mon Apr 24 20:34:39 2023 +0200
@@ -51,6 +51,7 @@
   %reldir%/ov-flt-cx-mat.h \
   %reldir%/ov-flt-re-diag.h \
   %reldir%/ov-flt-re-mat.h \
+  %reldir%/ov-inline.h \
  %reldir%/ov-java.h \
   %reldir%/ov-lazy-idx.h \
   %reldir%/ov-legacy-range.h \
@@ -61,6 +62,7 @@
   %reldir%/ov-perm.h \
   %reldir%/ov-range-traits.h \
   %reldir%/ov-range.h \
+  %reldir%/ov-ref.h \
   %reldir%/ov-re-diag.h \
   %reldir%/ov-re-mat.h \
   %reldir%/ov-scalar.h \
@@ -69,6 +71,7 @@
   %reldir%/ov-typeinfo.h \
   %reldir%/ov-usr-fcn.h \
   %reldir%/ov.h \
+  %reldir%/ov-vm.h \
   %reldir%/ovl.h \
   $(OV_INTTYPE_INC) \
   $(OV_SPARSE_INC)
@@ -127,6 +130,7 @@
   %reldir%/ov-oncleanup.cc \
   %reldir%/ov-perm.cc \
   %reldir%/ov-range.cc \
+  %reldir%/ov-ref.cc \
   %reldir%/ov-re-diag.cc \
   %reldir%/ov-re-mat.cc \
   %reldir%/ov-scalar.cc \

diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-base-int.h
--- a/libinterp/octave-value/ov-base-int.h	Sat Jun 03 20:28:49 2023 -0700
+++ b/libinterp/octave-value/ov-base-int.h	Mon Apr 24 20:34:39 2023 +0200
@@ -136,6 +136,8 @@

   octave_base_value * try_narrowing_conversion () { return nullptr; }

+  bool is_maybe_function (void) const { return false; }
+
   bool isreal () const { return true; }

   bool is_real_scalar () const { return true; }

diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-base-mat.cc
--- a/libinterp/octave-value/ov-base-mat.cc	Sat Jun 03 20:28:49 2023 -0700
+++ b/libinterp/octave-value/ov-base-mat.cc	Mon Apr 24 20:34:39 2023 +0200
@@ -39,9 +39,33 @@
 #include "ov-base.h"
 #include "ov-base-mat.h"
 #include "ov-base-scalar.h"
+#include "ov-inline.h"
 #include "pr-output.h"

 template <typename MT>
+octave_value_list
+octave_base_matrix<MT>::simple_subsref (char type, octave_value_list& idx, int)
+{
+  switch (type)
+    {
+    case '(':
+      return do_index_op (idx);
+      break;
+
+    case '{':
+    case '.':
+      {
+        std::string nm = type_name ();
+        error ("%s cannot be indexed with %c", nm.c_str (), type);
+      }
+      break;
+
+    default:
+      panic_impossible ();
+    }
+}
+
+template <typename MT>
 octave_value
 octave_base_matrix<MT>::subsref (const std::string& type,
                                  const std::list<octave_value_list>& idx)
@@ -550,6 +574,55 @@
 }

 template <typename MT>
+octave_value
+octave_base_matrix<MT>::checked_full_matrix_elem (octave_idx_type i) const
+{
+  return m_matrix.checkelem (i);
+}
+
+template <typename MT>
+octave_value
+octave_base_matrix<MT>::checked_full_matrix_elem (octave_idx_type i,
+                                                  octave_idx_type j) const
+{
+  return m_matrix.checkelem (i, j);
+}
+
+template <typename MT>
+octave_value
+octave_base_matrix<MT>::vm_extract_forloop_value (octave_idx_type counter)
+{
+  // TODO: Maybe this is slow?  Should preferably be done once per loop.
+  octave_value_list idx;
+  octave_value arg = octave_value_factory::make_copy (this);
+
+  dim_vector dv = arg.dims ().redim (2);
+  octave_idx_type nrows = dv(0);
+
+  if (arg.ndims () > 2)
+    arg = arg.reshape (dv);
+
+  octave_idx_type iidx;
+
+  // For row vectors, use a single index to speed things up.
+  if (nrows == 1)
+    {
+      idx.resize (1);
+      iidx = 0;
+    }
+  else
+    {
+      idx.resize (2);
+      idx(0) = octave_value::magic_colon_t;
+      iidx = 1;
+    }
+
+  // One-based indexing
+  idx(iidx) = counter + 1;
+  return arg.index_op (idx).storable_value ();
+}
+
+
+template <typename MT>
 bool
 octave_base_matrix<MT>::fast_elem_insert (octave_idx_type n,
                                           const octave_value& x)

diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-base-mat.h
--- a/libinterp/octave-value/ov-base-mat.h	Sat Jun 03 20:28:49 2023 -0700
+++ b/libinterp/octave-value/ov-base-mat.h	Mon Apr 24 20:34:39 2023 +0200
@@ -80,6 +80,8 @@
   octave_value full_value () const { return m_matrix; }

   void maybe_economize () { m_matrix.maybe_economize (); }

+  bool vm_need_storable_call (void) const { return true; }
+  bool is_maybe_function (void) const { return false; }

   // We don't need to override all three forms of subsref.  The using
   // declaration will avoid warnings about partially-overloaded virtual
@@ -89,6 +91,9 @@
   OCTINTERP_API octave_value
   subsref (const std::string& type, const std::list<octave_value_list>& idx);

+  octave_value_list
+  simple_subsref (char type, octave_value_list& idx, int nargout);
+
   octave_value_list
   subsref (const std::string& type, const std::list<octave_value_list>& idx,
            int)
   { return subsref (type, idx); }
@@ -159,6 +164,8 @@

   bool is_matrix_type () const { return true; }

+  bool is_full_num_matrix () const { return true; }
+
   bool isnumeric () const { return true; }

   bool is_defined () const { return true; }
@@ -203,6 +210,15 @@
   // You should not use it anywhere else.
const void * mex_get_data () const { return m_matrix.data (); } + OCTINTERP_API octave_value + vm_extract_forloop_value (octave_idx_type idx); + + virtual octave_value + checked_full_matrix_elem (octave_idx_type i) const; + + virtual octave_value + checked_full_matrix_elem (octave_idx_type i, octave_idx_type j) const; + protected: MT m_matrix; diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-base-scalar.cc --- a/libinterp/octave-value/ov-base-scalar.cc Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-base-scalar.cc Mon Apr 24 20:34:39 2023 +0200 @@ -69,6 +69,14 @@ template octave_value +octave_base_scalar::vm_extract_forloop_value (octave_idx_type) +{ + return octave_value (scalar); +} + + +template +octave_value octave_base_scalar::subsasgn (const std::string& type, const std::list& idx, const octave_value& rhs) diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-base-scalar.h --- a/libinterp/octave-value/ov-base-scalar.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-base-scalar.h Mon Apr 24 20:34:39 2023 +0200 @@ -135,6 +135,8 @@ MatrixType matrix_type (const MatrixType&) const { return matrix_type (); } + bool is_maybe_function (void) const { return false; } + bool is_scalar_type () const { return true; } bool isnumeric () const { return true; } @@ -167,9 +169,16 @@ OCTINTERP_API octave_value fast_elem_extract (octave_idx_type n) const; + OCTINTERP_API octave_value + vm_extract_forloop_value (octave_idx_type idx); + OCTINTERP_API bool fast_elem_insert_self (void *where, builtin_type_t btyp) const; + bool vm_need_dispatch_assign_rhs (void) { return false; } + bool vm_need_dispatch_assign_lhs (void) { return false; } + bool vm_need_dispatch_push (void) { return false; } + protected: // The value of this scalar. 
diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-base.cc --- a/libinterp/octave-value/ov-base.cc Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-base.cc Mon Apr 24 20:34:39 2023 +0200 @@ -61,6 +61,8 @@ #include "pr-output.h" #include "utils.h" #include "variables.h" +#include "ov-magic-int.h" +#include "ov-inline.h" builtin_type_t btyp_mixed_numeric (builtin_type_t x, builtin_type_t y) { @@ -122,6 +124,18 @@ } octave_value +octave_base_value::storable_value (void) +{ + return octave_value_factory::make_copy (this); +} + +octave_base_value * +octave_base_value::make_storable_value (void) +{ + return this; +} + +octave_value octave_base_value::squeeze () const { std::string nm = type_name (); @@ -141,6 +155,13 @@ } octave_value +octave_base_value::as_double_or_copy (void) +{ + const octave_base_value * cthis = this; + return cthis->as_double (); +} + +octave_value octave_base_value::as_single () const { err_invalid_conversion (type_name (), "single"); @@ -210,6 +231,15 @@ return octave::dims_to_numel (dims (), idx); } +octave_value_list +octave_base_value:: +simple_subsref (char type, octave_value_list &idx, int nargout) +{ + std::list idx_list; + idx_list.push_back (idx); + return subsref (std::string {type}, idx_list, nargout); +} + octave_value octave_base_value::subsref (const std::string&, const std::list&) @@ -250,6 +280,36 @@ } octave_value +octave_base_value::vm_extract_forloop_value (octave_idx_type idx) +{ + return fast_elem_extract (idx).as_double_or_copy (); +} + +double +octave_base_value::vm_extract_forloop_double (octave_idx_type) +{ + error ("Type error extracting for loop iterator double value for VM"); +} + +bool +octave_base_value::maybe_update_double (double) +{ + return false; +} + +octave_value +octave_base_value:: +simple_subsasgn (char type, + octave_value_list& idx, + const octave_value& rhs) +{ + std::list idx_list; + idx_list.push_back (idx); + + return subsasgn (std::string {type}, idx_list, rhs); +} + +octave_value octave_base_value::subsasgn (const std::string& type, const std::list& idx, const octave_value& rhs) @@ -901,6 +961,14 @@ return nullptr; } +octave_value_ref * +octave_base_value::ref_rep () +{ + err_wrong_type_arg ("octave_base_value::ref_value()", type_name ()); + + return nullptr; +} + octave_user_function * octave_base_value::user_function_value (bool silent) { @@ -1189,6 +1257,132 @@ } octave_value +octave_base_value:: +simple_numeric_assign (char type, + octave_value_list& idx, + const octave_value& rhs) +{ + octave_value retval; + + if (idx.empty ()) + error ("missing index in indexed assignment"); + + int t_lhs = type_id (); + int t_rhs = rhs.type_id (); + + static const std::string who = "octave_base_value::numeric_assign"; + octave::type_info& ti + = octave::__get_type_info__ (who); + + octave::type_info::assign_op_fcn f + = ti.lookup_assign_op (octave_value::op_asn_eq, t_lhs, t_rhs); + + bool done = false; + + if (f) + { + f (*this, idx, rhs.get_rep ()); + + done = true; + } + + if (done) + { + m_count++; + retval = octave_value (this); + } + else + { + int t_result = ti.lookup_pref_assign_conv (t_lhs, t_rhs); + + if (t_result >= 0) + { + octave_base_value::type_conv_fcn cf + = ti.lookup_widening_op (t_lhs, t_result); + + if (! cf) + err_indexed_assignment (type_name (), rhs.type_name ()); + + octave_base_value *tmp = cf (*this); + + if (! 
tmp) + err_assign_conversion_failed (type_name (), rhs.type_name ()); + + octave_value val (tmp); + + retval = val.simple_subsasgn (type, idx, rhs); + + done = true; + } + + if (! done) + { + octave_value tmp_rhs; + + octave_base_value::type_conv_info cf_rhs + = rhs.numeric_conversion_function (); + + octave_base_value::type_conv_info cf_this + = numeric_conversion_function (); + + // Try biased (one-sided) conversions first. + if (cf_rhs.type_id () >= 0 + && (ti.lookup_assign_op (octave_value::op_asn_eq, + t_lhs, cf_rhs.type_id ()) + || ti.lookup_pref_assign_conv (t_lhs, + cf_rhs.type_id ()) >= 0)) + cf_this = nullptr; + else if (cf_this.type_id () >= 0 + && (ti.lookup_assign_op (octave_value::op_asn_eq, + cf_this.type_id (), t_rhs) + || ti.lookup_pref_assign_conv (cf_this.type_id (), + t_rhs) >= 0)) + cf_rhs = nullptr; + + if (cf_rhs) + { + octave_base_value *tmp = cf_rhs (rhs.get_rep ()); + + if (! tmp) + err_assign_conversion_failed (type_name (), rhs.type_name ()); + + tmp_rhs = octave_value (tmp); + } + else + tmp_rhs = rhs; + + m_count++; + octave_value tmp_lhs = octave_value (this); + + if (cf_this) + { + octave_base_value *tmp = cf_this (*this); + + if (! tmp) + err_assign_conversion_failed (type_name (), rhs.type_name ()); + + tmp_lhs = octave_value (tmp); + } + + if (! cf_this && ! cf_rhs) + err_no_conversion (octave_value::assign_op_as_string + (octave_value::op_asn_eq), + type_name (), rhs.type_name ()); + + retval = tmp_lhs.simple_subsasgn (type, idx, tmp_rhs); + + done = true; + } + } + + // The assignment may have converted to a type that is wider than necessary. + + retval.maybe_mutate (); + + return retval; +} + +octave_value octave_base_value::numeric_assign (const std::string& type, const std::list& idx, const octave_value& rhs) @@ -1363,6 +1557,18 @@ return octave_value (); } +octave_value +octave_base_value::checked_full_matrix_elem (octave_idx_type) const +{ + err_wrong_type_arg ("octave_base_value::checked_full_matrix_elem (octave_idx_type)", type_name ()); +} + +octave_value +octave_base_value::checked_full_matrix_elem (octave_idx_type, octave_idx_type) const +{ + err_wrong_type_arg ("octave_base_value::checked_full_matrix_elem (octave_idx_type, octave_idx_type)", type_name ()); +} + bool octave_base_value::fast_elem_insert (octave_idx_type, const octave_value&) { diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-base.h --- a/libinterp/octave-value/ov-base.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-base.h Mon Apr 24 20:34:39 2023 +0200 @@ -72,12 +72,15 @@ class octave_value; class octave_value_list; class octave_classdef; +class octave_value_ref; class octave_function; class octave_user_function; class octave_user_script; class octave_user_code; class octave_fcn_handle; class octave_value_list; +class octave_fcn_cache; +class octave_value_vm; enum builtin_type_t { @@ -261,6 +264,7 @@ }; friend class octave_value; + friend class octave_value_vm; octave_base_value (); @@ -296,6 +300,11 @@ virtual octave_value full_value () const; + // Will return a copy of itself when the representation + already is a scalar (i.e. double). The const variant + as_double () would allocate a new octave value.
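+ //
+ // Editorial sketch of the intended use (illustrative only, not part
+ // of the patch):
+ //
+ //   octave_value v (3.14);                   // rep is an octave_scalar
+ //   octave_value a = v.as_double_or_copy (); // reuses the existing rep
+ //   octave_value b (true);
+ //   octave_value c = b.as_double_or_copy (); // not a double: falls back
+ //                                            // to the allocating as_double ()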
+ virtual octave_value as_double_or_copy (void); + virtual octave_value as_double () const; virtual octave_value as_single () const; @@ -334,6 +343,9 @@ const std::list& idx, bool auto_add); + virtual octave_value_list + simple_subsref (char type, octave_value_list &idx, int nargout); + virtual octave_value do_index_op (const octave_value_list& idx, bool resize_ok = false); @@ -345,6 +357,11 @@ const octave_value& rhs); virtual octave_value + simple_subsasgn (char type, + octave_value_list& idx, + const octave_value& rhs); + + virtual octave_value undef_subsasgn (const std::string& type, const std::list& idx, const octave_value& rhs); @@ -490,6 +507,8 @@ virtual bool is_matrix_type () const { return false; } + virtual bool is_full_num_matrix () const { return false; } + virtual bool isnumeric () const { return false; } virtual bool issparse () const { return false; } @@ -522,6 +541,19 @@ virtual bool is_mex_function () const { return false; } + virtual bool is_function_cache (void) const { return false; } + + // Checks if the ov could be a function. If it is undefined, + // the name associated with the ov could be a function to call. + virtual bool is_maybe_function (void) const + { return !is_defined () || is_function (); } + + virtual bool has_function_cache (void) const { return false; } + + virtual octave_function * get_cached_fcn (const octave_value_list&) { return nullptr; } + + virtual octave_fcn_cache * fcn_cache_value (void) { return nullptr; } + virtual void erase_subfunctions () { } virtual short int short_value (bool = false, bool = false) const; @@ -769,8 +801,34 @@ virtual void call_object_destructor () { } + virtual void maybe_call_dtor () { } + virtual octave_value dump () const; + virtual octave_value storable_value (void); + + virtual octave_base_value * make_storable_value (void); + + virtual bool vm_need_storable_call (void) const { return false; } + + virtual bool vm_need_dispatch_assign_rhs (void) { return true; } + virtual bool vm_need_dispatch_assign_lhs (void) { return true; } + virtual bool vm_need_dispatch_push (void) { return true; } + + virtual bool is_ref () { return false; } + + virtual octave_value_ref * ref_rep (); + + virtual octave_value + vm_extract_forloop_value (octave_idx_type idx); + + virtual double + vm_extract_forloop_double (octave_idx_type idx); + + virtual bool maybe_update_double (double d); + + virtual bool is_trivial_range () const { return false; }; + // Standard mappers. Register new ones here. enum unary_mapper_t { @@ -860,6 +918,12 @@ virtual bool fast_elem_insert_self (void *where, builtin_type_t btyp) const; + virtual octave_value + checked_full_matrix_elem (octave_idx_type i) const; + + virtual octave_value + checked_full_matrix_elem (octave_idx_type i, octave_idx_type j) const; + protected: // This should only be called for derived types. 
@@ -869,6 +933,11 @@ const std::list& idx, const octave_value& rhs); + OCTINTERP_API octave_value + simple_numeric_assign (char type, + octave_value_list& idx, + const octave_value& rhs); + void reset_indent_level () const { s_curr_print_indent_level = 0; } diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-cell.h --- a/libinterp/octave-value/ov-cell.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-cell.h Mon Apr 24 20:34:39 2023 +0200 @@ -132,6 +132,8 @@ bool is_true () const; + bool is_full_num_matrix () const { return false; } + Cell cell_value () const { return m_matrix; } octave_value_list list_value () const; diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-complex.cc --- a/libinterp/octave-value/ov-complex.cc Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-complex.cc Mon Apr 24 20:34:39 2023 +0200 @@ -80,6 +80,8 @@ update_message (); } + OCTAVE_DEFAULT_COPY_MOVE (complex_index_exception) + ~complex_index_exception () = default; void update_message () @@ -93,6 +95,13 @@ { return "Octave:invalid-index"; } + + index_exception* vm_dup () + { + complex_index_exception *p = new complex_index_exception {*this}; + p->set_identifier (p->err_id ()); + return p; + } }; OCTAVE_END_NAMESPACE(octave) diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-fcn-handle.cc --- a/libinterp/octave-value/ov-fcn-handle.cc Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-fcn-handle.cc Mon Apr 24 20:34:39 2023 +0200 @@ -229,9 +229,14 @@ friend bool is_equal_to (const simple_fcn_handle& fh1, const simple_fcn_handle& fh2); + octave_function * + get_cached_fcn (const octave_value_list &args); + + bool has_function_cache (void) const; + private: - octave_value m_fcn; + octave_fcn_cache m_cache; // Only used by the VM via get_cached_fcn() and has_function_cache() }; class scoped_fcn_handle : public base_fcn_handle @@ -674,6 +679,15 @@ bool parse (const std::string& fcn_text); + octave_function * + get_cached_fcn (const octave_value_list&) { return m_fcn.function_value (); } + // TODO: This is a hack to get uncompiled anonymous functions to be subsrefed in the VM + bool has_function_cache (void) const + { + octave_function *fn = m_fcn.function_value (); + return fn ? fn->is_compiled () : false; + } + protected: // The function we are handling. @@ -942,6 +956,201 @@ return false; } +octave_function * +simple_fcn_handle:: +get_cached_fcn (const octave_value_list &args) +{ + if (m_cache.has_cached_function (args)) + return m_cache.get_cached_fcn (); + + { + // The lookup is done like in call() + interpreter& interp = __get_interpreter__ (); + symbol_table& symtab = interp.get_symbol_table (); + + octave_value fcn_to_call; + octave_value ov_fcn = symtab.find_function (m_name, args); + + if (m_fcn.is_defined ()) + { + // A simple function was found when the handle was created. + // Use that unless we find a class method to override it. + + fcn_to_call = m_fcn; + + if (ov_fcn.is_defined ()) + { + octave_function *fcn = ov_fcn.function_value (); + + std::string dispatch_class = fcn->dispatch_class (); + + if (fcn->is_class_method ()) + { + // Function found through lookup is a class method + // so use it instead of the simple one found when + // the handle was created. + + fcn_to_call = ov_fcn; + } + } + } + else + { + // There was no simple function found when the handle was + // created so use the one found here (if any). + + fcn_to_call = ov_fcn; + } + + + if (! 
fcn_to_call.is_defined ()) + err_invalid_fcn_handle (m_name); + + m_cache.set_cached_function (fcn_to_call, args, 0); + + return fcn_to_call.function_value (); + } +} + +// Like call(), but instead of calling, returns true if call() would end up +// performing another call(), and false if it would end up in a subsref() or +// hit an error on the way to that subsref() call. +bool +simple_fcn_handle::has_function_cache () const +{ + // FIXME: if m_name has a '.' in the name, lookup first component. If + // it is a classdef meta object, then build TYPE and IDX arguments and + // make a subsref call using them. + + interpreter& interp = __get_interpreter__ (); + + octave_value fcn_to_call; + + // The following code is similar to part of + // tree_evaluator::visit_index_expression but simpler because it + // handles a more restricted case. + + symbol_table& symtab = interp.get_symbol_table (); + + std::size_t pos = m_name.find ('.'); + + if (pos != std::string::npos) + { + // FIXME: check to see which of these cases actually work in + // Octave and Matlab. For the last two, assume handle is + // created before object is defined as an object. + // + // We can have one of + // + // pkg-list . fcn (args) + // pkg-list . cls . meth (args) + // class-name . method (args) + // class-name . static-method (args) + // object . method (args) + // object . static-method (args) + + // Evaluate package elements until we find a function, + // classdef object, or classdef_meta object that is not a + // package. An object may only appear as the first element, + // then it must be followed directly by a function name. + + std::size_t beg = 0; + std::size_t end = pos; + + std::vector idx_elts; + + while (true) + { + end = m_name.find ('.', beg); + + idx_elts.push_back (m_name.substr (beg, end-beg)); + + if (end == std::string::npos) + break; + + beg = end+1; + } + + std::size_t n_elts = idx_elts.size (); + + bool have_object = false; + octave_value partial_expr_val; + + // Lazy evaluation. The first element was not known to be defined + // as an object in the scope where the handle was created. See if + // there is a definition in the current scope. + + partial_expr_val = interp.varval (idx_elts[0]); + + if (partial_expr_val.is_defined ()) + { + if (! partial_expr_val.is_classdef_object () || n_elts != 2) + return false; + + have_object = true; + } + else + partial_expr_val = symtab.find_function (idx_elts[0], ovl ()); + + std::string type; + std::list arg_list; + + for (std::size_t i = 1; i < n_elts; i++) + { + if (partial_expr_val.is_package ()) + { + if (have_object) + return false; + + type = "."; + arg_list.push_back (ovl (idx_elts[i])); + + try + { + // Silently ignore extra output values. + + octave_value_list tmp_list + = partial_expr_val.subsref (type, arg_list, 0); + + partial_expr_val + = tmp_list.length () ? tmp_list(0) : octave_value (); + + if (partial_expr_val.is_cs_list ()) + return false; + + arg_list.clear (); + } + catch (const index_exception&) + { + return false; + } + } + else if (have_object || partial_expr_val.is_classdef_meta ()) + { + // Object or class name must be the next to the last + // element (it was the previous one, so if this is the + // final element, it should be a classdef method, + // but we'll let the classdef or classdef_meta subsref + // function sort that out). + return false; + } + else + return false; + } + + // If we get here, we must have a function to call. + + if (!
partial_expr_val.is_function ()) + return false; + + return true; + } + else + { + return true; + } +} + octave_value_list simple_fcn_handle::call (int nargout, const octave_value_list& args) { diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-fcn-handle.h --- a/libinterp/octave-value/ov-fcn-handle.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-fcn-handle.h Mon Apr 24 20:34:39 2023 +0200 @@ -154,6 +154,11 @@ return false; } + virtual octave_function * + get_cached_fcn (const octave_value_list&) { return nullptr; } + virtual bool + has_function_cache (void) const { return false; } + protected: void warn_load (const char *file_type) const; @@ -221,6 +226,11 @@ const std::shared_ptr& closure_frames = std::shared_ptr ()); + // Create a simple function handle that is not bound to a function. + // Lookup happens when a function call is attempted and the function + // lookup is cached in an octave_fcn_cache. + octave_fcn_handle (const std::string& name, octave_value cache); + octave_fcn_handle (octave::base_fcn_handle *rep); octave_fcn_handle (const octave_fcn_handle& fh); @@ -361,6 +371,10 @@ friend bool is_equal_to (const octave_fcn_handle& fh1, const octave_fcn_handle& fh2); + octave_function * + get_cached_fcn (const octave_value_list& args) { return m_rep->get_cached_fcn (args); } + bool has_function_cache (void) const { return m_rep->has_function_cache (); } + private: std::shared_ptr m_rep; diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-fcn.cc --- a/libinterp/octave-value/ov-fcn.cc Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-fcn.cc Mon Apr 24 20:34:39 2023 +0200 @@ -34,6 +34,14 @@ #include "ov-fcn.h" #include "pt-eval.h" +#include "error.h" +#include "interpreter-private.h" +#include "symtab.h" +#include "interpreter.h" +#include "lo-array-errwarn.h" + +#include "pt-bytecode-walk.h" + octave_base_value * octave_function::clone () const { @@ -50,9 +58,171 @@ octave_function::call (octave::tree_evaluator& tw, int nargout, const octave_value_list& args) { - tw.push_stack_frame (this); + octave_user_function *usr = this->user_function_value(true); + + bool is_compiled = false; + if (usr) + { + is_compiled = usr->is_compiled (); + if (octave::V__enable_vm_eval__ && !is_compiled && !usr->m_compilation_failed) + { + try + { + octave::compile_user_function (*usr, false); + is_compiled = true; + } + catch (std::exception &e) + { + warning ("Compilation failed with message %s", e.what ()); + usr->m_compilation_failed = true; + } + } + } + + // Bytecode functions push their own stack frames in the VM + + if (!usr || !is_compiled) + { + tw.push_stack_frame (this); + } octave::unwind_action act ([&tw] () { tw.pop_stack_frame (); }); return execute (tw, nargout, args); } + + +void +octave_fcn_cache::set_cached_function (octave_value ov, + const octave_value_list &args, + octave_idx_type current_n_updated) +{ + clear_cached_function (); + + if (!ov.is_defined ()) + return; + + // We need to keep a reference to the metaobject for as long as the function is alive + if (ov.is_classdef_meta ()) + m_cached_object = ov; + + std::vector v_types; + + for (int i = 0; i < args.length (); i++) + { + // FIXME: We don't cache methods or functions with class object + // arguments. Classes need some kind of unique simple key for this + // simple approach.
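+ //
+ // Editorial illustration (hypothetical values, not part of the patch):
+ // a call like foo (1.0, "s") is cached under the sequence of argument
+ // type ids {id of octave_scalar, id of octave_char_matrix_str}, and
+ // has_cached_function () only reports a hit when a later call passes
+ // arguments with exactly the same type ids.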
+ if (args(i).isobject()) + return; + + v_types.push_back (args (i).type_id ()); + } + + m_cached_args = v_types; + m_cached_function = ov; + + m_n_updated = current_n_updated; +} + +octave_value +octave_fcn_cache:: +get_cached_obj (const octave_value_list& args) +{ + octave_function *fcn = nullptr; + + octave_idx_type current_n_updated = octave::load_path::get_weak_n_updated (); + if (has_cached_function (args)) + { + if (m_n_updated == current_n_updated) + return m_cached_function; + else + clear_cached_function (); + } + + if (! fcn) + { + octave::interpreter& interp = + octave::__get_interpreter__ (); + + octave::symbol_table& symtab = interp.get_symbol_table (); + octave_value val = symtab.find_function (m_fcn_name, args); + + if (val.is_function ()) + { + fcn = val.function_value (true); + set_cached_function (val, args, current_n_updated); + return val; + } + + val = symtab.find_function (m_fcn_name); + if (val.is_function ()) + { + return val; + } + } + + return {}; +} + +octave_function * +octave_fcn_cache:: +get_cached_fcn_internal (const octave_value_list& args) +{ + clear_cached_function (); + + octave_function *fcn = nullptr; + octave_idx_type current_n_updated = octave::load_path::get_weak_n_updated (); + + octave::interpreter& interp = + octave::__get_interpreter__ (); + + octave::symbol_table& symtab = interp.get_symbol_table (); + octave_value val = symtab.find_function (m_fcn_name, args); + + if (val.is_function ()) + { + fcn = val.function_value (true); + set_cached_function (val, args, current_n_updated); + return fcn; + } + + val = symtab.find_function (m_fcn_name); + if (val.is_function ()) + { + return val.function_value (true); + } + + return fcn; +} + +octave_function * +octave_fcn_cache:: +get_cached_fcn (const octave_value_list& args) +{ + octave_idx_type current_n_updated = octave::load_path::get_weak_n_updated (); + if (OCTAVE_LIKELY (has_cached_function (args))) + if (OCTAVE_LIKELY (m_n_updated == current_n_updated)) + return m_cached_function.function_value (true); + + return get_cached_fcn_internal (args); +} + +octave_value_list +octave_fcn_cache:: +call (octave::tree_evaluator& tw, + octave_function *fcn, + const octave_value_list& args, + int nargout) +{ + try + { + return fcn->call (tw, nargout, args); + } + catch (octave::index_exception& ie) + { + error ("Proper error message here for function calls"); + // Maybe return the octave_function pointer? + //tw.final_index_error (ie, m_expr); + } +} diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-fcn.h --- a/libinterp/octave-value/ov-fcn.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-fcn.h Mon Apr 24 20:34:39 2023 +0200 @@ -48,6 +48,82 @@ // Functions. +// Class that holds a cached reference to an octave function +// for use in the bytecode VM.
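+//
+// A rough usage sketch, as the VM might drive it (editorial illustration,
+// not part of the patch):
+//
+//   octave_fcn_cache cache ("sin");
+//   octave_function *f = cache.get_cached_fcn (args); // symbol table lookup,
+//                                                     // result is cached
+//   f = cache.get_cached_fcn (args);                  // same argument type ids
+//                                                     // and load path: cache hit
+//
+// The cached function is looked up again whenever the load path update
+// counter (load_path::get_weak_n_updated ()) has changed.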
+class +OCTINTERP_API +octave_fcn_cache : public octave_base_value +{ +public: + octave_fcn_cache (const std::string &name) :m_fcn_name (name) { } + octave_fcn_cache () {} + + bool is_function_cache (void) const { return true; } + + bool has_function_cache (void) const { return true; } + + octave_function * + get_cached_fcn (const octave_value_list& args); + + octave_function * + get_cached_fcn () { return m_cached_function.function_value (); } + + octave_value + get_cached_obj (const octave_value_list& args); + + octave_fcn_cache * fcn_cache_value (void) + { + return this; + } + + octave_value_list + call (octave::tree_evaluator& tw, + octave_function *fcn, + const octave_value_list& args, + int nargout); + + void set_cached_function (octave_value ov, const octave_value_list &args, octave_idx_type current_n_updated); + + bool has_cached_function (const octave_value_list &args) const + { + if (m_n_updated == 0) + return false; + + unsigned vec_n = m_cached_args.size (); + + unsigned n_args = args.length (); + if (n_args != vec_n) + return false; + + for (unsigned i = 0; i < n_args; i++) + { + if (args (i).type_id () != m_cached_args [i]) + return false; + } + + return true; + } + +private: + + octave_function * get_cached_fcn_internal (const octave_value_list& args); + + void clear_cached_function () + { + m_cached_object = octave_value {}; + m_cached_function = octave_value {}; + m_n_updated = 0; + m_cached_args.clear (); + } + + octave_value m_cached_object; + octave_value m_cached_function; + std::vector m_cached_args; + octave_idx_type m_n_updated = 0; + std::string m_fcn_name; +}; + + class OCTINTERP_API octave_function : public octave_base_value @@ -105,6 +181,8 @@ virtual bool is_subfunction () const { return false; } + virtual bool is_compiled () const { return false; } + bool is_class_constructor (const std::string& cname = "") const { return (is_classdef_constructor (cname) || is_legacy_constructor (cname)); diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-inline.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libinterp/octave-value/ov-inline.h Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,135 @@ +//////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 1996-2021 The Octave Project Developers +// +// See the file COPYRIGHT.md in the top-level directory of this +// distribution or . +// +// This file is part of Octave. +// +// Octave is free software: you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Octave is distributed in the hope that it will be useful, but +// WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Octave; see the file COPYING. If not, see +// . +// +//////////////////////////////////////////////////////////////////////// + +#if ! 
defined (octave_ov_inline_h) +#define octave_ov_inline_h 1 + +#include "octave-config.h" + +#include "ov.h" + +#include "ov-scalar.h" +#include "ov-float.h" +#include "ov-complex.h" +#include "ov-flt-complex.h" +#include "ov-bool.h" +#include "ov-base.h" + + +// Class to construct octave_value objects inline + +class octave_value_factory +{ + public: + + static octave_value make (double d) + { + return octave_value (0, new octave_scalar (d)); + } + + static octave_value make (float d) + { + return octave_value (0, new octave_float_scalar (d)); + } + + static octave_value make (short int i) + { + return octave_value (0, new octave_scalar (i)); + } + + static octave_value make (unsigned short int i) + { + return octave_value (0, new octave_scalar (i)); + } + + static octave_value make (int i) + { + return octave_value (0, new octave_scalar (i)); + } + + static octave_value make (unsigned int i) + { + return octave_value (0, new octave_scalar (i)); + } + + static octave_value make (long int i) + { + return octave_value (0, new octave_scalar (i)); + } + + static octave_value make (unsigned long int i) + { + return octave_value (0, new octave_scalar (i)); + } + +#if defined (OCTAVE_HAVE_LONG_LONG_INT) + static octave_value make (long long int i) + { + return octave_value (0, new octave_scalar (i)); + } +#endif + +#if defined (OCTAVE_HAVE_UNSIGNED_LONG_LONG_INT) + static octave_value make (unsigned long long int i) + { + return octave_value (0, new octave_scalar (i)); + } +#endif + + static octave_value make (octave::sys::time t) + { + return octave_value (0, new octave_scalar (t.double_value ())); + } + + static octave_value make (const Complex& C) + { + octave_value ov(0, new octave_complex (C)); + ov.maybe_mutate (); // Fold e.g. 1+0i to 1 + return ov; + } + + static octave_value make (const FloatComplex& C) + { + octave_value ov(0, new octave_float_complex (C)); + ov.maybe_mutate (); + return ov; + } + + static octave_value make (bool b) + { + return octave_value (0, new octave_bool (b)); + } + + static octave_value make_copy (octave_base_value *rep) + { + return octave_value (0, 0, rep); + } + + private: + ~octave_value_factory () = delete; + octave_value_factory () = delete; +}; + +#endif diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-magic-int.h --- a/libinterp/octave-value/ov-magic-int.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-magic-int.h Mon Apr 24 20:34:39 2023 +0200 @@ -85,6 +85,7 @@ bool is_storable () const { return false; } bool is_magic_int () const { return true; } + bool vm_need_storable_call (void) const { return true; } bool is_real_scalar () const { return true; } diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-null-mat.h --- a/libinterp/octave-value/ov-null-mat.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-null-mat.h Mon Apr 24 20:34:39 2023 +0200 @@ -52,6 +52,7 @@ static const octave_value instance; bool isnull () const { return true; } + bool vm_need_storable_call (void) const { return true; } type_conv_info numeric_conversion_function () const; @@ -75,6 +76,7 @@ bool is_storable () const { return false; } bool isnull () const { return true; } + bool vm_need_storable_call (void) const { return true; } type_conv_info numeric_conversion_function () const; @@ -98,6 +100,7 @@ bool is_storable () const { return false; } bool isnull () const { return true; } + bool vm_need_storable_call (void) const { return true; } type_conv_info numeric_conversion_function () const; diff -r edbe81ee00c5 -r d2de83a80165
libinterp/octave-value/ov-oncleanup.h --- a/libinterp/octave-value/ov-oncleanup.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-oncleanup.h Mon Apr 24 20:34:39 2023 +0200 @@ -91,6 +91,12 @@ void call_object_destructor (); + void maybe_call_dtor () + { + if (m_count == 1) + call_object_destructor (); + } + private: DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-pool.cc --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libinterp/octave-value/ov-pool.cc Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,70 @@ +//////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 2022-2023 The Octave Project Developers +// +// See the file COPYRIGHT.md in the top-level directory of this +// distribution or . +// +// This file is part of Octave. +// +// Octave is free software: you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Octave is distributed in the hope that it will be useful, but +// WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Octave; see the file COPYING. If not, see +// . +// +//////////////////////////////////////////////////////////////////////// + +#if defined (HAVE_CONFIG_H) +# include "config.h" +#endif + +#include "ov-bool.h" +#include "ov-scalar.h" +#include "ov-int8.h" +#include "ov-int16.h" +#include "ov-int32.h" +#include "ov-int64.h" +#include "ov-uint8.h" +#include "ov-uint16.h" +#include "ov-uint32.h" +#include "ov-uint64.h" +#include "ov-magic-int.h" +#include "ov-vm.h" + +OBJECT_POOL_DEF_STATICS (octave_scalar) + +OBJECT_POOL_DEF_STATICS (octave_bool) + +OBJECT_POOL_DEF_STATICS (octave_int8_scalar) +OBJECT_POOL_DEF_STATICS (octave_int16_scalar) +OBJECT_POOL_DEF_STATICS (octave_int32_scalar) +OBJECT_POOL_DEF_STATICS (octave_int64_scalar) + +OBJECT_POOL_DEF_STATICS (octave_uint8_scalar) +OBJECT_POOL_DEF_STATICS (octave_uint16_scalar) +OBJECT_POOL_DEF_STATICS (octave_uint32_scalar) +OBJECT_POOL_DEF_STATICS (octave_uint64_scalar) + +OBJECT_POOL_DEF_STATICS (octave_magic_int) +OBJECT_POOL_DEF_STATICS (octave_magic_uint) + +OBJECT_POOL_DEF_STATICS (octave_cached_value) + +octave_value ov_true {true}; +octave_value ov_false {false}; +octave_value ov_dbl_0 {0.0}; +octave_value ov_dbl_1 {1.0}; +octave_value ov_dbl_2 {2.0}; + +// Some stack-frames also have object pools, but their definitions +// needed to be in stack-frame.cc since their implementations are +// hidden. diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-pool.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libinterp/octave-value/ov-pool.h Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,138 @@ +//////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 2022-2023 The Octave Project Developers +// +// See the file COPYRIGHT.md in the top-level directory of this +// distribution or . +// +// This file is part of Octave. +// +// Octave is free software: you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version.
+// +// Octave is distributed in the hope that it will be useful, but +// WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Octave; see the file COPYING. If not, see +// . +// +//////////////////////////////////////////////////////////////////////// + +#if ! defined (octave_ov_pool_h) +#define octave_ov_pool_h 1 + +#include "octave-config.h" + +extern octave_value ov_true; +extern octave_value ov_false; +extern octave_value ov_dbl_0; +extern octave_value ov_dbl_1; +extern octave_value ov_dbl_2; + +#ifdef OCTAVE_ENABLE_OBJECT_POOL + +// thread_local destructors seem a bit buggy/non-standard compliant in +// gcc and clang and seem to only be executed if the object is "used" somehow, +// even though the destructor has side effects. +// +// So we just make objects here that are initialized with the address of the +// cleaner object. +// +// See e.g. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61991 + +#define OBJECT_POOL_PUBLICS(n_pool_size) \ +/* Overloaded new and delete operators that use an object pool. */ \ +static void * operator new (std::size_t count) \ +{ \ + /* If n_in_obj_pool is negative, the pool is closed */ \ + if (n_in_obj_pool > 0) \ + return obj_pool[--n_in_obj_pool]; \ + \ + return ::operator new (count); \ +} \ + \ +static void * operator new (std::size_t count, void *p) \ +{ \ + /* Placement new just uses default one */ \ + return ::operator new (count, p); \ +} \ + \ +static void operator delete (void* ptr) \ +{ \ + /* If n_in_obj_pool is negative, the pool is closed */ \ + if (n_in_obj_pool >= 0 && n_in_obj_pool < obj_pool_size) \ + { \ + obj_pool[n_in_obj_pool++] = ptr; \ + return; \ + } \ + ::operator delete (ptr); \ +} \ + \ +/* Class that cleans the object pool when the thread terminates */ \ +class object_pool_cleaner \ +{ \ +public: \ + ~object_pool_cleaner (); \ +}; \ + \ +/* For workaround reasons pool_cleaner can't be private. Its address \ + * has to be taken somewhere to fool the compiler to trigger its \ + * destructor. */ \ +thread_local static object_pool_cleaner pool_cleaner \ +__attribute__ ((tls_model ("initial-exec"))); \ + \ +private: \ +static constexpr int obj_pool_size = n_pool_size; \ + \ +/* Counter for the number of objects in the pool */ \ +thread_local static int n_in_obj_pool \ +__attribute__ ((tls_model ("initial-exec"))); \ +/* The pool */ \ +thread_local static void * obj_pool[obj_pool_size] \ +__attribute__ ((tls_model ("initial-exec"))); \ +public: \ + +// The following macro defines the static symbols for an object pool for +// a class. The corresponding class needs to use the OBJECT_POOL_PUBLICS(n_pool_size) +// macro. + +#define OBJECT_POOL_DEF_STATICS(type) \ +thread_local type::object_pool_cleaner \ +type::pool_cleaner __attribute__ ((tls_model ("initial-exec"))); \ + \ +/* Ensure the compiler thinks we are using the pool \ * so that it actually calls its dtor.
*/ \ +type::object_pool_cleaner *dummy_idx_ ## type = \ + &type::pool_cleaner; \ + \ +/* Live objects in pool */ \ +thread_local int type::n_in_obj_pool \ +__attribute__ ((tls_model ("initial-exec"))); \ + \ +/* The actual pool */ \ +thread_local void * type::obj_pool[type::obj_pool_size] \ + __attribute__ ((tls_model ("initial-exec"))); \ + \ +type::object_pool_cleaner::~object_pool_cleaner () \ + { \ + int n = n_in_obj_pool; \ + /* Set the counter to -1 so that any lingering objects \ + * do not use the pool */ \ + n_in_obj_pool = -1; \ + \ + for (int i = 0; i < n; i++) \ + type::operator delete (obj_pool[i]); \ + } \ + + +#else +#define OBJECT_POOL_PUBLICS(n_pool_size) +#define OBJECT_POOL_DEF_STATICS(type) +#endif + +#endif diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-range.cc --- a/libinterp/octave-value/ov-range.cc Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-range.cc Mon Apr 24 20:34:39 2023 +0200 @@ -53,6 +53,7 @@ #include "ops.h" #include "ovl.h" #include "oct-hdf5.h" +#include "ov-inline.h" #include "ov-range-traits.h" #include "ov-range.h" #include "ov-re-mat.h" @@ -977,6 +978,47 @@ return retval; } +DEFINE_OV_TYPEID_FUNCTIONS_AND_DATA (octave_trivial_range, + "trivial range", "double"); + +template +bool +ov_range::could_be_trivial_range () { return false; } + +template <> +bool +ov_range::could_be_trivial_range () +{ + octave_idx_type n = m_range.numel (); + if (n > std::numeric_limits::max()) + return false; + if (n <= 1) + return false; + + double f = m_range.final_value (); + if (f > std::numeric_limits::max() || + f < std::numeric_limits::min()) + return false; + if (std::isnan (f)) + return false; + + return true; +} + +template +octave_value +ov_range::as_trivial_range () +{ + error ("Type error returning trivial range"); +} + +template <> +octave_value +ov_range::as_trivial_range () +{ + return octave_value (new octave_trivial_range (m_range.numel (), m_range.base (), m_range.increment ())); +} + template octave_value ov_range::fast_elem_extract (octave_idx_type n) const @@ -993,6 +1035,24 @@ return m_range; } +template +octave_value +ov_range::vm_extract_forloop_value (octave_idx_type n) +{ + octave_value ov = octave_value_factory::make (m_range.elem (n)); + + return ov.as_double_or_copy (); +} + +template <> +octave_value +ov_range::vm_extract_forloop_value (octave_idx_type n) +{ + octave_value ov = octave_value_factory::make (m_range.elem (n)); + + return ov; +} + // For now, enable only ov_range.
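+
+// Editorial note on vm_extract_forloop_value () above (illustrative only,
+// not part of the patch): for a loop such as
+//
+//   for i = 0:0.5:2
+//   endfor
+//
+// the VM asks the range for one iterator value per iteration,
+// vm_extract_forloop_value (n), yielding the scalars 0, 0.5, ..., 2
+// without ever materializing the range as a full matrix.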
template <> diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-range.h --- a/libinterp/octave-value/ov-range.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-range.h Mon Apr 24 20:34:39 2023 +0200 @@ -51,6 +51,44 @@ class octave_value_list; + +class octave_trivial_range : public octave_base_value +{ +public: + + octave_trivial_range (octave_idx_type numel, double base, double incr) + : m_numel (numel), m_base (base), m_increment(incr) { } + + octave_trivial_range () {}; + + octave_trivial_range (const octave_trivial_range&) = default; + + OCTINTERP_API octave_value + vm_extract_forloop_value (octave_idx_type i) + { + if (i < m_numel - 1) + return m_base + static_cast (i) * m_increment; + return m_base + (m_numel - 1) * m_increment; + } + + double + vm_extract_forloop_double (octave_idx_type i) + { + if (i < m_numel - 1) + return m_base + static_cast (i) * m_increment; + return m_base + (m_numel - 1) * m_increment; + } + + bool is_trivial_range () const { return true; }; + +private: + int m_numel = 0; + int m_base = 0; + int m_increment = 0; + + DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA +}; + // For now, we only need ov_range but we don't attempt to // enforce that restriction. @@ -130,6 +168,12 @@ return dim_vector (n > 0, n); } + OCTINTERP_API octave_value as_trivial_range (); + OCTINTERP_API bool could_be_trivial_range (); + + OCTINTERP_API octave_value + vm_extract_forloop_value (octave_idx_type idx); + octave_idx_type numel () const { return m_range.numel (); } octave_idx_type nnz () const @@ -166,6 +210,7 @@ bool is_constant () const { return true; } bool is_range () const { return true; } + bool vm_need_storable_call (void) const { return true; } bool is_double_type () const { return builtin_type () == btyp_double; } diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-ref.cc --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libinterp/octave-value/ov-ref.cc Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,123 @@ +//////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 2007-2023 The Octave Project Developers +// +// See the file COPYRIGHT.md in the top-level directory of this +// distribution or . +// +// This file is part of Octave. +// +// Octave is free software: you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Octave is distributed in the hope that it will be useful, but +// WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Octave; see the file COPYING. If not, see +// . 
+// +//////////////////////////////////////////////////////////////////////// + +#if defined (HAVE_CONFIG_H) +# include "config.h" +#endif + +#include "ov.h" +#include "ov-ref.h" + + +#include "interpreter.h" +#include "interpreter-private.h" + +DEFINE_OV_TYPEID_FUNCTIONS_AND_DATA (octave_value_ref_global, + "global value reference", + "global value reference"); +DEFINE_OV_TYPEID_FUNCTIONS_AND_DATA (octave_value_ref_persistent, + "global value persistent", + "global value persistent"); +void +octave_value_ref::maybe_call_dtor () +{ + ref ().m_rep->maybe_call_dtor (); +} + +bool +octave_value_ref::is_defined () const +{ + return const_cast (this)->ref ().m_rep->is_defined (); +} + +bool +octave_value_ref::is_maybe_function () const +{ + return const_cast (this)->ref ().m_rep->is_maybe_function (); +} + +octave_base_value * +octave_value_ref::unique_clone () +{ + return ref ().m_rep->unique_clone (); +} + +octave_value +octave_value_ref::simple_subsasgn (char type, octave_value_list& idx, const octave_value& rhs) +{ + octave_value ans = ref ().m_rep->simple_subsasgn (type, idx, rhs); + ref () = ans; + return octave_value {this, true}; +} + +octave_value +octave_value_ref::subsasgn (const std::string& type, + const std::list& idx, + const octave_value& rhs) +{ + octave_value ans = ref ().m_rep->subsasgn (type, idx, rhs); + ref () = ans; + return octave_value {this, true}; +} + +octave_value +octave_value_ref_global::deref () +{ + auto &interp = octave::__get_interpreter__(); + return interp.global_varval (m_name); +} + +void +octave_value_ref_global::set_value (octave_value val) +{ + auto &interp = octave::__get_interpreter__(); + interp.global_assign (m_name, val); +} + +octave_value & +octave_value_ref_global::ref () +{ + auto &interp = octave::__get_interpreter__(); + return interp.global_varref (m_name); +} + +octave_value +octave_value_ref_persistent::deref () +{ + return m_scope.persistent_varval (m_offset); +} + +void +octave_value_ref_persistent::set_value (octave_value val) +{ + octave_value &ov_pers = m_scope.persistent_varref (m_offset); + ov_pers = val; +} + +octave_value & +octave_value_ref_persistent::ref () +{ + return m_scope.persistent_varref (m_offset); +} \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-ref.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libinterp/octave-value/ov-ref.h Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,117 @@ +//////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 1996-2023 The Octave Project Developers +// +// See the file COPYRIGHT.md in the top-level directory of this +// distribution or . +// +// This file is part of Octave. +// +// Octave is free software: you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Octave is distributed in the hope that it will be useful, but +// WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Octave; see the file COPYING. If not, see +// . +// +//////////////////////////////////////////////////////////////////////// + +#if ! 
defined (octave_ov_ref_h) +#define octave_ov_ref_h 1 + +#include "octave-config.h" + +#include "ov-base.h" +#include "ovl.h" +#include "symscope.h" + +#include +#include + +// octave_value_ref is to be used by the VM to implement +// global and persistent values. +// +// octave_value_ref needs to overload any virtual call +// used by the assign and push slot op-codes. +// +// An octave_value_ref should never leave the VM slots +// on the VM stack. + +// Abstract type +class OCTINTERP_API +octave_value_ref : public octave_base_value +{ +public: + octave_value_ref () = default; + ~octave_value_ref () = default; + + octave_value_ref * ref_rep () { return this; } + bool is_ref () { return true; } + + virtual octave_value deref () = 0; + virtual void set_value (octave_value val) = 0; + virtual octave_value & ref () = 0; + virtual void maybe_save_state () {}; + + virtual bool is_global_ref () { return false; } + virtual bool is_persistent_ref () { return false; } + + void maybe_call_dtor (); + octave_value simple_subsasgn (char type, octave_value_list& idx, const octave_value& rhs); + octave_value subsasgn (const std::string& type, const std::list& idx, const octave_value& rhs); + octave_base_value * unique_clone (); + bool is_defined () const; + bool is_maybe_function (void) const; +}; + +class OCTINTERP_API +octave_value_ref_global : public octave_value_ref +{ +public: + octave_value_ref_global () = default; + ~octave_value_ref_global () = default; + octave_value_ref_global (std::string name) + : m_name (name) {}; + + octave_value deref (); + octave_value & ref (); + void set_value (octave_value val); + + bool is_global_ref () { return true; } + +private: + std::string m_name; + + DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA +}; + +class OCTINTERP_API +octave_value_ref_persistent : public octave_value_ref +{ +public: + octave_value_ref_persistent () = default; + ~octave_value_ref_persistent () = default; + octave_value_ref_persistent (octave::symbol_scope scope, int offset) + : m_offset (offset), m_scope (scope) {}; + + octave_value deref (); + octave_value & ref (); + void set_value (octave_value val); + + bool is_persistent_ref () { return true; } + +private: + int m_offset; + octave::symbol_scope m_scope; + + DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA +}; + +#endif \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-scalar.cc --- a/libinterp/octave-value/ov-scalar.cc Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-scalar.cc Mon Apr 24 20:34:39 2023 +0200 @@ -54,6 +54,7 @@ #include "xdiv.h" #include "xpow.h" #include "ops.h" +#include "ov-inline.h" #include "ls-oct-text.h" #include "ls-hdf5.h" @@ -125,7 +126,13 @@ } octave_value -octave_scalar::as_double () const +octave_scalar::as_double_or_copy (void) +{ + return octave_value_factory::make_copy (this); +} + +octave_value +octave_scalar::as_double (void) const { return scalar; } diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-scalar.h --- a/libinterp/octave-value/ov-scalar.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-scalar.h Mon Apr 24 20:34:39 2023 +0200 @@ -223,6 +223,16 @@ return boolNDArray (dim_vector (1, 1), scalar); } + octave_value as_double_or_copy (void); + + bool maybe_update_double (double d) + { + if (m_count != 1) + return false; + scalar = d; + return true; + } + octave_value as_double () const; octave_value as_single () const; diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-struct.cc --- a/libinterp/octave-value/ov-struct.cc Sat Jun 03
20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-struct.cc Mon Apr 24 20:34:39 2023 +0200 @@ -52,6 +52,7 @@ #include "ls-hdf5.h" #include "ls-utils.h" #include "pr-output.h" +#include "ov-inline.h" DEFINE_OV_TYPEID_FUNCTIONS_AND_DATA(octave_struct, "struct", "struct"); @@ -1095,6 +1096,39 @@ return retval; } +octave_value +octave_struct::vm_extract_forloop_value (octave_idx_type counter) +{ + // TODO: Maybe this is slow? Should preferably be done once per loop + octave_value_list idx; + octave_value arg = octave_value_factory::make_copy (this); + + dim_vector dv = arg.dims ().redim (2); + octave_idx_type nrows = dv(0); + + if (arg.ndims () > 2) + arg = arg.reshape (dv); + + octave_idx_type iidx; + + // for row vectors, use single index to speed things up. + if (nrows == 1) + { + idx.resize (1); + iidx = 0; + } + else + { + idx.resize (2); + idx(0) = octave_value::magic_colon_t; + iidx = 1; + } + + // One based indexing + idx(iidx) = counter + 1; + return arg.index_op (idx).storable_value (); +} + DEFINE_OV_TYPEID_FUNCTIONS_AND_DATA(octave_scalar_struct, "scalar struct", "struct"); diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-struct.h --- a/libinterp/octave-value/ov-struct.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-struct.h Mon Apr 24 20:34:39 2023 +0200 @@ -101,6 +101,8 @@ dim_vector dims () const { return m_map.dims (); } + octave_value vm_extract_forloop_value (octave_idx_type idx); + std::size_t byte_size () const; // This is the number of elements in each field. The total number diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-usr-fcn.cc --- a/libinterp/octave-value/ov-usr-fcn.cc Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-usr-fcn.cc Mon Apr 24 20:34:39 2023 +0200 @@ -60,6 +60,7 @@ #include "profiler.h" #include "variables.h" #include "ov-fcn-handle.h" +#include "pt-bytecode-walk.h" // Whether to optimize subsasgn method calls. 
static bool Voptimize_subsasgn_calls = true; @@ -481,7 +482,28 @@ octave_user_function::call (octave::tree_evaluator& tw, int nargout, const octave_value_list& args) { - tw.push_stack_frame (this); + bool is_compiled = false; + + is_compiled = this->is_compiled (); + if (octave::V__enable_vm_eval__ && !is_compiled && !m_compilation_failed) + { + try + { + octave::compile_user_function (*this, false); + is_compiled = true; + } + catch (std::exception &e) + { + warning ("Auto-compilation of %s failed with message %s", name().c_str (), e.what ()); + this->m_compilation_failed = true; + } + } + + // Bytecode functions push their own stack frames in the vm + if (!is_compiled) + { + tw.push_stack_frame (this); + } octave::unwind_action act ([&tw] () { tw.pop_stack_frame (); }); diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-usr-fcn.h --- a/libinterp/octave-value/ov-usr-fcn.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov-usr-fcn.h Mon Apr 24 20:34:39 2023 +0200 @@ -36,6 +36,7 @@ #include "ov-typeinfo.h" #include "symscope.h" #include "unwind-prot.h" +#include "pt-bytecode.h" class string_vector; @@ -87,7 +88,7 @@ void cache_function_text (const std::string& text, const octave::sys::time& timestamp); - octave::symbol_scope scope () { return m_scope; } + virtual octave::symbol_scope scope () { return m_scope; } void stash_fcn_file_name (const std::string& nm) { m_file_name = nm; } @@ -239,6 +240,10 @@ int ending_line () const { return m_end_location_line; } int ending_column () const { return m_end_location_column; } + octave::symbol_scope scope (void) { return m_scope; } + + std::size_t scope_num_symbols () { return m_scope.num_symbols (); } + void maybe_relocate_end (); void stash_parent_fcn_scope (const octave::symbol_scope& ps); @@ -397,8 +402,34 @@ octave_value dump () const; + void set_bytecode (octave::bytecode &bytecode) + { + m_bytecode = bytecode; + } + + void clear_bytecode () + { + m_bytecode = octave::bytecode {}; + + auto subs = subfunctions (); + for (auto kv : subs) + { + octave_user_function *sub = kv.second.user_function_value (); + if (sub) + sub->clear_bytecode (); + } + } + + bool is_compiled () const { return m_bytecode.m_code.size (); } + + octave::bytecode &get_bytecode () { return m_bytecode; } + + bool m_compilation_failed = false; + private: + octave::bytecode m_bytecode; + enum class_method_type { none, diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov-vm.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libinterp/octave-value/ov-vm.h Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,201 @@ +//////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 1996-2023 The Octave Project Developers +// +// See the file COPYRIGHT.md in the top-level directory of this +// distribution or . +// +// This file is part of Octave. +// +// Octave is free software: you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Octave is distributed in the hope that it will be useful, but +// WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Octave; see the file COPYING. If not, see +// . 
+// +//////////////////////////////////////////////////////////////////////// + +#if ! defined (octave_ov_vm_h) +#define octave_ov_vm_h 1 + +#include "octave-config.h" + +#include "ov.h" +#include "load-path.h" + + +// octave_value_vm is to be used only by the VM +// and needs to have the same bit representation as +// an octave_value. +// +// An octave_value_vm object must not be assigned +// to itself or have a nullptr m_rep when being +// assigned to. + +class octave_value_vm +{ +public: + octave_value_vm () + : m_rep (octave_value::nil_rep ()) + { + m_rep->m_count++; + } + + octave_value_vm (octave_base_value *rep, bool count_add1 = true) + : m_rep (rep) + { + if (count_add1) + m_rep->m_count++; + } + + octave_value_vm (const octave_value_vm& a) + : m_rep (a.m_rep) + { + m_rep->m_count++; + } + octave_value_vm (const octave_value& a) + : m_rep (a.m_rep) + { + m_rep->m_count++; + } + + octave_value_vm (octave_value_vm&& a) + : m_rep (a.m_rep) + { + a.m_rep = nullptr; + } + octave_value_vm (octave_value&& a) + : m_rep (a.m_rep) + { + a.m_rep = nullptr; + } + + ~octave_value_vm () __attribute__ ((always_inline)) + { + // Because we define a move constructor and a move assignment + // operator, rep may be a nullptr here. We should only need to + // protect the move assignment operator in a similar way. + + if (m_rep && --m_rep->m_count == 0) + delete m_rep; + } + + octave_value_vm& operator = (const octave_value_vm& a) + { + if (--m_rep->m_count == 0) + delete m_rep; + + m_rep = a.m_rep; + m_rep->m_count++; + + return *this; + } + + octave_value_vm& operator = (octave_value_vm&& a) + { + if (--m_rep->m_count == 0) + delete m_rep; + + m_rep = a.m_rep; + a.m_rep = nullptr; + + return *this; + } + + octave_value_vm& operator = (octave_value&& a) + { + if (--m_rep->m_count == 0) + delete m_rep; + + m_rep = a.m_rep; + a.m_rep = nullptr; + + return *this; + } + + static void release_rep (octave_base_value *rep) + { + if (--rep->m_count == 0) + delete rep; + } + + void steal_ov_rep (octave_value &&ov) + { + if (m_rep && --m_rep->m_count == 0) + delete m_rep; + + m_rep = ov.m_rep; + ov.m_rep = nullptr; + } + + octave_base_value & get_rep () { return *m_rep; } + + octave_value_vm& operator = (octave_base_value *rep) + { + if (--m_rep->m_count == 0) + delete m_rep; + + m_rep = rep; + + return *this; + } + + bool vm_need_dispatch_push () __attribute__ ((pure, always_inline, nothrow)) + { return m_rep->vm_need_dispatch_push (); } + + bool vm_need_dispatch_assign_rhs () __attribute__ ((pure, always_inline, nothrow)) + { return m_rep->vm_need_dispatch_assign_rhs (); } + + bool vm_need_dispatch_assign_lhs () __attribute__ ((pure, always_inline, nothrow)) + { return m_rep->vm_need_dispatch_assign_lhs (); } + + int type_id() const __attribute__ ((pure, always_inline, nothrow)) + { return m_rep->type_id (); } + + bool is_matrix_type () const __attribute__ ((pure, always_inline, nothrow)) + { return m_rep->is_matrix_type (); } + + octave_base_value *m_rep; +}; + +class +octave_cached_value : public octave_base_value +{ +public: + octave_cached_value () + { + m_n_updated = octave::load_path::get_weak_n_updated (); + } + + void set_cached_obj (octave_value cache_obj) + { + m_cached_object = cache_obj; + } + + octave_value get_cached_value () + { + return m_cached_object; + } + + bool cache_is_valid () + { + return m_n_updated == octave::load_path::get_weak_n_updated () && m_cached_object.is_defined (); + } + + bool is_defined () const { return true; } + + +private: + octave_value m_cached_object; +
octave_idx_type m_n_updated = 0; +}; + +#endif \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov.cc --- a/libinterp/octave-value/ov.cc Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov.cc Mon Apr 24 20:34:39 2023 +0200 @@ -1448,6 +1448,11 @@ } octave_value_list +octave_value:: +simple_subsref (char type, octave_value_list &idx, int nargout) +{ return m_rep->simple_subsref (type, idx, nargout); } + +octave_value_list octave_value::subsref (const std::string& type, const std::list& idx, int nargout) { @@ -1487,6 +1492,21 @@ } octave_value +octave_value::maybe_as_trivial_range () +{ + if (m_rep->is_trivial_range ()) + return *this; + if (!is_range ()) + return *this; + + ov_range range = range_value (); + if (!range.could_be_trivial_range ()) + return *this; + + return range.as_trivial_range (); +} + +octave_value octave_value::next_subsref (bool auto_add, const std::string& type, const std::list& idx, std::size_t skip) @@ -3557,6 +3577,7 @@ octave_lazy_index::register_type (ti); octave_oncleanup::register_type (ti); octave_java::register_type (ti); + octave_trivial_range::register_type (ti); } OCTAVE_BEGIN_NAMESPACE(octave) diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ov.h --- a/libinterp/octave-value/ov.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ov.h Mon Apr 24 20:34:39 2023 +0200 @@ -48,6 +48,8 @@ class stack_frame; class type_info; +class vm; +class bytecode_fcn_stack_frame; OCTAVE_END_NAMESPACE(octave) @@ -60,6 +62,7 @@ class octave_user_function; class octave_fcn_handle; class octave_value_list; +class octave_fcn_cache; #include "mxtypes.h" @@ -429,6 +432,11 @@ // Type conversions. + // Returns a copy of a scalar (double), or makes a scalar + // for other types. + octave_value as_double_or_copy (void) const + { return m_rep->as_double_or_copy (); } + octave_value as_double () const { return m_rep->as_double (); } octave_value as_single () const { return m_rep->as_single (); } @@ -473,6 +481,9 @@ subsref (const std::string& type, const std::list& idx, int nargout); + octave_value_list + simple_subsref (char type, octave_value_list &idx, int nargout); + OCTINTERP_API octave_value next_subsref (const std::string& type, const std::list& idx, std::size_t skip = 1); @@ -494,6 +505,10 @@ subsasgn (const std::string& type, const std::list& idx, const octave_value& rhs); + octave_value + simple_subsasgn (char type, octave_value_list &idx, const octave_value& rhs) + { return m_rep->simple_subsasgn (type, idx, rhs); } + OCTINTERP_API octave_value undef_subsasgn (const std::string& type, const std::list& idx, @@ -1515,12 +1530,103 @@ protected: + // Functions for use by the VM. 
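+ // (Editor's note, illustrative.) These helpers are kept protected so the
+ // VM fast paths stay out of the public octave_value API; the friend
+ // declarations below grant the VM machinery access to them.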
+ friend class octave_value_factory; + friend class octave_value_ref; + friend class octave_value_vm; + friend class octave::vm; + friend class octave::bytecode_fcn_stack_frame; + + bool is_ref () + { + return m_rep->is_ref (); + } + octave_value_ref * ref_rep () { return m_rep->ref_rep (); } + + bool is_nil (void) const + { return m_rep == nil_rep(); } + + // True for the types based on ov-base-mat + bool is_full_num_matrix () const + { return m_rep->is_full_num_matrix (); } + + bool is_function_cache (void) const + { return m_rep->is_function_cache (); } + + // Function handles might have a function cache embedded + bool has_function_cache (void) const + { return m_rep->has_function_cache (); } + + octave_function * get_cached_fcn (const octave_value_list& args) + { return m_rep->get_cached_fcn (args); } + + // Returns true if the octave_value is either undefined + // or a function. + bool is_maybe_function (void) const + { return m_rep->is_maybe_function (); } + + octave_fcn_cache * + fcn_cache_value () const + { + return m_rep->fcn_cache_value (); + } + + bool vm_need_storable_call () const {return m_rep->vm_need_storable_call (); }; + bool vm_need_dispatch_assign_rhs () { return m_rep->vm_need_dispatch_assign_rhs (); } + bool vm_need_dispatch_assign_lhs () { return m_rep->vm_need_dispatch_assign_lhs (); } + bool vm_need_dispatch_push () { return m_rep->vm_need_dispatch_push (); } + + bool same_rep (octave_value &ov) const { return m_rep == ov.m_rep; } + + void maybe_call_dtor () { m_rep->maybe_call_dtor (); } + + octave_value + checked_full_matrix_elem (octave_idx_type n) const + { return m_rep->checked_full_matrix_elem (n); } + + octave_value + checked_full_matrix_elem (octave_idx_type i, octave_idx_type j) const + { return m_rep->checked_full_matrix_elem (i, j); } + + octave_value + vm_extract_forloop_value (octave_idx_type idx) + { + return m_rep->vm_extract_forloop_value (idx); + } + + double + vm_extract_forloop_double (octave_idx_type idx) + { + return m_rep->vm_extract_forloop_double (idx); + } + + bool + maybe_update_double (double d) + { + return m_rep->maybe_update_double (d); + } + + octave_value + maybe_as_trivial_range (); + + bool + is_trivial_range () { return m_rep->is_trivial_range (); } + + // For use by the inline octave_value "factory" octave_value_inline + octave_value (int /* dummy */, octave_base_value *rep) : m_rep (rep) {} + // Also increase the counter, for a "copy" + octave_value(int /* dummy */, int, octave_base_value *rep) : m_rep (rep) + { + m_rep->m_count++; + } + + //! The real representation.
octave_base_value *m_rep; -private: + static OCTINTERP_API octave_base_value * nil_rep (); - static OCTINTERP_API octave_base_value * nil_rep (); +private: OCTINTERP_API assign_op unary_op_to_assign_op (unary_op op); diff -r edbe81ee00c5 -r d2de83a80165 libinterp/octave-value/ovl.h --- a/libinterp/octave-value/ovl.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/octave-value/ovl.h Mon Apr 24 20:34:39 2023 +0200 @@ -172,6 +172,14 @@ void clear () { m_data.clear (); } + octave_value first_or_nil_ov () const + { + if (length ()) + return m_data.front (); + + return octave_value (); + } + private: std::vector m_data; diff -r edbe81ee00c5 -r d2de83a80165 libinterp/parse-tree/module.mk --- a/libinterp/parse-tree/module.mk Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/parse-tree/module.mk Mon Apr 24 20:34:39 2023 +0200 @@ -15,6 +15,10 @@ %reldir%/pt-assign.h \ %reldir%/pt-binop.h \ %reldir%/pt-bp.h \ + %reldir%/pt-bytecode.h \ + %reldir%/pt-bytecode-walk.h \ + %reldir%/pt-bytecode-vm.h \ + %reldir%/pt-bytecode-vm-internal.h \ %reldir%/pt-cbinop.h \ %reldir%/pt-cell.h \ %reldir%/pt-check.h \ @@ -64,6 +68,8 @@ %reldir%/pt-assign.cc \ %reldir%/pt-binop.cc \ %reldir%/pt-bp.cc \ + %reldir%/pt-bytecode-walk.cc \ + %reldir%/pt-bytecode-vm.cc \ %reldir%/pt-cbinop.cc \ %reldir%/pt-cell.cc \ %reldir%/pt-check.cc \ @@ -86,7 +92,6 @@ %reldir%/pt-stmt.cc \ %reldir%/pt-tm-const.cc \ %reldir%/pt-unop.cc \ - %reldir%/pt-vm-eval.cc \ %reldir%/pt-walk.cc \ %reldir%/pt.cc \ %reldir%/token.cc diff -r edbe81ee00c5 -r d2de83a80165 libinterp/parse-tree/pt-binop.h --- a/libinterp/parse-tree/pt-binop.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/parse-tree/pt-binop.h Mon Apr 24 20:34:39 2023 +0200 @@ -107,6 +107,7 @@ void matlab_style_short_circuit_warning (const char *op); + virtual bool is_braindead () { return false;} protected: // The operands for the expression. @@ -143,6 +144,8 @@ octave_value evaluate (tree_evaluator&, int nargout = 1); using tree_binary_expression::evaluate_n; + + bool is_braindead () { return true;} }; // Boolean expressions. diff -r edbe81ee00c5 -r d2de83a80165 libinterp/parse-tree/pt-bytecode-vm-internal.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libinterp/parse-tree/pt-bytecode-vm-internal.h Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,416 @@ +//////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 2022-2023 The Octave Project Developers +// +// See the file COPYRIGHT.md in the top-level directory of this +// distribution or . +// +// This file is part of Octave. +// +// Octave is free software: you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Octave is distributed in the hope that it will be useful, but +// WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Octave; see the file COPYING. If not, see +// . +// +//////////////////////////////////////////////////////////////////////// + +#if ! 
defined (octave_pt_bytecode_vm_internal_h) +#define octave_pt_bytecode_vm_internal_h 1 + +#include "octave-config.h" + +#define EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK(ovl,n) \ +do {\ + if (nargout <= 1)\ + PUSH_OV (ovl.first_or_nil_ov());\ +else /* TODO: Should be function call to keep code shorter. */\ + {\ + int actual_nargout = 0;\ +\ + int n_retval = std::min (static_cast (ovl.length ()), static_cast (nargout));\ + /* We want to push the ovl backwards */\ + for (int i = n_retval - 1; i >= 0 && actual_nargout < nargout; i--)\ + {\ + octave_value &arg = ovl (i);\ +\ + if (arg.is_cs_list ())\ + {\ + /* cs-list are also pushed backwards */\ + octave_value_list args = arg.list_value ();\ + /* We might need to skip the elements in the cs-list's end */ \ + int n_left = nargout - actual_nargout;\ + for (int j = std::min (static_cast (args.length () - 1), n_left - 1);\ + j >= 0 && actual_nargout < nargout; \ + j--)\ + {\ + PUSH_OV (args (j));\ + actual_nargout++;\ + }\ + }\ + else\ + {\ + PUSH_OV (std::move (arg));\ + actual_nargout++;\ + }\ + }\ +\ + /* TODO: Need errors here for missing arguments in assignment somehow */ \ + if (actual_nargout != nargout)\ + {\ + int diff = nargout - actual_nargout;\ + stack_element *start = sp - actual_nargout;\ + stack_lift (start, actual_nargout, diff);\ + sp += diff;\ + }\ + }\ +} while (0) + + +#define MAKE_BINOP(op) \ +{ \ + octave_value &rhs = TOP_OV (); \ + octave_value &lhs = SEC_OV (); \ + \ + try \ + { \ + octave_value ans = \ + binary_op (*m_ti, \ + octave_value::op, \ + lhs, rhs); \ + STACK_DESTROY (2); \ + PUSH_OV (std::move (ans)); \ + } \ + CATCH_INTERRUPT_EXCEPTION \ + CATCH_INDEX_EXCEPTION \ + CATCH_EXECUTION_EXCEPTION \ + CATCH_BAD_ALLOC \ +} \ + +#define MAKE_BINOP_SPECIALIZED(op_fn,jmp_target,op_target,target_type) \ +{ \ + octave_value &rhs = TOP_OV (); \ + octave_value_vm &lhs = SEC_OV_VM (); \ + \ + int rhs_type = rhs.type_id (); \ + int lhs_type = lhs.type_id (); \ + int t_type = target_type; \ + if (OCTAVE_UNLIKELY (rhs_type != lhs_type || rhs_type != t_type)) \ + { \ + ip[-2] = static_cast (INSTR::op_target); \ + goto jmp_target; \ + } \ + \ + try \ + { \ + lhs = op_fn (lhs.get_rep (), rhs.get_rep ()); \ + rhs.~octave_value (); \ + STACK_SHRINK (1); \ + } \ + CATCH_INTERRUPT_EXCEPTION \ + CATCH_INDEX_EXCEPTION \ + CATCH_EXECUTION_EXCEPTION \ + CATCH_BAD_ALLOC \ +} \ + +#define MAKE_UNOP_SPECIALIZED(op_fn, jmp_target, op_target, target_type) \ +{ \ + octave_value &ov = TOP_OV (); \ + \ + if (OCTAVE_UNLIKELY (ov.type_id () != target_type)) \ + { \ + /* Change the specialized opcode to the generic one */ \ + ip[-2] = static_cast (INSTR::op_target); \ + goto jmp_target; \ + } \ + \ + try \ + { \ + ov = op_fn (ov.get_rep ()); \ + } \ + CATCH_INTERRUPT_EXCEPTION \ + CATCH_INDEX_EXCEPTION \ + CATCH_EXECUTION_EXCEPTION \ + CATCH_BAD_ALLOC \ +} \ + +#define MAKE_BINOP_SELFMODIFYING(op, jmp_target, op_target) \ +{ \ + octave_value &rhs = TOP_OV (); \ + octave_value &lhs = SEC_OV (); \ + \ + int rhs_type = rhs.type_id (); \ + int lhs_type = lhs.type_id (); \ + if (rhs_type == lhs_type && rhs_type == m_scalar_typeid) \ + { \ + ip[-2] = static_cast (INSTR::op_target); \ + goto jmp_target; \ + } \ + \ + try \ + { \ + octave_value ans = \ + binary_op (*m_ti, \ + octave_value::op, \ + lhs, rhs); \ + STACK_DESTROY (2); \ + PUSH_OV (std::move (ans)); \ + } \ + CATCH_INTERRUPT_EXCEPTION \ + CATCH_INDEX_EXCEPTION \ + CATCH_EXECUTION_EXCEPTION \ + CATCH_BAD_ALLOC \ +} \ + +#define CATCH_INDEX_EXCEPTION \ +catch (index_exception& ie) \ +{ \ + 
(*sp++).pee = ie.vm_dup (); \ + (*sp++).i = static_cast<int> (error_type::INDEX_ERROR);\ + goto unwind; \ +} \ + +#define CATCH_INDEX_EXCEPTION_WITH_NAME \ +catch (index_exception& ie) \ +{ \ + ie.set_var (name_data [slot]); \ + (*sp++).pee = ie.vm_dup (); \ + (*sp++).i = static_cast<int> (error_type::INDEX_ERROR);\ + goto unwind; \ +} \ + +#define CATCH_INDEX_EXCEPTION_WITH_MAYBE_NAME(has_name) \ +catch (index_exception& ie) \ +{ \ + if (has_name) \ + ie.set_var (name_data [slot]); \ + (*sp++).pee = ie.vm_dup (); \ + (*sp++).i = static_cast<int> (error_type::INDEX_ERROR);\ + goto unwind; \ +} \ + +#define CATCH_INTERRUPT_EXCEPTION \ +catch (interrupt_exception& e) \ + { \ + (*sp++).i = static_cast<int>(error_type::INTERRUPT_EXC); \ + goto unwind; \ + } \ + +#define CATCH_EXECUTION_EXCEPTION \ +catch (execution_exception& e) \ + { \ + /* TODO: Id? */ \ + (*sp++).pee = new execution_exception {e}; \ + (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC); \ + \ + goto unwind; \ + } \ + +#define CATCH_STACKPUSH_EXECUTION_EXCEPTION \ +catch (execution_exception& e) \ + { \ + m_could_not_push_frame = true; \ + (*sp++).pee = new execution_exception {e}; \ + (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC); \ + \ + goto unwind; \ + } \ + +#define CATCH_STACKPUSH_BAD_ALLOC \ +catch (const std::bad_alloc&) \ +{ \ + m_could_not_push_frame = true; \ + (*sp++).i = static_cast<int> (error_type::BAD_ALLOC); \ + \ + goto unwind; \ +} + +#define CATCH_EXIT_EXCEPTION \ +catch (const exit_exception& e) \ +{ \ + (*sp++).i = e.exit_status (); \ + (*sp++).i = e.safe_to_return (); \ + (*sp++).i = static_cast<int> (error_type::EXIT_EXCEPTION); \ + \ + goto unwind; \ +} + +#define CATCH_BAD_ALLOC \ +catch (const std::bad_alloc&) \ +{ \ + (*sp++).i = static_cast<int> (error_type::BAD_ALLOC); \ + \ + goto unwind; \ +} + +#define MAKE_BYTECODE_CALL \ +if (sp + stack_min_for_new_call >= m_stack + stack_size) \ + { \ + (*sp++).pee = new execution_exception {"error","","VM is running out of stack space"}; \ + (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC); \ + goto unwind; \ + } \ +/* We are now going to call another function */ \ +/* compiled to bytecode */ \ + \ +m_tw->set_active_bytecode_ip (ip - code); \ +stack_element *first_arg = sp - n_args_on_stack; \ + \ +/* Push address to first arg (or where it would have been */ \ +/* if there are no args), so we can restore the sp at return */ \ +(*sp++).pse = first_arg; \ + \ +/* Push unwind data */ \ +(*sp++).pud = unwind_data; \ + \ +/* Push code */ \ +(*sp++).puc = code; \ + \ +/* Push data */ \ +(*sp++).pov = data; \ + \ +/* Push id names */ \ +(*sp++).ps = name_data; \ + \ +/* Push bsp */ \ +(*sp++).pse = bsp; \ + \ +/* Push the instruction pointer */ \ +/* (last on caller stack) */ \ +(*sp++).puc = ip; \ + \ +/* set callee bsp */ \ +m_sp = bsp = sp; \ + \ +/* Push nargout (first on callee stack) */ \ +(*sp++).u = nargout; \ + \ +/* Set the new data, code etc */ \ +bytecode &bc = usr_fcn->get_bytecode (); \ +if (OCTAVE_UNLIKELY (m_profiler_enabled)) \ + { \ + auto p = vm::m_vm_profiler; \ + if (p) \ + { \ + std::string caller_name = data[2].string_value (); /* profiler_name () queried at compile time */ \ + p->enter_fn (caller_name, bc); \ + } \ + } \ +m_data = data = bc.m_data.data (); \ +m_code = code = bc.m_code.data (); \ +m_name_data = name_data = bc.m_ids.data (); \ +m_unwind_data = unwind_data = &bc.m_unwind_data; \ + \ + \ +/* Set the ip to 0 */ \ +ip = code; \ +int n_returns_callee = static_cast<signed char> (*ip++); /* Negative for varargout */ \ +n_returns_callee = n_returns_callee >= 0 ?
n_returns_callee : -n_returns_callee; \ +int n_args_callee = static_cast<signed char> (*ip++); /* Negative for varargin */ \ +int n_locals_callee = POP_CODE_USHORT (); \ + \ +if (n_args_callee < 0) \ +{ \ + sp[0].pv = static_cast<void *> (usr_fcn); \ + goto varargin_call; \ +} \ + \ +/* Construct return values - note nargout */ \ +/* is already pushed as a uint64 */ \ +for (int i = 1; i < n_returns_callee; i++) \ + PUSH_OV (); \ + \ +int ii; \ +int n_args_on_callee_stack = 0; \ +/* Move the args to the new stack */ \ +if (!has_cs_list_arg) /* TODO: Kludge. Move to beginning. */ \ + { \ + n_args_on_callee_stack = n_args_on_stack; \ + for (ii = 0; ii < n_args_on_stack; ii++) \ + { \ + PUSH_OV (std::move (first_arg[ii].ov)); \ + /* Destroy the args */ \ + first_arg[ii].ov.~octave_value (); \ + } \ + } \ +else \ + { \ + for (ii = 0; ii < n_args_on_stack; ii++) \ + { \ + octave_value &arg = first_arg[ii].ov; \ + \ + if (arg.is_cs_list ()) \ + { \ + /* TODO: Use opcode instead? */ \ + octave_value_list args = arg.list_value (); \ + for (int j = 0; j < args.length (); j++) \ + { \ + PUSH_OV (args (j)); \ + n_args_on_callee_stack++; \ + } \ + } \ + else \ + { \ + PUSH_OV (std::move (arg)); \ + n_args_on_callee_stack++; \ + } \ + /* Destroy the args */ \ + arg.~octave_value (); \ + } \ + } \ +/* Construct missing args */ \ +for (int i = n_args_on_callee_stack; i < n_args_callee; i++) \ + PUSH_OV (); \ + \ +/* Construct locals */ \ +int n_locals_to_ctor = \ + n_locals_callee - n_args_callee - n_returns_callee; \ +for (int i = 0; i < n_locals_to_ctor; i++) \ + PUSH_OV (); \ + \ + \ +try \ + { \ + m_tw->push_stack_frame(*this, usr_fcn, nargout, n_args_on_callee_stack); \ + } \ +CATCH_STACKPUSH_EXECUTION_EXCEPTION /* Sets m_could_not_push_frame to true */ \ +CATCH_STACKPUSH_BAD_ALLOC \ + \ +/* "auto var" in the frame object. This is needed if nargout() etc are called */ \ +set_nargout (nargout); \ +/* Called fn needs to know about ignored outputs, e.g. [~, a] = foo() */ \ +if (m_output_ignore_data) \ + { \ + if (m_output_ignore_data->is_pending ()) \ + m_tw->set_auto_fcn_var (stack_frame::IGNORED, m_output_ignore_data->get_ignore_matrix ()); \ + else \ + m_tw->set_auto_fcn_var (stack_frame::IGNORED, {}); \ + m_output_ignore_data->m_v_lvalue_list.push_back (m_tw->lvalue_list ()); \ + m_tw->set_lvalue_list (nullptr); \ + } \ + \ +if (n_args_on_callee_stack > n_args_callee) \ + { \ + (*sp++).pee = new execution_exception {"error","","function called with too many inputs"}; \ + (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC); \ + goto unwind; \ + } \ +/* N_RETURNS is negative for varargout */ \ +int n_returns = N_RETURNS () - 1; /* %nargout in N_RETURNS */ \ +if (n_returns >= 0 && nargout > n_returns) \ + { \ + (*sp++).pee = new execution_exception {"error","","function called with too many outputs"}; \ + (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC); \ + goto unwind; \ + } \ + + +#endif diff -r edbe81ee00c5 -r d2de83a80165 libinterp/parse-tree/pt-bytecode-vm.cc --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libinterp/parse-tree/pt-bytecode-vm.cc Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,6766 @@ +//////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 2022-2023 The Octave Project Developers +// +// See the file COPYRIGHT.md in the top-level directory of this +// distribution or <https://octave.org/copyright/>. +// +// This file is part of Octave.
+// +// Octave is free software: you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Octave is distributed in the hope that it will be useful, but +// WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Octave; see the file COPYING. If not, see +// <https://www.gnu.org/licenses/>. +// +//////////////////////////////////////////////////////////////////////// + +#if defined (HAVE_CONFIG_H) +# include "config.h" +#endif + +#include + +#include "time-wrappers.h" + +#include "pt-bytecode-vm.h" +#include "pt-bytecode-vm-internal.h" +#include "ov.h" +#include "error.h" +#include "symtab.h" +#include "interpreter-private.h" +#include "interpreter.h" +#include "pt-eval.h" +#include "pt-tm-const.h" +#include "pt-stmt.h" +#include "ov-classdef.h" +#include "ov-ref.h" +#include "ov-range.h" +#include "ov-inline.h" + +#include "ov-vm.h" + +//#pragma GCC optimize("O0") + +extern "C" void vm_debug_print_ovl (void *p); +extern "C" void vm_debug_print_ov (void *p); +static bool ov_need_stepwise_subsrefs (octave_value &ov); +static void copy_many_args_to_caller (octave::stack_element *sp, octave::stack_element *caller_stack_end, + int n_args_to_move, int n_args_caller_expects); +static int lhs_assign_numel (octave_value &ov, const std::string& type, const std::list<octave_value_list>& idx); + +#define TODO(msg) error("Not done yet %d: " msg, __LINE__) +#define ERR(msg) error("VM error %d: " msg, __LINE__) +#define CHECK(cond) \ + do { \ + if (!(cond)) \ + ERR("Internal VM consistency check failed, " #cond); \ + } while ((0)) +#define PANIC(msg) error("VM panic %d: " msg, __LINE__) + +using namespace octave; + +static unsigned +chars_to_uint (unsigned char *p) +{ + unsigned u = 0; + u |= *p++; + u |= *p++ << 8; + u |= *p++ << 16; + u |= *p << 24; + + return u; +} + +std::vector<std::pair<int, std::string>> +octave::opcodes_to_strings (bytecode &bc) +{ + return opcodes_to_strings (bc.m_code, bc.m_ids); +} + +std::vector<std::pair<int, std::string>> +octave::opcodes_to_strings (std::vector<unsigned char> &v_code, std::vector<std::string> &names) +{ + unsigned char *p = v_code.data (); + unsigned char *code = p; + int n = v_code.size (); + bool wide_opext_active = false; + + // Skip some framedata + p += 4; + + std::vector<std::pair<int, std::string>> v_pair_row_str; + +#define CASE_START(type) \ + case INSTR::type: \ + { /* Line stored in s */ \ + std::string s; \ + /* Code offset */ \ + int ip = static_cast<int> (p - code); \ + s += #type; \ + /* vec for id names */ \ + std::vector<std::string> v_ids; \ + +#define CASE_END() \ + if (v_ids.size ()) \ + { \ + s += " #"; \ + for (auto ss : v_ids) \ + s += " " + ss; \ + } \ + v_pair_row_str.push_back ({ip, s}); \ + break;} \ + +#define PRINT_OP(type) \ + CASE_START (type) \ + CASE_END () \ + +#define PCHAR() \ + {p++; \ + CHECK_END (); \ + s += " " + std::to_string (*p);} + +#define PCHAR_AS_CHAR() \ + {p++; \ + CHECK_END (); \ + s += std::string {" '"} + static_cast<char> (*p) + "'";} + +#define PSHORT() \ + {p++; \ + CHECK_END (); \ + unsigned char b0 = *p; \ + p++; \ + CHECK_END (); \ + unsigned char b1 = *p; \ + unsigned u = b0 | (b1 << 8); \ + s += " " + std::to_string (u);} + +#define PSSLOT() \ + {p++; \ + CHECK_END (); \ + s += " " + std::to_string (*p); \ + v_ids.push_back (std::string {*p < names.size() ?
\ + names[*p].c_str() : \ + "INVALID SLOT"});} + +#define PSLOT() \ + {if (wide_opext_active) \ + PWSLOT () \ + else \ + PSSLOT () \ + wide_opext_active = false;} + +#define PWSLOT() \ + {p++; \ + CHECK_END (); \ + unsigned char b0 = *p; \ + p++; \ + CHECK_END (); \ + unsigned char b1 = *p; \ + unsigned u = b0 | (b1 << 8); \ + s += " " + std::to_string (u); \ + v_ids.push_back (std::string {u < names.size() ? \ + names[u].c_str() : \ + "INVALID SLOT"});} + +#define CHECK_END() \ + do {if (p >= v_code.data () + v_code.size ()) { error ("Invalid bytecode\n");}} while((0)) + +#define PINT() \ + do {\ + unsigned u = 0;\ + p++;\ + CHECK_END ();\ + u |= *p++;\ + CHECK_END ();\ + u |= *p++ << 8;\ + CHECK_END ();\ + u |= *p++ << 16;\ + CHECK_END ();\ + u |= *p << 24;\ + s += " " + std::to_string (u);\ + } while (0); + + while (p < code + n) + { + switch (static_cast (*p)) + { + + PRINT_OP (POP) + PRINT_OP (DUP) + PRINT_OP (MUL) + PRINT_OP (MUL_DBL) + PRINT_OP (ADD) + PRINT_OP (ADD_DBL) + PRINT_OP (SUB) + PRINT_OP (SUB_DBL) + PRINT_OP (DIV) + PRINT_OP (DIV_DBL) + PRINT_OP (RET) + PRINT_OP (LE) + PRINT_OP (LE_DBL) + PRINT_OP (LE_EQ) + PRINT_OP (LE_EQ_DBL) + PRINT_OP (GR) + PRINT_OP (GR_DBL) + PRINT_OP (GR_EQ) + PRINT_OP (GR_EQ_DBL) + PRINT_OP (EQ) + PRINT_OP (EQ_DBL) + PRINT_OP (NEQ) + PRINT_OP (NEQ_DBL) + PRINT_OP (TRANS_MUL) + PRINT_OP (MUL_TRANS) + PRINT_OP (HERM_MUL) + PRINT_OP (MUL_HERM) + PRINT_OP (INCR_PREFIX) + PRINT_OP (ROT) + PRINT_OP (TRANS_LDIV) + PRINT_OP (HERM_LDIV) + PRINT_OP (PUSH_CELL) + PRINT_OP (PUSH_OV_U64) + PRINT_OP (EXPAND_CS_LIST) + PRINT_OP (POW_DBL) + PRINT_OP (POW) + PRINT_OP (LDIV) + PRINT_OP (EL_MUL) + PRINT_OP (EL_DIV) + PRINT_OP (EL_POW) + PRINT_OP (EL_AND) + PRINT_OP (EL_OR) + PRINT_OP (EL_LDIV) + PRINT_OP (NOT_DBL) + PRINT_OP (NOT_BOOL) + PRINT_OP (NOT) + PRINT_OP (UADD) + PRINT_OP (USUB) + PRINT_OP (USUB_DBL) + PRINT_OP (TRANS) + PRINT_OP (HANDLE_SIGNALS) + PRINT_OP (HERM) + PRINT_OP (UNARY_TRUE) + PRINT_OP (PUSH_TRUE) + PRINT_OP (PUSH_FALSE) + PRINT_OP (COLON2) + PRINT_OP (COLON3) + PRINT_OP (COLON2_CMD) + PRINT_OP (COLON3_CMD) + PRINT_OP (FOR_SETUP) + PRINT_OP (PUSH_NIL); + PRINT_OP (THROW_IFERROBJ); + PRINT_OP (BRAINDEAD_PRECONDITION); + PRINT_OP (PUSH_DBL_0); + PRINT_OP (PUSH_DBL_1); + PRINT_OP (PUSH_DBL_2); + + CASE_START (WIDE) + wide_opext_active = true; + CASE_END () + + CASE_START (PUSH_FOLDED_CST) PSLOT () PSHORT () CASE_END () + CASE_START (SET_FOLDED_CST) PSLOT () CASE_END () + + CASE_START (LOAD_CST) PCHAR () CASE_END () + CASE_START (LOAD_CST_ALT2) PCHAR () CASE_END () + CASE_START (LOAD_CST_ALT3) PCHAR () CASE_END () + CASE_START (LOAD_CST_ALT4) PCHAR () CASE_END () + CASE_START (LOAD_2_CST) PCHAR () CASE_END () + CASE_START (POP_N_INTS) PCHAR () CASE_END () + + CASE_START (ASSIGN) PSLOT() CASE_END () + CASE_START (BIND_ANS) PSLOT() CASE_END () + CASE_START (INCR_ID_PREFIX) PSLOT() CASE_END () + CASE_START (INCR_ID_POSTFIX) PSLOT() CASE_END () + CASE_START (DECR_ID_PREFIX) PSLOT() CASE_END () + CASE_START (DECR_ID_POSTFIX) PSLOT() CASE_END () + CASE_START (INCR_ID_PREFIX_DBL) PSLOT() CASE_END () + CASE_START (INCR_ID_POSTFIX_DBL) PSLOT() CASE_END () + CASE_START (DECR_ID_PREFIX_DBL) PSLOT() CASE_END () + CASE_START (DECR_ID_POSTFIX_DBL) PSLOT() CASE_END () + CASE_START (FORCE_ASSIGN) PSLOT() CASE_END () + CASE_START (PUSH_SLOT_NARGOUT1) PSLOT() CASE_END () + CASE_START (PUSH_PI) PSLOT() CASE_END () + CASE_START (PUSH_SLOT_NARGOUT1_SPECIAL) PSLOT() CASE_END () + CASE_START (PUSH_SLOT_INDEXED) PSLOT() CASE_END () + CASE_START (PUSH_FCN_HANDLE) PSLOT() 
CASE_END () + CASE_START (PUSH_SLOT_NARGOUT0) PSLOT() CASE_END () + CASE_START (SET_SLOT_TO_STACK_DEPTH) PSLOT() CASE_END () + + CASE_START (DISP) PSLOT() PWSLOT() CASE_END () + CASE_START (PUSH_SLOT_DISP) PSLOT() PWSLOT() CASE_END () + + CASE_START (JMP_IFDEF) PSHORT() CASE_END () + CASE_START (JMP_IFNCASEMATCH) PSHORT() CASE_END () + CASE_START (JMP) PSHORT() CASE_END () + CASE_START (JMP_IF) PSHORT() CASE_END () + CASE_START (JMP_IFN) PSHORT() CASE_END () + CASE_START (JMP_IF_BOOL) PSHORT() CASE_END () + CASE_START (JMP_IFN_BOOL) PSHORT() CASE_END () + CASE_START (FOR_COMPLEX_SETUP) PSHORT() CASE_END () + + CASE_START (ASSIGN_COMPOUND) PSLOT () PCHAR () CASE_END () + + CASE_START (INDEX_ID_NARGOUT0) PSLOT () PCHAR () CASE_END () + CASE_START (INDEX_ID_NARGOUT1) PSLOT () PCHAR () CASE_END () + CASE_START (INDEX_ID1_MAT_2D) PSLOT () PCHAR () CASE_END () + CASE_START (INDEX_ID1_MAT_1D) PSLOT () PCHAR () CASE_END () + + CASE_START (INDEX_CELL_ID_NARGOUT0) PSLOT () PCHAR () CASE_END () + CASE_START (INDEX_CELL_ID_NARGOUT1) PSLOT () PCHAR () CASE_END () + + CASE_START (INDEX_CELL_ID_NARGOUTN) PSLOT () PCHAR () PCHAR () CASE_END () + CASE_START (INDEX_IDN) PSLOT () PCHAR () PCHAR () CASE_END () + + CASE_START (SUBASSIGN_OBJ) PCHAR () PCHAR () CASE_END () + CASE_START (MATRIX) PCHAR () PCHAR () CASE_END () + CASE_START (DUPN) PCHAR () PCHAR () CASE_END () + + CASE_START (INDEX_ID1_MATHY_UFUN) PCHAR () PSLOT () PCHAR () CASE_END () + + CASE_START (INDEX_OBJ) PCHAR () PCHAR () PWSLOT () PCHAR () PCHAR () CASE_END () + + CASE_START (FOR_COND) PSLOT () PSHORT () CASE_END () + + CASE_START (FOR_COMPLEX_COND) PSHORT () PWSLOT () PWSLOT () CASE_END () + + CASE_START (INDEX_STRUCT_NARGOUTN) PCHAR () PWSLOT () PWSLOT () CASE_END () + CASE_START (END_ID) PSLOT () PCHAR () PCHAR () CASE_END () + + CASE_START (PUSH_SLOT_NARGOUTN) PSLOT () PCHAR () CASE_END () + CASE_START (BRAINDEAD_WARNING) PSLOT () PCHAR () CASE_END () + CASE_START (SUBASSIGN_STRUCT) PSLOT () PWSLOT () CASE_END () + + CASE_START (SUBASSIGN_ID) PSLOT () PCHAR () CASE_END () + CASE_START (SUBASSIGN_ID_MAT_1D) PSLOT () PCHAR () CASE_END () + CASE_START (SUBASSIGN_CELL_ID) PSLOT () PCHAR () CASE_END () + + CASE_START (EVAL) PCHAR () PINT () CASE_END () + + CASE_START (PUSH_ANON_FCN_HANDLE) PINT () CASE_END () + + CASE_START (INDEX_STRUCT_CALL) + PCHAR () + PCHAR () + PCHAR () + PCHAR () + int nn = *p; + s += " {"; + for (int i = 0; i < nn; i++) + { + PCHAR () + PCHAR_AS_CHAR () + if (i + 1 != nn) + s += ", "; + } + s += "} "; + CASE_END () + + CASE_START (LOAD_FAR_CST) PINT () CASE_END () + + CASE_START (END_OBJ) PSLOT () PCHAR () PCHAR () CASE_END () + + CASE_START (WORDCMD) PSLOT () PCHAR () PCHAR () CASE_END () + + CASE_START (SET_IGNORE_OUTPUTS) + PCHAR () + int nn = *p; + PCHAR () + for (int i = 0; i < nn; i++) + PCHAR () + CASE_END () + + CASE_START (CLEAR_IGNORE_OUTPUTS) + PCHAR () + int nn = *p; + for (int i = 0; i < nn; i++) + { + PWSLOT () + } + CASE_END () + + CASE_START (END_X_N) + PCHAR () + + int nn = *p; + for (int i = 0; i < nn; i++) + { + PCHAR () + PCHAR () + PCHAR () + PWSLOT () + } + CASE_END () + + CASE_START (MATRIX_UNEVEN) + s += " TYPE"; + PCHAR () + int type = *p; + + if (type == 1) + { + s += " ROWS"; PINT (); + s += " COLS"; PINT (); + } + else + { + if (p + 3 >= code + n) + error ("Invalid bytecode\n"); + int i = chars_to_uint (p); + s += " ROWS"; PINT (); + s += " COLS"; + for (int j = 0; j < i; j++) + PINT (); + } + CASE_END () + + CASE_START (SUBASSIGN_CHAINED) + PCHAR (); // op + PCHAR (); // nchained + int nn 
= *p; + for (int i = 0; i < nn; i++) + { + PCHAR (); + PCHAR (); + } + CASE_END () + + CASE_START (GLOBAL_INIT) + p++; + CHECK_END (); + if (static_cast (*p) == global_type::GLOBAL) + s += " 'GLOBAL'"; + else if (static_cast (*p) == global_type::PERSISTENT) + s += " 'PERSISTENT'"; + + PWSLOT () + PWSLOT () + + s += " HAS-TARGET"; + PCHAR () + int has_target = *p; + if (has_target) + { + s += " AFTER INIT"; + PSHORT (); + } + CASE_END () + + CASE_START (ASSIGNN) + PCHAR () + int n_slots = *p; + for (int i = 0; i < n_slots; i++) + PWSLOT () + CASE_END () + + default: + CHECK_END (); + error ("Unknown op: %d\n", *p); + break; + } + p++; + } + + return v_pair_row_str; +} + +void +octave::print_bytecode(bytecode &bc) +{ + unsigned char *p = bc.m_code.data (); + int n = bc.m_code.size (); + + CHECK (bc.m_data.size () >= 2); + printf ("metadata:\n"); + printf ("\t%s\n", bc.m_data[0].string_value ().c_str ()); // function name + printf ("\t%s\n\n", bc.m_data[1].string_value ().c_str ()); // function type + + printf("frame:\n"); + printf("\t.n_return %d\n", *p++); + printf("\t.n_args %d\n", *p++); + printf("\t.n_locals %d\n\n", *p++); + + printf("slots:\n"); + int idx = 0; + for (std::string local : bc.m_ids) + printf("%5d: %s\n", idx++, local.c_str ()); + printf ("\n"); + + printf ("source code lut:\n"); + for (auto it : bc.m_unwind_data.m_loc_entry) + { + printf ("\tl:%5d c:%5d ip0:%5d ip1:%5d\n", it.m_line, it.m_col, it.m_ip_start, it.m_ip_end); + } + + printf ("dbg tree object:\n"); + for (auto it : bc.m_unwind_data.m_ip_to_tree) + { + printf ("\tip:%5d obj=%p\n", it.first, it.second); + } + + printf("code: (n=%d)\n", n); + auto v_ls = opcodes_to_strings (bc); + for (auto ls : v_ls) + { + printf ("\t%5d: %s\n", ls.first, ls.second.c_str ()); + } +} + +static int pop_code_int (unsigned char *ip) +{ + unsigned int ans; + ip -= 4; + ans = *ip++; + ans |= *ip++ << 8; + ans |= *ip++ << 16; + ans |= *ip++ << 24; + + return ans; +} + +static int pop_code_ushort (unsigned char *ip) +{ + unsigned int ans; + ip -= 2; + ans = *ip++; + ans |= *ip++ << 8; + + return ans; +} + + + +// Debug functions easy to break out into in gdb. 
Called by __dummy_mark_1() in Octave +extern "C" void dummy_mark_1 (void); +extern "C" void dummy_mark_2 (void); + +#define POP_CODE() *ip++ +#define POP_CODE_INT() (ip++,ip++,ip++,ip++,pop_code_int (ip)) +#define POP_CODE_USHORT() (ip++, ip++, pop_code_ushort (ip)) + +#define PUSH_OV(ov) \ + do { \ + new (sp++) octave_value (ov); \ + } while ((0)) + +#define PUSH_OVB(ovb) \ + do { \ + new (sp++) octave_value_vm (ovb); \ + } while ((0)) + +#define PUSH_OV_VM(ov) \ + do { \ + new (sp++) octave_value_vm (ov); \ + } while ((0)) + +#define POP() (*--sp) + +#define TOP_OVB() (sp[-1]).ovb +#define SEC_OVB() (sp[-2]).ovb + +#define TOP_OV_VM() (sp[-1]).ov_vm +#define SEC_OV_VM() (sp[-2]).ov_vm + +#define TOP_OV() (sp[-1]).ov +#define SEC_OV() (sp[-2]).ov +#define THIRD_OV() (sp[-3]).ov +#define FOURTH_OV() (sp[-4]).ov + +#define TOP() (sp[-1]) +#define SEC() (sp[-2]) +#define THIRD() (sp[-3]) + +#define STACK_SHRINK(n) sp -= n +#define STACK_GROW(n) sp += n +#define STACK_DESTROY(n) \ + do { \ + for (int iii = 0; iii < n; iii++) \ + (*--sp).ov.~octave_value (); \ + } while ((0)) + +static void stack_lift (stack_element *start, int n_elem, int n_lift) +{ + octave_value_list tmp; + for (int i = 0; i < n_elem; i++) + tmp.append (std::move (start[i].ov)); + for (int i = 0; i < n_elem; i++) + start[i].ov.~octave_value (); + for (int i = 0; i < n_lift; i++) + new (start + i) octave_value; + for (int i = 0; i < n_elem; i++) + new (start + n_lift + i) octave_value (std::move (tmp.xelem (i))); +} + +#define COMMA , +#define PRINT_VM_STATE(msg) \ + do { \ + printf(msg); \ + printf("\n"); \ + printf("sp : %p\n", sp); \ + printf("bsp : %p\n", bsp); \ + printf("sp i: %zu\n", sp - bsp); \ + printf("sp ii: %zu\n", sp - m_stack); \ + printf("ip : %zu\n", ip - code); \ + printf("code: %p\n", code); \ + printf("data: %p\n", data); \ + printf("ids : %p\n", name_data); \ + printf("fn : %s\n", m_tw->get_current_stack_frame ()->fcn_name ().c_str ());\ + printf("Next op: %u\n\n", *ip);\ + } while ((0)) + +#define CHECK_STACK(n) \ + do {\ + for (unsigned i = 0; i < stack_pad; i++)\ + {\ + CHECK (m_stack0[i].u == stack_magic_int);\ + CHECK (m_stack0[i + stack_size].u == stack_magic_int);\ + }\ + CHECK (sp <= m_stack + stack_size);\ + CHECK (sp + n <= m_stack + stack_size);\ + CHECK (sp >= m_stack);\ + } while (0) + +#define CHECK_STACK_N(n) CHECK (sp + n <= m_stack + stack_size) + +// Access the octave_base_value as subclass type of an octave_value ov +#define REP(type,ov) static_cast (const_cast (ov.get_rep())) + +#define DISPATCH() do { \ + /*if (!m_tw->get_current_stack_frame ()->is_bytecode_fcn_frame ()) \ + { \ + printf ("Why oh why\n"); \ + dummy_mark_1 (); \ + } */ \ + /* PRINT_VM_STATE ("%d" COMMA __LINE__); */ \ + /* CHECK_STACK (0); */ \ +\ + if (OCTAVE_UNLIKELY (m_tw->debug_mode_active ())) /* Do we need to check for breakpoints? */\ + goto debug_check;\ + int opcode = ip[0];\ + arg0 = ip[1];\ + ip += 2;\ + goto *instr [opcode]; /* Dispatch to next instruction */\ +} while ((0)) + +#define DISPATCH_1BYTEOP() do { \ + /*if (!m_tw->get_current_stack_frame ()->is_bytecode_fcn_frame ()) \ + { \ + printf ("Why oh why\n"); \ + dummy_mark_1 (); \ + } */ \ + /* PRINT_VM_STATE ("%d" COMMA __LINE__); */ \ + /* CHECK_STACK (0); */ \ +\ + if (OCTAVE_UNLIKELY (m_tw->debug_mode_active ())) /* Do we need to check for breakpoints? 
*/\ + goto debug_check_1b;\ + int opcode = arg0;\ + arg0 = *ip++;\ + goto *instr [opcode]; /* Dispatch to next instruction */\ +} while ((0)) + +std::shared_ptr vm::m_vm_profiler; +bool vm::m_profiler_enabled; +bool vm::m_trace_enabled; + +// These two are used for pushing true and false ov:s to the +// operand stack. +static octave_value ov_true {true}; +static octave_value ov_false {false}; +#if defined (M_PI) + static octave_value ov_pi {M_PI}; +#else + // Initialized in vm::vm() + static octave_value ov_pi; +#endif +static octave_value ov_dbl_0 {0.0}; +static octave_value ov_dbl_1 {1.0}; +static octave_value ov_dbl_2 {2.0}; + +// TODO: Push non-nil and nil ov instead of true false to make some checks +// faster? Would they be faster? + +octave_value_list +vm::execute_code (const octave_value_list &root_args, int root_nargout) +{ + // This field is set to true at each return from this function so we can + // assure in the caller that no exception escapes the VM in some way. + this->m_dbg_proper_return = false; + + // Array of label pointers, corresponding to opcodes by position in + // the array. "&&" is label address, not rvalue reference. + static const void* instr[] = + { + &&pop, // POP, + &&dup, // DUP, + &&load_cst, // LOAD_CST, + &&mul, // MUL, + &&div, // DIV, + &&add, // ADD, + &&sub, // SUB, + &&ret, // RET, + &&assign, // ASSIGN, + &&jmp_if, // JMP_IF, + &&jmp, // JMP, + &&jmp_ifn, // JMP_IFN, + &&push_slot_nargout0, // PUSH_SLOT_NARGOUT0, + &&le, // LE, + &&le_eq, // LE_EQ, + &&gr, // GR, + &&gr_eq, // GR_EQ, + &&eq, // EQ, + &&neq, // NEQ, + &&index_id_nargout0, // INDEX_ID_NARGOUT0, + &&push_slot_indexed, // PUSH_SLOT_INDEXED, + &&pow, // POW, + &&ldiv, // LDIV, + &&el_mul, // EL_MUL, + &&el_div, // EL_DIV, + &&el_pow, // EL_POW, + &&el_and, // EL_AND, + &&el_or, // EL_OR, + &&el_ldiv, // EL_LDIV, + &&op_not, // NOT, + &&uadd, // UADD, + &&usub, // USUB, + &&trans, // TRANS, + &&herm, // HERM, + &&incr_id_prefix, // INCR_ID_PREFIX, + &&decr_id_prefix, // DECR_ID_PREFIX, + &&incr_id_postfix, // INCR_ID_POSTFIX, + &&decr_id_postfix, // DECR_ID_POSTFIX, + &&for_setup, // FOR_SETUP, + &&for_cond, // FOR_COND, + &&pop_n_ints, // POP_N_INTS, + &&push_slot_nargout1, // PUSH_SLOT_NARGOUT1, + &&index_id1, // INDEX_ID_NARGOUT1, + &&push_fcn_handle, // PUSH_FCN_HANDLE, + &&colon, // COLON3, + &&colon, // COLON2, + &&colon_cmd, // COLON3_CMD, + &&colon_cmd, // COLON2_CMD, + &&push_true, // PUSH_TRUE, + &&push_false, // PUSH_FALSE, + &&unary_true, // UNARY_TRUE, + &&index_idn, // INDEX_IDN, + &&assign_n, // ASSIGNN, + &&push_slot_nargoutn, // PUSH_SLOT_NARGOUTN, + &&subassign_id, // SUBASSIGN_ID, + &&end_id, // END_ID, + &&matrix, // MATRIX, + &&trans_mul, // TRANS_MUL, + &&mul_trans, // MUL_TRANS, + &&herm_mul, // HERM_MUL, + &&mul_herm, // MUL_HERM, + &&trans_ldiv, // TRANS_LDIV, + &&herm_ldiv, // HERM_LDIV, + &&wordcmd, // WORDCMD, + &&handle_signals, // HANDLE_SIGNALS, + &&push_cell, // PUSH_CELL, + &&push_ov_u64, // PUSH_OV_U64, + &&expand_cs_list, // EXPAND_CS_LIST, + &&index_cell_id0, // INDEX_CELL_ID_NARGOUT0, + &&index_cell_id1, // INDEX_CELL_ID_NARGOUT1, + &&index_cell_idn, // INDEX_CELL_ID_NARGOUTN, + &&incr_prefix, // INCR_PREFIX, + &&rot, // ROT, + &&init_global, // GLOBAL_INIT, + &&assign_compound, // ASSIGN_COMPOUND, + &&jmp_ifdef, // JMP_IFDEF, + &&switch_cmp, // JMP_IFNCASEMATCH, + &&braindead_precond, // BRAINDEAD_PRECONDITION, + &&braindead_warning, // BRAINDEAD_WARNING, + &&force_assign, // FORCE_ASSIGN, // Accepts undefined rhs + &&push_nil, // PUSH_NIL, + 
&&throw_iferrorobj, // THROW_IFERROBJ, + &&index_struct_n, // INDEX_STRUCT_NARGOUTN, + &&subasgn_struct, // SUBASSIGN_STRUCT, + &&subasgn_cell_id, // SUBASSIGN_CELL_ID, + &&index_obj, // INDEX_OBJ, + &&subassign_obj, // SUBASSIGN_OBJ, + &&matrix_big, // MATRIX_UNEVEN, + &&load_far_cst, // LOAD_FAR_CST, + &&end_obj, // END_OBJ, + &&set_ignore_outputs, // SET_IGNORE_OUTPUTS, + &&clear_ignore_outputs, // CLEAR_IGNORE_OUTPUTS, + &&subassign_chained, // SUBASSIGN_CHAINED, + &&set_slot_to_stack_depth, // SET_SLOT_TO_STACK_DEPTH, + &&dupn, // DUPN, + &&debug, // DEBUG, + &&index_struct_call, // INDEX_STRUCT_CALL, + &&end_x_n, // END_X_N, + &&eval, // EVAL, + &&bind_ans, // BIND_ANS, + &&push_anon_fcn_handle, // PUSH_ANON_FCN_HANDLE, + &&for_complex_setup, // FOR_COMPLEX_SETUP, // opcode + &&for_complex_cond, // FOR_COMPLEX_COND, + &&push_slot1_special, // PUSH_SLOT_NARGOUT1_SPECIAL, + &&disp, // DISP, + &&push_slot_disp, // PUSH_SLOT_DISP, + &&load_cst_alt2, // LOAD_CST_ALT2, + &&load_cst_alt3, // LOAD_CST_ALT3, + &&load_cst_alt4, // LOAD_CST_ALT4, + &&load_2_cst, // LOAD_2_CST, + &&mul_dbl, // MUL_DBL, + &&add_dbl, // ADD_DBL, + &&sub_dbl, // SUB_DBL, + &&div_dbl, // DIV_DBL, + &&pow_dbl, // POW_DBL, + &&le_dbl, // LE_DBL, + &&le_eq_dbl, // LE_EQ_DBL, + &&gr_dbl, // GR_DBL, + &&gr_eq_dbl, // GR_EQ_DBL, + &&eq_dbl, // EQ_DBL, + &&neq_dbl, // NEQ_DBL, + &&index_id1_mat_1d, // INDEX_ID1_MAT_1D, + &&index_id1_mat_2d, // INDEX_ID1_MAT_2D, + &&push_pi, // PUSH_PI, + &&index_math_ufun_id1, // INDEX_ID1_MATHY_UFUN, + &&subassign_id_mat_1d, // SUBASSIGN_ID_MAT_1D, + &&incr_id_prefix_dbl, // INCR_ID_PREFIX_DBL, + &&decr_id_prefix_dbl, // DECR_ID_PREFIX_DBL, + &&incr_id_postfix_dbl, // INCR_ID_POSTFIX_DBL, + &&decr_id_postfix_dbl, // DECR_ID_POSTFIX_DBL, + &&push_cst_dbl_0, // PUSH_DBL_0, + &&push_cst_dbl_1, // PUSH_DBL_1, + &&push_cst_dbl_2, // PUSH_DBL_2, + &&jmp_if_bool, // JMP_IF_BOOL, + &&jmp_ifn_bool, // JMP_IFN_BOOL, + &&usub_dbl, // USUB_DBL, + &&not_dbl, // NOT_DBL, + &&not_bool, // NOT_BOOL, + &&push_folded_cst, // PUSH_FOLDED_CST, + &&set_folded_cst, // SET_FOLDED_CST, + &&wide, // WIDE + }; + + if (OCTAVE_UNLIKELY (m_profiler_enabled)) + { + auto p = vm::m_vm_profiler; + if (p) + { + std::string fn_name = m_data[2].string_value (); // profiler_name () queried at compile time + p->enter_fn (fn_name, "", m_unwind_data, m_name_data, m_code); + } + } + +#if defined (__GNUC__) && defined (__x86_64__) + // We strongly suggest that GCC put sp, ip and bsp in actual registers with + // the "local register variable" extension. + // + // If GCC is not nudged to put these in registers, its register allocator + // might make the VM spend quite some time pushing to and popping from the C-stack. + register int arg0 asm("r12"); + register stack_element *sp asm("r14"); // Stack pointer register + register unsigned char *ip asm("r15"); // The instruction pointer register + register stack_element *bsp asm("r13"); // Base stack pointer +#else + int arg0; + stack_element *sp; + unsigned char *ip; + stack_element *bsp; +#endif + + unsigned char *code; // The instruction base register + + stack_element *rsp; // Root stack pointer. Marks the beginning of the VM stack + + octave_value *data = m_data; + std::string *name_data = m_name_data; + unwind_data *unwind_data = m_unwind_data; + + code = m_code; + ip = code; + + sp = bsp = rsp = m_stack; + + // Read the metadata for constructing a stack frame.
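+ // (Editor's sketch, not part of the patch.) The first four code bytes
+ // hold that metadata; a hypothetical standalone reader would be:
+ //
+ //   struct frame_meta { int n_returns, n_args, n_locals; };
+ //   static frame_meta read_frame_meta (const unsigned char *code)
+ //   {
+ //     // Negative n_returns marks varargout, negative n_args varargin.
+ //     return { static_cast<signed char> (code[0]),
+ //              static_cast<signed char> (code[1]),
+ //              code[2] | (code[3] << 8) }; // n_locals, little endian
+ //   }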
+ { +#define N_RETURNS() static_cast<signed char>(code[0]) +#define N_ARGS() static_cast<signed char>(code[1]) +#define N_LOCALS() (code[2] | (code [3] << 8)) + + int n_returns = static_cast<signed char> (*ip++); + // n_args is negative for varargin calls + int n_args = static_cast<signed char> (*ip++); + int n_locals = POP_CODE_USHORT (); // Note: An arg and return can share a slot + + bool is_varargin = n_args < 0; + bool is_varargout = n_returns < 0; + + int n_root_args = root_args.length (); + + if (is_varargin) + n_args = -n_args; + if (n_returns < 0) // Negative for varargout + n_returns = -n_returns; + + // The first return is always nargout, as a uint64 + (*sp++).u = 0; //TODO: nargout as arg to this func + + // Construct nil octave_values for the return slots + for (int i = 1; i < n_returns; i++) + PUSH_OV (); // TODO: Might be an arg, i.e. "[a,i] = foo (i,b)" + + // Push the args onto the stack, filling their local slots + if (!is_varargin) + { + int i = 0; + for (i = 0; i < n_root_args; i++) + PUSH_OV (root_args (i)); + // If not all args are given, fill up with nil objects + for (; i < n_args; i++) + PUSH_OV (); + + set_nargin (n_root_args); // Needed for nargin function + } + else + { + // Don't push varargin arguments + int n_args_to_push = std::min (n_args - 1, n_root_args); + int ii = 0; + for (ii = 0; ii < n_args_to_push; ii++) + PUSH_OV (root_args (ii)); + + // Construct missing args (if any) + for (; ii < n_args - 1; ii++) + PUSH_OV (); + + // The rest of the args are put in a cell that is placed + // in the last argument slot + int n_varargin = n_root_args - n_args_to_push; + + if (n_varargin > 0) + { + Cell cell(1, n_varargin); + int i; + for (i = 0; i < n_varargin; i++) + { + cell (0, i) = root_args (ii + i); + } + PUSH_OV (cell); + } + else + PUSH_OV (Cell (0,0)); // Empty cell into varargin's slot + + set_nargin (n_args_to_push + n_varargin); + } + // Construct nil octave_values for locals in their slots + for (int i = 0; i < n_locals - n_args - n_returns; i++) + PUSH_OV (); + + /* We do the number of args check after frame init so that the unwind is easier. */ + if (!is_varargin && n_args < n_root_args) + { + (*sp++).pee = new execution_exception {"error","","function called with too many inputs"}; + (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC); + ip++; // unwind expects ip to point to two after the opcode being executed + goto unwind; + } + if (!is_varargout && root_nargout > n_returns - 1) // n_returns includes %nargout, so subtract one + { + (*sp++).pee = new execution_exception {"error","","function called with too many outputs"}; + (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC); + ip++; + goto unwind; + } + + m_original_lvalue_list = m_tw->lvalue_list (); + m_tw->set_lvalue_list (nullptr); + } + + // Go go go + DISPATCH (); + +pop: + { + (*--sp).ov.~octave_value (); + DISPATCH_1BYTEOP (); + } +dup: + { + new (sp) octave_value ((sp[-1]).ov); + sp++; + DISPATCH_1BYTEOP (); + } +load_cst: + { + // The next instruction is the offset in the data.
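+ // (Editor's sketch, assumed encoding consistent with the operand table
+ // in opcodes_to_strings.) A statement like 'a = 1.5' compiles roughly to
+ //
+ //   LOAD_CST k      ; push data[k], the constant 1.5
+ //   ASSIGN  slot_a  ; pop the pushed value into a's slot
+ //
+ // with the one-byte operand k already fetched into arg0 by DISPATCH.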
+ int offset = arg0; + + // Copy construct it into the top of the stack + new (sp++) octave_value (data [offset]); + + DISPATCH (); + } +mul_dbl: + MAKE_BINOP_SPECIALIZED (m_fn_dbl_mul, mul, MUL, m_scalar_typeid) + DISPATCH_1BYTEOP(); +mul: + MAKE_BINOP_SELFMODIFYING (binary_op::op_mul, mul_dbl, MUL_DBL) + DISPATCH_1BYTEOP(); +div_dbl: + MAKE_BINOP_SPECIALIZED (m_fn_dbl_div, div, DIV, m_scalar_typeid) + DISPATCH_1BYTEOP(); +div: + MAKE_BINOP_SELFMODIFYING (binary_op::op_div, div_dbl, DIV_DBL) + DISPATCH_1BYTEOP(); +add_dbl: + MAKE_BINOP_SPECIALIZED (m_fn_dbl_add, add, ADD, m_scalar_typeid) + DISPATCH_1BYTEOP(); +add: + MAKE_BINOP_SELFMODIFYING (binary_op::op_add, add_dbl, ADD_DBL) + DISPATCH_1BYTEOP(); +sub_dbl: + MAKE_BINOP_SPECIALIZED (m_fn_dbl_sub, sub, SUB, m_scalar_typeid) + DISPATCH_1BYTEOP(); +sub: + MAKE_BINOP_SELFMODIFYING (binary_op::op_sub, sub_dbl, SUB_DBL) + DISPATCH_1BYTEOP(); +ret: + { + // We need to tell the bytecode frame we are unwinding so that it can save + // variables on the VM stack if it is referenced from somewhere else. + m_tw->get_current_stack_frame ()->vm_unwinds (); + + // Assert that the stack pointer is back where it should be + assert (bsp + N_LOCALS() == sp); + + int n_returns_callee = N_RETURNS (); + + bool is_varargout = n_returns_callee < 0; + if (n_returns_callee < 0) + n_returns_callee = -n_returns_callee; + + assert (n_returns_callee > 0); + int n_locals_callee = N_LOCALS (); + + // Destroy locals + // + // Note that we destroy from the bottom towards + // the top of the stack to call the dtors in the same + // order as the tree walker. + int n_dtor = n_locals_callee - n_returns_callee; + + stack_element *first = sp - n_dtor; + while (first != sp) + { + (*first++).ov.~octave_value (); + } + sp -= n_dtor; + + if (is_varargout) + { + n_returns_callee--; // Assume empty varargout + + // Expand the cell array and push the elements to the stack + octave_value ov_vararg = std::move (sp[-1].ov); + STACK_DESTROY (1); + + bool vararg_defined = ov_vararg.is_defined (); + if (vararg_defined && !ov_vararg.iscell ()) + { + (*sp++).pee = new execution_exception {"error","","varargout must be a cell array object"}; + (*sp++).i = static_cast<int>(error_type::EXECUTION_EXC); + goto unwind; + } + if (vararg_defined) + { + // Push the cell array elements to the stack + Cell cell_vararg = ov_vararg.cell_value (); + for (int i = 0; i < cell_vararg.numel (); i++) + { + octave_value &arg = cell_vararg(i); + PUSH_OV (std::move (arg)); + n_returns_callee++; + } + } + // Only push an empty varargout if we are returning to another bytecode function + else if (bsp != rsp) + { + n_returns_callee++; + PUSH_OV (); + } + } + + if (OCTAVE_UNLIKELY (m_profiler_enabled)) + { + auto p = vm::m_vm_profiler; + if (p) + { + std::string fn_name = data[2].string_value (); // profiler_name () queried at compile time + p->exit_fn (fn_name); + } + } + + // Are we at the root routine?
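+ // (Editor's note.) bsp == rsp means this is the outermost bytecode frame,
+ // so the results are handed back to the caller of execute_code below.
+ // Otherwise the caller is also bytecode, and the context saved by
+ // MAKE_BYTECODE_CALL,
+ //
+ //   [ first_arg | unwind_data | code | data | name_data | bsp | ip ]
+ //
+ // is popped again in reverse order further down.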
+ if (bsp == rsp) + { + CHECK (m_output_ignore_data == nullptr); // This can't be active + + octave_value_list ret; + + // Skip nargout, the first value, which is an integer + for (int i = 1; i < n_returns_callee; i++) + { + ret.append(std::move (bsp[i].ov)); + bsp[i].ov.~octave_value (); + } + + // Note: Stack frame object popped by caller + CHECK_STACK (0); + this->m_dbg_proper_return = true; + + m_tw->set_lvalue_list (m_original_lvalue_list); + return ret; + } + + // If the root stack pointer is not the same as the base pointer, + // we are returning from a bytecode routine to another bytecode routine, + // so we have to restore the caller stack frame and cleanup the callee's. + // + // Essentially do the same thing as in the call but in reverse order. + + // If we have any active ~/"black hole", e.g. [~] = foo(), in the stack + // the m_output_ignore_data pointer is live. We need to pop and reset + // lvalue lists for the tree walker. + if (m_output_ignore_data) + { + delete m_tw->lvalue_list (); + CHECK (!m_output_ignore_data->m_v_lvalue_list.empty ()); + + m_tw->set_lvalue_list (m_output_ignore_data->m_v_lvalue_list.back ()); + m_output_ignore_data->m_v_lvalue_list.pop_back (); + } + + // The sp now points one past the last return value + stack_element *caller_stack_end = sp - n_returns_callee; + sp = caller_stack_end; // sp points to one past caller stack + + int callee_nargout = caller_stack_end[0].i; + //TODO: Check nargout + + // Restore ip + ip = (*--sp).puc; + + // Restore bsp + bsp = (*--sp).pse; + + // Restore id names + name_data = (*--sp).ps; + + // Restore data + data = (*--sp).pov; + + // Restore code + code = (*--sp).puc; + + // Restore unwind data + unwind_data = (*--sp).pud; + + // Restore the stack pointer. The stored address is the first arg + // on the caller stack, or where it would have been if there are no args. + // The args were moved to the callee stack and destroyed on the caller + // stack in the call. + sp = sp[-1].pse; + + // We now have the object that was called on the stack, destroy it + STACK_DESTROY (1); + + // Move the callee's return values to the top of the stack of the caller. + // Renaming variables to keep my sanity. + int n_args_caller_expects = callee_nargout; + int n_args_callee_has = n_returns_callee - 1; // Exclude %nargout + int n_args_to_move = std::min (n_args_caller_expects, n_args_callee_has); + int n_args_actually_moved = 0; + + // If no return value is requested but return values exist, + // we need to push one to be able to write it to ans. + if (n_args_caller_expects == 0 && n_args_callee_has) + { + n_args_actually_moved++; + PUSH_OV (std::move (caller_stack_end[1].ov)); + } + // If the callee isn't returning anything, we need to push a + // nil object, since the caller always expects at least + // one object, even for nargout == 0. + else if (n_args_caller_expects == 0 && !n_args_callee_has) + PUSH_OV(); + // If the stacks will overlap due to many returns, do copy via container + else if (sp + n_args_caller_expects >= caller_stack_end) + { + // This pushes 'n_args_to_move' number of return values and 'n_args_caller_expects - n_args_to_move' + // number of nils.
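+ // (Editor's note.) The overlap guard above matters because caller and
+ // callee share one contiguous VM stack; copying via an intermediate
+ // octave_value_list avoids clobbering returns that are still unread.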
+ copy_many_args_to_caller (sp, caller_stack_end + 1, n_args_to_move, n_args_caller_expects); + n_args_actually_moved = n_args_caller_expects; + sp += n_args_actually_moved; + } + // Move 'n_args_to_move' return value from callee to caller + else + { + // If the caller wants '[a, b, ~]' and the callee has 'd e' + // we need to push 'nil' 'd' 'e' + for (int i = n_args_to_move; i < n_args_caller_expects; i++) + PUSH_OV (); + for (int i = 0; i < n_args_to_move; i++) + { + // Move into caller stack. Note that the order is reversed, such that + // a b c on the callee stack becomes c b a on the caller stack. + int idx = n_args_to_move - 1 - i; + octave_value &arg = caller_stack_end[1 + idx].ov; + + PUSH_OV (std::move (arg)); + } + n_args_actually_moved = n_args_caller_expects; + } + + // Destroy the unused return values on the callee stack + for (int i = 0; i < n_args_callee_has; i++) + { + int idx = n_args_callee_has - 1 - i; + caller_stack_end[1 + idx].ov.~octave_value (); // Destroy ov in callee + } + + // Pop the current dynamic stack frame + std::shared_ptr<stack_frame> fp = m_tw->pop_return_stack_frame (); + // If the pointer is not shared, stash it in a cache which is used + // to avoid having to allocate shared pointers each frame push. + if (fp.unique () && m_frame_ptr_cache.size () < 8) + { + fp->vm_clear_for_cache (); + m_frame_ptr_cache.push_back (std::move (fp)); + } + + // Continue execution back in the caller + } + DISPATCH (); +assign: + { + // The next instruction is the slot number + int slot = arg0; + + octave_value_vm &ov_rhs = TOP_OV_VM (); + octave_value_vm &ov_lhs = bsp[slot].ov_vm; + + // Handle undefined, cs-lists, objects that need a unique call etc + // in a separate code block to keep assign short. + if (OCTAVE_UNLIKELY (ov_rhs.vm_need_dispatch_assign_rhs () || + ov_lhs.vm_need_dispatch_assign_lhs ())) + goto assign_dispatch; + + ov_lhs = std::move (ov_rhs); // Note move + + ov_rhs.~octave_value_vm (); // Destroy the top of the stack. + STACK_SHRINK (1); + } + DISPATCH(); + +// Note: Not an op-code. Only jumped to from assign above. +assign_dispatch: +{ + // Extract the slot number again + int slot = arg0; + + octave_value &ov_rhs = TOP_OV (); + octave_value &ov_lhs = bsp[slot].ov; + + // If rhs is a "comma separated list" we just assign the first one. + // E.g.: + // a = {1,2,3}; + // b = a{:}; % This assignment + // + // TODO: Do some smart function in ov for this? + // Combine with undefined check? + if (ov_rhs.is_cs_list ()) + { + const octave_value_list lst = ov_rhs.list_value (); + + if (lst.empty ()) + { + // TODO: Need id, name + (*sp++).i = static_cast<int>(error_type::INVALID_N_EL_RHS_IN_ASSIGNMENT); + goto unwind; + } + + ov_rhs = lst(0); + } + + if (ov_rhs.is_undefined ()) + { + // TODO: Need id, name + (*sp++).i = static_cast<int>(error_type::RHS_UNDEF_IN_ASSIGNMENT); + goto unwind; + } + + // If the object in the slot is the last reference to it, we need + // to call its object dtor. + // TODO: Probably not needed since the Octave dtor will be called + // by the C++ dtor when ov_lhs's m_count is 0??? The assign + // function calls this function though ...
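+ // (Editor's note, illustrative.) The rest of the slow path: run the old
+ // value's dtor hook, make a lazily-copied rhs storable, then store
+ // directly or, for a global/persistent slot, write through the ref, e.g.
+ //
+ //   global g; g = 1; % takes the ref_rep ()->set_value () branch below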
+ ov_lhs.maybe_call_dtor (); + + if (ov_rhs.vm_need_storable_call ()) + ov_rhs.make_storable_value (); // Some types have lazy copy + + if (OCTAVE_LIKELY (!ov_lhs.is_ref ())) + ov_lhs = std::move (ov_rhs); // Note move + else + ov_lhs.ref_rep ()->set_value (std::move (ov_rhs)); + + STACK_DESTROY (1); +} +DISPATCH(); + +jmp_if_bool: +{ + octave_value_vm &ov_1 = TOP_OV_VM (); + + if (OCTAVE_UNLIKELY (ov_1.type_id () != m_bool_typeid)) + { + // Change the specialized opcode to the generic one + ip[-2] = static_cast (INSTR::JMP_IF); + goto jmp_if; + } + + unsigned char b0 = arg0; + unsigned char b1 = *ip++; + + int target = b0 | (b1 << 8); + + octave_bool &ovb_bool = REP (octave_bool, ov_1); + + bool is_true = ovb_bool.octave_bool::is_true (); + + ov_1.~octave_value_vm (); + STACK_SHRINK (1); + + if (is_true) + ip = code + target; +} +DISPATCH (); + +jmp_if: + { + octave_value &ov_1 = TOP_OV (); + + if (OCTAVE_UNLIKELY (ov_1.type_id () == m_bool_typeid)) + { + // Change the generic opcode to the specialized one + ip[-2] = static_cast (INSTR::JMP_IF_BOOL); + goto jmp_if_bool; + } + + unsigned char b0 = arg0; + unsigned char b1 = *ip++; + + int target = b0 | (b1 << 8); + + bool is_true; + if (ov_1.is_defined ()) + { + try + { + is_true = ov_1.is_true (); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + else + { + (*sp++).i = static_cast(error_type::IF_UNDEFINED); + goto unwind; + } + + STACK_DESTROY (1); + + if (is_true) + ip = code + target; + } + DISPATCH(); +jmp: + { + unsigned char b0 = arg0; + unsigned char b1 = *ip++; + + int target = b0 | (b1 << 8); + ip = code + target; + } + DISPATCH (); +jmp_ifn_bool: +{ + octave_value_vm &ov_1 = TOP_OV_VM (); + + if (OCTAVE_UNLIKELY (ov_1.type_id () != m_bool_typeid)) + { + // Change the specialized opcode to the generic one + ip[-2] = static_cast (INSTR::JMP_IFN); + goto jmp_ifn; + } + + unsigned char b0 = arg0; + unsigned char b1 = *ip++; + + int target = b0 | (b1 << 8); + + octave_bool &ovb_bool = REP (octave_bool, ov_1); + + bool is_true = ovb_bool.octave_bool::is_true (); + + ov_1.~octave_value_vm (); + STACK_SHRINK (1); + + if (!is_true) + ip = code + target; +} +DISPATCH (); + +jmp_ifn: + { + octave_value &ov_1 = TOP_OV (); + + if (OCTAVE_UNLIKELY (ov_1.type_id () == m_bool_typeid)) + { + // Change the generic opcode to the specialized one + ip[-2] = static_cast (INSTR::JMP_IFN_BOOL); + goto jmp_ifn_bool; + } + + unsigned char b0 = arg0; + unsigned char b1 = *ip++; + + int target = b0 | (b1 << 8); + + bool is_true; + if (ov_1.is_defined ()) //10 + { + try + { + is_true = ov_1.is_true (); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + else + { + (*sp++).i = static_cast (error_type::IF_UNDEFINED); + goto unwind; + } + + STACK_DESTROY (1); + + if (!is_true) + ip = code + target; + } + DISPATCH (); +push_slot_nargoutn: + { + // The next instruction is the slot number + int slot = arg0; + + octave_value &ov = bsp[slot].ov; + + // Handle undefined (which might be an error or function + // call on command form) or a function object. + if (ov.is_maybe_function ()) + goto cmd_fcn_or_undef_error; + + ip++; // nargout not needed + + // Push the value in the slot to the stack + if (OCTAVE_LIKELY (!ov.is_ref ())) + PUSH_OV (ov); + else + PUSH_OV (ov.ref_rep ()->deref ()); // global, persistent ... 
need dereferencing + } + DISPATCH(); +set_folded_cst: +{ + int slot = arg0; + octave_cached_value *ovb = static_cast (bsp[slot].ovb); + ovb->set_cached_obj (std::move (TOP_OV ())); + STACK_DESTROY (1); +} +DISPATCH(); +push_folded_cst: + { + int slot = arg0; + unsigned char b0 = *ip++; + unsigned char b1 = *ip++; + + octave_cached_value *ovb = static_cast (bsp[slot].ovb); + if (ovb->is_defined () && ovb->cache_is_valid ()) + { + PUSH_OV (ovb->get_cached_value ()); + int target = b0 | (b1 << 8); + ip = code + target; + } + else + { + bsp[slot].ov = octave_value {new octave_cached_value}; + } + } + DISPATCH(); + +push_slot_nargout0: +push_slot_nargout1: +push_slot1_special: + { + int slot = arg0; + + octave_base_value *ovb = bsp[slot].ovb; + + // Some ov:s need some checks before pushing + if (OCTAVE_UNLIKELY (ovb->vm_need_dispatch_push ())) + goto push_slot_dispatch; + + PUSH_OVB (ovb); + } + DISPATCH(); +// This is not an op-code and is only jumped to from above opcode. +push_slot_dispatch: + { + int slot = arg0; + + octave_value &ov = bsp[slot].ov; + + // Handle some special cases separately. + // I.e. cmd fn calls or classdef metas. + // Also error if no function-ish thing is found + // in lookups. + if (ov.is_maybe_function ()) + goto cmd_fcn_or_undef_error; + + // Push the value in the slot to the stack + if (OCTAVE_LIKELY (!ov.is_ref ())) + PUSH_OV (ov); + else + PUSH_OV (ov.ref_rep ()->deref ()); // global, persistent ... need dereferencing + } + DISPATCH(); + +disp: + { + octave_value &ov = TOP_OV (); + // 0 is magic slot number that indicates no name or always not a command + // for this opcode. + int slot = arg0; + int slot_was_cmd = POP_CODE_USHORT (); // Marker for if the preceding call was a command call + + bool call_was_cmd = false; + if (slot_was_cmd) + { + octave_value &ov_call_was_cmd = bsp[slot_was_cmd].ov; + if (ov_call_was_cmd.is_defined ()) + call_was_cmd = true; + } + + if (m_tw->statement_printing_enabled () && ov.is_defined ()) + { + interpreter& interp = m_tw->get_interpreter (); + + if (ov.is_cs_list ()) + { + octave_value_list ovl = ov.list_value (); + + for (int i = 0; i < ovl.length (); i++) + { + octave_value_list el_ovl = octave_value_list {ovl(i)}; + el_ovl.stash_name_tags (string_vector ("ans")); + m_tw->set_active_bytecode_ip (ip - code); // Needed if display calls inputname() + + try + { + interp.feval ("display", el_ovl); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + } + else + { + octave_value_list ovl; + ovl.append (ov); + + if (call_was_cmd) + ovl.stash_name_tags (string_vector ("ans")); + else if (slot != 0) + ovl.stash_name_tags (string_vector (name_data[slot])); + else + ovl.stash_name_tags (string_vector {}); + + m_tw->set_active_bytecode_ip (ip - code); // Needed if display calls inputname() + + try + { + interp.feval ("display", ovl); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + } + } + + STACK_DESTROY (1); + } + DISPATCH (); + +push_slot_disp: + { + int slot = arg0; + int slot_was_cmd = POP_CODE_USHORT (); + octave_value &ov = bsp[slot].ov; + octave_value &ov_was_cmd = bsp[slot_was_cmd].ov; + + // Handle some special cases separately. + // I.e. cmd fn calls or classdef metas. + // Also error if no function-ish thing is found + // in lookups. + + // Assume that the pushed slot will not be a cmd. 
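+  // ov_was_cmd is cleared here and only set again if the lookup below
+  // finds an undefined identifier, i.e. a potential command call.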
+ // disp will later use the ov_was_cmd slot to choose between printing + // 'ans = ...' or 'foo = ...' + ov_was_cmd = octave_value (); + + if (ov.is_maybe_function ()) + { + if (ov.is_undefined ()) // class objects are defined + ov_was_cmd = true; + ip -= 2; // Rewind to slot so the state matches 'push_slot_nargoutn' and 'push_slot_dispatch'. + goto cmd_fcn_or_undef_error; + } + + // Push the value in the slot to the stack + if (OCTAVE_LIKELY (!ov.is_ref ())) + PUSH_OV (ov); + else + PUSH_OV (ov.ref_rep ()->deref ()); // global, persistent ... need dereferencing + } + DISPATCH(); + +// Some kludge to handle the possibility of command form function calls. +cmd_fcn_or_undef_error: + { + int slot = arg0; + octave_value ov = bsp[slot].ov; + bool is_ref = ov.is_ref (); + if (is_ref) + ov = ov.ref_rep ()->deref (); + + // Check to opcode to see how many nargout there are. + // Also skip ip to the end of the opcode. + int nargout; + bool push_classdef_metas = false; + INSTR opcode = static_cast (*(ip - 2)); + if (opcode == INSTR::PUSH_SLOT_NARGOUT1 || + opcode == INSTR::PUSH_PI) + nargout = 1; + else if (opcode == INSTR::PUSH_SLOT_NARGOUT0) + nargout = 0; + else if (opcode == INSTR::PUSH_SLOT_NARGOUTN) + nargout = *ip++; + else if (opcode == INSTR::PUSH_SLOT_NARGOUT1_SPECIAL) + { + push_classdef_metas = true; + nargout = 1; + } + else if (opcode == INSTR::PUSH_SLOT_DISP) + { + nargout = 0; + ip += 2; // Skip the maybe command slot + } + else + PANIC ("Invalid opcode"); + + // A global or persistent var should not be undefined but whatever check anyway + bool ov_defined1 = ov.is_defined (); + if (is_ref && !ov_defined1) + { + (*sp++).ps = new std::string {name_data[slot]}; + (*sp++).i = static_cast(error_type::ID_UNDEFINED); + goto unwind; + } + + if (!ov_defined1 && ov.is_nil ()) + { + ov = bsp[slot].ov = + octave_value (new octave_fcn_cache (name_data[slot])); + } + + if (!ov_defined1 && ov.is_function_cache ()) + { + try + { + octave_fcn_cache *ovb_cache = ov.fcn_cache_value (); + ov = ovb_cache->get_cached_obj ({}); + } + CATCH_EXECUTION_EXCEPTION + } + + if (! ov.is_defined ()) + { + (*sp++).ps = new std::string {name_data[slot]}; + (*sp++).i = static_cast(error_type::ID_UNDEFINED); + goto unwind; + } + + // When executing op-code PUSH_SLOT_NARGOUT1_SPECIAL ... + // Essentially if we have a foo{1} where foo is a classdef + // we need to push it for the {1} indexing. + if (push_classdef_metas && ov.is_classdef_meta ()) + PUSH_OV (ov); + else if (ov.is_function ()) + { + octave_function *fcn = ov.function_value (true); //TODO: Unwind on error? + + // TODO: Bytecode call + if (fcn) + { + try + { + m_tw->set_active_bytecode_ip (ip - code); + octave_value_list ovl = fcn->call (*m_tw, nargout); + + EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (ovl, nargout); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + } + else + PUSH_OV (ov); // TODO: The walker does this. Sane? + } + else + PUSH_OV (ov); // TODO: The walker does this. Sane? 
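+    // In every non-error branch a value has been pushed (or the callee's
+    // return values expanded onto the stack), so execution resumes at the
+    // next opcode.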
+  }
+  DISPATCH ();
+le_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_le, le, LE, m_scalar_typeid)
+  DISPATCH_1BYTEOP ();
+le:
+  MAKE_BINOP_SELFMODIFYING (binary_op::op_lt, le_dbl, LE_DBL)
+  DISPATCH_1BYTEOP ();
+le_eq_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_le_eq, le_eq, LE_EQ, m_scalar_typeid)
+  DISPATCH_1BYTEOP ();
+le_eq:
+  MAKE_BINOP_SELFMODIFYING (binary_op::op_le, le_eq_dbl, LE_EQ_DBL)
+  DISPATCH_1BYTEOP ();
+gr_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_gr, gr, GR, m_scalar_typeid)
+  DISPATCH_1BYTEOP ();
+gr:
+  MAKE_BINOP_SELFMODIFYING (binary_op::op_gt, gr_dbl, GR_DBL)
+  DISPATCH_1BYTEOP ();
+gr_eq_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_gr_eq, gr_eq, GR_EQ, m_scalar_typeid)
+  DISPATCH_1BYTEOP ();
+gr_eq:
+  MAKE_BINOP_SELFMODIFYING (binary_op::op_ge, gr_eq_dbl, GR_EQ_DBL)
+  DISPATCH_1BYTEOP ();
+eq_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_eq, eq, EQ, m_scalar_typeid)
+  DISPATCH_1BYTEOP ();
+eq:
+  MAKE_BINOP_SELFMODIFYING (binary_op::op_eq, eq_dbl, EQ_DBL)
+  DISPATCH_1BYTEOP ();
+neq_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_neq, neq, NEQ, m_scalar_typeid)
+  DISPATCH_1BYTEOP ();
+neq:
+  MAKE_BINOP_SELFMODIFYING (binary_op::op_ne, neq_dbl, NEQ_DBL)
+  DISPATCH_1BYTEOP ();
+
+
+index_id1_mat_1d:
+{
+  int slot = arg0;
+  ip++; // n_args_on_stack ignored
+
+  octave_base_value *arg1 = TOP_OVB ();
+  octave_value &mat = SEC_OV ();
+
+  bool is_scalar = arg1->type_id () == m_scalar_typeid; // scalar is C "double"
+  bool is_mat = mat.is_full_num_matrix ();
+  // If the args have changed types we need to use the generic index opcode
+  if (OCTAVE_UNLIKELY (!is_scalar || !is_mat))
+    {
+      // Rewind ip to n_args_on_stack
+      ip -= 1;
+      int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+      // Change the specialized opcode to the generic one
+      ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::INDEX_ID_NARGOUT1);
+      goto index_id1;
+    }
+
+  try
+    {
+      octave_scalar *arg1_double = static_cast<octave_scalar *> (arg1);
+
+      double idx_double = arg1_double->double_value ();
+      octave_idx_type idx = static_cast<octave_idx_type> (idx_double);
+
+      if (static_cast<double> (idx) != idx_double)
+        err_invalid_index (idx_double - 1, // Expects zero-indexed index
+                           1,              // The 1st index has the wrong dimension
+                           1);             // Total number of dimensions
+      if (idx <= 0)
+        err_invalid_index (idx - 1, 1, 1);
+
+      // Arguments are one-indexed but checked_full_matrix_elem() is 0-indexed.
+      octave_value ans = mat.checked_full_matrix_elem (idx - 1);
+      STACK_DESTROY (2);
+      PUSH_OV (std::move (ans));
+    }
+  CATCH_INTERRUPT_EXCEPTION
+  CATCH_INDEX_EXCEPTION_WITH_NAME
+  CATCH_EXECUTION_EXCEPTION
+  CATCH_BAD_ALLOC
+  CATCH_EXIT_EXCEPTION
+}
+DISPATCH ();
+
+index_id1_mat_2d:
+{
+  int slot = arg0;
+  ip++; // n_args_on_stack ignored
+
+  octave_base_value *arg2 = TOP_OVB (); // Column index
+  octave_base_value *arg1 = SEC_OVB (); // Row index
+  octave_value &mat = THIRD_OV ();
+
+  bool is_scalar; // scalar as in C "double"
+  is_scalar = arg1->type_id () == m_scalar_typeid;
+  is_scalar = arg2->type_id () == m_scalar_typeid && is_scalar;
+
+  bool is_mat = mat.is_full_num_matrix ();
+  // If the args have changed types we need to use the generic index opcode
+  if (OCTAVE_UNLIKELY (!is_scalar || !is_mat))
+    {
+      // Rewind ip to n_args_on_stack
+      ip -= 1;
+      int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+      // Change the specialized opcode to the generic one
+      ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::INDEX_ID_NARGOUT1);
+      goto index_id1;
+    }
+
+  try
+    {
+      octave_scalar *arg1_double = static_cast<octave_scalar *> (arg1);
+
+      double idx1_double = arg1_double->double_value ();
+      octave_idx_type idx1 = static_cast<octave_idx_type> (idx1_double);
+
+      if (static_cast<double> (idx1) != idx1_double)
+        err_invalid_index (idx1_double - 1, // Expects zero-indexed index
+                           1,               // The 1st index has the wrong dimension
+                           2);              // Total number of dimensions
+      if (idx1 <= 0)
+        err_invalid_index (idx1 - 1, 1, 2);
+
+      octave_scalar *arg2_double = static_cast<octave_scalar *> (arg2);
+
+      double idx2_double = arg2_double->double_value ();
+      octave_idx_type idx2 = static_cast<octave_idx_type> (idx2_double);
+
+      if (static_cast<double> (idx2) != idx2_double)
+        err_invalid_index (idx2_double - 1, // Expects zero-indexed index
+                           2,               // The 2nd index has the wrong dimension
+                           2);              // Total number of dimensions
+      if (idx2 <= 0)
+        err_invalid_index (idx2 - 1, 2, 2);
+
+      // Arguments are one-indexed but checked_full_matrix_elem() is 0-indexed.
+      octave_value ans = mat.checked_full_matrix_elem (idx1 - 1, idx2 - 1);
+      STACK_DESTROY (3);
+      PUSH_OV (std::move (ans));
+    }
+  CATCH_INTERRUPT_EXCEPTION
+  CATCH_INDEX_EXCEPTION_WITH_NAME
+  CATCH_EXECUTION_EXCEPTION
+  CATCH_BAD_ALLOC
+  CATCH_EXIT_EXCEPTION
+}
+DISPATCH ();
+
+index_math_ufun_id1:
+{
+  auto ufn = static_cast<octave_base_value::unary_mapper_t> (arg0);
+  ip++; // slot number ignored
+  ip++; // "n_args_on_stack" ignored. Always 1
+
+  // The object to index is before the arg on the stack
+  octave_value &arg = TOP_OV ();
+  octave_value &ov = SEC_OV ();
+
+  if (OCTAVE_UNLIKELY (arg.type_id () != m_scalar_typeid ||
+                       !ov.is_function_cache ()))
+    {
+      ip -= 1;       // Rewind ip to n_args_on_stack
+      arg0 = ip[-1]; // set arg0 to slot
+      goto index_math_ufun_id1_dispatch;
+    }
+
+  // We need to check that the user has not defined some function
+  // that overrides the builtin ones.
+  octave_function *fcn;
+  try
+    {
+      fcn = ov.get_cached_fcn ({arg});
+    }
+  CATCH_EXECUTION_EXCEPTION // parse errors might throw in classdefs
+
+  if (OCTAVE_UNLIKELY (!fcn->is_builtin_function ()))
+    {
+      ip -= 1;       // Rewind ip to n_args_on_stack
+      arg0 = ip[-1]; // set arg0 to slot
+      goto index_math_ufun_id1_dispatch;
+    }
+
+  octave_scalar *ovb_arg = static_cast<octave_scalar *> (TOP_OVB ());
+
+  SEC_OV () = ovb_arg->octave_scalar::map (ufn);
+  STACK_DESTROY (1);
+}
+DISPATCH ();
+
+push_pi:
+// Specialization to push pi fast as a scalar.
+//
+// If the user has messed up 'pi', opcode PUSH_SLOT_NARGOUT1
+// is used instead.
+{
+  // The next instruction is the slot number
+  int slot = arg0;
+
+  octave_value &ov = bsp[slot].ov;
+  // If the slot value is not a function cache we do a
+  // PUSH_SLOT_NARGOUT1 which will most likely put a
+  // function cache in the slot (unless the user has done a
+  // "pi = 123;" or whatever).
+  if (OCTAVE_UNLIKELY (!ov.is_function_cache ()))
+    {
+      goto push_slot_nargout1;
+    }
+
+  // We need to check that the user has not defined some pi function
+  octave_function *fcn;
+  try
+    {
+      fcn = ov.get_cached_fcn ({});
+    }
+  CATCH_EXECUTION_EXCEPTION // parse errors might throw in classdefs
+
+  if (OCTAVE_UNLIKELY (fcn != m_pi_builtin_fn))
+    {
+      goto push_slot_nargout1;
+    }
+
+  // The user wants to push 3.1415...
+  PUSH_OV (ov_pi);
+}
+DISPATCH ();
+
+  {
+    // TODO: Too much code. Should be broken out?
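+    // The 'if (0)' ladder below gives each index opcode its own entry
+    // label while sharing one body; the entries differ only in how they
+    // set nargout and specialization_ok.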
+ + // Note: Beutifully interleaved if branches and goto labels + int nargout, slot; + bool specialization_ok; + if (0) + { +index_idn: + slot = arg0; // Needed if we need a function lookup + nargout = *ip++; + specialization_ok = false; + } + else if (0) + { +index_id1: + slot = arg0; + nargout = 1; + specialization_ok = true; + } + else if (0) + { +index_id_nargout0: + slot = arg0; + nargout = 0; + specialization_ok = false; + } + else + { +index_math_ufun_id1_dispatch: // Escape dispatch for index_math_ufun_id1 specialization + slot = arg0; + nargout = 1; + specialization_ok = false; + } + + int n_args_on_stack = *ip++; + + // The object to index is before the args on the stack + octave_value &ov = (sp[-1 - n_args_on_stack]).ov; + + // TODO: The ovl should not be needed + // Make an ovl with the args + // TODO: Should be inplace moves + octave_value_list ovl; + bool has_cs_list_arg = false; + // The operands are on the top of the stack + + bool all_args_double = true; + for (int i = n_args_on_stack - 1; i >= 0; i--) + { + octave_value &arg = sp[-1 - i].ov; + if (arg.type_id () != m_scalar_typeid) + all_args_double = false; + // If the operand arg is a cs list we need to expand it + if (arg.is_cs_list ()) + { + has_cs_list_arg = true; + ovl.append (arg.list_value ()); + } + else + ovl.append (sp[-1 - i].ov); // TODO: copied, not moved + } + + // If the ov is a "full matrix", i.e. based on octave_base_matrix, + // and the arguments are all scalar, we modify this opcode to a + // specialized opcode for matrix scalar indexing. + if (nargout == 1 && all_args_double && ov.is_full_num_matrix () && specialization_ok) + { + if (n_args_on_stack == 1) + { + ip -= 1; + int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back + + CHECK (ip[-2 + wide_opcode_offset] == static_cast (INSTR::INDEX_ID_NARGOUT1)); + ip[-2 + wide_opcode_offset] = static_cast (INSTR::INDEX_ID1_MAT_1D); + + goto index_id1_mat_1d; + } + else if (n_args_on_stack == 2) + { + ip -= 1; + int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back + + CHECK (ip[-2 + wide_opcode_offset] == static_cast (INSTR::INDEX_ID_NARGOUT1)); + ip[-2 + wide_opcode_offset] = static_cast (INSTR::INDEX_ID1_MAT_2D); + + goto index_id1_mat_2d; + } + } + + //TODO: Are the args really destroyed in all paths? Remember cell too + + // octave_fcn_cache and some octave_fcn_handle have caches + bool has_function_cache = ov.has_function_cache (); + + if (! has_function_cache && ov.is_defined ()) + { + // It is probably a variable + octave_value_list retval; + + if (OCTAVE_LIKELY (! ov.is_function () + || ov.is_classdef_meta ())) + { + try + { + m_tw->set_active_bytecode_ip (ip - code); + retval = ov.simple_subsref ('(', ovl, nargout); + ovl.clear (); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION_WITH_NAME + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + } + else + TODO ("Silly state"); + + ov = octave_value (); + + STACK_DESTROY (n_args_on_stack + 1); + EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (retval, nargout); + } + else if (has_function_cache) + { +// The else clause bellow jumps to here +querry_fcn_cache: + + octave_function *fcn; + try + { + fcn = ov.get_cached_fcn (ovl); + } + CATCH_EXECUTION_EXCEPTION // parse errors might throw in classdefs + + if (! 
fcn) + { + (*sp++).ps = new std::string {name_data[slot]}; + (*sp++).i = static_cast(error_type::ID_UNDEFINED); + goto unwind; + } + else if (fcn->is_compiled ()) + { + octave_user_function *usr_fcn = static_cast (fcn); + + // Alot of code in this define + MAKE_BYTECODE_CALL + + // Now dispatch to first instruction in the + // called function + } + else + { + try + { + m_tw->set_active_bytecode_ip (ip - code); + octave_value_list ret = fcn->call (*m_tw, nargout, ovl); + + STACK_DESTROY (n_args_on_stack + 1); + EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (ret, nargout); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + } + else + { + // It is probably a function call + if (! ov.is_nil ()) + { + PRINT_VM_STATE("err %s" COMMA name_data[slot].c_str ()); + TODO ("Not nil object for fcn cache replacement"); + } + + // Put a function cache object in the slot and in the local ov + ov = bsp[slot].ov = + octave_value (new octave_fcn_cache (name_data[slot])); + goto querry_fcn_cache; // Jump into the if clause above + } + } + DISPATCH (); + +push_slot_indexed: + { + // The next instruction is the slot number + int slot = arg0; + octave_value &ov = bsp[slot].ov; + + // Unlike push_slot this can't be a command function call + // so we don't need to check if this is a function. + + // Push the value in the slot to the stack + if (OCTAVE_LIKELY (!ov.is_ref ())) + PUSH_OV (ov); + else + PUSH_OV (ov.ref_rep ()->deref ()); // global, persistent ... need dereferencing + + } + DISPATCH(); + +pow_dbl: + MAKE_BINOP_SPECIALIZED (m_fn_dbl_pow, pow, POW, m_scalar_typeid) + DISPATCH_1BYTEOP(); +pow: + MAKE_BINOP_SELFMODIFYING(binary_op::op_pow, pow_dbl, POW_DBL) + DISPATCH_1BYTEOP(); +ldiv: + MAKE_BINOP(binary_op::op_ldiv) + DISPATCH_1BYTEOP(); +el_mul: + MAKE_BINOP(binary_op::op_el_mul) + DISPATCH_1BYTEOP(); +el_div: + MAKE_BINOP(binary_op::op_el_div) + DISPATCH_1BYTEOP(); +el_pow: + MAKE_BINOP(binary_op::op_el_pow) + DISPATCH_1BYTEOP(); +el_and: + MAKE_BINOP(binary_op::op_el_and) + DISPATCH_1BYTEOP(); +el_or: + MAKE_BINOP(binary_op::op_el_or) + DISPATCH_1BYTEOP(); +el_ldiv: + MAKE_BINOP(binary_op::op_el_ldiv) + DISPATCH_1BYTEOP(); + +not_dbl: +MAKE_UNOP_SPECIALIZED (m_fn_dbl_not, op_not, NOT, m_scalar_typeid); +DISPATCH_1BYTEOP (); + +not_bool: +MAKE_UNOP_SPECIALIZED (m_fn_bool_not, op_not, NOT, m_bool_typeid); +DISPATCH_1BYTEOP (); + +op_not: + { + octave_value &ov = TOP_OV (); + + int type_id = ov.type_id (); + if (OCTAVE_UNLIKELY (type_id == m_scalar_typeid)) + { + // Change the generic opcode to the specialized one + ip[-2] = static_cast (INSTR::NOT_DBL); + goto not_dbl; + } + else if (OCTAVE_UNLIKELY (type_id == m_bool_typeid)) + { + // Change the generic opcode to the specialized one + ip[-2] = static_cast (INSTR::NOT_BOOL); + goto not_bool; + } + + try + { + octave_value ans = unary_op (*m_ti, octave_value::unary_op::op_not, + ov); + ov.~octave_value (); + + STACK_SHRINK (1); + + new (sp++) octave_value (std::move (ans)); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + DISPATCH_1BYTEOP(); +uadd: + { + octave_value &ov = TOP_OV (); + + try + { + octave_value ans = unary_op (*m_ti, octave_value::unary_op::op_uplus, + ov); + ov.~octave_value (); + + STACK_SHRINK (1); + + new (sp++) octave_value (std::move (ans)); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + 
DISPATCH_1BYTEOP(); + +usub_dbl: +MAKE_UNOP_SPECIALIZED (m_fn_dbl_usub, usub, USUB, m_scalar_typeid); +DISPATCH_1BYTEOP (); +usub: + { + octave_value &ov = TOP_OV (); + + if (OCTAVE_UNLIKELY (ov.type_id () == m_scalar_typeid)) + { + // Change the generic opcode to the specialized one + ip[-2] = static_cast (INSTR::USUB_DBL); + goto usub_dbl; + } + + try + { + octave_value ans = unary_op (*m_ti, octave_value::unary_op::op_uminus, + ov); + ov.~octave_value (); + + STACK_SHRINK (1); + + new (sp++) octave_value (std::move (ans)); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + DISPATCH_1BYTEOP(); +trans: + { + octave_value &ov = TOP_OV (); + + try + { + octave_value ans = unary_op (*m_ti, + octave_value::unary_op::op_transpose, + ov); + ov.~octave_value (); + + STACK_SHRINK (1); + + new (sp++) octave_value (std::move (ans)); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + DISPATCH_1BYTEOP(); +herm: + { + octave_value &ov = TOP_OV (); + + try + { + octave_value ans = unary_op (*m_ti, + octave_value::unary_op::op_hermitian, + ov); + ov.~octave_value (); + + STACK_SHRINK (1); + + new (sp++) octave_value (std::move (ans)); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + DISPATCH_1BYTEOP(); + +incr_id_prefix_dbl: + { + int slot = arg0; + + octave_value &ov = bsp[slot].ov; + + if (ov.type_id () != m_scalar_typeid) + { + int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back + // Change the specialized opcode to the generic one + ip[-2 + wide_opcode_offset] = static_cast (INSTR::INCR_ID_PREFIX); + goto incr_id_prefix; + } + + octave_scalar &scalar = REP (octave_scalar, ov); + double val = scalar.octave_scalar::double_value (); + + if (!scalar.octave_scalar::maybe_update_double (val + 1)) + ov = octave_value_factory::make (val + 1); + + PUSH_OV (ov); + } + DISPATCH(); +incr_id_prefix: + { + int slot = arg0; + + octave_value &ov = bsp[slot].ov; + + if (ov.type_id () == m_scalar_typeid) + { + int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back + // Change the generic opcode to the specialized one + ip[-2 + wide_opcode_offset] = static_cast (INSTR::INCR_ID_PREFIX_DBL); + goto incr_id_prefix_dbl; + } + + try + { + if (OCTAVE_LIKELY (!ov.is_ref ())) + { + ov.non_const_unary_op (octave_value::unary_op::op_incr); + PUSH_OV (ov); + } + else + { + octave_value &ov_glb = ov.ref_rep ()->ref (); + ov_glb.non_const_unary_op (octave_value::unary_op::op_incr); + PUSH_OV (ov_glb); + } + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + DISPATCH(); + +decr_id_prefix_dbl: + { + int slot = arg0; + + octave_value &ov = bsp[slot].ov; + + if (ov.type_id () != m_scalar_typeid) + { + int wide_opcode_offset = slot < 256 ? 
0 : -1; // If WIDE is used, we need to look further back + ip[-2 + wide_opcode_offset] = static_cast (INSTR::DECR_ID_PREFIX); + goto decr_id_prefix; + } + + octave_scalar &scalar = REP (octave_scalar, ov); + double val = scalar.octave_scalar::double_value (); + + if (!scalar.octave_scalar::maybe_update_double (val - 1)) + ov = octave_value_factory::make (val - 1); + + PUSH_OV (ov); + } + DISPATCH(); +decr_id_prefix: + { + int slot = arg0; + + octave_value &ov = bsp[slot].ov; + + if (ov.type_id () == m_scalar_typeid) + { + int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back + ip[-2 + wide_opcode_offset] = static_cast (INSTR::DECR_ID_PREFIX_DBL); + goto decr_id_prefix_dbl; + } + + try + { + if (OCTAVE_LIKELY (!ov.is_ref ())) + { + ov.non_const_unary_op (octave_value::unary_op::op_decr); + PUSH_OV (ov); + } + else + { + octave_value &ov_glb = ov.ref_rep ()->ref (); + ov_glb.non_const_unary_op (octave_value::unary_op::op_decr); + PUSH_OV (ov_glb); + } + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + DISPATCH(); +incr_id_postfix_dbl: + { + int slot = arg0; + + octave_value &ov = bsp[slot].ov; + + if (ov.type_id () != m_scalar_typeid) + { + int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back + ip[-2 + wide_opcode_offset] = static_cast (INSTR::INCR_ID_POSTFIX); + goto incr_id_postfix; + } + + octave_scalar &scalar = REP (octave_scalar, ov); + double val = scalar.octave_scalar::double_value (); + + PUSH_OV (std::move (ov)); + ov = octave_value_factory::make (val + 1); + } + DISPATCH(); +incr_id_postfix: + { + int slot = arg0; + + octave_value &ov = bsp[slot].ov; + + if (ov.type_id () == m_scalar_typeid) + { + int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back + ip[-2 + wide_opcode_offset] = static_cast (INSTR::INCR_ID_POSTFIX_DBL); + goto incr_id_postfix_dbl; + } + + try + { + if (OCTAVE_LIKELY (!ov.is_ref ())) + { + octave_value copy = ov; + ov.non_const_unary_op (octave_value::unary_op::op_incr); + PUSH_OV (std::move (copy)); + } + else + { + octave_value &ov_glb = ov.ref_rep ()->ref (); + octave_value copy = ov_glb; + ov_glb.non_const_unary_op (octave_value::unary_op::op_incr); + PUSH_OV (std::move (copy)); + } + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + DISPATCH(); +decr_id_postfix_dbl: + { + int slot = arg0; + + octave_value &ov = bsp[slot].ov; + + if (ov.type_id () != m_scalar_typeid) + { + int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back + ip[-2 + wide_opcode_offset] = static_cast (INSTR::DECR_ID_POSTFIX); + goto decr_id_postfix; + } + + octave_scalar &scalar = REP (octave_scalar, ov); + double val = scalar.octave_scalar::double_value (); + + PUSH_OV (std::move (ov)); + ov = octave_value_factory::make (val - 1); + } + DISPATCH(); +decr_id_postfix: + { + int slot = arg0; + + octave_value &ov = bsp[slot].ov; + + if (ov.type_id () == m_scalar_typeid) + { + int wide_opcode_offset = slot < 256 ? 
0 : -1; // If WIDE is used, we need to look further back
+          ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::DECR_ID_POSTFIX_DBL);
+          goto decr_id_postfix_dbl;
+        }
+
+      try
+        {
+          if (OCTAVE_LIKELY (!ov.is_ref ()))
+            {
+              octave_value copy = ov;
+              ov.non_const_unary_op (octave_value::unary_op::op_decr);
+              PUSH_OV (std::move (copy));
+            }
+          else
+            {
+              octave_value &ov_glb = ov.ref_rep ()->ref ();
+              octave_value copy = ov_glb;
+              ov_glb.non_const_unary_op (octave_value::unary_op::op_decr);
+              PUSH_OV (std::move (copy));
+            }
+        }
+      CATCH_INTERRUPT_EXCEPTION
+      CATCH_INDEX_EXCEPTION
+      CATCH_EXECUTION_EXCEPTION
+      CATCH_BAD_ALLOC
+      CATCH_EXIT_EXCEPTION
+    }
+  DISPATCH ();
+for_setup:
+  {
+    octave_value &ov_range = TOP_OV ();
+
+    octave_idx_type n = ov_range.numel ();
+
+    bool is_range = ov_range.is_range ();
+    //TODO: Kludge galore. Should be refactored into some virtual call.
+    if (is_range &&
+        (
+         ov_range.is_double_type () ||
+         ov_range.is_int64_type () ||
+         ov_range.is_uint64_type () ||
+         ov_range.is_int32_type () ||
+         ov_range.is_uint32_type () ||
+         ov_range.is_int16_type () ||
+         ov_range.is_uint16_type () ||
+         ov_range.is_int8_type () ||
+         ov_range.is_uint8_type () ||
+         ov_range.is_single_type ()))
+      {
+        ov_range = ov_range.maybe_as_trivial_range ();
+      }
+    else if (is_range ||
+             ov_range.is_matrix_type () ||
+             ov_range.iscell () ||
+             ov_range.is_string () ||
+             ov_range.isstruct ())
+      {
+        // The iteration is column wise for these, so change
+        // n to the number of columns rather than elements.
+        dim_vector dv = ov_range.dims ().redim (2);
+        int n_rows = dv (0);
+        if (n_rows)
+          n = dv (1);
+        else
+          n = 0; // E.g. a 0x3 sized Cell gives no iterations, not 3
+      }
+    else if (ov_range.is_scalar_type ())
+      ;
+    else
+      TODO ("Unsupported for rhs type");
+
+    // TODO: Kludgy classes.
+
+    if (!ov_range.is_trivial_range () && is_range)
+      {
+        // TODO: Wasteful copy of range.
+        auto range = ov_range.range_value ();
+        if (math::isinf (range.limit ()) || math::isinf (range.base ()))
+          warning_with_id ("Octave:infinite-loop",
+                           "FOR loop limit is infinite, will stop after %"
+                           OCTAVE_IDX_TYPE_FORMAT " steps", range.numel ());
+      }
+
+
+    // Push n to the stack
+    (*sp++).i = n;
+    // Push a counter to the stack, initialized so that it will
+    // increment to 0.
+    (*sp++).i = -1;
+
+    // For empty rhs just assign it to lhs
+    if (! n)
+      {
+        // Slot from the for_cond that always follows a for_setup
+        int slot;
+        if (arg0 == static_cast<int> (INSTR::WIDE))
+          slot = ip[1];
+        else
+          slot = ip[0];
+        try
+          {
+            octave_value &lhs_ov = bsp[slot].ov;
+            if (!lhs_ov.is_ref ())
+              lhs_ov = ov_range.storable_value ();
+            else
+              lhs_ov.ref_rep ()->set_value (ov_range.storable_value ());
+          }
+        CATCH_EXECUTION_EXCEPTION
+      }
+  }
+DISPATCH_1BYTEOP ();
+
+for_cond:
+  {
+    // Check if we should exit the loop due to e.g. ctrl-c, or handle
+    // any other signals.
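+    // octave_quit () throws if an interrupt is pending; the CATCH_*
+    // macros below turn any such exception into a VM unwind.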
+ try + { + octave_quit (); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + // Increase counter + TOP ().i++; // Wraps to zero first iteration + + // Check if we done all iterations + // n is second element on the stack + if (TOP ().i == SEC ().i) + { + // The after address + unsigned char b0 = *ip++; + unsigned char b1 = *ip++; + + int after = b0 | (b1 << 8); + + // goto after block + ip = code + after; + } + else + { + // Write the iteration's value to the for loop variable + int slot = arg0; + ip +=2; // Skip the after address + + octave_idx_type counter = TOP ().i; + + octave_value &ov_range = THIRD_OV (); + octave_value &ov_it = bsp[slot].ov; + + if (ov_range.is_trivial_range ()) + { + double val = REP (octave_trivial_range, ov_range).octave_trivial_range::vm_extract_forloop_double (counter); + if (!ov_it.maybe_update_double (val)) + { + if (OCTAVE_LIKELY (!ov_it.is_ref ())) + ov_it = octave_value_factory::make (val); + else + ov_it.ref_rep ()->set_value (val); + } + } + else if (OCTAVE_LIKELY (!ov_it.is_ref ())) + ov_it = ov_range.vm_extract_forloop_value (counter); + else + ov_it.ref_rep ()->set_value (ov_range.vm_extract_forloop_value (counter)); + + // The next opcode is the start of the body + } + } + DISPATCH (); +pop_n_ints: + { + sp -= arg0; + DISPATCH(); + } +push_fcn_handle: + { + int slot = arg0; + + //octave_value &fcn_cache = bsp[slot].ov; + + std::string handle_name = name_data[slot]; + + if (!handle_name.size () || handle_name[0] != '@') + TODO ("Strange handle name"); + + handle_name = handle_name.substr(1); + + octave_value fcn_handle = m_tw->make_fcn_handle (handle_name); + + PUSH_OV (std::move (fcn_handle)); + } + DISPATCH (); +colon: + { + bool is_for_cmd; + + // Yes, we are doing this + if (0) + { +colon_cmd: + is_for_cmd = true; + } + else + { + is_for_cmd = false; + } + + bool has_incr = false; + if (ip[-2] == static_cast (INSTR::COLON3) || + ip[-2] == static_cast (INSTR::COLON3_CMD)) + has_incr = true; + + octave_value ret; + + if (has_incr) + { + octave_value &base = THIRD_OV (); + octave_value &incr = SEC_OV (); + octave_value &limit = TOP_OV (); + + try + { + ret = colon_op(base, incr, limit, is_for_cmd); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + + STACK_DESTROY (3); + } + else + { + octave_value &base = SEC_OV (); + octave_value &limit = TOP_OV (); + + try + { + ret = colon_op(base, limit, is_for_cmd); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + + STACK_DESTROY (2); + } + + PUSH_OV (std::move (ret)); + } + DISPATCH_1BYTEOP (); + +push_true: + { + PUSH_OV(ov_true); + } + DISPATCH_1BYTEOP (); +push_false: + { + PUSH_OV(ov_false); + } + DISPATCH_1BYTEOP (); +unary_true: + { + octave_value &op1 = TOP_OV (); + + bool is_true; + + try + { + is_true = op1.is_true (); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + STACK_DESTROY (1); + + if (is_true) + PUSH_OV (ov_true); + else + PUSH_OV (ov_false); + + } + DISPATCH_1BYTEOP (); +assign_n: + { + int n_slots = arg0; + + int n_actual = 0; + do + { + // Move operand to the local at slot in relation to base stack pointer + + octave_value &arg = (*--sp).ov; + int slot = POP_CODE_USHORT (); + octave_value &lhs_ov = bsp[slot].ov; + + + /* Expand cs_lists */ + if (arg.is_cs_list ()) + { + octave_value_list args = arg.list_value (); + for (int i = 0; i < args.length (); i++) + { + 
octave_value &ov_1 = args (i); + + lhs_ov.maybe_call_dtor (); + + if (ov_1.vm_need_storable_call ()) + ov_1.make_storable_value (); // Some types have lazy copy + + if (ov_1.is_undefined ()) + { + std::string &name = name_data[slot]; + + // If the return value is ignored, undefined is OK + bool is_ignored = false; + if (name.size () >= 2 && name[0] == '%' && name[1] == '~') + is_ignored = true; + + Matrix ignored; + octave_value tmp = m_tw->get_auto_fcn_var (stack_frame::auto_var_type::IGNORED); + if (tmp.is_defined ()) + { + ignored = tmp.matrix_value (); + + if (slot < N_RETURNS ()) + { + int outputnum = N_RETURNS () - 1 - slot; + + octave_idx_type idx = ignored.lookup (outputnum); + is_ignored = idx > 0 && ignored (idx - 1) == outputnum; + } + } + + if (!is_ignored) + { + (*sp++).pee = new execution_exception {"error", "", "element number " + std::to_string (n_actual + 1) + " undefined in return list"}; + (*sp++).i = static_cast (error_type::EXECUTION_EXC); + goto unwind; + } + } + + if (OCTAVE_LIKELY (!lhs_ov.is_ref ())) + lhs_ov = std::move (ov_1); // Note move + else + lhs_ov.ref_rep ()->set_value (ov_1); + n_actual++; + } + } + else + { + lhs_ov.maybe_call_dtor (); + + if (arg.vm_need_storable_call ()) + arg.make_storable_value (); // Some types have lazy copy + + if (arg.is_undefined ()) + { + std::string &name = name_data[slot]; + + // If the return value is ignored, undefined is OK + bool is_ignored = false; + if (name.size () >= 2 && name[0] == '%' && name[1] == '~') + is_ignored = true; + + Matrix ignored; + octave_value tmp = m_tw->get_auto_fcn_var (stack_frame::auto_var_type::IGNORED); + if (tmp.is_defined ()) + { + ignored = tmp.matrix_value (); + + if (slot < N_RETURNS ()) + { + int outputnum = N_RETURNS () - 1 - slot; + + octave_idx_type idx = ignored.lookup (outputnum); + is_ignored = idx > 0 && ignored (idx - 1) == outputnum; + } + } + if (!is_ignored) + { + (*sp++).pee = new execution_exception {"error", "", "element number " + std::to_string (n_actual + 1) + " undefined in return list"}; + (*sp++).i = static_cast (error_type::EXECUTION_EXC); + goto unwind; + } + } + + if (OCTAVE_LIKELY (!lhs_ov.is_ref ())) + lhs_ov = std::move (arg); // Note move + else + lhs_ov.ref_rep ()->set_value (arg); + + n_actual++; + } + + arg.~octave_value (); // Destroy the operand + } + while (n_actual < n_slots); + } + DISPATCH (); + +subassign_id_mat_1d: +{ + int slot = arg0; + ip++; // nargs always one + + // The top of the stack is the rhs value + octave_value &rhs = TOP_OV (); + octave_value &arg = SEC_OV (); + // The ov to subassign to + octave_value &mat_ov = bsp[slot].ov; + + int rhs_type_id = rhs.type_id (); + int arg_type_id = arg.type_id (); + int mat_type_id = mat_ov.type_id (); + + if (rhs_type_id != m_scalar_typeid || mat_type_id != m_matrix_typeid || + arg_type_id != m_scalar_typeid) + { + // Rewind ip to the 2nd byte of the opcode + ip -= 1; + int wide_opcode_offset = slot < 256 ? 
0 : -1; // If WIDE is used, we need to look further back + // Change the specialized opcode to the general one + ip[-2 + wide_opcode_offset] = static_cast (INSTR::SUBASSIGN_ID); + goto subassign_id; + } + + try + { + mat_ov.make_unique (); + + octave_scalar &rhs_scalar = REP (octave_scalar, rhs); + octave_scalar &arg_scalar = REP (octave_scalar, arg); + + double idx_dbl = arg_scalar.octave_scalar::double_value (); + octave_idx_type idx = idx_dbl - 1; + double val = rhs_scalar.octave_scalar::double_value (); + + octave_matrix &mat_ovb = REP (octave_matrix, mat_ov); + NDArray &arr = mat_ovb.matrix_ref (); + // Handle out-of-bound or non-integer index in the generic opcode + if (idx >= arr.numel () || idx < 0 || + idx != idx_dbl - 1) + { + // Rewind ip to the 2nd byte of the opcode + ip -= 1; + goto subassign_id; + } + + // The NDArray got its own m_rep that might be shared + arr.make_unique (); + + arr.xelem (idx) = val; + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION_WITH_NAME + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + STACK_DESTROY (2); +} +DISPATCH (); + +subassign_id: + { + // The args to the subassign are on the operand stack + int slot = arg0; + int nargs = *ip++; + + // The top of the stack is the rhs value + octave_value &rhs = TOP_OV (); + // First argument + stack_element *parg = sp - 1 - nargs; + + // Move the args to an ovl + // TODO: Should actually be a move + bool all_args_are_scalar = true; + octave_value_list args; + for (int i = 0; i < nargs; i++) + { + octave_value &arg = parg[i].ov; + // We need to expand cs-lists + if (arg.type_id () != m_scalar_typeid) + all_args_are_scalar = false; + if (arg.is_cs_list ()) + args.append (arg.list_value ()); + else + args.append (arg); + } + + // The ov to subassign to + octave_value &ov = bsp[slot].ov; + + if (nargs == 1 && all_args_are_scalar && ov.type_id () == m_matrix_typeid && + rhs.type_id () == m_scalar_typeid) + { + int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back + + // If the opcode allready is SUBASSIGN_ID_MAT_1D we were sent back to + // SUBASSIGN_ID to handle some error or edgecase, so don't go back. + if ( ip[-3 + wide_opcode_offset] != static_cast (INSTR::SUBASSIGN_ID_MAT_1D)) + { + // Rewind ip to the 2nd byte of the opcode + ip -= 1; + // Change the general opcode to the specialized one + ip[-2 + wide_opcode_offset] = static_cast (INSTR::SUBASSIGN_ID_MAT_1D); + goto subassign_id_mat_1d; + } + } + + // TODO: Room for performance improvement here maybe + if (OCTAVE_LIKELY (!ov.is_ref ())) + ov.make_unique (); + else + ov.ref_rep ()->ref ().make_unique (); + + if (rhs.is_cs_list ()) + { + const octave_value_list lst = rhs.list_value (); + + if (lst.empty ()) + { + // TODO: Need id, name + // TODO: Make execution_exception like the others instead of its own error_type + (*sp++).i = static_cast(error_type::INVALID_N_EL_RHS_IN_ASSIGNMENT); + goto unwind; + } + + rhs = lst(0); + } + + // E.g. scalars do not update them self inplace + // but create a new octave_value, so we need to + // copy the return value to the slot. + + try + { + ov = ov.simple_subsasgn('(', args, rhs); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION_WITH_NAME + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + // Destroy the args on the operand stack aswell as rhs + STACK_DESTROY (nargs + 1); + } + DISPATCH (); + +end_id: + { + // Indexed variable + int slot = arg0; + // Amount of args to the index, i.e. 
amount of dimensions + // being indexed. + // E.g. foo (1,2,3) => 3 + int nargs = *ip++; + // Index of the end, in the index, counting from 0. + // E.g. foo (1, end, 3) => 1 + int idx = *ip++; + + octave_value ov = bsp[slot].ov; + + if (ov.is_ref ()) + ov = ov.ref_rep ()->deref (); + + if (ov.is_undefined ()) + { + (*sp++).pee = new execution_exception {"error","","invalid use of 'end': may only be used to index existing value"}; + (*sp++).i = static_cast(error_type::EXECUTION_EXC); + goto unwind; + } + + octave_value end_idx; + if (ov.isobject ()) + { + try + { + end_idx = handle_object_end (ov, idx, nargs); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + else + end_idx = end_value (ov, idx, nargs); + + PUSH_OV (std::move (end_idx)); + } + DISPATCH (); +end_obj: + { + // Slot that stores the stack depth of the indexed object + int slot = arg0; + // Amount of args to the index, i.e. amount of dimensions + // being indexed. + // E.g. foo (1,2,3) => 3 + int nargs = *ip++; + // Index of the end, in the index, counting from 0. + // E.g. foo (1, end, 3) => 1 + int idx = *ip++; + + octave_value &stack_depth = bsp[slot].ov; + // Indexed object + octave_value &ov = bsp[stack_depth.int_value () - 1].ov; + + if (ov.is_undefined ()) + { + (*sp++).pee = new execution_exception {"error","","invalid use of 'end': may only be used to index existing value"}; + (*sp++).i = static_cast(error_type::EXECUTION_EXC); + goto unwind; + } + + octave_value end_idx; + if (ov.isobject ()) + { + try + { + end_idx = handle_object_end (ov, idx, nargs); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + else + end_idx = end_value (ov, idx, nargs); + + PUSH_OV (std::move (end_idx)); + } + DISPATCH (); + +end_x_n: + { + // Since 'end' in "foo (bar (1, end))" can refer + // to the end of 'foo' if 'bar' is a function we + // need to scan inner to outer after a defined + // object to find the end of. + + int n_ids = arg0; + int i; + + for (i = 0; i < n_ids;) + { + // Amount of args to the index, i.e. amount of dimensions + // being indexed. + // E.g. foo (1,2,3) => 3 + int nargs = *ip++; + // Index of the end, in the index, counting from 0. + // E.g. foo (1, end, 3) => 1 + int idx = *ip++; + // type 0: Like 'end_id:' + // type 1: Like 'end_obj:' + int type = *ip++; + // Slot that stores: + // the object that is being indexed for type 0 + // the stack depth of the indexed object for type 1 + int slot = POP_CODE_USHORT (); + + octave_value ov = bsp[slot].ov; + + if (ov.is_ref ()) + ov = ov.ref_rep ()->deref (); + + // If the type is 1, the ov in the slot is the stack depth + // of the object being indexed. + if (type == 1) + ov = bsp[ov.int_value () - 1].ov; + + bool is_undef = ov.is_undefined (); + + // If the outer most indexed object is not defined + // it is an error. + if (is_undef && i + 1 == n_ids) + { + (*sp++).pee = new execution_exception {"error","","invalid use of 'end': may only be used to index existing value"}; + (*sp++).i = static_cast(error_type::EXECUTION_EXC); + goto unwind; + } + else if (is_undef) + { + i++; + continue; // Look if the next outer object is defined. 
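+            // An undefined inner object just means the 'end' belongs to
+            // something further out.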
+ } + + octave_value end_idx; + if (ov.isobject ()) + { + try + { + end_idx = handle_object_end (ov, idx, nargs); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + } + else + end_idx = end_value (ov, idx, nargs); + + PUSH_OV (std::move (end_idx)); + i++; + break; + } + + // Skip any unread objects to index + for (; i < n_ids; i++) + ip += 5; + } + DISPATCH (); + +eval: + { + int nargout = arg0; + int tree_idx = POP_CODE_INT (); + CHECK (tree_idx < 0); // Should always be negative to mark for eval. Otherwise it is debug data + + auto it = unwind_data->m_ip_to_tree.find (tree_idx); + CHECK (it != unwind_data->m_ip_to_tree.end ()); + + tree_expression *te = static_cast (it->second); + + octave_value_list retval; + try + { + retval = te->evaluate_n (*m_tw, nargout); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (retval, nargout); + } + DISPATCH (); +bind_ans: + { + int slot = arg0; + octave_value &ans_on_stack = TOP_OV (); + octave_value &ans_in_slot = bsp [slot].ov; + + if (ans_on_stack.is_defined ()) + { + if (!ans_on_stack.is_cs_list ()) + { + ans_in_slot.maybe_call_dtor (); + if (ans_on_stack.vm_need_storable_call ()) + ans_on_stack.make_storable_value (); + + if (OCTAVE_LIKELY (!ans_in_slot.is_ref ())) + ans_in_slot = std::move (ans_on_stack); // Note move + else + ans_in_slot.ref_rep ()->set_value (ans_on_stack); + } + else + { + // We need to recursivly expand any cs-list and assign + // the elements one by one to ans. + std::vector v_el; + + std::vector v_ovl_stack; // "recursive" stacks + std::vector v_ovl_idx_stack; + + v_ovl_stack.push_back (ans_on_stack.list_value ()); + v_ovl_idx_stack.push_back (0); + + while (true) + { + redo: + octave_value_list &lst = v_ovl_stack.back (); + int &idx = v_ovl_idx_stack.back (); // Note: reference changes in loop + + for (; idx < lst.length (); idx++) + { + octave_value ov = lst (idx); + if (ov.is_cs_list ()) + { + idx++; + v_ovl_stack.push_back (ov.list_value ()); + v_ovl_idx_stack.push_back (0); + goto redo; + } + else if (ov.is_defined ()) + v_el.push_back (ov); + } + + v_ovl_stack.pop_back (); + v_ovl_idx_stack.pop_back (); + + if (v_ovl_stack.size () == 0) + break; + } + + // Assign all elements to ans one by one + for (auto &ov_rhs : v_el) + { + ans_in_slot.maybe_call_dtor (); + if (ov_rhs.vm_need_storable_call ()) + ov_rhs.make_storable_value (); + + if (OCTAVE_LIKELY (!ans_in_slot.is_ref ())) + ans_in_slot = std::move (ov_rhs); // Note move + else + ans_in_slot.ref_rep ()->set_value (ov_rhs); + } + } + } + + STACK_DESTROY (1); + } +DISPATCH (); + +push_anon_fcn_handle: +{ + ip--; // Rewind ip for int macro underneath + int tree_idx = POP_CODE_INT (); + + auto it = unwind_data->m_ip_to_tree.find (tree_idx); + CHECK (it != unwind_data->m_ip_to_tree.end ()); + + tree_anon_fcn_handle *h = reinterpret_cast (it->second); + + octave_value ret = m_tw->evaluate_anon_fcn_handle (*h); + + PUSH_OV (ret); +} +DISPATCH (); + +for_complex_setup: +{ + octave_value &ov_rhs = TOP_OV (); + ov_rhs.make_unique (); // TODO: Dunno if needed + unsigned char b0 = arg0; + unsigned char b1 = *ip++; + + int target = b0 | (b1 << 8); + + if (ov_rhs.is_undefined ()) + { + ip = code + target; + DISPATCH (); + } + + if (!ov_rhs.isstruct ()) + { + (*sp++).i = 1; // Need two native ints on the stack so they can be poped by the unwind. 
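+      // (The 1 and 2 are dummies standing in for the loop's n/counter
+      // pair that the unwind code expects to pop.)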
+      (*sp++).i = 2;
+      (*sp++).pee = new execution_exception {"error", "", "in statement 'for [X, Y] = VAL', VAL must be a structure"};
+      (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+      goto unwind;
+    }
+
+  octave_map map = ov_rhs.map_value ();
+  string_vector keys = map.keys ();
+  octave_idx_type n = keys.numel ();
+
+  // Push n to the stack
+  (*sp++).i = n;
+  // Push a counter to the stack, initialized so that it will
+  // increment to 0.
+  (*sp++).i = -1;
+}
+DISPATCH ();
+
+for_complex_cond:
+{
+  // Increase counter
+  TOP ().i++; // Wraps to zero first iteration
+
+  // Check if we have done all iterations
+  // n is the second element on the stack
+  if (TOP ().i == SEC ().i)
+    {
+      // The after address
+      unsigned char b0 = arg0;
+      unsigned char b1 = *ip++;
+
+      int after = b0 | (b1 << 8);
+
+      // goto after block
+      ip = code + after;
+    }
+  else
+    {
+      ip++; // Skip the 2nd byte of the after address
+      int slot_key = POP_CODE_USHORT ();
+      int slot_value = POP_CODE_USHORT ();
+      octave_idx_type counter = TOP ().i;
+
+      octave_value &ov_rhs = THIRD_OV (); // This is always a struct
+      octave_value &ov_key = bsp[slot_key].ov;
+      octave_value &ov_val = bsp[slot_value].ov;
+
+      // TODO: A bit wasteful to copy map_value () each time, but complex
+      // for loops are rare anyway.
+      std::string key = ov_rhs.map_value ().keys () [counter];
+      const Cell val_lst = ov_rhs.map_value ().contents (key);
+
+      octave_idx_type n = val_lst.numel ();
+      octave_value val = (n == 1) ? val_lst(0) : octave_value (val_lst);
+
+      if (counter == 0)
+        {
+          ov_val.maybe_call_dtor (); // The first iteration these could be class objects ...
+          ov_key.maybe_call_dtor ();
+        }
+
+      val.make_unique (); // TODO: Dunno if needed
+
+      if (ov_val.is_ref ())
+        ov_val.ref_rep ()->set_value (val);
+      else
+        ov_val = val;
+
+      if (ov_key.is_ref ())
+        ov_key.ref_rep ()->set_value (key);
+      else
+        ov_key = key;
+    }
+}
+DISPATCH ();
+
+/* For dynamic m*n matrix where m and n < 256 */
+matrix:
+  {
+    int nrows = arg0;
+    int ncols = *ip++;
+    int n_el = nrows * ncols;
+
+    // The first element is down the stack
+    // and the last element is at the top.
+    octave_value *first_arg = &(*sp).ov - n_el;
+
+    // The stack pointer is pointing to the first unused
+    // stack position, so it is the end pointer.
+    octave_value *end_arg = &(*sp).ov;
+
+    try
+      {
+        tm_const tmp (first_arg, end_arg, ncols, *m_tw);
+
+        octave_value &&ov = tmp.concat (' ');
+
+        STACK_DESTROY (n_el);
+
+        PUSH_OV (ov);
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+  }
+  DISPATCH ();
+matrix_big:
+  {
+    int type = arg0;
+
+    /* type 0 indicates a matrix whose rows have unequal lengths.
+     *
+     * Any other value than zero indicates a big "rectangle" matrix
+     * with more than 255 elements in a row or column. */
+    if (type == 0)
+      {
+        int nrows = POP_CODE_INT ();
+
+        std::vector<int> length_rows;
+
+        int n_el = 0;
+        for (int i = 0; i < nrows; i++)
+          {
+            int row_length = POP_CODE_INT ();
+            length_rows.push_back (row_length);
+            n_el += row_length;
+          }
+
+        // The first element is down the stack
+        // and the last element is at the top.
+        octave_value *first_arg = &(*sp).ov - n_el;
+
+        // The stack pointer is pointing to the first unused
+        // stack position, so it is the end pointer.
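+        // tm_const performs the row-wise '[ ]' concatenation, reading
+        // the elements directly from the VM stack.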
+ octave_value *end_arg = &(*sp).ov; + + try + { + tm_const tmp (first_arg, end_arg, length_rows, *m_tw); + + octave_value &&ov = tmp.concat (' '); + + STACK_DESTROY (n_el); + + PUSH_OV (ov); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + else + { + int nrows = POP_CODE_INT (); + int ncols = POP_CODE_INT (); + int n_el = nrows * ncols; + + // The first element is down the stack + // and the last element is at the top. + octave_value *first_arg = &(*sp).ov - n_el; + + // The stack pointer is pointing to the first unused + // stack position, so it is the end pointer. + octave_value *end_arg = &(*sp).ov; + + try + { + tm_const tmp (first_arg, end_arg, ncols, *m_tw); + + octave_value &&ov = tmp.concat (' '); + + STACK_DESTROY (n_el); + + PUSH_OV (ov); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + } + DISPATCH (); +trans_mul: + MAKE_BINOP(compound_binary_op::op_trans_mul) + DISPATCH_1BYTEOP(); +mul_trans: + MAKE_BINOP(compound_binary_op::op_mul_trans) + DISPATCH_1BYTEOP(); +herm_mul: + MAKE_BINOP(compound_binary_op::op_herm_mul) + DISPATCH_1BYTEOP(); +mul_herm: + MAKE_BINOP(compound_binary_op::op_mul_herm) + DISPATCH_1BYTEOP(); +trans_ldiv: + MAKE_BINOP(compound_binary_op::op_trans_ldiv) + DISPATCH_1BYTEOP(); +herm_ldiv: + MAKE_BINOP(compound_binary_op::op_herm_ldiv) + DISPATCH_1BYTEOP(); +wordcmd: + { + int slot = arg0; // Needed if we need a function lookup + int nargout = *ip++; + int n_args_on_stack = *ip++; + + // The object to index is before the args on the stack + octave_value &ov = (sp[-1 - n_args_on_stack]).ov; + + octave_value_list ovl; + // The operands are on the top of the stack + for (int i = n_args_on_stack - 1; i >= 0; i--) + ovl.append (sp[-1 - i].ov); // TODO: copied, not moved + // TODO: Expand cs_list? Probably not since strings? + + // octave_fcn_cache and some octave_fcn_handle have caches + bool has_function_cache = ov.has_function_cache (); + + if (! has_function_cache && ov.is_defined ()) + TODO ("Error: word list command defined"); + + else if (has_function_cache) + { +// The else clause bellow jumps to here +// TODO: Should be a shared thing? +// Add a "register" for octave_function *fcn? +querry_fcn_cache2: + + octave_function *fcn; + try + { + fcn = ov.get_cached_fcn (ovl); + } + CATCH_EXECUTION_EXCEPTION + + if (! fcn) + { + (*sp++).ps = new std::string {name_data[slot]}; + (*sp++).i = static_cast(error_type::ID_UNDEFINED); + goto unwind; + } + + if (fcn->is_compiled ()) + { + octave_user_function *usr_fcn = static_cast (fcn); + // Alot of code in this define + bool has_cs_list_arg = false; + MAKE_BYTECODE_CALL + + // Now dispatch to first instruction in the + // called function + } + else + { + try + { + m_tw->set_active_bytecode_ip (ip - code); + octave_value_list ret = fcn->call (*m_tw, nargout, ovl); + + STACK_DESTROY (n_args_on_stack + 1); + EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (ret, nargout); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION_WITH_NAME + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + } + } + else + { + // It is probably a function call + if (! 
ov.is_nil ()) + { + PRINT_VM_STATE("err %s" COMMA name_data[slot].c_str ()); + TODO ("Not nil object for fcn cache replacement"); + } + + // Put a function cache object in the slot and in the local ov + ov = bsp[slot].ov = + octave_value (new octave_fcn_cache (name_data[slot])); + goto querry_fcn_cache2; // Jump into the if clause above + } + } + DISPATCH (); +handle_signals: + { + // Check if there is any signal to handle + try + { + octave_quit (); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + DISPATCH_1BYTEOP (); +push_cst_dbl_0: +{ + PUSH_OV (ov_dbl_0); +} +DISPATCH_1BYTEOP (); +push_cst_dbl_1: +{ + PUSH_OV (ov_dbl_1); +} +DISPATCH_1BYTEOP (); +push_cst_dbl_2: +{ + PUSH_OV (ov_dbl_2); +} +DISPATCH_1BYTEOP (); + +push_cell: + { + // The amount of columns is at the top of the stack as + // u64 ov. + octave_value &rows_ov = TOP_OV (); + octave_value &cols_ov = SEC_OV (); + octave_idx_type n_cols = cols_ov.uint64_value (); + octave_idx_type n_rows = rows_ov.uint64_value (); + STACK_DESTROY (2); // Destroy the cols and rows ov + + // We need to first figure out the size of the Cell + // we are creating. + // E.g. + // z = cell (1,2,3,0,5); + // {1, z{:}, 2} + // Is a 2x1 Cell. + // So we can't know the Cell size in advance of the + // args are evaluated. + + octave_idx_type n_actual_rows = 0; + if (n_cols != 0) + n_actual_rows++; + + stack_element *tmp_sp = sp; + octave_idx_type n_cols_i = n_cols; + for (octave_idx_type i = 0; i < n_rows; i++) + { + tmp_sp -= n_cols_i; + + if (i != n_rows - 1) // Not last iteration + { + // We now have a ov with the number of cols on the stack + octave_value &cols_i_ov = tmp_sp[-1].ov; tmp_sp--; + n_cols_i = cols_i_ov.uint64_value (); + if (n_cols_i != 0) + n_actual_rows++; + } + } + + Cell cell(n_actual_rows, n_cols); + int n_cols_orig = n_cols; + + int row_idx = 0; + n_cols_i = n_cols; + for (octave_idx_type i = 0; i < n_rows; i++) + { + if (n_cols_i != 0) // Only add row to cell if the row "exists", i.e. is not e.g. 0x3 sized + { + for (octave_idx_type j = 0; j < n_cols_i; j++) + { + cell (n_actual_rows - 1 - row_idx, n_cols_i - 1 - j) = TOP_OV(); + STACK_DESTROY (1); + } + row_idx++; + } + + if (i != n_rows - 1) // Not last iteration + { + // We now have a ov with the number of cols on the stack + octave_value &cols_i_ov = TOP_OV (); + n_cols_i = cols_i_ov.uint64_value (); + if (n_cols_i && n_cols_orig != n_cols_i) + TODO ("Wrong number of cols in other row"); + + n_cols = n_cols_i; + STACK_DESTROY (1); + } + } + + PUSH_OV (cell); + } + DISPATCH_1BYTEOP (); +push_ov_u64: + { + PUSH_OV (octave_int{}); + } + DISPATCH_1BYTEOP (); +expand_cs_list: + { + octave_value cs = TOP_OV (); + octave_value cols_ov = SEC_OV (); // n columns + octave_value rows_ov = THIRD_OV (); // n rows + + if (cs.is_cs_list ()) + { + STACK_DESTROY (3); + + octave_value_list tmp_ovl = cs.list_value (); + + for (octave_idx_type i = 0; i < tmp_ovl.length (); i++) + { + PUSH_OV (tmp_ovl (i)); + cols_ov.non_const_unary_op (octave_value::unary_op::op_incr); + } + } + else + { + STACK_DESTROY (3); + + cols_ov.non_const_unary_op (octave_value::unary_op::op_incr); + + PUSH_OV (cs); + } + + PUSH_OV (rows_ov); + PUSH_OV (cols_ov); + } + DISPATCH_1BYTEOP (); + + { + // TODO: Too much code. Should be broken out? + // Something made sp not be in r15. 
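+    // '{}' cell indexing: the same label-merging trick as in the '()'
+    // indexing block above, with entries for nargout N, 1 and 0.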
+ + int nargout, slot; + if (0) + { +index_cell_idn: + slot = arg0; // Needed if we need a function lookup + nargout = *ip++; + } + else if (0) +index_cell_id1: + { + slot = arg0; + nargout = 1; + } + else if (0) +index_cell_id0: + { + slot = arg0; + nargout = 0; + } + + int n_args_on_stack = *ip++; + + // The object to index is before the args on the stack + octave_value &ov = (sp[-1 - n_args_on_stack]).ov; + + // TODO: The ovl should not be needed + // Make an ovl with the args + // TODO: Should be inplace moves + // TODO: cl_lists should expanded inplace on the stack instead. + // TODO: Cache lookup should just take pointers to the stack. + octave_value_list ovl; + bool has_cs_list_arg = false; + // The operands are on the top of the stack + for (int i = n_args_on_stack - 1; i >= 0; i--) + { + octave_value &arg = sp[-1 - i].ov; + // If the operand arg is a cs list we need to expand it + if (arg.is_cs_list ()) + { + has_cs_list_arg = true; + ovl.append (arg.list_value ()); + } + else + ovl.append (sp[-1 - i].ov); // TODO: copied, not moved + } + + // octave_fcn_cache and some octave_fcn_handle have caches + bool has_function_cache = ov.has_function_cache (); + + if (! has_function_cache && ov.is_defined ()) + { + // It is probably a variable + + // TODO: subsref should take ovl instead and be chained, + // or something smarter + std::list idx; // TODO: mallocs! + + idx.push_back(ovl); + + + // TODO: subsref might throw index error + octave_value_list retval; + + if (OCTAVE_LIKELY (! ov.is_function () + || ov.is_classdef_meta ())) + { + try + { + m_tw->set_active_bytecode_ip (ip - code); + retval = ov.subsref("{", idx, nargout); + idx.clear (); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION_WITH_NAME + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + } + else + { + TODO ("Classes not implemented for cell index yet"); + #if 0 + retval = handle_superclass (*m_tw, + ov, + idx, + nargout); + #endif + } + + bool is_fcn = (retval.length () ? + retval(0).is_function() : false); + + // "FIXME: when can the following happen? In what case does indexing + // result in a value that is a function? Classdef method calls? + // Something else?" + + if (OCTAVE_LIKELY (!is_fcn)) + { + idx.clear (); + // TODO: Necessary? I guess it might trigger dtors + // or something? + ov = octave_value (); + } + else + { + octave_value val = retval(0); + octave_function *fcn = val.function_value (true); + + if (fcn) + { + octave_value_list final_args; + + if (! idx.empty ()) + final_args = idx.front (); + + try + { + m_tw->set_active_bytecode_ip (ip - code); + retval = fcn->call (*m_tw, nargout, final_args); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + + idx.clear (); + ov = octave_value (); + val = octave_value (); + } + + STACK_DESTROY (n_args_on_stack + 1); + EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (retval, nargout); + } + else if (has_function_cache) + { +// The else clause bellow jumps to here +querry_fcn_cache3: + + octave_function *fcn; + try + { + fcn = ov.get_cached_fcn (ovl); + } + CATCH_EXECUTION_EXCEPTION + + if (! 
fcn) + { + (*sp++).ps = new std::string {name_data[slot]}; + (*sp++).i = static_cast(error_type::ID_UNDEFINED); + goto unwind; + } + + if (fcn->is_compiled ()) + { + octave_user_function *usr_fcn = static_cast (fcn); + // Alot of code in this define + MAKE_BYTECODE_CALL + + // Now dispatch to first instruction in the + // called function + } + else + { + try + { + m_tw->set_active_bytecode_ip (ip - code); + octave_value_list ret = fcn->call (*m_tw, nargout, ovl); + + STACK_DESTROY (n_args_on_stack + 1); + EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (ret, nargout); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION_WITH_NAME + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + } + } + else + { + // It is probably a function call + if (! ov.is_nil ()) + { + PRINT_VM_STATE("err %s" COMMA name_data[slot].c_str ()); + TODO ("Not nil object for fcn cache replacement"); + } + + // Put a function cache object in the slot and in the local ov + ov = bsp[slot].ov = + octave_value (new octave_fcn_cache (name_data[slot])); + goto querry_fcn_cache3; // Jump into the if clause above + } + } + DISPATCH (); + +incr_prefix: + { + octave_value &ov = TOP_OV (); + // Inplace + ov.non_const_unary_op (octave_value::unary_op::op_incr); + } + DISPATCH_1BYTEOP (); + +rot: + { + octave_value top_ov = TOP_OV (); + octave_value sec_ov = SEC_OV (); + STACK_DESTROY (2); + PUSH_OV (top_ov); + PUSH_OV (sec_ov); + } + DISPATCH_1BYTEOP (); + +varargin_call: + { + // We jump to here when a bytecode call notices it is + // calling a function with varargin. + // + // Continue where we left off. Restore temp variables from the stack. + + octave_user_function *usr_fcn = static_cast (sp[0].pv); + + int n_returns_callee = static_cast (ip[-4]); + if (n_returns_callee < 0) + n_returns_callee = -n_returns_callee; + int n_args_callee = -static_cast (ip[-3]); // Note: Minus + int n_locals_callee = ip[-2] | (ip[-1] << 8); + + int nargout = sp[-1].i; + + // Recreate first arg and n_args_on_stack + // from the stack + stack_element *first_arg = sp[-8].pse; + int n_args_on_stack = (sp - 8) - first_arg; + + // Construct return values - note nargout + // is allready pushed as a uint64 + for (int i = 1; i < n_returns_callee; i++) + PUSH_OV (); + + int n_args_before_varargin = + std::min (n_args_callee - 1, + n_args_on_stack); + // Move the args to the new stack, except varargin + // + // We need to expand any cs-list, but only until the next + // argument would be in varargin. Those need to end up + // in the varargin cell array. + int ii; + int n_args_on_callee_stack = 0; + octave_value_list cs_args; + int cs_args_idx = 0; + for (ii = 0; ii < n_args_before_varargin; ii++) + { + octave_value &arg = first_arg[ii].ov; + if (arg.is_cs_list ()) + { + cs_args = arg.list_value (); + cs_args_idx = 0; + for (int j = 0; j < cs_args.length () && n_args_on_callee_stack < n_args_before_varargin; j++) + { + PUSH_OV (cs_args (j)); + n_args_on_callee_stack++; + cs_args_idx++; + } + } + else + { + PUSH_OV (std::move (arg)); + n_args_on_callee_stack++; + } + + // Destroy the args + first_arg[ii].ov.~octave_value (); + } + // TODO: Expand cl_list? Smarter way? Do it in beginning ... 
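+ // Editor's note (illustrative example, not part of the original patch):
+ // the steps below implement ordinary varargin packing. E.g. for
+ // 'function f (a, varargin)', the call 'f (1, 2, 3)' leaves 1 in the slot
+ // for 'a' and builds the 1x2 cell {2, 3} for varargin, while 'f (1)'
+ // stores an empty 0x0 cell in varargin's slot.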
+ + // Construct missing args, if any + for (int i = n_args_on_callee_stack; i < n_args_callee - 1; i++) + PUSH_OV (); + + int n_args_in_varargin = n_args_on_stack - n_args_callee + 1; // "Surplus" args + int n_cells_left = cs_args.length () - cs_args_idx; // Number of leftover cell elements that need to go into varargin + + int idx_cell = 0; + if (n_args_in_varargin > 0 || n_cells_left) // Anything to put in the varargin cell? + { + // TODO: Preallocate whole cell + Cell cell(n_cells_left ? 1 : 0, n_cells_left); + + // Put the leftover objects from the cs-list expansion + // in the varargin cell, if any + for (int i = 0; i < n_cells_left; i++) + cell (0, idx_cell++) = cs_args (cs_args_idx + i); + + // We need to expand cs-lists here too ... + for (int i = 0; i < n_args_in_varargin; i++) + { + // int col = n_args_in_varargin - 1 - i; + octave_value &arg = first_arg[ii + i].ov; + + if (arg.is_cs_list ()) + { + octave_value_list cs_args_i = arg.list_value (); + for (int j = 0; j < cs_args_i.length (); j++) + { + if (cell.numel () <= idx_cell) + cell.resize (dim_vector {1, idx_cell + 1}); + cell (0, idx_cell++) = cs_args_i (j); + } + } + else + { + if (cell.numel () <= idx_cell) + cell.resize (dim_vector {1, idx_cell + 1}); + cell (0, idx_cell++) = std::move (arg); + } + + arg.~octave_value (); + } + + // Push varargin to the stack + PUSH_OV (cell); + } + else + PUSH_OV (Cell (0,0)); // Empty cell into varargin's slot + + // Construct locals + int n_locals_to_ctor = + n_locals_callee - n_args_callee - n_returns_callee; + + CHECK_STACK (n_locals_to_ctor); + for (int i = 0; i < n_locals_to_ctor; i++) + PUSH_OV (); + + int nargin = n_args_on_callee_stack + idx_cell; // n_args_callee count includes varargin + try + { + m_tw->push_stack_frame(*this, usr_fcn, nargout, nargin); + } + CATCH_STACKPUSH_EXECUTION_EXCEPTION // Sets m_could_not_push_frame to true + CATCH_STACKPUSH_BAD_ALLOC + + /* Called fn needs to know about ignored outputs, e.g. [~, a] = foo() */ + if (m_output_ignore_data) + { + if (m_output_ignore_data->is_pending ()) + m_tw->set_auto_fcn_var (stack_frame::IGNORED, m_output_ignore_data->get_ignore_matrix ()); + else + m_tw->set_auto_fcn_var (stack_frame::IGNORED, {}); + m_output_ignore_data->m_v_lvalue_list.push_back (m_tw->lvalue_list ()); + m_tw->set_lvalue_list (nullptr); + } + + /* N_RETURNS is negative for varargout */ + int n_returns = N_RETURNS () - 1; /* %nargout in N_RETURNS */ + if (n_returns >= 0 && nargout > n_returns) + { + (*sp++).pee = new execution_exception {"error","","function called with too many outputs"}; + (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC); + goto unwind; + } + + // Now dispatch to first instruction in the + // called function + } + DISPATCH (); + +unwind: + { + ip--; // Rewind ip to after the opcode (i.e. arg0's position in the code) + // Push VM state + m_sp = sp; + m_bsp = bsp; + m_rsp = rsp; + m_code = code; + m_data = data; + m_name_data = name_data; + m_ip = ip - code; + m_unwind_data = unwind_data; + + // The error_type is put on the stack before the jump to unwind. + error_type et = static_cast<error_type> (m_sp[-1].i); + m_sp--; + + // Save current exception to the error system in handle_error () + error_data errdat = handle_error (et); + + // Only run unwind_protect code if the exception is the interrupt or OOM exception. + // I.e. no 'throw ... catch' code.
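+ // Editor's note (hedged summary, not part of the original patch): the
+ // loop below unwinds one bytecode frame per iteration: it destroys
+ // operands down to the frame's locals, jumps to a matching
+ // unwind_protect or try/catch handler if one covers the current ip, and
+ // otherwise destroys the locals, restores the caller's VM registers
+ // (ip, sp, bsp, code, data, ...) and retries in the caller, until the
+ // root frame is reached and the exception is rethrown out of the VM.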
bool only_unwind_protect = et == error_type::INTERRUPT_EXC; + + while (1) + { + // Find unwind entry for current value of the instruction pointer + unwind_entry *entry = find_unwind_entry_for_current_state (only_unwind_protect); + + unwind_entry_type type = unwind_entry_type::INVALID; + if (entry) + type = entry->m_unwind_entry_type; + + // We need to figure out what stack depth we want. + // If we are unwinding in a try catch we need to save any + // nesting switch or for loop stack objects on the stack. + int target_stack_depth = N_LOCALS(); + if (entry) + { + target_stack_depth += entry->m_stack_depth; + } + + // Unwind the stack down to the locals + // + // If we got here from the return op code we might already have + // destroyed the locals when an error was triggered. + while (m_sp - m_bsp > target_stack_depth) + { + // If the stack depth matches a for loop we need to + // pop some native ints. + // + // TODO: Wasteful search for forloop each iteration + int current_stack_depth = m_sp - m_bsp - N_LOCALS (); + int stack_depth_for_forloop = + find_unwind_entry_for_forloop (current_stack_depth); + + if (stack_depth_for_forloop != -1 && + current_stack_depth == stack_depth_for_forloop + 3) + { + m_sp -= 2; // Pop two ints + (*--m_sp).ov.~octave_value (); // Pop ov + } + else + (*--m_sp).ov.~octave_value (); + } + + if (type == unwind_entry_type::UNWIND_PROTECT || + type == unwind_entry_type::TRY_CATCH) + { + // Need to set some stuff for last_error etc and make the + // interpreter happy by resetting stuff + error_system& es = m_tw->get_interpreter().get_error_system (); + + octave_scalar_map err_map; + + err_map.assign ("message", es.last_error_message ()); + err_map.assign ("identifier", es.last_error_id ()); + err_map.assign ("stack", es.last_error_stack ()); + + m_tw->get_interpreter().recover_from_exception (); + + // Set stack pointer and ip and dispatch + m_ip = entry->m_ip_target; + code = m_code; + ip = m_code + m_ip; + sp = m_sp; + + // Push the error object that is either just popped right + // away by a POP instruction or assigned to the catch + // clause identifier. + PUSH_OV (err_map); + + if (et == error_type::INTERRUPT_EXC) + m_unwinding_interrupt = true; + + goto bail_unwind; + } + + m_tw->get_current_stack_frame ()->vm_unwinds (); + + // Destroy locals down to nargout + while (m_sp != m_bsp + 1) + { + (*--m_sp).ov.~octave_value (); + } + + m_sp--; // nargout + + if (m_sp == m_rsp) + break; // Got down to start of root stack frame + + if (OCTAVE_UNLIKELY (m_profiler_enabled)) + { + auto p = vm::m_vm_profiler; + if (p) + { + std::string fn_name = data[2].string_value (); // profiler_name () queried at compile time + p->exit_fn (fn_name); + } + } + + // Restore ip + ip = (*--m_sp).puc; + + // Restore bsp + bsp = m_bsp = (*--m_sp).pse; + + // Restore id names + name_data = m_name_data = (*--m_sp).ps; + + // Restore data + data = m_data = (*--m_sp).pov; + + // Restore code + code = m_code = (*--m_sp).puc; + m_ip = ip - m_code; + + // Restore unwind data + unwind_data = m_unwind_data = (*--m_sp).pud; + + // Restore the stack pointer + sp = m_sp = m_sp[-1].pse; + + // Pop dynamic stackframe (unless it was never pushed) + if (!m_could_not_push_frame) + m_tw->pop_stack_frame (); + else + m_could_not_push_frame = false; + + // If we are messing with the interpreter's lvalue_list due to some + // ~ we need to restore stuff.
if (m_output_ignore_data) + { + delete m_tw->lvalue_list (); + CHECK (!m_output_ignore_data->m_v_lvalue_list.empty ()); + + m_tw->set_lvalue_list (m_output_ignore_data->m_v_lvalue_list.back ()); + m_output_ignore_data->m_v_lvalue_list.pop_back (); + } + } + + CHECK_STACK (0); + this->m_dbg_proper_return = true; + + m_tw->set_lvalue_list (m_original_lvalue_list); + + // Rethrow exceptions out of the VM + if (et == error_type::INTERRUPT_EXC) + throw interrupt_exception {}; + else if (et == error_type::EXIT_EXCEPTION) + throw exit_exception (errdat.m_exit_status, errdat.m_safe_to_return); + else + { + error_system& es = m_tw->get_interpreter().get_error_system (); + es.rethrow_error (es.last_error_id (), es.last_error_message (), es.last_error_stack ()); + } + + } +bail_unwind: + DISPATCH (); + +init_global: + { + // The next instruction tells whether we should init a global or persistent + // variable. + global_type type = static_cast<global_type> (arg0); + + // The next instruction is the local slot number for the global variable + int slot = POP_CODE_USHORT(); + POP_CODE_USHORT(); // Not used TODO: Remove. Make this opcode use WIDE + + std::string& name = name_data[slot]; + + octave_value &ov_slot = bsp[slot].ov; + bool slot_already_live = ov_slot.is_defined (); + + bool is_marked_in_VM = ov_slot.is_ref (); + + // The next instruction is whether the global declaration has an + // initialization value + bool has_init_code = *ip++; + + // If the global was not already created we need to assign an + // empty double matrix to it. + // If there already is a defined local in the slot we initialize + // the global with the local + // TODO: Should be a deprecation warning here for this + octave_value ov_default; + if (slot_already_live && !is_marked_in_VM) + ov_default = std::move (ov_slot); + else + ov_default = Matrix (); + + if (!is_marked_in_VM) + ov_slot = octave_value {}; + + bool global_is_new_in_callstack = false; + + if (type == global_type::GLOBAL) + { + if (is_marked_in_VM && ov_slot.ref_rep ()->is_persistent_ref ()) + { + (*sp++).pee = new execution_exception {"error", "", + "can't make persistent variable '" + name + "' global"}; + (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC); + goto unwind; + } + + auto sym = m_tw->get_current_stack_frame ()->insert_symbol (name); + // Note: install_variable won't override the global's value with the nil ov from + // the "{}" argument. + // + // Also install_variable () will write a true ov to the marker + // ov on the VM stack. + m_tw->get_current_stack_frame()->install_variable (sym, {}, 1); + + octave_value &ov_gbl = m_tw->global_varref (name); + global_is_new_in_callstack = ov_gbl.is_undefined (); + + // We assign the default before the init + if (global_is_new_in_callstack) + m_tw->global_assign (name, ov_default); + + if (!is_marked_in_VM) + { + ov_slot = octave_value {new octave_value_ref_global {name}}; + } + + // TODO: Assert global_is_new_in_callstack != global_is_marked_in_VM + // but does not work until the dynamic stack is implemented.
+ + // CHECK (global_is_new_in_callstack != global_is_marked_in_VM); + } + else if (type == global_type::PERSISTENT) + { + if (is_marked_in_VM && ov_slot.ref_rep ()->is_global_ref ()) + { + (*sp++).pee = new execution_exception {"error", "", + "can't make global variable '" + name + "' persistent"}; + (*sp++).i = static_cast(error_type::EXECUTION_EXC); + goto unwind; + } + + auto frame = m_tw->get_current_stack_frame(); + + symbol_record sym = frame->lookup_symbol(name); + try + { + // Throws if global or formal parameter + frame->make_persistent(sym); + } + CATCH_EXECUTION_EXCEPTION + + auto scope = frame->get_scope (); + + // TODO: Put the offset in the op-code instead? + auto it = unwind_data->m_slot_to_persistent_slot.find (slot); + CHECK (it != unwind_data->m_slot_to_persistent_slot.end ()); + int pers_offset = it->second; + + octave_value &ov_gbl = scope.persistent_varref (pers_offset); + + global_is_new_in_callstack = ov_gbl.is_undefined (); + if (global_is_new_in_callstack) + { + ov_gbl = ov_default; + } + + if (!is_marked_in_VM) + { + ov_slot = octave_value {new octave_value_ref_persistent {std::move (scope), pers_offset}}; + } + } + else + ERR ("Wrong global type"); + + // If there is init code, then there is also a offset to the first + // instruction after the init code, to where we jump if the global is + // alread live. + int after; + if (has_init_code) + { + unsigned char b0 = *ip++; + unsigned char b1 = *ip++; + after = b0 | (b1 << 8); + + if (!global_is_new_in_callstack || slot_already_live) + ip = code + after; + } + + // Now dispatch to either next instruction if no init, init or after init + } + DISPATCH (); +assign_compound: + { + // The next instruction is the slot number + int slot = arg0; + // The next instruction is the type of compound operation + octave_value::assign_op op = + static_cast (*ip++); + + octave_value &ov_rhs = TOP_OV (); + octave_value &ov_lhs = bsp[slot].ov; + + if (!ov_lhs.is_defined ()) // TODO: Also checked in .assign() ... + { + (*sp++).pee = new execution_exception {"error", "", + "in computed assignment A OP= X, A must be defined first"}; + (*sp++).i = static_cast(error_type::EXECUTION_EXC); + goto unwind; + } + + try + { + // TODO: assign makes some stupid empty list and slows everything down + if (OCTAVE_LIKELY (!ov_lhs.is_ref ())) + ov_lhs.assign (op, ov_rhs); // Move code into here? + else + { + octave_value &glb_ref = ov_lhs.ref_rep ()->ref (); + glb_ref.assign (op, ov_rhs); + } + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION_WITH_NAME + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + STACK_DESTROY (1); + } + DISPATCH (); +jmp_ifdef: + { + octave_value &ov_1 = TOP_OV (); + unsigned char b0 = arg0; + unsigned char b1 = *ip++; + + int target = b0 | (b1 << 8); + + if (ov_1.is_defined () && !ov_1.is_magic_colon ()) + ip = code + target; + + STACK_DESTROY (1); + } + DISPATCH (); +switch_cmp: + { + octave_value &ov_label = TOP_OV (); + octave_value &ov_switch = SEC_OV (); + unsigned char b0 = arg0; + unsigned char b1 = *ip++; + + int target = b0 | (b1 << 8); + + bool do_it; + if (ov_label.is_undefined ()) + do_it = false; + else if (!ov_label.iscell ()) + do_it = ov_switch.is_equal (ov_label); + else + { + do_it = false; + // Match all cell elements. 
Any will do + Cell cell (ov_label.cell_value ()); + + for (octave_idx_type i = 0; i < cell.rows (); i++) + { + for (octave_idx_type j = 0; j < cell.columns (); j++) + { + do_it = ov_switch.is_equal (cell(i,j)); + + if (do_it) + break; + } + + if (do_it) // Keep a match found in an earlier row + break; + } + } + + STACK_DESTROY (2); + + if (!do_it) + ip = code + target; + } + DISPATCH (); + +braindead_precond: + { + octave_value &ov = TOP_OV(); + + bool do_braindead = false; + if (ov.ndims () == 2 && ov.rows () == 1 && ov.columns () == 1) + do_braindead = true; + + STACK_DESTROY (1); + + if (do_braindead) + PUSH_OV (ov_true); + else + PUSH_OV (ov_false); + } + DISPATCH_1BYTEOP (); + +braindead_warning: + { + // A slot stores whether we already printed this warning for a particular + // place where there could be a braindead short circuit + int slot = arg0; + // The next codepoint is the type of warning + int type = *ip++; // ASCII '|' or '&' + + octave_value& ov_warning = bsp[slot].ov; + + if (ov_warning.is_nil ()) + { + ov_warning = ov_true; // Don't print the warning next time + m_tw->set_active_bytecode_ip (ip - code); // The warning needs to be able to get line numbers. + + // It is possible to specify that certain warnings should be errors, so we need a try here. + try + { + warning_with_id ("Octave:possible-matlab-short-circuit-operator", + "Matlab-style short-circuit operation performed for operator %c", + type); + } + CATCH_EXECUTION_EXCEPTION + } + } + DISPATCH (); +force_assign: + { + // The next instruction is the slot number + int slot = arg0; + + octave_value &ov_rhs = TOP_OV (); + octave_value &ov_lhs = bsp[slot].ov; + + ov_lhs.maybe_call_dtor (); + + if (ov_rhs.vm_need_storable_call ()) + ov_rhs.make_storable_value (); // Some types have lazy copy + + if (OCTAVE_LIKELY (!ov_lhs.is_ref ())) + ov_lhs = std::move (ov_rhs); // Note move + else + ov_lhs.ref_rep ()->set_value (std::move (ov_rhs)); + + STACK_DESTROY (1); + } + DISPATCH(); +push_nil: + { + PUSH_OV(octave_value{}); + } + DISPATCH_1BYTEOP(); +throw_iferrorobj: + { + octave_value& ov_top = TOP_OV (); + + if (ov_top.is_defined ()) + { + // This "error object" is created by the unwind: code + // and not e.g. from a user's error + octave_scalar_map map = ov_top.scalar_map_value (); + + bool is_err_obj = map.isfield("message") && + map.isfield ("identifier") && + map.isfield ("stack"); + + if (!is_err_obj) + PANIC ("Strange error object on stack"); + + octave_value msg = map.getfield ("message"); + octave_value id = map.getfield ("identifier"); + + STACK_DESTROY (1); + + std::string s_msg = msg.string_value (); + std::string s_id = id.string_value (); + + octave_map err_stack = map.contents ("stack").xmap_value ("ERR.STACK must be a struct"); + + // Are we unwinding an interrupt exception? + if (m_unwinding_interrupt) + { + (*sp++).i = static_cast<int> (error_type::INTERRUPT_EXC); + goto unwind; + } + + // On a rethrow, the C++ exception is always base class execution_exception. + // We use rethrow_error() to recreate a stack info object from the octave_map + // in an easy way.
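+ // Editor's note (illustrative, not part of the original patch): the map
+ // checked above carries MException-like fields, e.g.
+ //   message    -- "some error message"
+ //   identifier -- "Octave:some-error-id"
+ //   stack      -- struct array with file/name/line/column entries
+ // which rethrow_error () below turns back into an execution_exception.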
+ try + { + error_system& es = m_tw->get_interpreter().get_error_system (); + es.rethrow_error (s_id, s_msg, err_stack); + } + catch(execution_exception& e) + { + (*sp++).pee = new execution_exception {e}; + } + + (*sp++).i = static_cast (error_type::EXECUTION_EXC); + goto unwind; + } + else + STACK_DESTROY (1); + } + DISPATCH_1BYTEOP(); + +index_struct_call: + { + int slot = arg0; + bool has_slot = *ip++; + int nargout = *ip++; + + int n_subs = POP_CODE (); + + std::list idx; + + std::vector v_n_args; + std::string types; + types.resize (n_subs); + + for (int i = 0; i < n_subs; i++) + { + v_n_args.push_back (POP_CODE ()); + types[i] = POP_CODE (); + } + + // Add the args of each sub indexing to the idx list + for (int i = 0; i < n_subs; i++) + { + octave_value_list ovl; + int n_args = v_n_args[n_subs - 1 - i]; // Note, from end to beginning + for (int j = 0; j < n_args; j++) + { + octave_value &arg = TOP_OV (); + + if (arg.is_cs_list ()) + ovl.append (arg.list_value ().reverse ()); + else + ovl.append (std::move (arg)); + + STACK_DESTROY (1); + } + // args are pushed left to right to stack, so we need to reverse the ovl + ovl.reverse (); + idx.push_back (ovl); + } + idx.reverse (); // expressions was pushed left to right, so we need to reverse + + octave_value &ov = TOP_OV (); + octave_value_list retval; + try + { + m_tw->set_active_bytecode_ip (ip - code); + + // TODO: Need to figure out a clean way to do this kind of stuff. + // See tree_index_expression::evaluate_n() + // It should be general, because I don't think this will always work? + int cntr = 0; + while (ov_need_stepwise_subsrefs(ov) && types.size () > 0) + { + bool eat_args = true; + std::string step_type {types.front ()}; + + std::list step_idx; + step_idx.push_back (idx.front ()); + + int step_nargout = types.size () > 1 ? 1 : nargout; + + if (cntr == 0 && ov.is_nil () && has_slot) + { + // Put a function cache object in the slot and in the local ov + ov = bsp[slot].ov = + octave_value (new octave_fcn_cache (name_data[slot])); + } + + if (cntr == 0 && ov.has_function_cache () && has_slot) + { + octave_function *fcn; + try + { + if (step_type == "(") + { + fcn = ov.get_cached_fcn (idx.front ()); + } + else // { or . + { + fcn = ov.get_cached_fcn ({}); + eat_args = false; // The function call has no args + } + } + CATCH_EXECUTION_EXCEPTION // parse errors might throw in classdefs + + if (! fcn) + { + (*sp++).ps = new std::string {name_data[slot]}; + (*sp++).i = static_cast(error_type::ID_UNDEFINED); + goto unwind; + } + + try + { + m_tw->set_active_bytecode_ip (ip - code); + if (eat_args) + retval = fcn->call (*m_tw, step_nargout, idx.front ()); + else + retval = fcn->call (*m_tw, step_nargout, {}); + // TODO: Bytecode call. 
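+ // Editor's note (hedged explanation, not part of the original patch):
+ // this stepwise loop evaluates chains like 's.foo (2).bar' one level at
+ // a time, since each intermediate value decides whether the next '(',
+ // '{' or '.' is a plain subsref or a call to a cached function.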
+ } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + else if (ov.is_function () && !ov.is_classdef_meta ()) + { + octave_function *fcn = ov.function_value (true); + if (fcn) + { + octave_value_list args = idx.front (); + retval = fcn->call (*m_tw, step_nargout, args); + } + else + PANIC ("Silly state"); + } + else + { + try + { + retval = ov.subsref (step_type.c_str (), step_idx, step_nargout); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION_WITH_MAYBE_NAME (has_slot && cntr == 0) + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + + // If the first retval has zero length and we got more to go, it is an error + if (cntr == 0 && step_type.size() > 1 && retval.length () == 0) + { + (*sp++).pee = new execution_exception {"error","","indexing undefined value"}; + (*sp++).i = static_cast (error_type::EXECUTION_EXC); + goto unwind; + } + + ov = retval.first_or_nil_ov (); + + if (cntr != 0 && ov.is_cs_list () && step_type.size() > 1) + { + (*sp++).pee = new execution_exception {"error","","a cs-list cannot be further indexed"}; + (*sp++).i = static_cast (error_type::EXECUTION_EXC); + goto unwind; + } + + if (eat_args) + { + types = types.substr(1, types.size () - 1); + idx.pop_front (); + } + cntr++; + } + + if ((!ov.is_function () || ov.is_classdef_meta ()) && types.size ()) + { + retval = ov.subsref (types.c_str (), idx, nargout); + idx.pop_front (); + } + + octave_value val = (retval.length () ? retval(0) : octave_value ()); + if (val.is_function ()) + { + octave_function *fcn = val.function_value (true); + + if (fcn) + { + octave_value_list args = idx.size () ? idx.front () : (octave_value_list){}; + retval = fcn->call (*m_tw, nargout, args); + } + } + + idx.clear (); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + STACK_DESTROY (1); + EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (retval, nargout); + } + DISPATCH (); + +index_struct_n: + { + int nargout = arg0; + + int slot = POP_CODE_USHORT (); // Needed if we need a function lookup + int slot_for_field = POP_CODE_USHORT (); + + octave_value &ov = TOP_OV (); + + std::string field_name = name_data [slot_for_field]; + + octave_value ov_field_name {field_name}; + + octave_value_list retval; + + // TODO: Should be a "simple_subsref for "{" and "." + octave_value_list ovl_idx; + ovl_idx.append (ov_field_name); + + std::list idx; + idx.push_back (ovl_idx); + + try + { + m_tw->set_active_bytecode_ip (ip - code); + retval = ov.subsref(".", idx, nargout); + + // TODO: Kludge for e.g. "m = containsers.Map;" which returns a function. + // Should preferably be done by .subsref? + octave_value val = (retval.length () ? 
retval(0) : octave_value ()); + if (val.is_function ()) + { + octave_function *fcn = val.function_value (true); + + if (fcn) + { + retval = fcn->call (*m_tw, nargout, {}); + } + } + + idx.clear (); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION_WITH_NAME + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + STACK_DESTROY (1); + EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (retval, nargout); + } + DISPATCH (); + +subasgn_struct: + { + int slot = arg0; + int field_slot = POP_CODE_USHORT (); + + // The top of the stack is the rhs value + octave_value &rhs = TOP_OV (); + + // The ov to subassign to + octave_value &ov = bsp[slot].ov; + + // TODO: Room for performance improvement here maybe + if (OCTAVE_LIKELY (!ov.is_ref ())) + ov.make_unique (); + else + ov.ref_rep ()->ref ().make_unique (); + + // TODO: Uggly containers + std::list idx; + octave_value_list ovl; + + std::string field_name = name_data[field_slot]; + + octave_value ov_field_name {field_name}; + + ovl.append (ov_field_name); + + idx.push_back (ovl); + + // E.g. scalars do not update them self inplace + // but create a new octave_value, so we need to + // copy the return value to the slot. + try + { + ov = ov.subsasgn (".", idx, rhs); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION_WITH_NAME + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + STACK_DESTROY (1); + } + DISPATCH (); + +subasgn_cell_id: + { + // The args to the subassign are on the operand stack + int slot = arg0; + int nargs = *ip++; + + // The top of the stack is the rhs value + octave_value &rhs = TOP_OV (); + // First argument + stack_element *parg = sp - 1 - nargs; + + // Move the args to an ovl + // TODO: Should actually be a move + octave_value_list args; + for (int i = 0; i < nargs; i++) + { + octave_value &arg = parg[i].ov; + // We need to expand cs-lists + if (arg.is_cs_list ()) + args.append (arg.list_value ()); + else + args.append (arg); + } + + // The ov to subassign to + octave_value &ov = bsp[slot].ov; + // TODO: Room for performance improvement here maybe + if (OCTAVE_LIKELY (!ov.is_ref ())) + ov.make_unique (); + else + ov.ref_rep ()->ref ().make_unique (); + + // TODO: Uggly containers + std::list idx; + idx.push_back (args); + + try + { + // E.g. scalars do not update them self inplace + // but create a new octave_value, so we need to + // copy the return value to the slot. + ov = ov.subsasgn("{", idx, rhs); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION_WITH_NAME + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + // Destroy the args on the operand stack aswell as rhs + STACK_DESTROY (nargs + 1); + } + DISPATCH (); + +subassign_obj: + { + // The args to the subassign are on the operand stack + int nargs = arg0; + char type = *ip++; + + // First argument + stack_element *parg = sp - nargs; + // lhs is under the args -- the target for the subassign + octave_value &lhs = (sp - nargs - 1)->ov; + lhs.make_unique (); // TODO: Room for performance improvement here maybe + // rhs is under the lhs + octave_value &rhs = (sp - nargs - 2)->ov; // lhs is written to this stack position + + // Move the args to an ovl + // TODO: Should actually be a move + octave_value_list args; + for (int i = 0; i < nargs; i++) + { + octave_value &arg = parg[i].ov; + // We need to expand cs-lists + if (arg.is_cs_list ()) + args.append (arg.list_value ()); + else + args.append (arg); + } + + // TODO: Uggly containers + std::list idx; + idx.push_back (args); + + try + { + // E.g. 
scalars do not update them self inplace + // but create a new octave_value, so we need to + // copy the return value to the slot. + lhs = lhs.subsasgn(std::string {type}, idx, rhs); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + // We want lhs on the top of the stack after dropping all + // the args to SUBASSIGN_OBJ, so we move it to where rhs is + rhs = std::move (lhs); + + // Destroy the args on the operand stack aswell as the + // stack position that we moved lhs out of. + STACK_DESTROY (nargs + 1); + + // lhs is on the top of the stack now + } + DISPATCH (); + +index_obj: + { + int nargout = arg0; + int has_slot = *ip++; + int slot = POP_CODE_USHORT (); + int n_args_on_stack = *ip++; + char type = *ip++; + + // The object to index is before the args on the stack + octave_value &ov = (sp[-1 - n_args_on_stack]).ov; + + // Make an ovl with the args + // TODO: Should be inplace moves + octave_value_list ovl; + bool has_cs_list_arg = false; + // The operands are on the top of the stack + for (int i = n_args_on_stack - 1; i >= 0; i--) + { + octave_value &arg = sp[-1 - i].ov; + + // If the operand arg is a cs list we need to expand it + if (arg.is_cs_list ()) + { + has_cs_list_arg = true; + ovl.append (arg.list_value ()); + } + else + ovl.append (sp[-1 - i].ov); // TODO: copied, not moved + } + + // octave_fcn_cache and some octave_fcn_handle have caches + bool has_function_cache = ov.has_function_cache (); + + if (! has_function_cache && ov.is_defined ()) + { + // It is probably a variable + + // TODO: subsref should take ovl instead and be chained, + // or something smarter + std::list idx; // TODO: mallocs! + + idx.push_back(ovl); + + + // TODO: subsref might throw index error + octave_value_list retval; + + if (OCTAVE_LIKELY (! ov.is_function () + || ov.is_classdef_meta ())) + { + try + { + m_tw->set_active_bytecode_ip (ip - code); + retval = ov.subsref(std::string {type}, idx, nargout); + idx.clear (); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION_WITH_MAYBE_NAME (has_slot) + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + } + else + PANIC ("Strange state"); + + bool is_fcn = (retval.length () ? + retval(0).is_function() : false); + + // "FIXME: when can the following happen? In what case does indexing + // result in a value that is a function? Classdef method calls? + // Something else?" + + if (OCTAVE_LIKELY (!is_fcn)) + { + idx.clear (); + // TODO: Necessary? I guess it might trigger dtors + // or something? + ov = octave_value (); + } + else + { + octave_value val = retval(0); + octave_function *fcn = val.function_value (true); + + if (fcn) + { + octave_value_list final_args; + + if (! idx.empty ()) + final_args = idx.front (); + + try + { + m_tw->set_active_bytecode_ip (ip - code); + retval = fcn->call (*m_tw, nargout, final_args); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION_WITH_MAYBE_NAME (has_slot) + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + + idx.clear (); + ov = octave_value (); + val = octave_value (); + } + + // TODO: Maybe the args should be destroyed before the indexed + // variable? + STACK_DESTROY (n_args_on_stack + 1); + EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (retval, nargout); + } + else if (has_function_cache) + { +querry_fcn_cache_index_obj: + + octave_function *fcn; + try + { + fcn = ov.get_cached_fcn (ovl); + } + CATCH_EXECUTION_EXCEPTION + + if (! 
fcn) + { + if (has_slot) + (*sp++).ps = new std::string {name_data[slot]}; + else + (*sp++).ps = new std::string {"temporary object"}; + (*sp++).i = static_cast(error_type::ID_UNDEFINED); + goto unwind; + } + + if (fcn->is_compiled ()) + { + octave_user_function *usr_fcn = static_cast (fcn); + // Alot of code in this define + MAKE_BYTECODE_CALL + + // Now dispatch to first instruction in the + // called function + } + else + { + try + { + m_tw->set_active_bytecode_ip (ip - code); + octave_value_list ret = fcn->call (*m_tw, nargout, ovl); + + STACK_DESTROY (n_args_on_stack + 1); + EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (ret, nargout); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + } + } + else + { + // If the first object is not an identifier we can't look it up for + // a function call. + if (!has_slot) + { + (*sp++).ps = new std::string {"temporary object"}; + (*sp++).i = static_cast(error_type::ID_UNDEFINED); + goto unwind; + } + + if (! ov.is_nil ()) + { + TODO ("Not nil object for fcn cache replacement"); + } + + // It is probably a function call. + // Put a function cache object in the slot and in the local ov + // and jump into the if clause above to search for some function + // to call. + ov = bsp[slot].ov = + octave_value (new octave_fcn_cache (name_data[slot])); + goto querry_fcn_cache_index_obj; + } + } + DISPATCH (); +load_far_cst: + { + ip--; + int offset = POP_CODE_INT (); + + // Copy construct it into the top of the stack + new (sp++) octave_value (data [offset]); + + DISPATCH(); + } + +set_ignore_outputs: + { + if (!m_output_ignore_data) + { + m_output_ignore_data = new output_ignore_data; + } + + int n_ignored = arg0; + int n_total = POP_CODE (); + auto *M = new Matrix {}; + m_output_ignore_data->m_v_matrixes.push_back (M); + M->resize (1, n_ignored); + + std::set set_ignored; + + for (int i = 0; i < n_ignored; i++) + { + int ignore_idx = POP_CODE (); + (*M) (i) = ignore_idx; + set_ignored.insert (ignore_idx); + } + + // For calls into m-functions etc + auto *active_lvalue_list = new std::list {}; + + auto *saved_lvalue_list = m_tw->lvalue_list (); + m_output_ignore_data->m_v_lvalue_list.push_back (saved_lvalue_list); + + for (int i = 0; i < n_total; i++) + { + octave_lvalue lval ({}, m_tw->get_current_stack_frame ()); + if (set_ignored.find (i + 1) != set_ignored.end ()) + lval.mark_black_hole (); + active_lvalue_list->push_back (lval); + } + + m_tw->set_lvalue_list (active_lvalue_list); + } + DISPATCH(); + +clear_ignore_outputs: + { + CHECK (m_output_ignore_data); + + auto *active_lvalue_list = m_tw->lvalue_list (); + delete active_lvalue_list; + + // Restore the evaluators lvalue list + m_tw->set_lvalue_list (m_output_ignore_data->pop_lvalue_list ()); + + // Delete the matrix we used to set the autovar (if it has not been used an nulled) + delete m_output_ignore_data->m_v_matrixes.back (); + m_output_ignore_data->m_v_matrixes.pop_back (); + + // We want to null m_output_ignore_data if it is empty + if (m_output_ignore_data->m_v_matrixes.empty ()) + { + CHECK (m_output_ignore_data->m_v_lvalue_list.empty ()); + delete m_output_ignore_data; + m_output_ignore_data = nullptr; + } + + // Clear any value written to the %~X slot(s) + int n_slots = arg0; + for (int i = 0; i < n_slots; i++) + { + int slot = POP_CODE_USHORT (); + + octave_value &ov = bsp[slot].ov; + + if (ov.get_count () == 1) + ov.call_object_destructor (); + + ov = octave_value{}; + } + } + DISPATCH(); + +subassign_chained: + { 
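+ // Editor's note (illustrative, not part of the original patch): this
+ // opcode implements compound assignment through a chain of indices,
+ // e.g. 'a.b (2) += 3'. The per-level argument counts and index types
+ // follow in the code stream, and the index arguments lie on the operand
+ // stack with the last argument on top.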
+ octave_value::assign_op op = static_cast (arg0); + int n_chained = POP_CODE (); + std::vector v_n_args; + std::string type (n_chained, 0); + + for (int i = 0; i < n_chained; i++) + { + v_n_args.push_back (POP_CODE ()); + type [i] = POP_CODE (); + } + + std::list idx; + for (int i = 0; i < n_chained; i++) + { + octave_value_list ovl; + // foo (a1, a2).bar (a3, a4) + // are: + // TOP a4, a3, a2, a1 + // on the stack now. + int n_args = v_n_args [n_chained - i - 1]; + for (int j = 0; j < n_args; j++) + { + octave_value &arg = TOP_OV (); + ovl.append (std::move (arg)); + STACK_DESTROY (1); + } + ovl.reverse (); + idx.push_back (ovl); + } + + idx.reverse (); + + octave_value lhs = std::move (TOP_OV ()); + STACK_DESTROY (1); + octave_value rhs = std::move (TOP_OV ()); + STACK_DESTROY (1); + + try + { + if (type.size () && type.back () != '(' && lhs_assign_numel (lhs, type, idx) != 1) + err_invalid_structure_assignment (); + + lhs.assign (op, type, idx, rhs); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + + PUSH_OV (lhs); + } + DISPATCH (); + +set_slot_to_stack_depth: + { + int slot = arg0; + int stack_depth = sp - bsp; + bsp[slot].ov = octave_value {stack_depth}; + } + DISPATCH (); +dupn: + { + int offset = arg0; + int n = POP_CODE (); + stack_element *first = sp - n - offset; + for (int i = 0; i < n; i++) + PUSH_OV (first[i].ov); + } + DISPATCH (); +load_cst_alt2: + { + int offset = arg0; + + // Copy construct it into the top of the stack + new (sp++) octave_value (data [offset]); + + DISPATCH (); + } +load_cst_alt3: + { + int offset = arg0; + + // Copy construct it into the top of the stack + new (sp++) octave_value (data [offset]); + + DISPATCH (); + } +load_cst_alt4: + { + int offset = arg0; + + // Copy construct it into the top of the stack + new (sp++) octave_value (data [offset]); + + DISPATCH (); + } +load_2_cst: +{ + // We are pushing two constants to the stack. E.g. for "3 * 2". + // The next instruction is the offset in the data of the lhs. + // rhs is right after. + int offset = arg0; + + // Copy construct the two constants onto the top of the stack + new (sp++) octave_value (data [offset]); // lhs in a binop + new (sp++) octave_value (data [offset + 1]); // rhs + + DISPATCH (); +} +/* Check whether we should enter the debugger on the next ip */ +{ + bool onebyte_op; + if (0) + debug_check: + onebyte_op = false; + else if (0) + debug_check_1b: + onebyte_op = true; + + { + int tmp_ip = ip - code; + if (onebyte_op) + tmp_ip--; + + if (OCTAVE_UNLIKELY (m_trace_enabled)) + { + PRINT_VM_STATE ("Trace: "); + } + + if (OCTAVE_UNLIKELY (m_profiler_enabled)) + { + int64_t t1 = vm_profiler::unow (); + + auto p = m_vm_profiler; + if (!p) // Only happens as a race between m_profiler_enabled and m_vm_profiler + goto debug_check_end; + + std::string fn_name = data[2].string_value (); // profiler_name () querried at compile time + vm_profiler::vm_profiler_fn_stats &stat = p->m_map_fn_stats[fn_name]; + + if (!stat.m_v_t.size ()) + { + // The profiler got enabled after the current function was called. + p->enter_fn (fn_name, "", unwind_data, name_data, code); + stat.m_v_t.back () = -1; + stat.m_v_ip.back () = ip - code; // We are not at function start, so set ip to proper value. + } + else if (stat.m_v_t.back () != -1) + { + int64_t t0 = stat.m_v_t.back (); + int64_t dt = t1 - t0; + + stat.add_t (dt); + p->add_t (dt); + } + } + + // TODO: Check all trees one time and cache the result somewhere? 
+ // Until another bp is set? Debugging will be quite slow + // with one check for each op-code. + + auto it = unwind_data->m_ip_to_tree.find (tmp_ip); + + if (it == unwind_data->m_ip_to_tree.end ()) + goto debug_check_end; + + bool is_ret = *ip == static_cast (INSTR::RET); + + m_sp = sp; + m_bsp = bsp; + m_rsp = rsp; + m_code = code; + m_data = data; + m_name_data = name_data; + m_ip = tmp_ip; + m_unwind_data = unwind_data; + m_tw->set_active_bytecode_ip (tmp_ip); + + tree *t = it->second; + + // do_breakpoint will check if there is a breakpoint attached + // to the relevant code and escape to the debugger REPL + // if necessary. + if (t) + { + try + { + m_tw->do_breakpoint (t->is_active_breakpoint (*m_tw), is_ret); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + } + debug_check_end: + { + if (OCTAVE_UNLIKELY (m_profiler_enabled)) + { + auto p = m_vm_profiler; + + if (p) + { + std::string fn_name = data[2].string_value (); // profiler_name () queried at compile time + vm_profiler::vm_profiler_fn_stats &stat = m_vm_profiler->m_map_fn_stats[fn_name]; + + // If someone enabled profiling in the debugger we need to wait until + // the debug_check: block is run next time. + if (stat.m_v_t.size()) + { + int tmp_ip = ip - code; + if (onebyte_op) + tmp_ip--; + stat.m_v_ip.back () = tmp_ip; // Sets a new 'currently running ip' + stat.m_v_t.back () = vm_profiler::unow (); // Sets a new timestamp for the current ip + } + } + } + } + if (onebyte_op) + { + int opcode = ip[-1]; + arg0 = ip[0]; + ip++; + goto *instr [opcode]; + } + else + { + int opcode = ip[0]; + arg0 = ip[1]; + ip += 2; + goto *instr [opcode]; + } +} + +debug: // TODO: Remove + { + if (m_tw->debug_mode_active ()) + { + m_ip = ip - code; + m_sp = sp; + m_tw->set_active_bytecode_ip (ip - code); + + try + { + m_tw->enter_debugger (); + } + CATCH_INTERRUPT_EXCEPTION + CATCH_INDEX_EXCEPTION + CATCH_EXECUTION_EXCEPTION + CATCH_BAD_ALLOC + CATCH_EXIT_EXCEPTION + } + } + DISPATCH (); + + wide: + { + int opcode = arg0; // The opcode to execute next is in arg0, i.e. ip[-1] + // The next opcode needs its arg0, which is an unsigned short instead of the usual byte + // that DISPATCH() writes to arg0.
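+ // Editor's note (illustrative, not part of the original patch): e.g. a
+ // slot number of 600 does not fit in the usual one-byte argument, so the
+ // compiler emits WIDE <opcode> <lo> <hi> and this handler reassembles
+ // arg0 = lo | (hi << 8) before jumping to the real opcode's handler.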
+ arg0 = (ip[1] << 8) | ip[0]; + ip += 2; // Forward ip so it points to after the widened argument + goto *instr [opcode]; + } + + __builtin_unreachable (); +} + +octave_value +vm::handle_object_end (octave_value ov, int idx, int nargs) +{ + // See tree_evaluator::evaluate_end_expression() + octave_value ans; + + auto &interpreter = m_tw->get_interpreter (); + std::string dispatch_class = ov.class_name (); + symbol_table& symtab = interpreter.get_symbol_table (); + + octave_value meth = symtab.find_method ("end", dispatch_class); + + if (meth.is_defined ()) + ans = interpreter.feval (meth, ovl (ov, idx+1, nargs), 1).first_or_nil_ov (); + else + ans = end_value (ov, idx, nargs); + + return ans; +} + +octave_value +vm::find_fcn_for_cmd_call (std::string *name) +{ + interpreter& interp = __get_interpreter__(); + + symbol_table& symtab = interp.get_symbol_table (); + + return symtab.find_function (*name); +} + +vm::error_data +vm::handle_error (error_type error_type) +{ + error_data ret; + + error_system& es = m_tw->get_interpreter().get_error_system (); + + std::stringstream ss; + // ip points to the "next" instruction, so search for the + // code location for ip - 1 + loc_entry loc = find_loc (m_ip - 1, m_unwind_data->m_loc_entry); + + switch (error_type) + { + case error_type::BAD_ALLOC: + { + execution_exception e {"error", "Octave:bad-alloc", "out of memory or dimension too large for Octave's index type"}; + es.save_exception (e); + + break; + } + case error_type::ID_UNDEFINED: + { + std::string *sp = m_sp [-1].ps; + m_sp--; + std::string id_name = *sp; + delete sp; + + ss << "'" << id_name << "'" << + " undefined near line " << loc.m_line << + ", column " << loc.m_col; + + execution_exception e { "error", + "Octave:undefined-function", + ss.str ()}; + + // Since the exception was made in the VM it has not been saved yet + es.save_exception (e); + + break; + } + case error_type::IF_UNDEFINED: + { + // error ("%s: undefined value used in conditional expression", warn_for); + ss << "if's condition undefined near line " << + loc.m_line << ", column " << loc.m_col; + + execution_exception e {"error", "", ss.str ()}; + + es.save_exception (e); + + break; + } + case error_type::INDEX_ERROR: + { + execution_exception *e = m_sp [-1].pee; + + CHECK (e); + es.save_exception (*e); + + delete e; + + m_sp--; + + break; + } + case error_type::EXECUTION_EXC: + { + execution_exception *e = m_sp [-1].pee; + + CHECK (e); + es.save_exception (*e); + + delete e; + + m_sp--; + + break; + } + case error_type::INTERRUPT_EXC: + break; // Do nothing + case error_type::EXIT_EXCEPTION: + ret.m_safe_to_return = (--m_sp)->i; + ret.m_exit_status = (--m_sp)->i; + break; + case error_type::INVALID_N_EL_RHS_IN_ASSIGNMENT: + { + execution_exception e {"error", "", "invalid number of elements on RHS of assignment"}; + + es.save_exception (e); + + break; + } + case error_type::RHS_UNDEF_IN_ASSIGNMENT: + { + execution_exception e {"error", "", "value on right hand side of assignment is undefined"}; + + es.save_exception (e); + + break; + } + default: + TODO ("Unhandeled error type"); + } + + return ret; +} + +vm::~vm () +{ + delete [] m_stack0; + + CHECK (m_output_ignore_data == nullptr); +} + +vm::vm (tree_evaluator *tw, bytecode &initial_bytecode) +{ + m_ti = &__get_type_info__(); + m_stack0 = new stack_element[stack_size + stack_pad * 2]; + + for (unsigned i = 0; i < stack_pad; i++) + { + m_stack0[i].u = stack_magic_int; + m_stack0[i + stack_size].u = stack_magic_int; + } + + m_sp = m_stack = m_stack0 + stack_pad; + m_tw 
= tw; + m_symtab = &__get_symbol_table__(); + + m_data = initial_bytecode.m_data.data (); + m_code = initial_bytecode.m_code.data (); + m_name_data = initial_bytecode.m_ids.data (); + m_unwind_data = &initial_bytecode.m_unwind_data; + + // Check that the typeids are what the VM anticipates. If the id change, just change + // the constexpr. + CHECK (octave_scalar::static_type_id () == m_scalar_typeid); + CHECK (octave_bool::static_type_id () == m_bool_typeid); + CHECK (octave_matrix::static_type_id () == m_matrix_typeid); + + // Function pointer used for specialized op-codes + m_fn_dbl_mul = m_ti->lookup_binary_op (octave_value::binary_op::op_mul, m_scalar_typeid, m_scalar_typeid); + m_fn_dbl_div = m_ti->lookup_binary_op (octave_value::binary_op::op_div, m_scalar_typeid, m_scalar_typeid); + m_fn_dbl_add = m_ti->lookup_binary_op (octave_value::binary_op::op_add, m_scalar_typeid, m_scalar_typeid); + m_fn_dbl_sub = m_ti->lookup_binary_op (octave_value::binary_op::op_sub, m_scalar_typeid, m_scalar_typeid); + m_fn_dbl_pow = m_ti->lookup_binary_op (octave_value::binary_op::op_pow, m_scalar_typeid, m_scalar_typeid); + + m_fn_dbl_le = m_ti->lookup_binary_op (octave_value::binary_op::op_lt, m_scalar_typeid, m_scalar_typeid); + m_fn_dbl_le_eq = m_ti->lookup_binary_op (octave_value::binary_op::op_le, m_scalar_typeid, m_scalar_typeid); + m_fn_dbl_gr = m_ti->lookup_binary_op (octave_value::binary_op::op_gt, m_scalar_typeid, m_scalar_typeid); + m_fn_dbl_gr_eq = m_ti->lookup_binary_op (octave_value::binary_op::op_ge, m_scalar_typeid, m_scalar_typeid); + m_fn_dbl_eq = m_ti->lookup_binary_op (octave_value::binary_op::op_eq, m_scalar_typeid, m_scalar_typeid); + m_fn_dbl_neq = m_ti->lookup_binary_op (octave_value::binary_op::op_ne, m_scalar_typeid, m_scalar_typeid); + + m_fn_dbl_usub = m_ti->lookup_unary_op (octave_value::unary_op::op_uminus, m_scalar_typeid); + m_fn_dbl_not = m_ti->lookup_unary_op (octave_value::unary_op::op_not, m_scalar_typeid); + m_fn_bool_not = m_ti->lookup_unary_op (octave_value::unary_op::op_not, m_bool_typeid); + + m_pi_builtin_fn = m_symtab->find_built_in_function ("pi").function_value (); + // If the platform has no M_PI we need to initialize ov_pi +#if !defined (M_PI) + ov_pi = 4.0 * atan (1.0); +#endif +} + +// If there are too many return values we can't just move them since the stacks will overlap so we +// need to copy the args first with this proc +static void copy_many_args_to_caller (octave::stack_element *sp, + octave::stack_element *caller_stack_end, + int n_args_to_move, int n_args_caller_expects) +{ + // Move args to an ovl + octave_value_list ovl; + for (int i = 0; i < n_args_to_move; i++) + { + octave_value &arg = caller_stack_end[i].ov; + ovl.append (std::move (arg)); + } + + for (int i = 0; i < n_args_to_move; i++) + { + PUSH_OV (ovl(n_args_to_move - 1 - i)); // backwards + } + + // Push missing args + for (int i = n_args_to_move; i < n_args_caller_expects; i++) + PUSH_OV (); +} + +static octave_value xeval_for_numel (octave_value &ov, const std::string& type, const std::list& idx); + +// This function reimplements octave_lvalue::numel() +// TODO: octave_lvalue::numel() could be broken out or made static and used instead. But don't mess with that code +// to keep the VM somewhat independent of other code. +static int lhs_assign_numel (octave_value &ov, const std::string& type, const std::list& idx) +{ + // Return 1 if there is no index because without an index there + // should be no way to have a cs-list here. 
Cs-lists may be passed + // around internally but they are not supposed to be stored as + // single symbols in a stack frame. + + std::size_t num_indices = idx.size (); + + if (num_indices == 0) + return 1; + + switch (type[num_indices-1]) + { + case '(': + return 1; + + case '{': + { + // FIXME: Duplicate code in '.' case below... + + // Evaluate, skipping the last index. + + std::string tmp_type = type; + std::list tmp_idx = idx; + + tmp_type.pop_back (); + tmp_idx.pop_back (); + + octave_value tmp = xeval_for_numel (ov, tmp_type, tmp_idx); + + octave_value_list tidx = idx.back (); + + if (tmp.is_undefined ()) + { + if (tidx.has_magic_colon ()) + err_invalid_inquiry_subscript (); + + tmp = Cell (); + } + else if (tmp.is_zero_by_zero () + && (tmp.is_matrix_type () || tmp.is_string ())) + { + tmp = Cell (); + } + + return tmp.xnumel (tidx); + } + break; + + case '.': + { + // Evaluate, skipping either the last index or the last two + // indices if we are looking at "(idx).field". + + std::string tmp_type = type; + std::list tmp_idx = idx; + + tmp_type.pop_back (); + tmp_idx.pop_back (); + + bool paren_dot = num_indices > 1 && type[num_indices-2] == '('; + + // Index for paren operator, if any. + octave_value_list pidx; + + if (paren_dot) + { + pidx = tmp_idx.back (); + + tmp_type.pop_back (); + tmp_idx.pop_back (); + } + + octave_value tmp = xeval_for_numel (ov, tmp_type, tmp_idx); + + bool autoconv = (tmp.is_zero_by_zero () + && (tmp.is_matrix_type () || tmp.is_string () + || tmp.iscell ())); + + if (paren_dot) + { + // Use octave_map, not octave_scalar_map so that the + // dimensions are 0x0, not 1x1. + + if (tmp.is_undefined ()) + { + if (pidx.has_magic_colon ()) + err_invalid_inquiry_subscript (); + + tmp = octave_map (); + } + else if (autoconv) + tmp = octave_map (); + + return tmp.xnumel (pidx); + } + else if (tmp.is_undefined () || autoconv) + return 1; + else + return tmp.xnumel (octave_value_list ()); + } + break; + + default: + panic_impossible (); + } +} + +static octave_value xeval_for_numel (octave_value &ov, const std::string& type, const std::list& idx) +{ + octave_value retval; + + try + { + retval = ov; + + if (retval.is_constant () && ! idx.empty ()) + retval = retval.subsref (type, idx); + } + catch (const execution_exception&) + { + // Ignore an error and treat it as undefined. The error + // could happen because there is an index is out of range + // and we will be resizing a cell array. + + interpreter& interp = __get_interpreter__ (); + + interp.recover_from_exception (); + + retval = octave_value (); + } + + return retval; +} + + +loc_entry vm::find_loc (int ip, std::vector &loc_entries) +{ + int best = -1; + + int n = loc_entries.size (); + + // TODO: Should maybe be some binary search, but only called in + // exceptions so who cares? + for (int i = 0; i < n; i++) + { + loc_entry &e = loc_entries[i]; + + if (ip >= e.m_ip_start && ip < e.m_ip_end) + best = i; + } + + if (best == -1) + return {}; + + return loc_entries[best]; +} + +void vm::set_nargin (int nargin) +{ + m_tw->set_nargin (nargin); +} + +void vm::set_nargout (int nargout) +{ + m_tw->set_nargout (nargout); +} + +int +vm::find_unwind_entry_for_forloop (int current_stack_depth) +{ + int best_match = -1; + + // Find a for loop entry that matches the current instruction pointer + // range and also got an anticipated stack depth less than current stack + // depth. + // + // I.e. 
if the ip is in a for loop, we want to unwind down the stack + // until we reach the stack depth of the for loop to be able to remove + // its native int:s properly. + // + // To be able to unwind nested for loops we look for smaller and + // smaller stack depths given by the current_stack_depth parameter. + + for (unsigned i = 0; i < m_unwind_data->m_unwind_entries.size(); i++) + { + unwind_entry& entry = m_unwind_data->m_unwind_entries[i]; + int start = entry.m_ip_start; + int end = entry.m_ip_end; + int stack_depth = entry.m_stack_depth; + + // Skip non-for-loop entries + if (entry.m_unwind_entry_type != unwind_entry_type::FOR_LOOP) + continue; + // Is the ip in the range? + if (start > m_ip || end <= m_ip) + continue; + // Is the stack depth ok? + if (stack_depth >= current_stack_depth) + continue; + + // Is it better than prior match? + if (best_match != -1) + { + if (best_match > stack_depth) + continue; + } + + best_match = stack_depth; + } + + return best_match; +} + +unwind_entry* +vm::find_unwind_entry_for_current_state (bool only_find_unwind_protect) +{ + int best_match = -1; + + // Find the entry with the highest start instruction offset + for (unsigned i = 0; i < m_unwind_data->m_unwind_entries.size(); i++) + { + unwind_entry& entry = m_unwind_data->m_unwind_entries[i]; + int start = entry.m_ip_start; + int end = entry.m_ip_end; + + // When unwinding for e.g. interrupt exceptions we are only looking for UNWIND_PROTECT + if (only_find_unwind_protect && (entry.m_unwind_entry_type != unwind_entry_type::UNWIND_PROTECT)) + continue; + + // Skip for loop entries + if (entry.m_unwind_entry_type == unwind_entry_type::FOR_LOOP) + continue; + + // Is the ip in the range? + if (start > m_ip || end <= m_ip) // TODO: end < m_ip ??? + continue; + + // Is it better than prior match? + if (best_match != -1) + { + int best_start = + m_unwind_data->m_unwind_entries[best_match].m_ip_start; + if (best_start > start) + continue; + } + + best_match = i; + } + + if (best_match == -1) + return nullptr; + + return &m_unwind_data->m_unwind_entries[best_match]; +} + +static bool ov_need_stepwise_subsrefs (octave_value &ov) +{ + return !ov.isobject () && !ov.isjava () && !(ov.is_classdef_meta () && ! ov.is_package ()); +} + +int64_t +vm_profiler::unow () +{ + return octave_gettime_ns_wrapper (); +} + +void +vm_profiler::vm_profiler_fn_stats::add_t (int64_t dt) +{ + int ip = m_v_ip.back (); + maybe_resize (ip); + + m_v_cum_t[ip] += dt; + ++m_v_n_cum[ip]; +} + +void +vm_profiler::add_t (int64_t dt) +{ + if (!m_shadow_call_stack.size ()) + return; + + m_shadow_call_stack.back ().m_t_self_cum += dt; +} + +// There is no std::format since we use C++ 11 so let's make our own. +// The 'format' attribute gives nice compiler warnings on misuse. +static +std::string +x_snprintf (const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); + +static +std::string +x_snprintf (const char *fmt, ...) +{ + int n = 32; + do { + char *buff = new char[n]; + + va_list va; + va_start (va, fmt); + int n_needed = vsnprintf (buff, n, fmt, va); + va_end (va); + + std::string ret; + + try + { + std::string tmp {buff}; + ret = tmp; + } + catch (...)
// Maybe bad_alloc could be thrown + { + delete [] buff; // Note: array delete to match 'new char[n]' + throw; + } + + delete [] buff; + + if (n_needed < 0) + error ("Profiler internal error: Invalid call to x_snprintf()"); + if (n_needed < n) + return ret; + + n = n_needed + 1; + } while (1); +} + +void +vm_profiler::print_to_stdout () +{ + using std::string; + using std::vector; + using std::map; + using std::pair; + + // These could probably be vectors, but we'll do with maps to keep the + // code easier to follow. + map<string, int64_t> map_fn_to_cum_t; + map<string, int64_t> map_fn_to_self_cum_t; + map<string, vector<string>> map_fn_to_sourcerows; + map<string, vector<pair<int, string>>> map_fn_to_opcodes_stringrows; + map<string, string> map_fn_to_annotated_source; + map<string, string> map_fn_to_annotated_bytecode; + + // Calculate cumulative function time + for (auto kv : m_map_fn_stats) + { + string fn_name = kv.first; + vm_profiler_fn_stats &stats = kv.second; + + int64_t t_fn_cum = 0; + int64_t t_fn_self_cum = 0; + unsigned n = stats.m_v_cum_t.size (); + + for (unsigned ip = 0; ip < n; ip++) + { + t_fn_cum += stats.m_v_cum_t[ip]; + t_fn_self_cum += stats.m_v_cum_t[ip]; + } + for (unsigned ip = 0; ip < stats.m_v_cum_call_t.size (); ip++) + t_fn_cum += stats.m_v_cum_call_t[ip]; + + map_fn_to_cum_t[fn_name] = t_fn_cum; + map_fn_to_self_cum_t[fn_name] = t_fn_self_cum; + } + + // Try to get the source code + for (auto kv : m_map_fn_stats) + { + string fn_name = kv.first; + vm_profiler_fn_stats &stats = kv.second; + string file = stats.m_fn_file; + + auto &interp = __get_interpreter__ (); + + // Call type with the quiet flag to get the source + // Also works for functions without source code in files. + octave_value_list ans; + string source_text; + + bool got_source_text = false; + + if (!got_source_text) + { + octave_value_list args; + args.append ("-q"); + args.append (file); + try + { + if (file.size ()) + ans = interp.feval ("type", args, 1); + } + catch (execution_exception &) + { + // Didn't work + } + } + + if (ans.length () >= 1) + source_text = ans(0).string_value (); + if (source_text.size ()) + got_source_text = true; + + if (!got_source_text) + { + octave_value_list args; + args.append ("-q"); + args.append (fn_name); + try + { + if (fn_name.size ()) + ans = interp.feval ("type", args, 1); + } + catch (execution_exception &) + { + // Didn't work + } + } + + if (ans.length () >= 1) + source_text = ans(0).string_value (); + if (source_text.size ()) + got_source_text = true; + + if (got_source_text) + { + // Split source by row + vector<string> v_rows; + + std::stringstream ss(source_text); + string buff; + + while(std::getline (ss, buff, '\n')) + v_rows.push_back (buff); + + map_fn_to_sourcerows[fn_name] = v_rows; + } + } + + // Get bytecode "source code" rows + for (auto kv : m_map_fn_stats) + { + string fn_name = kv.first; + vm_profiler_fn_stats &stats = kv.second; + + auto v_ls = opcodes_to_strings (stats.m_code, stats.m_ids); + + map_fn_to_opcodes_stringrows[fn_name] = v_ls; + } + + // Annotate bytecode + for (auto kv : m_map_fn_stats) + { + std::string ans; + + string fn_name = kv.first; + vm_profiler_fn_stats &stats = kv.second; + + auto v_ls = map_fn_to_opcodes_stringrows[fn_name]; + int64_t fn_cum_t = map_fn_to_cum_t[fn_name]; + + for (auto ls : v_ls) + { + int ip = ls.first; // Opcode offset + string s = ls.second; // Text representation of the opcode + + // Ignore strange data + if (ip < 0) + continue; + + if (static_cast<unsigned> (ip) >= stats.m_v_cum_t.size () || (stats.m_v_cum_t[ip] == 0 && stats.m_v_cum_call_t[ip] == 0)) + { + ans += x_snprintf ("\t%*s %5d: %s\n", 43, "", ip, s.c_str ()); + continue; + } + + int64_t n_hits = stats.m_v_n_cum[ip];
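+ // Editor's note (illustrative, not part of the original patch): t_op
+ // below is the opcode's own time plus the time spent in calls it made,
+ // so with fn_cum_t = 2000 ns and t_op = 500 ns the row would print a
+ // 25.0 % share.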
+          int64_t t_op = stats.m_v_cum_t[ip] + stats.m_v_cum_call_t[ip];
+          double share_of_fn = 100. * static_cast<double> (t_op) / fn_cum_t;
+
+          // Try to make the table neat around the decimal separator
+          int wholes = floor (share_of_fn);
+          int rest = (share_of_fn - wholes) * 100;
+
+          if (share_of_fn >= 0.1)
+            ans += x_snprintf ("\t%8lld %12lld ns %5d.%-3d %% %12d: %s\n", static_cast<long long> (n_hits), static_cast<long long> (t_op), wholes, rest, ip, s.c_str ());
+          else
+            ans += x_snprintf ("\t%8lld %12lld ns %7.3e%% %12d: %s\n", static_cast<long long> (n_hits), static_cast<long long> (t_op), share_of_fn, ip, s.c_str ());
+        }
+
+      map_fn_to_annotated_bytecode[fn_name] = ans;
+    }
+
+  // Annotate source code
+  for (auto kv : m_map_fn_stats)
+    {
+      std::string ans;
+
+      string fn_name = kv.first;
+      vm_profiler_fn_stats &stats = kv.second;
+
+      // First we need to create a map between opcode offset and source line
+      auto v_ip_s = map_fn_to_opcodes_stringrows[fn_name];
+
+      map<int, int> map_op_offset_to_src_line;
+
+      for (auto ip_s : v_ip_s)
+        {
+          int ip = ip_s.first;
+          loc_entry loc = vm::find_loc (ip, stats.m_loc_entries);
+          map_op_offset_to_src_line[ip] = loc.m_line;
+        }
+
+      // Sum up the time spent on a source line
+      map<int, int64_t> map_srcline_to_tcum;
+      map<int, int64_t> map_srcline_to_nhits;
+
+      for (unsigned ip = 0; ip < stats.m_v_cum_t.size (); ip++)
+        {
+          int64_t tcum = stats.m_v_cum_t[ip] + stats.m_v_cum_call_t[ip];
+          int64_t nhits = stats.m_v_n_cum[ip];
+          int src_line = map_op_offset_to_src_line[ip];
+          map_srcline_to_tcum[src_line] += tcum;
+          map_srcline_to_nhits[src_line] += nhits;
+        }
+
+      auto v_src_rows = map_fn_to_sourcerows[fn_name];
+      // Annotate the source code
+
+      // Put all time spent in opcodes that do not correspond to any source
+      // line on the first row matching "function.*fnname".
+      bool found = false;
+      for (unsigned i = 0; i < v_src_rows.size (); i++)
+        {
+          string &row = v_src_rows[i];
+          std::size_t func_idx = row.find ("function");
+          std::size_t name_idx = row.find (stats.m_fn_name);
+
+          if (func_idx == string::npos || name_idx == string::npos)
+            continue;
+
+          string def = row.substr (0, func_idx + strlen ("function"));
+
+          // Any comment making it a fake?
+          if (def.find ('#') != string::npos || def.find ('%') != string::npos)
+            continue;
+
+          int line_nr = i + 1;
+          map_srcline_to_tcum[line_nr] += map_srcline_to_tcum[-1];
+          map_srcline_to_nhits[line_nr] += map_srcline_to_nhits[-1];
+          found = true;
+          break;
+        }
+
+      if (!found)
+        {
+          map_srcline_to_tcum[1] += map_srcline_to_tcum[-1];
+          map_srcline_to_nhits[1] += map_srcline_to_nhits[-1];
+        }
+      int64_t fn_cum_t = map_fn_to_cum_t[fn_name];
+
+      for (unsigned i = 0; i < v_src_rows.size (); i++)
+        {
+          int line_nr = i + 1;
+          int64_t t_line_cum = map_srcline_to_tcum[line_nr];
+          int64_t n_hits = map_srcline_to_nhits[line_nr];
+
+          double share_of_fn = 100. * static_cast<double> (t_line_cum) / fn_cum_t;
+
+          // Try to make the table neat around the decimal separator
+          int wholes = floor (share_of_fn);
+          int rest = (share_of_fn - wholes) * 100;
+
+          string src_line = v_src_rows[i];
+
+          if (share_of_fn == 0)
+            ans += x_snprintf ("\t%*s %5d: %s\n", 43, "", line_nr, src_line.c_str ());
+          else if (share_of_fn >= 0.1)
+            ans += x_snprintf ("\t%8lld %12lld ns %5d.%-3d %% %12d: %s\n", static_cast<long long> (n_hits), static_cast<long long> (t_line_cum), wholes, rest, line_nr, src_line.c_str ());
+          else
+            ans += x_snprintf ("\t%8lld %12lld ns %7.3e%% %12d: %s\n", static_cast<long long> (n_hits), static_cast<long long> (t_line_cum), share_of_fn, line_nr, src_line.c_str ());
+        }
+
+      map_fn_to_annotated_source[fn_name] = ans;
+    }
+
+  map<int64_t, string> map_cumt_to_fn;
+  for (auto &kv : map_fn_to_cum_t)
+    map_cumt_to_fn[kv.second] = kv.first;
+
+  int64_t t_tot = 0;
+  for (auto &kv : map_fn_to_cum_t)
+    t_tot += kv.second;
+
+  // Print stuff to the user
+
+  printf ("\n\n\nProfiled functions:\n");
+  printf ("\tRuntime order:\n");
+  for (auto it = map_cumt_to_fn.rbegin (); it != map_cumt_to_fn.rend (); it++)
+    printf ("\t\t%12lld ns %3.0f%% %s\n", static_cast<long long> (it->first), it->first * 100. / t_tot, it->second.c_str ());
+  printf ("\tFirst call order:\n");
+  for (string fn_name : m_fn_first_call_order)
+    {
+      int64_t tcum = map_fn_to_cum_t[fn_name];
+      printf ("\t\t%12lld ns %3.0f%% %s\n", static_cast<long long> (tcum), tcum * 100. / t_tot, fn_name.c_str ());
+    }
+
+  for (auto kv : m_map_fn_stats)
+    {
+      string fn_name = kv.first;
+      vm_profiler_fn_stats &stats = kv.second;
+
+      int64_t fn_cum_t = map_fn_to_cum_t[fn_name];
+      int64_t fn_self_cum_t = map_fn_to_self_cum_t[fn_name];
+      string annotated_source = map_fn_to_annotated_source[fn_name];
+      string annotated_bytecode = map_fn_to_annotated_bytecode[fn_name];
+
+      printf ("\n\n\nFunction: %s\n\n", kv.first.c_str ());
+      if (stats.m_fn_file.size ())
+        printf ("\tFile: %s\n", stats.m_fn_file.c_str ());
+      printf ("\tAmount of calls: %lld\n", static_cast<long long> (stats.m_n_calls));
+      printf ("\tCallers: ");
+      for (string caller : stats.m_set_callers)
+        printf ("%s ", caller.c_str ());
+      printf ("\n");
+      printf ("\tCumulative time: %9.5gs %lld ns\n", fn_cum_t/1e9, static_cast<long long> (fn_cum_t));
+      printf ("\tCumulative self time: %9.5gs %lld ns\n", fn_self_cum_t/1e9, static_cast<long long> (fn_self_cum_t));
+      printf ("\n\n");
+
+      if (annotated_source.size ())
+        {
+          printf ("\tAnnotated source:\n");
+          printf ("\t     ops         time     share\n");
+          printf ("\n");
+          printf ("%s\n\n", annotated_source.c_str ());
+        }
+      if (annotated_bytecode.size ())
+        {
+          printf ("\tAnnotated bytecode:\n");
+          printf ("\t    hits         time     share\n");
+          printf ("\n");
+          printf ("%s\n\n", annotated_bytecode.c_str ());
+        }
+      printf ("\n");
+    }
+}
+
+void
+vm_profiler::enter_fn (std::string caller_name, bytecode &bc)
+{
+  unsigned char *code = bc.m_code.data ();
+  std::string *name_data = bc.m_ids.data ();
+  unwind_data *unwind_data = &bc.m_unwind_data;
+
+  std::string callee_name = bc.m_data[2].string_value (); // profiler_name () queried at compile time
+
+  enter_fn (callee_name, caller_name, unwind_data, name_data, code);
+}
+
+void
+vm_profiler::enter_fn (std::string fn_name, std::string caller, octave::unwind_data *unwind_data, std::string *name_data, unsigned char *code)
+{
+  if (!m_map_fn_stats.count (fn_name))
+    m_fn_first_call_order.push_back (fn_name);
+
+  vm_profiler_fn_stats &callee_stat = m_map_fn_stats[fn_name];
+
+  callee_stat.m_set_callers.insert (caller);
+  callee_stat.m_v_callers.push_back (caller);
+  callee_stat.m_n_calls++;
+
+  vm_profiler_call call{};
+  call.m_callee = fn_name;
+  call.m_caller = caller;
+
+  int64_t now = unow ();
+  call.m_entry_time = now;
+
+  m_shadow_call_stack.push_back (call);
+
+  callee_stat.m_v_t.push_back (now);
+  callee_stat.m_v_ip.push_back (0);
+
+  if (callee_stat.m_code.size ())
+    return;
+
+  callee_stat.m_fn_file = unwind_data->m_file;
+  callee_stat.m_fn_name = unwind_data->m_name;
+
+  // We need to copy the bytecode with id names to the stat object to be able
+  // to print it later.
+  unsigned n_code = unwind_data->m_code_size;
+  unsigned n_ids = unwind_data->m_ids_size;
+  callee_stat.m_code = std::vector<unsigned char> (n_code);
+  callee_stat.m_ids = std::vector<std::string> (n_ids);
+
+  callee_stat.m_loc_entries = unwind_data->m_loc_entry;
+
+  for (unsigned i = 0; i < n_code; i++)
+    callee_stat.m_code[i] = code[i];
+  for (unsigned i = 0; i < n_ids; i++)
+    callee_stat.m_ids[i] = name_data[i];
+}
+
+void
+vm_profiler::purge_shadow_stack ()
+{
+  warning ("Profiler shadow stack got messed up. Measurement results might be inaccurate.");
+
+  m_shadow_call_stack.clear ();
+
+  for (auto &kv : m_map_fn_stats)
+    {
+      auto &v = kv.second;
+      v.m_v_callers.clear ();
+      v.m_v_t.clear ();
+      v.m_v_ip.clear ();
+    }
+}
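+
+// A rough sketch of the accounting done in exit_fn () below, with made-up
+// numbers: assume f calls g, and that 50 ns pass in f between the start of
+// the call opcode and the moment g starts executing (the "call overhead").
+// If g then accumulates 900 ns of self time and 0 ns in nested calls:
+//
+//   caller_call_overhead = caller_enters_callee - caller_enters_call   (50 ns)
+//   callee_dt = m_t_self_cum + m_t_call_cum - caller_call_overhead     (850 ns)
+//
+// callee_dt is booked on the call opcode in f (m_v_cum_call_t), unless the
+// call is recursive, in which case it is skipped to avoid double counting.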
+
+void
+vm_profiler::exit_fn (std::string fn_name)
+{
+  {
+    int64_t t_exit = unow ();
+
+    vm_profiler_fn_stats &callee_stat = m_map_fn_stats[fn_name];
+
+    // Add the cost of the RET up till now to the callee
+    if (callee_stat.m_v_t.size () && callee_stat.m_v_t.back () != -1)
+      {
+        int64_t t0 = callee_stat.m_v_t.back ();
+        int64_t dt = t_exit - t0;
+
+        callee_stat.add_t (dt);
+        this->add_t (dt);
+      }
+
+    if (!m_shadow_call_stack.size ())
+      goto error;
+    if (!callee_stat.m_v_callers.size ())
+      goto error;
+
+    bool is_recursive = false;
+    for (auto &call : m_shadow_call_stack)
+      {
+        if (call.m_caller == fn_name)
+          {
+            is_recursive = true;
+            break;
+          }
+      }
+
+    vm_profiler_call call = m_shadow_call_stack.back ();
+    m_shadow_call_stack.pop_back ();
+
+    std::string caller = call.m_caller;
+
+    std::string caller_according_to_callee = callee_stat.m_v_callers.back ();
+
+    // Pop one level
+    callee_stat.m_v_callers.pop_back ();
+    callee_stat.m_v_t.pop_back ();
+    callee_stat.m_v_ip.pop_back ();
+
+    if (caller_according_to_callee != caller)
+      goto error;
+
+    if (caller != "") // If the caller name is "" the callee has no profiled caller
+      {
+        vm_profiler_fn_stats &caller_stat = m_map_fn_stats[caller];
+
+        if (!caller_stat.m_v_t.size ())
+          goto error;
+
+        int64_t caller_enters_call = caller_stat.m_v_t.back ();
+        int64_t caller_enters_callee = call.m_entry_time;
+        int64_t caller_call_overhead = caller_enters_callee - caller_enters_call;
+        int64_t callee_dt = call.m_t_self_cum + call.m_t_call_cum - caller_call_overhead;
+
+        // Add the call's cumulative time to the caller's "time spent in bytecode call"-vector
+        // unless the call is recursive (to prevent confusing double bookkeeping of the time).
+        unsigned caller_ip = caller_stat.m_v_ip.back ();
+        caller_stat.maybe_resize (caller_ip);
+
+        if (!is_recursive)
+          {
+            // Add to cumulative time spent in calls from this ip, in caller
+            caller_stat.m_v_cum_call_t[caller_ip] += callee_dt;
+            // Add to cumulative time spent in *the latest call* to caller
+            if (m_shadow_call_stack.size ())
+              m_shadow_call_stack.back ().m_t_call_cum += callee_dt;
+          }
+        // Change the caller's last timestamp to now and subtract the caller's call overhead.
+        caller_stat.m_v_t.back () = unow () - caller_call_overhead;
+      }
+    return;
+  }
+error:
+  purge_shadow_stack ();
+  return;
+}
+
+// Debugging functions to be called from gdb
+
+extern "C" void
+vm_debug_print_ov (void *p)
+{
+  octave_value *ov = reinterpret_cast<octave_value *> (p);
+  ov->print (std::cout);
+}
+
+extern "C" void
+vm_debug_print_ovl (void *p)
+{
+  octave_value_list *ovl = reinterpret_cast<octave_value_list *> (p);
+
+  if (! ovl)
+    return;
+
+  for (int i = 0; i < ovl->length (); i++)
+    {
+      (*ovl) (i).print (std::cout);
+    }
+}
+
+
+extern "C" void dummy_mark_1 (void)
+{
+  static int cntr;
+  cntr++;
+  asm ("");
+}
+
+extern "C" void dummy_mark_2 (void)
+{
+  static int cntr;
+  cntr++;
+  asm ("");
+}
diff -r edbe81ee00c5 -r d2de83a80165 libinterp/parse-tree/pt-bytecode-vm.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libinterp/parse-tree/pt-bytecode-vm.h	Mon Apr 24 20:34:39 2023 +0200
@@ -0,0 +1,584 @@
+////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2022-2023 The Octave Project Developers
+//
+// See the file COPYRIGHT.md in the top-level directory of this
+// distribution or <https://octave.org/copyright/>.
+//
+// This file is part of Octave.
+//
+// Octave is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Octave is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Octave; see the file COPYING.  If not, see
+// <https://www.gnu.org/licenses/>.
+//
+////////////////////////////////////////////////////////////////////////
+
+/*
+
+  -- About the experimental VM for GNU Octave
+
+  The VM is a "monkey tracing" stack-based VM, executing linear bytecode compiled
+  from the abstract syntax tree (class tree_expression etc.).
+
+  Files of interest are:
+    * pt-bytecode-walk.cc:
+      The compiler translating the AST to bytecode
+    * pt-bytecode-vm.cc:
+      The VM
+    * stack-frame.cc:
+      bytecode_fcn_stack_frame, the dynamic stack frame
+
+  -- Stack
+  The VM has one stack where it puts arguments, returns, locals and temporaries.
+  The stack elements have the type 'union stack_element', which is a union of
+  octave_value and some pointers and native long long, double etc.
+
+  I.e. octave_value:s are constructed in place on the stack. Not all stack
+  elements are octave_value:s.
+
+  Nested calls to compiled bytecode functions use the same stack.
+
+  The stack area does not grow. If the stack space runs out, the execution aborts.
+
+  Arguments, returns and locals are accessed through their "slot number", i.e.
+  their offset from the base stack register.
+
+  At VM termination, the end and start of the stack are checked for magic numbers
+  that should be there, and the VM aborts if they have changed.
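+
+  As an illustration (a simplified picture derived from the RET op-code
+  layout described further down), a frame for "function c = f (a, b)" could
+  look like:
+
+    bsp[0]   ov c      (return value slot)
+    bsp[1]   ov a      (argument slot)
+    bsp[2]   ov b      (argument slot)
+    bsp[3]   ov ...    (locals and temporaries)
+    ...
+    sp ->              (first free stack element)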
+
+  -- Registers
+  The VM uses the following pseudo-registers:
+    * instruction pointer ('ip')
+    * base instruction register ('code')
+    * stack register ('sp')
+    * base stack register ('bsp')
+        The start of the current stack frame
+    * constant base register ('data')
+        A pointer to an array of octave_value "literal constants", like "3"
+
+  The registers are popped and pushed in each return or call together with the
+  following auxiliary data:
+    * unwind data
+    * name data (names of the identifiers)
+    * nargout
+    * nargin
+
+  Note that 'argnames' is lazy in the VM. There is more kludge state stored in
+  the VM object other than the happy-path state noted above.
+
+  -- Dynamic stack frame
+  The VM uses its own stack frames, but also pushes a 'stack_frame' of the subclass
+  'bytecode_fcn_stack_frame' to the 'tree_evaluator', to be able to cooperate
+  with C++-compiled functions and the 'tree_evaluator'.
+
+  'bytecode_fcn_stack_frame' is quite lazy and lets e.g. a compiled function
+  or user code executed with the 'tree_evaluator' create, read and write variables
+  on the VM stack.
+
+  -- Monkey tracing
+  During execution of some op-codes the VM checks the type of the operands
+  and might modify the bytecode to execute a specialized op-code.
+
+  E.g. the "index identifier"-opcode becomes the "index matrix with one scalar"-opcode
+  if the index is one double and the object to index is a matrix.
+
+  If the preconditions later do not hold, the specialized opcode replaces itself
+  with the general opcode.
+
+  "Monkey tracing" is a made-up term for this concept.
+
+  -- Function caching
+  Function lookups are cached in the corresponding slot of an identifier on
+  the VM stack. If any function is added to the symbol table, the current
+  directory is changed or 'clear' is called, all function caches are invalidated.
+
+  The function cache is dependent on the argument types. If the argument types
+  change, the cache is invalidated.
+
+  Binary and unary operators for doubles are looked up on VM start and cached in
+  the VM. They are not invalidated as long as the VM is running.
+
+  -- Compilation
+  At runtime, when user code is about to be executed, it is compiled, if VM
+  evaluation is turned on.
+
+  It is also possible to compile ahead of time.
+
+  Compiled code is reused on the next invocation of the user function. If the
+  user function is changed, the compiled code is cleared.
+
+  Compilation is done by the 'bytecode_walker' class in 'pt-bytecode-walk.cc'.
+
+  -- Opcodes
+
+  The op-codes are byte aligned and some are variable length.
+
+  The first byte always identifies the op-code and is also the offset used in the
+  dispatch table 'instr'.
+
+  'octave_value' is abbreviated 'ov' in this table.
+  "<-" means the state of the stack before the operation. The rightmost element
+  is the top of the stack.
+  "->" means the state of the stack after the operation.
+
+  ** Binary math operations
+  Pop two 'ov:s' off the stack and do the appropriate operation, then push the
+  resulting 'ov'. The top of the stack is the right hand side.
+
+  For all:
+  <- (ov lhs) (ov rhs)
+  -> (ov ans)
+
+  -- MUL DIV ADD SUB POW LDIV EL_MUL EL_DIV EL_POW EL_LDIV
+     *   /   +   -   ^   \    .*     ./     .^     .\
+
+  The following specializations for double arguments exist:
+    MUL_DBL, ADD_DBL, SUB_DBL, DIV_DBL, POW_DBL
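+
+  As a rough sketch of how such a specialization works (the real handlers
+  live in pt-bytecode-vm.cc and these names are illustrative only):
+
+    case MUL_DBL:
+      if (lhs and rhs are both scalar doubles)   // guard
+        push the double product                  // fast path
+      else
+        {
+          rewrite this opcode back to MUL;       // "monkey tracing"
+          execute the generic MUL handler;
+        }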
+
+  ** Compound math operations
+  Pop two 'ov:s' off the stack and do the appropriate operation, then push the
+  resulting 'ov'. The top of the stack is the right hand side.
+
+  The opcodes are combinations of a unary math operation and a binary one.
+
+  <- (ov lhs) (ov rhs)
+  -> (ov ans)
+
+  -- TRANS_MUL MUL_TRANS HERM_MUL MUL_HERM TRANS_LDIV HERM_LDIV
+     a.'*b     a*b.'     a'*b     a*b'     a.'\b      a'\b
+
+  ** Unary math operations
+  Pop one 'ov' and do the appropriate operation, then push the
+  resulting 'ov'.
+
+  <- (ov arg)
+  -> (ov ans)
+
+  -- UADD
+     Unary addition. Note that unary plus is not a nop in GNU Octave. It
+     can be overloaded to do whatever.
+  -- USUB
+     Unary subtraction
+  -- TRANS
+     Transpose, ".'"
+  -- HERM
+     Hermitian, "'"
+
+  ** Logical unary operations
+  Pop one 'ov' and do the appropriate operation, then push the
+  resulting 'ov'.
+
+  <- (ov arg)
+  -> (ov ans)
+
+  -- NOT
+     "!", "~"
+
+  -- UNARY_TRUE
+     Converts an ov on the stack to either ov false or ov true.
+     The op-code is used to construct control flow for e.g. short-circuits.
+     User values' truthiness is checked by JMP_IF and JMP_IFN, which error
+     on undefined values.
+
+  ** Logical binary operations
+  Pop two 'ov:s' off the stack and do the appropriate operation, then push the
+  resulting 'ov'. The top of the stack is the right hand side.
+
+  For all:
+  <- (ov lhs) (ov rhs)
+  -> (ov ans)
+
+  -- LE GR EQ NEQ GR_EQ LE_EQ EL_AND EL_OR
+     <  >  == !=  >=    <=    &      |
+
+  Note that EL_AND and EL_OR do not emulate braindead if-conditions. That
+  is done by the bytecode compiler with the help of the opcodes
+  BRAINDEAD_PRECONDITION and BRAINDEAD_WARNING together with some convoluted
+  bytecode.
+
+  The following specializations exist:
+    LE_DBL, LE_EQ_DBL, GR_DBL, GR_EQ_DBL, EQ_DBL, NEQ_DBL
+
+  ** Stack control
+  -- POP
+  <- (ov)
+  ->
+     Pop one 'ov' element off the stack.
+
+  -- DUP
+  <- (ov1)
+  -> (ov1) (ov1)
+     Duplicate the 'ov' on top of the stack and push it to the stack.
+
+  -- ROT
+  <- (ov1) (ov2)
+  -> (ov2) (ov1)
+     Rotate the top two 'ov:s' on the stack.
+
+  -- DUPN (uint8 offset) (uint8 n)
+  <- (ov -offset - n) ... (ov -offset) ... (offset amount of stack elements)
+  -> The range "(ov -offset - n) ... (ov -offset)" copied to the top of the stack in the same order.
+     Pushes 'n' ov:s from the stack at depth 'offset' to the top of the
+     stack. The copies have the same order on the stack as the originals.
+     An 'offset' of 0 means the top element of the stack.
+
+  -- PUSH_SLOT_NARGOUT0 (uint8 slot)
+  -- PUSH_SLOT_NARGOUT1 (uint8 slot)
+  -- PUSH_SLOT_NARGOUTN (uint8 slot) (uint8 nargout)
+  <-
+  -> (ov 1) (ov 2)? ... (ov n)?
+     If the local 'ov' at 'bsp[slot]' is an ordinary variable, push it
+     to the stack.
+
+     If the local is undefined, assume it is a command call function,
+     look the function name up, and call it with the nargout 0, 1 or n.
+
+     If the local is a function object, call it with the nargout 0, 1 or n.
+
+     PUSH_SLOT_NARGOUT1_SPECIAL is like PUSH_SLOT_NARGOUT1 but pushes 'classdef_metas'
+     instead of trying to execute them. PUSH_SLOT_DISP keeps track of whether the
+     slot variable was executed or not for a correct display call.
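+
+     For example, a lone statement "x;" where 'x' is never assigned in the
+     function compiles to a PUSH_SLOT_NARGOUT0 for the slot of 'x': at
+     runtime the VM pushes the variable 'x' if it is defined, and otherwise
+     resolves 'x' as a (command call) function, calls it with nargout 0 and
+     caches the lookup in the slot. (An illustrative summary of the rules
+     above, not an exhaustive one.)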
+
+  -- PUSH_SLOT_INDEXED (uint8 slot)
+  <-
+  -> (ov)
+     Push the local 'ov' at 'bsp[slot]' to the stack. This opcode is used for
+     e.g. pushing 'x' in "x(2)".
+
+  -- PUSH_PI (uint8 slot)
+  <-
+  -> (ov 1) (ov 2)? ... (ov n)?
+     Like PUSH_SLOT_NARGOUT1, but if the slot variable resolves to a call to
+     the builtin function 'pi', just push pi to the stack as a double ov.
+
+  -- PUSH_OV_U64
+  <-
+  -> (ov1)
+     Push an ov of the type uint64 with the value 0, to the stack.
+
+  -- PUSH_CELL
+  <- (ov i) (ov j) ... [a mess of ov:s]
+  -> (ov ans)
+     Create a cell ov on the stack with up to i*j ov objects in it.
+     Note that the last row can be shorter than the other rows and that any
+     row can be empty and ignored.
+
+     Each row is initially pushed as follows:
+       1. element by element, if any
+       2. an integer ov with the row length
+
+  -- PUSH_NIL
+  <-
+  -> (ov nil)
+     Push a default constructed 'octave_value' to the stack.
+
+  -- POP_N_INTS (uint8 n)
+  <- (int i1) ... (int in)
+  ->
+     Pops 'n' native values off the stack. They could be pointers or doubles,
+     not just int:s.
+
+  ** Data control
+  -- LOAD_CST (uint8 offset)
+  <-
+  -> (ov)
+     Load the 'ov' at 'data[offset]' and push it to the stack.
+     LOAD_CST_ALTx are duplicates of the opcode existing for branch prediction reasons.
+
+  -- LOAD_FAR_CST (int32 offset)
+  <-
+  -> (ov)
+     Load the 'ov' at 'data[offset]' and push it to the stack.
+
+  -- INIT_GLOBAL (uint8 type) (uint8 slot) (uint8 unused) (bool has_init_code) (uint16 target)?
+     Initializes a persistent or global variable depending on 'type'.
+     If 'has_init_code' is true, jumps to 'target' if the variable does not exist yet
+     in the global namespace. If 'has_init_code' is false, it is the end of the instruction.
+
+  ** Flow control
+  -- JMP (uint16 target)
+     Set the instruction register to the instruction base register plus target.
+
+  -- JMP_IF (uint16 target)
+  -- JMP_IFN (uint16 target)
+  <- (ov)
+  ->
+     Set the instruction register to the instruction base register plus target
+     if the argument is true/untrue.
+
+  -- RET
+  <- [saved caller frame] (int nargout) (ov ret1) ... (ov retn) (ov arg1) ... (ov argn) (ov local1) ... (ov localn)
+  -> (ov retn) ... (ov ret1)
+     Return from a bytecode frame to another.
+
+     There is always at least one ov on the stack after RET is executed. It might be the nil ov.
+
+  -- FOR_SETUP FOR_COND (uint16 after_target) (uint8 slot)
+  <- (ov range1)
+  -> (ov range1) (int64 n) (int64 i)
+     Executes a for-loop setup. Then falls through to the FOR_COND op-code, which checks
+     if a loop body is to be executed.
+
+     FOR_SETUP is always followed by a FOR_COND opcode.
+
+     The 'slot' is the slot for the iteration variable.
+
+     The 'after_target' is the instruction offset to after the loop body.
+
+     The end of the loop body jumps to the FOR_COND op-code.
+
+     After the loop body, and at each escape point in the body,
+     the two native integers and the ov range are popped.
+
+     FOR_COMPLEX_SETUP and FOR_COMPLEX_COND are similar for "struct key-value for-loops"
+     but need two slots.
+
+  -- THROW_IFERROBJ
+  <- (ov)
+  ->
+     Unwinds the stack until any exception handler if ov is an error object.
+
+
+  ... there are more op-codes.
+*/
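+
+/* A minimal sketch of the dispatch principle described above; the real
+   interpreter loop in pt-bytecode-vm.cc is heavily specialized and uses
+   the OCTAVE_LIKELY/OCTAVE_UNLIKELY branch hints, so the names below are
+   illustrative only:
+
+     const unsigned char *ip = code;
+     for (;;)
+       {
+         unsigned char op = *ip++;       // first byte identifies the op-code
+         switch (op)                     // offset into the dispatch table
+           {
+           case ADD: do_binary_add (); break;          // fixed length
+           case JMP: ip = code + read_u16 (ip); break; // 16-bit operand
+           // ...
+           }
+       }
+*/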
+
+#if ! defined (octave_pt_bytecode_vm_h)
+#define octave_pt_bytecode_vm_h 1
+
+#include "octave-config.h"
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "oct-lvalue.h"
+#include "ovl.h"
+
+#include "interpreter-private.h"
+#include "symtab.h"
+
+#include "pt-bytecode.h"
+
+# if defined (__GNUC__)
+# define OCTAVE_LIKELY(x) __builtin_expect(!!(x), 1)
+# else
+# define OCTAVE_LIKELY(x) !!(x)
+# endif
+
+# if defined (__GNUC__)
+# define OCTAVE_UNLIKELY(x) __builtin_expect(!!(x), 0)
+# else
+# define OCTAVE_UNLIKELY(x) !!(x)
+# endif
+
+OCTAVE_BEGIN_NAMESPACE(octave)
+
+class tree_evaluator;
+
+struct vm_profiler
+{
+  struct vm_profiler_call
+  {
+    std::string m_caller;
+    std::string m_callee;
+    int64_t m_entry_time;
+    int64_t m_t_self_cum; // Time spent in the callee itself
+    int64_t m_t_call_cum; // Time spent in bytecode calls, called from the callee
+  };
+
+  struct vm_profiler_fn_stats
+  {
+    // Cumulative ns time at op-code at offset
+    std::vector<int64_t> m_v_cum_t;
+    // Cumulative hits at op-code at offset
+    std::vector<int64_t> m_v_n_cum;
+    // Cumulative time spent in nested calls to a bytecode function at op-code at offset
+    std::vector<int64_t> m_v_cum_call_t;
+
+    void maybe_resize (unsigned ip)
+    {
+      if (ip >= m_v_cum_t.size ())
+        m_v_cum_t.resize (ip + 1);
+      if (ip >= m_v_n_cum.size ())
+        m_v_n_cum.resize (ip + 1);
+      if (ip >= m_v_cum_call_t.size ())
+        m_v_cum_call_t.resize (ip + 1);
+    }
+
+    // The last bytecode timestamp, i.e. the start of the currently running opcode. One level per call
+    std::vector<int64_t> m_v_t;
+    // The last ip, i.e. the ip being executed. One level per call
+    std::vector<unsigned> m_v_ip;
+    // Set of callers. One entry for each caller
+    std::set<std::string> m_set_callers;
+    // Amount of calls to this function
+    int64_t m_n_calls;
+
+    // Data structures to keep track of calls. One level per call
+    std::vector<std::string> m_v_callers; // Used in callee to change the last timestamp of caller
+
+    std::string m_fn_name;
+    std::string m_fn_file;
+    std::vector<unsigned char> m_code;    // Copy of the actual opcodes executed
+    std::vector<std::string> m_ids;       // Copy of the name data
+    std::vector<loc_entry> m_loc_entries; // Copy of source code location data
+
+    void add_t (int64_t dt);
+  };
+
+  void add_t (int64_t dt);
+
+  std::vector<vm_profiler_call> m_shadow_call_stack;
+
+  std::map<std::string, vm_profiler_fn_stats> m_map_fn_stats;
+
+  std::vector<std::string> m_fn_first_call_order;
+
+  static int64_t unow ();
+  void print_to_stdout ();
+  void enter_fn (std::string callee_name, std::string caller_name, octave::unwind_data *unwind_data, std::string *name_data, unsigned char *code);
+  void enter_fn (std::string caller_name, bytecode &bc);
+  void exit_fn (std::string fn);
+  void purge_shadow_stack ();
+};
+
+class vm
+{
+ public:
+
+  static constexpr size_t stack_size = 2048 * 8;
+  static constexpr size_t stack_pad = 32;
+  static constexpr size_t stack_magic_int = 0xBABEBEEFCAFE5000;
+  static constexpr size_t stack_min_for_new_call = 1024;
+
+  vm (tree_evaluator *tw, bytecode &initial_bytecode);
+
+  ~vm ();
+
+  bool m_dbg_proper_return = false;
+  bool m_could_not_push_frame = false;
+  bool m_unwinding_interrupt = false;
+  stack_element *m_stack0 = nullptr;
+
+  std::vector<std::shared_ptr<stack_frame>> m_frame_ptr_cache;
+
+  tree_evaluator *m_tw;
+  type_info *m_ti;
+  symbol_table *m_symtab;
+  stack_element *m_stack = nullptr;
+  stack_element *m_sp = 0;
+  stack_element *m_bsp = 0;
+  stack_element *m_rsp = 0;
+
+  type_info::binary_op_fcn m_fn_dbl_mul = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_add = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_sub = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_div = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_pow = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_le = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_le_eq = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_gr = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_gr_eq = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_eq = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_neq = nullptr;
+
+  type_info::unary_op_fcn m_fn_dbl_usub = nullptr;
+  type_info::unary_op_fcn m_fn_dbl_not = nullptr;
+  type_info::unary_op_fcn m_fn_bool_not = nullptr;
+
+  octave_function * m_pi_builtin_fn = nullptr;
+
+  static int constexpr m_scalar_typeid = 2;
+  static int constexpr m_matrix_typeid = 4;
+  static int constexpr m_bool_typeid = 10;
+
+  struct output_ignore_data {
+    std::vector<Matrix *> m_v_matrixes;
+    std::vector<const std::list<octave_lvalue> *> m_v_lvalue_list;
+
+    bool is_pending () { return m_v_matrixes.size () && m_v_matrixes.back () != nullptr; }
+
+    Matrix get_ignore_matrix ()
+    {
+      Matrix m = *m_v_matrixes.back ();
+      delete m_v_matrixes.back ();
+      m_v_matrixes.back () = nullptr;
+
+      return m;
+    }
+
+    const std::list<octave_lvalue> * pop_lvalue_list ()
+    {
+      auto *p = m_v_lvalue_list.back ();
+      m_v_lvalue_list.pop_back ();
+      return p;
+    }
+  };
+
+  output_ignore_data *m_output_ignore_data = nullptr;
+  const std::list<octave_lvalue> *m_original_lvalue_list = nullptr;
+
+  unsigned char *m_code;
+  octave_value *m_data;
+  std::string *m_name_data;
+  unwind_data *m_unwind_data;
+
+  int m_ip;
+
+  // Generic data container to recreate exceptions
+  struct error_data
+  {
+    // Execution exception
+    int m_exit_status;
+    bool m_safe_to_return;
+  };
+
+  error_data
+  handle_error (error_type et);
+
+  static
+  loc_entry find_loc (int ip, std::vector<loc_entry> &loc_entries);
+
+  octave_value_list execute_code (const octave_value_list &args, int root_nargout)
+    __attribute__ ((optimize("no-gcse","no-crossjumping")));
+
+  octave_value find_fcn_for_cmd_call (std::string *name);
+  octave_value handle_object_end (octave_value ov, int idx, int nargs);
+
+  void set_nargin (int nargin);
+
+  void set_nargout (int nargout);
+
+  unwind_entry* find_unwind_entry_for_current_state (bool only_find_unwind_protect);
+  int find_unwind_entry_for_forloop (int current_stack_depth);
+
+  static std::shared_ptr<vm_profiler> m_vm_profiler;
+  static bool m_profiler_enabled;
+  static bool m_trace_enabled;
+};
+
+OCTINTERP_API
+void print_bytecode (bytecode &bc);
+
+OCTINTERP_API
+std::vector<std::pair<int, std::string>>
+opcodes_to_strings (bytecode &bc);
+
+OCTINTERP_API
+std::vector<std::pair<int, std::string>>
+opcodes_to_strings (std::vector<unsigned char> &code, std::vector<std::string> &names);
+
+OCTAVE_END_NAMESPACE(octave)
+
+#endif
diff -r edbe81ee00c5 -r d2de83a80165 libinterp/parse-tree/pt-bytecode-walk.cc
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libinterp/parse-tree/pt-bytecode-walk.cc	Mon Apr 24 20:34:39 2023 +0200
@@ -0,0 +1,4955 @@
+////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2022-2023 The Octave Project Developers
+//
+// See the file COPYRIGHT.md in the top-level directory of this
+// distribution or <https://octave.org/copyright/>.
+//
+// This file is part of Octave.
+//
+// Octave is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Octave is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Octave; see the file COPYING.  If not, see
+// <https://www.gnu.org/licenses/>.
+//
+////////////////////////////////////////////////////////////////////////
+
+#if defined (HAVE_CONFIG_H)
+#  include "config.h"
+#endif
+
+#include "pt-all.h"
+#include "pt-bytecode-walk.h"
+#include "symrec.h"
+#include "pt-walk.h"
+#include "ov-scalar.h"
+
+//#pragma GCC optimize("Og")
+
+using namespace octave;
+
+void octave::compile_user_function (octave_user_function &ufn, bool do_print)
+{
+  try
+    {
+      if (ufn.is_classdef_constructor ())
+        error ("Classdef constructors are not supported by the VM yet"); // Needs special handling
+      if (ufn.is_inline_function () || ufn.is_nested_function ())
+        error ("Inlined or scoped functions are not supported by the VM yet");
+
+      // Begin with clearing the old bytecode, if any
+      ufn.clear_bytecode ();
+
+      bytecode_walker bw;
+
+      ufn.accept (bw);
+
+      if (do_print)
+        print_bytecode (bw.m_code);
+
+      ufn.set_bytecode (bw.m_code);
+
+      // Compile the subfunctions
+      auto subs = ufn.subfunctions ();
+      for (auto kv : subs)
+        {
+          octave_user_function *sub = kv.second.user_function_value ();
+          compile_user_function (*sub, do_print);
+          sub->get_bytecode ().m_unwind_data.m_file = ufn.fcn_file_name ();
+        }
+    }
+  catch (...)
+    {
+      ufn.clear_bytecode ();
+      throw;
+    }
+}
+
+// Class to walk the tree and see if an index expression has
+// an end in it.
+//
+// Does not walk nested index expressions.
+class find_end_walker : tree_walker
+{
+public:
+  static bool has_end (tree &e)
+  {
+    find_end_walker walker;
+    e.accept (walker);
+
+    return walker.m_has_end;
+  }
+
+  bool m_has_end = false;
+
+  void visit_identifier (tree_identifier &id)
+  {
+    std::string name = id.name ();
+    if (name == "end")
+      m_has_end = true;
+  }
+};
+
+class is_foldable_walker : tree_walker
+{
+public:
+  static bool is_foldable (tree_binary_expression &e)
+  {
+    return is_foldable_internal (e);
+  }
+
+  static bool is_foldable (tree_prefix_expression &e)
+  {
+    return is_foldable_internal (e);
+  }
+
+  static bool is_foldable (tree_postfix_expression &e)
+  {
+    return is_foldable_internal (e);
+  }
+
+private:
+  static bool is_foldable_internal (tree &e)
+  {
+    is_foldable_walker walker;
+
+    e.accept (walker);
+
+    return walker.m_is_foldable;
+  }
+
+  bool is_foldable_expr (tree_expression *e)
+  {
+    return e->is_binary_expression () || e->is_unary_expression () || e->is_constant ();
+  }
+
+  void visit_postfix_expression (tree_postfix_expression& e)
+  {
+    if (!m_is_foldable)
+      return;
+
+    tree_expression *op = e.operand ();
+
+    if (!is_foldable_expr (op))
+      {
+        m_is_foldable = false;
+        return;
+      }
+
+    op->accept (*this);
+  }
+
+  void visit_prefix_expression (tree_prefix_expression& e)
+  {
+    if (!m_is_foldable)
+      return;
+
+    tree_expression *op = e.operand ();
+
+    if (!is_foldable_expr (op))
+      {
+        m_is_foldable = false;
+        return;
+      }
+
+    op->accept (*this);
+  }
+
+  void visit_binary_expression (tree_binary_expression &e)
+  {
+    if (!m_is_foldable)
+      return;
+
+    tree_expression *rhs = e.rhs ();
+    tree_expression *lhs = e.lhs ();
+    if (!is_foldable_expr (rhs) || !is_foldable_expr (lhs))
+      {
+        m_is_foldable = false;
+        return;
+      }
+
+    lhs->accept (*this);
+    if (m_is_foldable)
+      rhs->accept (*this);
+  }
+
+  bool m_is_foldable = true;
+};
+
+class collect_idnames_walker : tree_walker
+{
+public:
+  static std::vector<std::pair<std::string, int>> collect_id_names (tree_statement_list &l)
+  {
+    collect_idnames_walker walker;
+
+    for (auto it = l.begin (); it != l.end (); it++)
+      {
+        if (*it)
+          (*it)->accept (walker);
+      }
+
+    return walker.m_id_names_and_offset;
+  }
+
+  static std::vector<std::pair<std::string, int>> collect_id_names (tree_expression &e)
+  {
+    collect_idnames_walker walker;
+    e.accept (walker);
+
+    return walker.m_id_names_and_offset;
+  }
+
+  std::vector<std::pair<std::string, int>> m_id_names_and_offset;
+
+  void visit_identifier (tree_identifier &id)
+  {
+    std::string name = id.name ();
+    if (name == "~") // We don't want this magic id
+      return;
+
+    m_id_names_and_offset.push_back ({name, id.symbol ().data_offset ()});
+  }
+
+  void visit_anon_fcn_handle (tree_anon_fcn_handle &)
+  {
+    // We don't collect any id:s in the handle, since the original scope
+    // doesn't.
+  }
+};
+
+template <typename T>
+typename T::value_type vector_pop (T &v)
+{
+  typename T::value_type tmp = v.back ();
+  v.pop_back ();
+  return tmp;
+}
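+
+// The PUSH_CODE* macros below append opcodes and operands to the code
+// vector.  Multi-byte operands are emitted in little-endian order; e.g.
+// (illustrative) PUSH_CODE_SHORT (0x1234) appends the bytes 0x34, 0x12,
+// and a jump target patched later with SET_CODE_SHORT (offset, 0x1234)
+// overwrites two previously emitted placeholder bytes in the same order.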
+
+#define ERR(msg) error("VM error %d: " msg, __LINE__)
+
+#define TODO(msg) error("VM error, Not done yet %d: " msg, __LINE__)
+
+#define CHECK(cond) \
+  do { \
+    if (!(cond)) \
+      ERR("Internal VM compiler consistency check failed, " #cond); \
+  } while ((0))
+
+#define PUSH_CODE(code_) do {\
+    int code_check_s_ = static_cast<int> (code_); \
+    unsigned char code_s_ = static_cast<unsigned char> (code_check_s_); \
+    CHECK (code_check_s_ < 256 && code_check_s_ >= -128); \
+    m_code.m_code.push_back(code_s_); \
+  } while ((0))
+
+#define PUSH_CODE_LOAD_CST(offset) do {\
+    unsigned offset_ = offset; \
+    if (offset_ < 65536)\
+      {\
+        if (offset_ >= 256) \
+          PUSH_CODE (INSTR::WIDE); \
+        emit_alt (m_cnt_alts_cst, {INSTR::LOAD_CST, INSTR::LOAD_CST_ALT2, \
+                                   INSTR::LOAD_CST_ALT3, INSTR::LOAD_CST_ALT4});\
+        if (offset_ >= 256) \
+          PUSH_CODE_SHORT (offset_);\
+        else\
+          PUSH_CODE (offset_);\
+      }\
+    else\
+      {\
+        PUSH_CODE (INSTR::LOAD_FAR_CST);\
+        PUSH_CODE_INT (offset_);\
+      }\
+  } while (0)
+
+#define PUSH_SSLOT(sslot) PUSH_CODE(sslot)
+#define PUSH_WSLOT(wslot) PUSH_CODE_SHORT(wslot)
+#define NEED_WIDE_SLOTS() (m_map_locals_to_slot.size () >= 256)
+
+#define MAYBE_PUSH_WIDE_OPEXT(slot) \
+  do {\
+    if (slot >= 256)\
+      PUSH_CODE (INSTR::WIDE);\
+  } while ((0))
+
+#define PUSH_SLOT(slot) \
+  do {\
+    if (slot >= 256)\
+      PUSH_WSLOT (slot);\
+    else\
+      PUSH_SSLOT (slot);\
+  } while ((0))
+
+#define CODE_SIZE() m_code.m_code.size()
+#define CODE(x) m_code.m_code[x]
+
+#define PUSH_CODE_SHORT(code_) do { \
+    unsigned u = code_; \
+    unsigned char b0 = u & 0xFF; \
+    unsigned char b1 = (u >> 8) & 0xFF; \
+    int code_check_ss_ = static_cast<int> (u); \
+    CHECK (code_check_ss_ < 65536 && code_check_ss_ >= -32768); \
+    PUSH_CODE (b0); \
+    PUSH_CODE (b1); \
+  } while ((0))
+
+#define PUSH_CODE_INT(code_) do { \
+    unsigned u = code_; \
+    unsigned char b0 = u & 0xFF; \
+    unsigned char b1 = (u >> 8) & 0xFF; \
+    unsigned char b2 = (u >> 16) & 0xFF;\
+    unsigned char b3 = (u >> 24) & 0xFF;\
+    PUSH_CODE (b0); \
+    PUSH_CODE (b1); \
+    PUSH_CODE (b2); \
+    PUSH_CODE (b3); \
+  } while ((0))
+
+#define SET_CODE_SHORT(offset, value) do { \
+    int tmp = offset; \
+    unsigned u = value; \
+    unsigned char b0 = u & 0xFF; \
+    unsigned char b1 = (u >> 8) & 0xFF; \
+    int code_check_s_ = static_cast<int> (u); \
+    CHECK (code_check_s_ < 65536 && code_check_s_ >= -32768); \
+    CODE (tmp) = b0; \
+    CODE (tmp + 1) = b1; \
+  } while ((0))
+
+#define PUSH_DATA(cst) m_code.m_data.push_back(cst)
+#define DATA_SIZE() m_code.m_data.size()
+
+// TODO: This optimization is nice and we should get it working again.
+#define PUSH_ALL_PATHS_TERMINATED() m_all_paths_terminated.push_back (false) +#define POP_ALL_PATHS_TERMINATED() vector_pop (m_all_paths_terminated) +#define PEEK_ALL_PATHS_TERMINATED() m_all_paths_terminated.back () +#define SET_ALL_PATHS_TERMINATED() m_all_paths_terminated.back () = true + +#define PUSH_BREAKS() m_need_break_target.push_back ({}) +#define POP_BREAKS() vector_pop (m_need_break_target) +#define PUSH_NEED_BREAK(offset) m_need_break_target.back ().push_back (offset) +#define N_BREAKS() m_need_break_target.size () + +#define PUSH_CONTINUE_TARGET(target) m_continue_target.push_back ({}) +#define POP_CONTINUE_TARGET() vector_pop (m_continue_target) +#define PUSH_NEED_CONTINUE_TARGET(offset) \ + m_continue_target.back ().push_back (offset) + +#define SLOT(name) get_slot (name) + +#define CHECK_NONNULL(ptr) if (!ptr) error ("Unexpected null %d", __LINE__) + +#define PUSH_ID_BEGIN_INDEXED(slot, idx, narg, is_obj) \ + m_indexed_id.push_back ({slot, idx, narg, is_obj}) +#define POP_ID_BEING_INDEXED() m_indexed_id.pop_back () +#define ID_IS_BEING_INDEXED() (m_indexed_id.size () != 0) +#define N_IDS_BEING_INDEXED() (m_indexed_id.size ()) +#define PEEK_ID_BEING_INDEXED() m_indexed_id.back () +#define IDS_BEING_INDEXED(idx) m_indexed_id[idx] + +#define PUSH_NESTING_STATEMENT(type) m_nesting_statement.push_back (type) +#define POP_NESTING_STATEMENT() m_nesting_statement.pop_back () +#define NESTING_STATEMENTS() m_nesting_statement + +// Track how many expression deep we are in the walk. +// I.e. identifiers need to know if they are: +// foo; %depth 1 +// or +// foo * 2; %depth 2 for id foo +// +// so that nargout for a command call at root is zero. +// E.g.: +// tic; +#define INC_DEPTH() ++m_depth +#define DEC_DEPTH() --m_depth +#define DEPTH() m_depth + +// We need to track the expected amount of output variables +// for each expression. 
E.g.: +// foo(); %0 +// a = foo (); %1 +// [a b] = foo (); %2 +// [a b] = foo (foo () + foo ()); %2 for outer, 1 for inner foo + +#define NARGOUT() m_nargout.back () +#define PUSH_NARGOUT(nargout) m_nargout.push_back (nargout) +#define POP_NARGOUT() vector_pop (m_nargout) + +#define PUSH_ARGNAMES_ENTRY(arg_nm_e) m_code.m_unwind_data.m_argname_entries.push_back (arg_nm_e) + +#define PUSH_UNWIND_RETURN_TARGETS() m_need_unwind_target.push_back ({}) +#define POP_UNWIND_RETURN_TARGET() vector_pop (m_need_unwind_target) +#define N_UNWIND_RETURN_TARGETS() m_need_unwind_target.size () +#define PUSH_A_UNWIND_RETURN_TARGET(offset) \ + m_need_unwind_target.back ().push_back (offset) + +#define PUSH_LOC() m_code.m_unwind_data.m_loc_entry.push_back ({}) +#define LOC(i) m_code.m_unwind_data.m_loc_entry[i] +#define N_LOC() m_code.m_unwind_data.m_loc_entry.size () + +#define PUSH_UNWIND() m_code.m_unwind_data.m_unwind_entries.push_back ({}) +#define UNWIND(i) m_code.m_unwind_data.m_unwind_entries[i] +#define N_UNWIND() m_code.m_unwind_data.m_unwind_entries.size () + +#define PUSH_GLOBAL(name) do {m_map_id_is_global[name] = 1;} while ((0)) +#define IS_GLOBAL(name) (m_map_id_is_global.find (name) !=\ + m_map_id_is_global.end ()) + +#define PUSH_PERSISTENT(name) do {m_map_id_is_persistent[name] = 1;} while ((0)) +#define IS_PERSISTENT(name) (m_map_id_is_persistent.find (name) !=\ + m_map_id_is_persistent.end ()) + +// Note that the placement of PUSH_TREE_FOR_DBG() need to mirror the walk in pt-bp.cc +#define PUSH_TREE_FOR_DBG(ptree) do { m_code.m_unwind_data.m_ip_to_tree[CODE_SIZE ()] = ptree; } while(0) +#define PUSH_TREE_FOR_EVAL(ptree) do { m_code.m_unwind_data.m_ip_to_tree[-CODE_SIZE ()] = ptree; } while(0) + +void +bytecode_walker:: +visit_statement_list (tree_statement_list& lst) +{ + for (tree_statement *elt : lst) + { + CHECK_NONNULL (elt); + + PUSH_NARGOUT (0); + + elt->accept (*this); + POP_NARGOUT (); + } +} + +void +bytecode_walker:: +visit_statement (tree_statement& stmt) +{ + if (stmt.is_expression ()) + { + int loc_id = N_LOC (); + PUSH_LOC (); + LOC (loc_id).m_ip_start = CODE_SIZE (); + + tree_expression *expr = stmt.expression (); + PUSH_TREE_FOR_DBG(expr); + CHECK_NONNULL (expr); + expr->accept (*this); + + LOC (loc_id).m_ip_end = CODE_SIZE (); + LOC (loc_id).m_col = stmt.column (); + LOC (loc_id).m_line = stmt.line (); + } + else if (stmt.is_command ()) + { + tree_command *cmd = stmt.command (); + CHECK_NONNULL (cmd); + cmd->accept (*this); + } + else + TODO (); +} + +void +bytecode_walker::emit_alt (int &cntr, std::vector alts) +{ + unsigned n = alts.size (); + unsigned offset = cntr++ % n; + PUSH_CODE (alts [offset]); +} + +bytecode_walker::emit_unwind_protect_data +bytecode_walker::emit_unwind_protect_code_start () +{ + emit_unwind_protect_data D; // Keeps track of state for emit_unwind_protect_code_before_cleanup() and emit_unwind_protect_code_end() + + // Unwind protect has a body and cleanup part that always + // is executed. + // + // If the VM is unwinding it enters the cleanup code with an + // error object on the stack. The body puts a nil object on the + // stack. + // + // If there is an error object on the stack at the end of the cleanup + // code it rethrows it. + // + // Returns in the body jumps to the cleanup code before actually returning. + // If a return is reached in the body, a true object is pushed to the stack, + // which is checked in the cleanup code to know if we are falling through or + // are supposed to return. 
+ // + // The same applies to breaks, so the code underneath gets abit messy. + // + // The body_expr, cleanup_expr and cleanup_instructions parameters are for + // when we need to emit some internal cleanup code that has no corrensponding + // unwind_protect in the user code. + + int unwind_idx = N_UNWIND (); + PUSH_UNWIND(); + + UNWIND (unwind_idx).m_ip_start = CODE_SIZE (); + + UNWIND (unwind_idx).m_unwind_entry_type = + unwind_entry_type::UNWIND_PROTECT; + + UNWIND (unwind_idx).m_stack_depth = n_on_stack_due_to_stmt(); + + // Returns need to execute the unwind cleanup code before + // returning, so we need to keep track of offsets that need + // to jump to the cleanup code. + PUSH_UNWIND_RETURN_TARGETS (); + + // We need to store away the pending "need breaks" since any break in the + // unwind protect body need to jump to the cleanup code. + std::vector v_need_breaks_initial; + bool break_stack_populated = N_BREAKS (); + if (break_stack_populated) + { + v_need_breaks_initial = POP_BREAKS (); + PUSH_BREAKS (); + } + + D.m_break_stack_populated = break_stack_populated; + D.m_idx_unwind = unwind_idx; + D.m_v_need_breaks_initial = v_need_breaks_initial; + + return D; +} + +void +bytecode_walker::emit_unwind_protect_code_before_cleanup (emit_unwind_protect_data &D) +{ + // If the vm is unwinding it will push an error object to + // the stack. If we are just done executing the body we + // push a nil ov to the stack. + // + // At the end of the cleanup code we check the ov on the stack + // and continue to unwind if it is an error object, otherwise + // just execute the next instruction. + PUSH_CODE (INSTR::PUSH_NIL); + // For unwinding we need to keep track of the ov we pushed. + PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK); + + auto v_need_cleanup = POP_UNWIND_RETURN_TARGET (); + int n_need_cleanup = v_need_cleanup.size (); + + std::vector v_need_break; + if (D.m_break_stack_populated) + v_need_break = POP_BREAKS(); + + int n_need_break = v_need_break.size (); + + // If there is a return statement inside the unwind body it need + // to jump to the cleanup code before the actual return. The return + // statement pushed a true ov to the stack, which is checked at the end of the + // cleanup code, since we use the same code for just falling throught too. + // + // The same applies to breaks, and also the combination of a possibility of + // breaks and returns. + int n_falses = 0; + if (n_need_break && n_need_cleanup) + { + n_falses = 2; + // These nils ov is the break and return marker if we are falling + // through to the cleanup code from the body. + // We have an error object on the stack. + PUSH_CODE (INSTR::PUSH_FALSE); // return marker + PUSH_CODE (INSTR::PUSH_FALSE); // break marker + PUSH_CODE (INSTR::JMP); + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + + // For unwinding we need to keep track of the ovs we pushed. 
+ PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK); + PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK); + + // Set the offset for all the break jumps that need to go to here + for (int need : v_need_break) + SET_CODE_SHORT (need, CODE_SIZE ()); + + + PUSH_CODE (INSTR::PUSH_NIL); // error object + PUSH_CODE (INSTR::PUSH_FALSE); // return marker + PUSH_CODE (INSTR::PUSH_TRUE);// break marker + PUSH_CODE (INSTR::JMP); + int also_need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + + // Set the offset for all the return jumps that need to go to here + for (int need_cleanup : v_need_cleanup) + SET_CODE_SHORT (need_cleanup, CODE_SIZE ()); + + PUSH_CODE (INSTR::PUSH_NIL); // error object + PUSH_CODE (INSTR::PUSH_TRUE);// return marker + PUSH_CODE (INSTR::PUSH_FALSE); // break marker + + // If we were falling throught the body to the cleanup we jump to here + SET_CODE_SHORT(need_after, CODE_SIZE ()); + SET_CODE_SHORT(also_need_after, CODE_SIZE ()); + } + else if (n_need_break) + { + n_falses = 1; + // This nil ov is the break marker if we are falling through to the + // cleanup code from the body. We have an error object on the stack. + PUSH_CODE (INSTR::PUSH_FALSE); // break marker + PUSH_CODE (INSTR::JMP); + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + + // For unwinding we need to keep track of the ov we pushed. + PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK); + + // Set the offset for all the break jumps that need to go to here + for (int need : v_need_break) + SET_CODE_SHORT (need, CODE_SIZE ()); + + PUSH_CODE (INSTR::PUSH_NIL); // error object + PUSH_CODE (INSTR::PUSH_TRUE);// break marker + + SET_CODE_SHORT(need_after, CODE_SIZE ()); + } + else if (n_need_cleanup) + { + n_falses = 1; + // This nil ov is the return marker if we are falling through to the + // cleanup code from the body + PUSH_CODE (INSTR::PUSH_FALSE); // return marker + PUSH_CODE (INSTR::JMP); // We need to skip the pushes for the returns + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + + // For unwinding we need to keep track of the ov we pushed. 
+ PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK); + + // Set the offset for all the jumps that need to go to here + for (int need_cleanup : v_need_cleanup) + SET_CODE_SHORT (need_cleanup, CODE_SIZE ()); + + PUSH_CODE (INSTR::PUSH_NIL); // error object + PUSH_CODE (INSTR::PUSH_TRUE);// return marker + + SET_CODE_SHORT(need_after, CODE_SIZE ()); + } + + // This is the end of protected code + UNWIND (D.m_idx_unwind).m_ip_end = CODE_SIZE (); + + if (n_falses) + { + // Fallthrough code do not need false pushes + PUSH_CODE (INSTR::JMP); + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + // An unwind will jump to here and needs some + // falses pushed to mark return or breaks + UNWIND (D.m_idx_unwind).m_ip_target = CODE_SIZE (); + for (int i = 0; i < n_falses; i++) + PUSH_CODE (INSTR::PUSH_FALSE); + + SET_CODE_SHORT(need_after, CODE_SIZE ()); + } + else + UNWIND (D.m_idx_unwind).m_ip_target = CODE_SIZE (); + + // The body will just fall through into the unwind clean up code + + // There might be breaks in the cleanup code too + if (D.m_break_stack_populated) + PUSH_BREAKS(); + + D.m_n_need_break = n_need_break; + D.m_n_need_cleanup = n_need_cleanup; +} + +void +bytecode_walker::emit_unwind_protect_code_end (emit_unwind_protect_data &D) +{ + std::vector v_need_break_cleanup; + if (D.m_break_stack_populated) + v_need_break_cleanup = POP_BREAKS (); + + if (v_need_break_cleanup.size ()) + TODO ("break in cleanup code"); + + if (D.m_break_stack_populated) + { + // Restore the initial "need breaks" + PUSH_BREAKS (); + for (int offset : D.m_v_need_breaks_initial) + PUSH_NEED_BREAK (offset); + } + + if (D.m_n_need_break && D.m_n_need_cleanup) + TODO ("Return and break nested"); + if (D.m_n_need_break) + { + // The break ov marker is on the stack. + // If it is not true, we skip the break jump + PUSH_CODE (INSTR::JMP_IFN); + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + + POP_NESTING_STATEMENT (); // The jump ate the break marker + + PUSH_CODE (INSTR::POP); // Pop the error object + + // So, we break jump from here. + // The visitor for the loop will write to proper target + PUSH_CODE (INSTR::JMP); + int need_break = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + PUSH_NEED_BREAK (need_break); // Nesting loop need to know + + // If we are not breaking we jump to here + SET_CODE_SHORT (need_after, CODE_SIZE ()); + } + // Check if we are doing a return unwind + else if (D.m_n_need_cleanup) + { + // If we are in another unwind protect we need to jump to its cleanup + // code if the return ov marker is true + if (N_UNWIND_RETURN_TARGETS()) + { + // The return ov marker is on the stack. + // If it is not true, we skip the "jump bridge" + PUSH_CODE (INSTR::JMP_IFN); + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + + POP_NESTING_STATEMENT (); // The jump ate the return marker + + PUSH_CODE (INSTR::POP); // Pop the error object + + PUSH_CODE (INSTR::JMP); // Jump to the nesting unwind protect + int need_unwind = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + PUSH_A_UNWIND_RETURN_TARGET (need_unwind); + + // If we are not returning we jump to here + SET_CODE_SHORT (need_after, CODE_SIZE ()); + } + // Return if the return marker on the stack is true + else + { + // The return ov marker is on the stack. 
+ // If it is not true, we skip the return + PUSH_CODE (INSTR::JMP_IFN); + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + + POP_NESTING_STATEMENT (); // The jump ate the return marker + + // Generate code for a return + emit_return (); + + // If we are not returning we jump to here + SET_CODE_SHORT (need_after, CODE_SIZE ()); + } + } + // If there is an error object on top of the stack we + // need to continue unwinding. + PUSH_CODE (INSTR::THROW_IFERROBJ); + + POP_NESTING_STATEMENT (); +} + +void +bytecode_walker:: +emit_unwind_protect_code (tree_statement_list *body, + tree_statement_list *cleanup_code, + tree_expression *body_expr, + tree_expression *cleanup_expr, + std::vector cleanup_instructions) +{ + // Unwind protect has a body and cleanup part that always + // is executed. + // + // If the VM is unwinding it enters the cleanup code with an + // error object on the stack. The body puts a nil object on the + // stack. + // + // If there is an error object on the stack at the end of the cleanup + // code it rethrows it. + // + // Returns in the body jumps to the cleanup code before actually returning. + // If a return is reached in the body, a true object is pushed to the stack, + // which is checked in the cleanup code to know if we are falling through or + // are supposed to return. + // + // The same applies to breaks, so the code underneath gets abit messy. + // + // The body_expr, cleanup_expr and cleanup_instructions parameters are for + // when we need to emit some internal cleanup code that has no corrensponding + // unwind_protect in the user code. + + int unwind_idx = N_UNWIND (); + PUSH_UNWIND(); + + UNWIND (unwind_idx).m_ip_start = CODE_SIZE (); + + UNWIND (unwind_idx).m_unwind_entry_type = + unwind_entry_type::UNWIND_PROTECT; + + UNWIND (unwind_idx).m_stack_depth = n_on_stack_due_to_stmt(); + + // Returns need to execute the unwind cleanup code before + // returning, so we need to keep track of offsets that need + // to jump to the cleanup code. + PUSH_UNWIND_RETURN_TARGETS (); + + // We need to store away the pending "need breaks" since any break in the + // unwind protect body need to jump to the cleanup code. + std::vector v_need_breaks_initial; + bool break_stack_populated = N_BREAKS (); + if (break_stack_populated) + { + v_need_breaks_initial = POP_BREAKS (); + PUSH_BREAKS (); + } + + // Walk the body + if (body) + body->accept (*this); + if (body_expr) + body_expr->accept (*this); + + // If the vm is unwinding it will push an error object to + // the stack. If we are just done executing the body we + // push a nil ov to the stack. + // + // At the end of the cleanup code we check the ov on the stack + // and continue to unwind if it is an error object, otherwise + // just execute the next instruction. + PUSH_CODE (INSTR::PUSH_NIL); + // For unwinding we need to keep track of the ov we pushed. + PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK); + + auto v_need_cleanup = POP_UNWIND_RETURN_TARGET (); + int n_need_cleanup = v_need_cleanup.size (); + + std::vector v_need_break; + if (break_stack_populated) + v_need_break = POP_BREAKS(); + + int n_need_break = v_need_break.size (); + + // If there is a return statement inside the unwind body it need + // to jump to the cleanup code before the actual return. The return + // statement pushed a true ov to the stack, which is checked at the end of the + // cleanup code, since we use the same code for just falling throught too. 
+ // + // The same applies to breaks, and also the combination of a possibility of + // breaks and returns. + int n_falses = 0; + if (n_need_break && n_need_cleanup) + { + n_falses = 2; + // These nils ov is the break and return marker if we are falling + // through to the cleanup code from the body. + // We have an error object on the stack. + PUSH_CODE (INSTR::PUSH_FALSE); // return marker + PUSH_CODE (INSTR::PUSH_FALSE); // break marker + PUSH_CODE (INSTR::JMP); + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + + // For unwinding we need to keep track of the ovs we pushed. + PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK); + PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK); + + // Set the offset for all the break jumps that need to go to here + for (int need : v_need_break) + SET_CODE_SHORT (need, CODE_SIZE ()); + + + PUSH_CODE (INSTR::PUSH_NIL); // error object + PUSH_CODE (INSTR::PUSH_FALSE); // return marker + PUSH_CODE (INSTR::PUSH_TRUE);// break marker + PUSH_CODE (INSTR::JMP); + int also_need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + + // Set the offset for all the return jumps that need to go to here + for (int need_cleanup : v_need_cleanup) + SET_CODE_SHORT (need_cleanup, CODE_SIZE ()); + + PUSH_CODE (INSTR::PUSH_NIL); // error object + PUSH_CODE (INSTR::PUSH_TRUE);// return marker + PUSH_CODE (INSTR::PUSH_FALSE); // break marker + + // If we were falling throught the body to the cleanup we jump to here + SET_CODE_SHORT(need_after, CODE_SIZE ()); + SET_CODE_SHORT(also_need_after, CODE_SIZE ()); + } + else if (n_need_break) + { + n_falses = 1; + // This nil ov is the break marker if we are falling through to the + // cleanup code from the body. We have an error object on the stack. + PUSH_CODE (INSTR::PUSH_FALSE); // break marker + PUSH_CODE (INSTR::JMP); + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + + // For unwinding we need to keep track of the ov we pushed. + PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK); + + // Set the offset for all the break jumps that need to go to here + for (int need : v_need_break) + SET_CODE_SHORT (need, CODE_SIZE ()); + + PUSH_CODE (INSTR::PUSH_NIL); // error object + PUSH_CODE (INSTR::PUSH_TRUE);// break marker + + SET_CODE_SHORT(need_after, CODE_SIZE ()); + } + else if (n_need_cleanup) + { + n_falses = 1; + // This nil ov is the return marker if we are falling through to the + // cleanup code from the body + PUSH_CODE (INSTR::PUSH_FALSE); // return marker + PUSH_CODE (INSTR::JMP); // We need to skip the pushes for the returns + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + + // For unwinding we need to keep track of the ov we pushed. 
+ PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK); + + // Set the offset for all the jumps that need to go to here + for (int need_cleanup : v_need_cleanup) + SET_CODE_SHORT (need_cleanup, CODE_SIZE ()); + + PUSH_CODE (INSTR::PUSH_NIL); // error object + PUSH_CODE (INSTR::PUSH_TRUE);// return marker + + SET_CODE_SHORT(need_after, CODE_SIZE ()); + } + + // This is the end of protected code + UNWIND (unwind_idx).m_ip_end = CODE_SIZE (); + + if (n_falses) + { + // Fallthrough code do not need false pushes + PUSH_CODE (INSTR::JMP); + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + // An unwind will jump to here and needs some + // falses pushed to mark return or breaks + UNWIND (unwind_idx).m_ip_target = CODE_SIZE (); + for (int i = 0; i < n_falses; i++) + PUSH_CODE (INSTR::PUSH_FALSE); + + SET_CODE_SHORT(need_after, CODE_SIZE ()); + } + else + UNWIND (unwind_idx).m_ip_target = CODE_SIZE (); + + // The body will just fall through into the unwind clean up code + + // There might be breaks in the cleanup code too + if (break_stack_populated) + PUSH_BREAKS(); + + // Walk the clean up code + if (cleanup_code) + cleanup_code->accept (*this); + if (cleanup_expr) + cleanup_expr->accept (*this); + + // Used to e.g. always call op CLEAR_IGNORE_OUTPUTS + for (auto instr : cleanup_instructions) + PUSH_CODE (instr); + + std::vector v_need_break_cleanup; + if (break_stack_populated) + v_need_break_cleanup = POP_BREAKS (); + + if (v_need_break_cleanup.size ()) + TODO ("break in cleanup code"); + + if (break_stack_populated) + { + // Restore the initial "need breaks" + PUSH_BREAKS (); + for (int offset : v_need_breaks_initial) + PUSH_NEED_BREAK (offset); + } + + if (n_need_break && n_need_cleanup) + TODO ("Return and break nested"); + if (n_need_break) + { + // The break ov marker is on the stack. + // If it is not true, we skip the break jump + PUSH_CODE (INSTR::JMP_IFN); + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + + POP_NESTING_STATEMENT (); // The jump ate the break marker + + PUSH_CODE (INSTR::POP); // Pop the error object + + // So, we break jump from here. + // The visitor for the loop will write to proper target + PUSH_CODE (INSTR::JMP); + int need_break = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + PUSH_NEED_BREAK (need_break); // Nesting loop need to know + + // If we are not breaking we jump to here + SET_CODE_SHORT (need_after, CODE_SIZE ()); + } + // Check if we are doing a return unwind + else if (n_need_cleanup) + { + // If we are in another unwind protect we need to jump to its cleanup + // code if the return ov marker is true + if (N_UNWIND_RETURN_TARGETS()) + { + // The return ov marker is on the stack. + // If it is not true, we skip the "jump bridge" + PUSH_CODE (INSTR::JMP_IFN); + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + + POP_NESTING_STATEMENT (); // The jump ate the return marker + + PUSH_CODE (INSTR::POP); // Pop the error object + + PUSH_CODE (INSTR::JMP); // Jump to the nesting unwind protect + int need_unwind = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + PUSH_A_UNWIND_RETURN_TARGET (need_unwind); + + // If we are not returning we jump to here + SET_CODE_SHORT (need_after, CODE_SIZE ()); + } + // Return if the return marker on the stack is true + else + { + // The return ov marker is on the stack. 
+ // If it is not true, we skip the return
+ PUSH_CODE (INSTR::JMP_IFN);
+ int need_after = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ POP_NESTING_STATEMENT (); // The jump ate the return marker
+
+ // Generate code for a return
+ emit_return ();
+
+ // If we are not returning we jump to here
+ SET_CODE_SHORT (need_after, CODE_SIZE ());
+ }
+ }
+ // If there is an error object on top of the stack we
+ // need to continue unwinding.
+ PUSH_CODE (INSTR::THROW_IFERROBJ);
+
+ POP_NESTING_STATEMENT ();
+}
+
+void
+bytecode_walker::
+visit_unwind_protect_command (tree_unwind_protect_command& cmd)
+{
+ emit_unwind_protect_code (cmd.body (), cmd.cleanup ());
+}
+
+void
+bytecode_walker::
+visit_try_catch_command (tree_try_catch_command& cmd)
+{
+ // So we are in a try catch.
+ int unwind_idx = N_UNWIND ();
+ PUSH_UNWIND();
+
+ UNWIND (unwind_idx).m_ip_start = CODE_SIZE ();
+
+ UNWIND (unwind_idx).m_unwind_entry_type =
+ unwind_entry_type::TRY_CATCH;
+
+ tree_statement_list *try_code = cmd.body ();
+
+ // Walk the body for the code
+ if (try_code)
+ try_code->accept (*this);
+ // We need to jump past the catch code that will come after
+ PUSH_CODE (INSTR::JMP);
+ int need_after = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ // Mark an end to the "try zone"
+ UNWIND (unwind_idx).m_ip_end = CODE_SIZE ();
+
+ // We put the catch code right after the try body
+ UNWIND (unwind_idx).m_ip_target = CODE_SIZE ();
+
+ // For loops add two native ints and one ov to the stack,
+ // and switches add one ov to the stack, so we need to
+ // record how many things we have added to the stack,
+ // from for loops and switches.
+ UNWIND (unwind_idx).m_stack_depth = n_on_stack_due_to_stmt ();
+
+ // The optional identifier "catch id"
+ tree_identifier *expr_id = cmd.identifier ();
+
+ // The unwind code in the vm will push an error object ...
+ if (expr_id)
+ {
+ // ... so assign it to the identifier's slot.
+ std::string name = expr_id->name ();
+ int slot = add_id_to_table (name);
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::ASSIGN);
+ PUSH_SLOT (slot);
+ }
+ else
+ {
+ // ... just pop the error object unceremoniously.
+ PUSH_CODE (INSTR::POP);
+ }
+
+ // Walk the catch code
+ tree_statement_list *catch_code = cmd.cleanup ();
+ if (catch_code)
+ catch_code->accept (*this);
+
+ // The body jumps to here
+ SET_CODE_SHORT (need_after, CODE_SIZE ());
+
+ return;
+}
+
+// For loops add two native ints and one ov to the stack,
+// and switches etc add one ov to the stack, so we need to
+// know how many things we have added to the stack
+int
+bytecode_walker::
+n_on_stack_due_to_stmt ()
+{
+ auto v = NESTING_STATEMENTS();
+ int n_things_on_stack = 0;
+ for (auto it = v.rbegin (); it != v.rend (); it++)
+ {
+ nesting_statement t = *it;
+ switch (t)
+ {
+ case nesting_statement::FOR_LOOP:
+ n_things_on_stack += 3;
+ break;
+ case nesting_statement::ONE_OV_ON_STACK:
+ n_things_on_stack += 1;
+ break;
+ default:
+ ERR("Invalid state");
+ }
+ }
+
+ return n_things_on_stack;
+}
+
+void
+bytecode_walker::
+visit_decl_command (tree_decl_command& cmd)
+{
+ tree_decl_init_list *lst = cmd.initializer_list ();
+
+ CHECK_NONNULL (lst);
+
+ // A decl list might contain multiple declarations.
+ // E.g.
"global a b = 3 c" + for (auto it = lst->begin (); it != lst->end (); it++) + { + tree_decl_elt *el = *it; + CHECK_NONNULL (el); + + std::string name = el->name (); + + int slot = add_id_to_table (name); + + if (el->is_global () || el->is_persistent()) + { + if (el->is_global ()) + PUSH_GLOBAL (name); + if (el->is_persistent()) + PUSH_PERSISTENT (name); + + // Slot for variable to keep track off if the variable is actually + // a global. Prepended with "#" to not collide. "+" for persistent. + // + // We need this since the same identifier in a function can be both + // a local or a global depending on whether the global declare + // statement is reached or not. + // + // Since the name of the identifier that is declared global might + // allready be used as a local, we also need to store the slot number + // of the #-marker in the code too. If this feature is removed, we + // can save some space in the OP-codes making the slot number implicit + // +1. + std::string prefix; + if (el->is_global ()) + prefix = "#"; + else + prefix = "+"; + + int prefix_slot = add_id_to_table (prefix + name); + + PUSH_CODE (INSTR::GLOBAL_INIT); + if (el->is_global ()) + PUSH_CODE (global_type::GLOBAL); + else if (el->is_persistent ()) + { + PUSH_CODE (global_type::PERSISTENT); + // We need a "offset" for the persistent variable that + // matches the exact offset the treewalker would use + tree_identifier *id = el->ident (); + CHECK_NONNULL (id); + int offset = id->symbol ().data_offset (); + + CHECK (offset < 256); // TODO: Support more slots + + // The VM need to know the special persistent variable offset + // so we store it in the unwind data + m_code.m_unwind_data. + m_slot_to_persistent_slot[slot] = offset; + } + PUSH_WSLOT (slot); + PUSH_WSLOT (prefix_slot); + + tree_expression *expr = el->expression (); + bool has_init = expr; + + PUSH_CODE (has_init); // has initialization code + + // The global has an initialization expression + if (has_init) + { + // Placeholder for address to after init code. + // GLOBAL_INIT jumps to there if the global is + // already initialized. + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); + + // We want the value of the initialization on + // the operand stack. + + INC_DEPTH(); + PUSH_NARGOUT(1); + + // Walk for the initialization code + expr->accept (*this); + // The value of rhs is on the operand stack now. + // So we need to write it to its local slot and then + // write that to its global value. + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::ASSIGN); // Write operand stack top ... + PUSH_SLOT (slot); // to the local slot of the global + + // I think only this makes sense + CHECK (DEPTH () == 1); + + POP_NARGOUT (); + DEC_DEPTH (); + + // Write the instruction address to the placeholder + SET_CODE_SHORT (need_after, CODE_SIZE ()); + } + } + else + ERR ("Strange state"); + } +} + +void +bytecode_walker:: +visit_postfix_expression (tree_postfix_expression& expr) +{ + INC_DEPTH(); + + tree_expression *e = expr.operand (); + CHECK_NONNULL (e); + + octave_value::unary_op op = expr.op_type (); + + int folded_need_after = -1; + int fold_slot = -1; + // Check if we should to a constant fold. It only makes sense in loops since the expression is folded at runtime. + // Essentially there is a PUSH_FOLDED_CST opcode that is tied to a cache. 
If the cache is valid, push it and jump
+ // past the initialization code, otherwise run the initialization code and set the cache with SET_FOLDED_CST
+ if (m_n_nested_loops && !m_is_folding && is_foldable_walker::is_foldable (expr))
+ {
+ m_is_folding = true;
+
+ std::string fold_name = "#cst_fold_" + std::to_string (m_n_folds++);
+ fold_slot = add_id_to_table (fold_name);
+
+ MAYBE_PUSH_WIDE_OPEXT (fold_slot);
+ PUSH_CODE (INSTR::PUSH_FOLDED_CST);
+ PUSH_SLOT (fold_slot);
+ folded_need_after = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+ }
+
+ int slot = -1;
+ // For ++ and -- we don't want a local pushed to the stack, but to operate
+ // directly on the slot, and then push the slot.
+ if (e->is_identifier() && (op == octave_value::unary_op::op_decr ||
+ op == octave_value::unary_op::op_incr))
+ {
+ // Just add the symbol to the table
+ // TODO: Could there be command function calls messing this up?
+ // I.e. foo++ could be a foo()++?
+ slot = add_id_to_table (e->name ());
+ }
+ // We handle e.g. m("qwe")++ with eval
+ else if (op != octave_value::unary_op::op_incr && op != octave_value::unary_op::op_decr)
+ {
+ PUSH_NARGOUT (1);
+ e->accept (*this);
+ POP_NARGOUT ();
+ }
+
+ switch (op)
+ {
+ case octave_value::unary_op::op_not:
+ PUSH_CODE (INSTR::NOT);
+ break;
+ case octave_value::unary_op::op_uplus:
+ PUSH_CODE (INSTR::UADD);
+ break;
+ case octave_value::unary_op::op_uminus:
+ PUSH_CODE (INSTR::USUB);
+ break;
+ case octave_value::unary_op::op_transpose:
+ PUSH_CODE (INSTR::TRANS);
+ break;
+ case octave_value::unary_op::op_hermitian:
+ PUSH_CODE (INSTR::HERM);
+ break;
+ case octave_value::unary_op::op_incr:
+ {
+ if (! e->is_identifier ())
+ {
+ // TODO: Cheating with eval
+ PUSH_TREE_FOR_EVAL (&expr);
+ int tree_idx = -CODE_SIZE (); // NB: Negative to not collide with debug data
+
+ PUSH_CODE (INSTR::EVAL);
+ PUSH_CODE (NARGOUT ());
+ PUSH_CODE_INT (tree_idx);
+ }
+ else
+ {
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::INCR_ID_POSTFIX);
+ PUSH_SLOT (slot);
+ }
+ }
+ break;
+ case octave_value::unary_op::op_decr:
+ {
+ if (! e->is_identifier ())
+ {
+ // TODO: Cheating with eval
+ PUSH_TREE_FOR_EVAL (&expr);
+ int tree_idx = -CODE_SIZE (); // NB: Negative to not collide with debug data
+
+ PUSH_CODE (INSTR::EVAL);
+ PUSH_CODE (NARGOUT ());
+ PUSH_CODE_INT (tree_idx);
+ }
+ else
+ {
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::DECR_ID_POSTFIX);
+ PUSH_SLOT (slot);
+ }
+ }
+ break;
+ default:
+ TODO ("not covered");
+ }
+
+ if (fold_slot != -1)
+ {
+ m_is_folding = false;
+
+ PUSH_CODE (INSTR::DUP);
+ MAYBE_PUSH_WIDE_OPEXT (fold_slot);
+ PUSH_CODE (INSTR::SET_FOLDED_CST);
+ PUSH_SLOT (fold_slot);
+
+ SET_CODE_SHORT (folded_need_after, CODE_SIZE ());
+ }
+
+ maybe_emit_bind_ans_and_disp (expr);
+
+ DEC_DEPTH();
+}
+
+void
+bytecode_walker::
+visit_prefix_expression (tree_prefix_expression& expr)
+{
+ INC_DEPTH();
+
+ tree_expression *e = expr.operand ();
+ CHECK_NONNULL (e);
+
+ octave_value::unary_op op = expr.op_type ();
+
+ int folded_need_after = -1;
+ int fold_slot = -1;
+ // Check if we should do a constant fold. It only makes sense in loops since the expression is folded at runtime.
+ // Essentially there is a PUSH_FOLDED_CST opcode that is tied to a cache.
If the cache is valid, push it and jump
+ // past the initialization code, otherwise run the initialization code and set the cache with SET_FOLDED_CST
+ if (m_n_nested_loops && !m_is_folding && is_foldable_walker::is_foldable (expr))
+ {
+ m_is_folding = true;
+
+ std::string fold_name = "#cst_fold_" + std::to_string (m_n_folds++);
+ fold_slot = add_id_to_table (fold_name);
+
+ MAYBE_PUSH_WIDE_OPEXT (fold_slot);
+ PUSH_CODE (INSTR::PUSH_FOLDED_CST);
+ PUSH_SLOT (fold_slot);
+ folded_need_after = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+ }
+
+ int slot = -1;
+ // For ++ and -- we don't want a local pushed to the stack, but to operate
+ // directly on the slot, and then push the slot.
+ if (e->is_identifier() && (op == octave_value::unary_op::op_decr ||
+ op == octave_value::unary_op::op_incr))
+ {
+ // Just add the symbol to the table
+ // TODO: Could there be command function calls messing this up?
+ // I.e. foo++ could be a foo()++?
+ slot = add_id_to_table (e->name ());
+ }
+ // We handle e.g. m("qwe")++ with eval
+ else if (op != octave_value::unary_op::op_incr && op != octave_value::unary_op::op_decr)
+ {
+ PUSH_NARGOUT (1);
+ e->accept (*this);
+ POP_NARGOUT ();
+ }
+
+ switch (op)
+ {
+ case octave_value::unary_op::op_not:
+ PUSH_CODE (INSTR::NOT);
+ break;
+ case octave_value::unary_op::op_uplus:
+ PUSH_CODE (INSTR::UADD);
+ break;
+ case octave_value::unary_op::op_uminus:
+ PUSH_CODE (INSTR::USUB);
+ break;
+ case octave_value::unary_op::op_transpose:
+ PUSH_CODE (INSTR::TRANS);
+ break;
+ case octave_value::unary_op::op_hermitian:
+ PUSH_CODE (INSTR::HERM);
+ break;
+ case octave_value::unary_op::op_incr:
+ {
+ if (! e->is_identifier ())
+ {
+ // TODO: Cheating with eval
+ PUSH_TREE_FOR_EVAL (&expr);
+ int tree_idx = -CODE_SIZE (); // NB: Negative to not collide with debug data
+
+ PUSH_CODE (INSTR::EVAL);
+ PUSH_CODE (NARGOUT ());
+ PUSH_CODE_INT (tree_idx);
+ }
+ else
+ {
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::INCR_ID_PREFIX);
+ PUSH_SLOT (slot);
+ }
+ }
+ break;
+ case octave_value::unary_op::op_decr:
+ {
+ if (! e->is_identifier ())
+ {
+ // TODO: Cheating with eval
+ PUSH_TREE_FOR_EVAL (&expr);
+ int tree_idx = -CODE_SIZE (); // NB: Negative to not collide with debug data
+
+ PUSH_CODE (INSTR::EVAL);
+ PUSH_CODE (NARGOUT ());
+ PUSH_CODE_INT (tree_idx);
+ }
+ else
+ {
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::DECR_ID_PREFIX);
+ PUSH_SLOT (slot);
+ }
+ }
+ break;
+ default:
+ TODO ("not covered");
+ }
+
+ if (fold_slot != -1)
+ {
+ m_is_folding = false;
+
+ PUSH_CODE (INSTR::DUP);
+ MAYBE_PUSH_WIDE_OPEXT (fold_slot);
+ PUSH_CODE (INSTR::SET_FOLDED_CST);
+ PUSH_SLOT (fold_slot);
+
+ SET_CODE_SHORT (folded_need_after, CODE_SIZE ());
+ }
+
+ maybe_emit_bind_ans_and_disp (expr);
+
+ DEC_DEPTH();
+}
+
+void
+bytecode_walker::
+visit_boolean_expression(tree_boolean_expression& expr)
+{
+ INC_DEPTH ();
+ PUSH_NARGOUT (1);
+
+ // Since || and && have short-circuit behavior
+ // we need to build up the expression from multiple opcodes.
+ //
+ // Note that UNARY_TRUE accepts operands that are not
+ // "is_defined ()" whereas IF or IF_N would error on those,
+ // so we need UNARY_TRUE before the IFs.
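+ //
+ // A rough sketch of the layout emitted for "lhs && rhs" below
+ // (jump targets are symbolic; the code patches in the real offsets):
+ //
+ //          <lhs>
+ //          UNARY_TRUE
+ //          JMP_IFN  false
+ //          <rhs>
+ //          UNARY_TRUE
+ //          JMP_IFN  false
+ //          PUSH_TRUE
+ //          JMP      after
+ //   false: PUSH_FALSE
+ //   after: ...
+ //
+ // "lhs || rhs" mirrors this with JMP_IF and the pushes swapped.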
+ if (expr.op_type() == tree_boolean_expression::bool_and)
+ {
+ // We want lhs on the operand stack
+ tree_expression *op1 = expr.lhs ();
+ CHECK_NONNULL (op1);
+ op1->accept (*this);
+
+ // If false, jump to push false
+ PUSH_CODE (INSTR::UNARY_TRUE);
+ PUSH_CODE (INSTR::JMP_IFN);
+ int need_false0 = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ // If lhs was true, we want rhs on the
+ // operand stack too.
+ tree_expression *op2 = expr.rhs ();
+ CHECK_NONNULL (op2);
+ op2->accept (*this);
+
+ // If false, jump to push false
+ PUSH_CODE (INSTR::UNARY_TRUE);
+ PUSH_CODE (INSTR::JMP_IFN);
+ int need_false1 = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ // If both lhs and rhs were true,
+ // we fall through to push true
+ PUSH_CODE (INSTR::PUSH_TRUE);
+ PUSH_CODE (INSTR::JMP); // Jump past PUSH_FALSE
+ int need_after = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ // The two JMP_IFNs go here, to PUSH_FALSE
+ int offset_false = CODE_SIZE ();
+ PUSH_CODE (INSTR::PUSH_FALSE);
+ // The JMP after PUSH_TRUE goes here
+
+ // Set the addresses for the false jumps
+ SET_CODE_SHORT (need_false0, offset_false);
+ SET_CODE_SHORT (need_false1, offset_false);
+ // The true push jumps to after
+ SET_CODE_SHORT (need_after, CODE_SIZE ());
+ }
+ else
+ {
+ // We want lhs on the operand stack
+ tree_expression *op1 = expr.lhs ();
+ CHECK_NONNULL (op1);
+ op1->accept (*this);
+
+ // If true, jump to push true
+ PUSH_CODE (INSTR::UNARY_TRUE);
+ PUSH_CODE (INSTR::JMP_IF);
+ int need_true0 = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ // If lhs was false, we want rhs on the
+ // operand stack too.
+ tree_expression *op2 = expr.rhs ();
+ CHECK_NONNULL (op2);
+ op2->accept (*this);
+
+ // If true, jump to push true
+ PUSH_CODE (INSTR::UNARY_TRUE);
+ PUSH_CODE (INSTR::JMP_IF);
+ int need_true1 = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ // If both lhs and rhs were false,
+ // we fall through to here and push false
+ PUSH_CODE (INSTR::PUSH_FALSE);
+ PUSH_CODE (INSTR::JMP); // Jump past PUSH_TRUE
+ int need_after = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ // The two JMP_IFs go here, to PUSH_TRUE
+ int offset_true = CODE_SIZE ();
+ PUSH_CODE (INSTR::PUSH_TRUE);
+ // The JMP after PUSH_FALSE goes here
+
+ // Set the addresses for the true jumps
+ SET_CODE_SHORT (need_true0, offset_true);
+ SET_CODE_SHORT (need_true1, offset_true);
+ // The false push jumps to after
+ SET_CODE_SHORT (need_after, CODE_SIZE ());
+ }
+
+ maybe_emit_bind_ans_and_disp (expr);
+
+ DEC_DEPTH ();
+ POP_NARGOUT ();
+}
+
+void
+bytecode_walker::
+visit_compound_binary_expression (tree_compound_binary_expression &expr)
+{
+ // Compound expressions are expressions that are more efficient to
+ // execute fused for matrices, like M'*A etc.
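+ //
+ // Roughly, these source patterns map to the fused opcodes in the
+ // switch below:
+ //
+ //   M.' * A  =>  TRANS_MUL      A * M.'  =>  MUL_TRANS
+ //   M' * A   =>  HERM_MUL       A * M'   =>  MUL_HERM
+ //   M.' \ b  =>  TRANS_LDIV     M' \ b   =>  HERM_LDIV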
+ INC_DEPTH();
+ PUSH_NARGOUT (1);
+
+ tree_expression *op1 = expr.clhs ();
+
+ CHECK_NONNULL (op1);
+ op1->accept (*this);
+
+ tree_expression *op2 = expr.crhs ();
+
+ CHECK_NONNULL (op2);
+ op2->accept (*this);
+
+ switch (expr.cop_type ())
+ {
+ case octave_value::compound_binary_op::op_trans_mul:
+ PUSH_CODE (INSTR::TRANS_MUL);
+ break;
+ case octave_value::compound_binary_op::op_mul_trans:
+ PUSH_CODE (INSTR::MUL_TRANS);
+ break;
+ case octave_value::compound_binary_op::op_herm_mul:
+ PUSH_CODE (INSTR::HERM_MUL);
+ break;
+ case octave_value::compound_binary_op::op_mul_herm:
+ PUSH_CODE (INSTR::MUL_HERM);
+ break;
+ case octave_value::compound_binary_op::op_trans_ldiv:
+ PUSH_CODE (INSTR::TRANS_LDIV);
+ break;
+ case octave_value::compound_binary_op::op_herm_ldiv:
+ PUSH_CODE (INSTR::HERM_LDIV);
+ break;
+ default:
+ TODO ("not covered");
+ }
+
+ maybe_emit_bind_ans_and_disp (expr);
+
+ POP_NARGOUT ();
+ DEC_DEPTH();
+}
+
+void
+bytecode_walker::
+visit_binary_expression (tree_binary_expression& expr)
+{
+ INC_DEPTH ();
+ PUSH_NARGOUT (1);
+
+ std::vector<int> need_after;
+ int fold_slot = -1;
+
+ // "&" and "|" have a braindead short-circuit behavior when
+ // in if or while conditions, so we need special handling of those.
+ if (expr.is_braindead ())
+ {
+ if (expr.op_type() == octave_value::binary_op::op_el_and)
+ {
+ // We use a slot to store whether a warning has been issued
+ // or not
+ std::string id_warning = "%braindead_warning_" +
+ std::to_string(CODE_SIZE ());
+ int slot = add_id_to_table(id_warning);
+
+ // The leftmost expression is always evaluated
+ tree_expression *op1 = expr.lhs ();
+
+ CHECK_NONNULL (op1);
+ op1->accept (*this);
+
+ // We need to check if the lhs value is a scalar
+ PUSH_CODE (INSTR::DUP);
+ PUSH_CODE (INSTR::BRAINDEAD_PRECONDITION);
+
+ // If the precondition is not true, we do a
+ // normal binop. Note that lhs is evaluated twice
+ // since that is what the treewalker does.
+ PUSH_CODE (INSTR::JMP_IFN);
+ int need_target_not_braindead = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ // Now we do the braindead short circuit
+
+ // If the lhs expression is true we check the rhs
+ PUSH_CODE (INSTR::UNARY_TRUE);
+ PUSH_CODE (INSTR::JMP_IF);
+ int need_target_true = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ // The lhs was false which means we need to issue a warning
+ // and push a false
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::BRAINDEAD_WARNING);
+ PUSH_SLOT (slot);
+ PUSH_CODE ('&'); // The operator to print in the warning
+ PUSH_CODE (INSTR::PUSH_FALSE);
+ PUSH_CODE (INSTR::JMP);
+ need_after.push_back (CODE_SIZE ());
+ PUSH_CODE_SHORT (-1);
+
+ // If lhs was true we jump to here
+ SET_CODE_SHORT (need_target_true, CODE_SIZE ());
+ // Walk rhs
+ tree_expression *op2 = expr.rhs ();
+
+ CHECK_NONNULL (op2);
+ op2->accept (*this);
+
+ // With rhs on the stack, check if it is true and jump to
+ // a true push, otherwise push false and jump to after
+ PUSH_CODE (INSTR::UNARY_TRUE);
+ PUSH_CODE (INSTR::JMP_IF);
+ need_target_true = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ // Push false, jump to after
+ PUSH_CODE (INSTR::PUSH_FALSE);
+ PUSH_CODE (INSTR::JMP);
+ need_after.push_back (CODE_SIZE ());
+ PUSH_CODE_SHORT (-1);
+
+ // Push true, jump to after
+ SET_CODE_SHORT (need_target_true, CODE_SIZE ());
+ PUSH_CODE (INSTR::PUSH_TRUE);
+ PUSH_CODE (INSTR::JMP);
+ need_after.push_back (CODE_SIZE ());
+ PUSH_CODE_SHORT (-1);
+
+ // If the precondition was false we need to do the ordinary binary op
+ SET_CODE_SHORT (need_target_not_braindead, CODE_SIZE ());
+ PUSH_CODE (INSTR::POP); // Pop the evaluated lhs value
+ }
+ else if (expr.op_type() == octave_value::binary_op::op_el_or)
+ {
+ // We use a slot to store whether a warning has been issued
+ // or not
+ std::string id_warning = "%braindead_warning_" +
+ std::to_string(CODE_SIZE ());
+ int slot = add_id_to_table(id_warning);
+
+ // The leftmost expression is always evaluated
+ tree_expression *op1 = expr.lhs ();
+
+ CHECK_NONNULL (op1);
+ op1->accept (*this);
+
+ // We need to check if the lhs value is a scalar
+ PUSH_CODE (INSTR::DUP);
+ PUSH_CODE (INSTR::BRAINDEAD_PRECONDITION);
+
+ // If the precondition is not true, we do a
+ // normal binop. Note that lhs is evaluated twice
+ // since that is what the treewalker does.
+ PUSH_CODE (INSTR::JMP_IFN);
+ int need_target_not_braindead = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ // Now we do the braindead short circuit for "or"
+
+ // If the lhs expression is true we issue a
+ // warning, push a true and jump to after.
+ // If lhs is false we instead need to check rhs too.
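+ //
+ // Rough shape of the whole braindead lowering for "lhs | rhs"
+ // (symbolic targets; the real offsets are patched in below):
+ //
+ //              <lhs>
+ //              DUP
+ //              BRAINDEAD_PRECONDITION
+ //              JMP_IFN  not_bd
+ //              UNARY_TRUE
+ //              JMP_IFN  check_rhs
+ //              BRAINDEAD_WARNING '|'
+ //              PUSH_TRUE
+ //              JMP      after
+ //   check_rhs: <rhs>, UNARY_TRUE, JMP_IF push_true
+ //              PUSH_FALSE, JMP after
+ //   push_true: PUSH_TRUE, JMP after
+ //      not_bd: POP, then the ordinary element-wise binop
+ //       after: ...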
+ PUSH_CODE (INSTR::UNARY_TRUE);
+ PUSH_CODE (INSTR::JMP_IFN);
+ int need_target_check_rhs = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::BRAINDEAD_WARNING);
+ PUSH_SLOT (slot);
+ PUSH_CODE ('|'); // The operator to print in the warning
+ PUSH_CODE (INSTR::PUSH_TRUE);
+ PUSH_CODE (INSTR::JMP);
+ need_after.push_back (CODE_SIZE ());
+ PUSH_CODE_SHORT (-1);
+
+ // If lhs was false we jump to here
+ SET_CODE_SHORT (need_target_check_rhs, CODE_SIZE ());
+ // Walk rhs
+ tree_expression *op2 = expr.rhs ();
+
+ CHECK_NONNULL (op2);
+ op2->accept (*this);
+
+ // With rhs on the stack, check if it is true and jump to
+ // a true push, otherwise push false and jump to after
+ PUSH_CODE (INSTR::UNARY_TRUE);
+ PUSH_CODE (INSTR::JMP_IF);
+ int need_target_true = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ // Push false, jump to after
+ PUSH_CODE (INSTR::PUSH_FALSE);
+ PUSH_CODE (INSTR::JMP);
+ need_after.push_back (CODE_SIZE ());
+ PUSH_CODE_SHORT (-1);
+
+ // Push true, jump to after
+ SET_CODE_SHORT (need_target_true, CODE_SIZE ());
+ PUSH_CODE (INSTR::PUSH_TRUE);
+ PUSH_CODE (INSTR::JMP);
+ need_after.push_back (CODE_SIZE ());
+ PUSH_CODE_SHORT (-1);
+
+ // If the precondition was false we need to do the ordinary binary op
+ SET_CODE_SHORT (need_target_not_braindead, CODE_SIZE ());
+ PUSH_CODE (INSTR::POP); // Pop the evaluated lhs value
+ }
+ else
+ panic_impossible ();
+ }
+ // Check if we should do a constant fold. It only makes sense in loops since the expression is folded at runtime.
+ // Essentially there is a PUSH_FOLDED_CST opcode that is tied to a cache. If the cache is valid, push it and jump
+ // past the initialization code, otherwise run the initialization code and set the cache with SET_FOLDED_CST
+ else if (m_n_nested_loops && !m_is_folding && is_foldable_walker::is_foldable (expr))
+ {
+ m_is_folding = true;
+
+ std::string fold_name = "#cst_fold_" + std::to_string (m_n_folds++);
+ fold_slot = add_id_to_table (fold_name);
+
+ MAYBE_PUSH_WIDE_OPEXT (fold_slot);
+ PUSH_CODE (INSTR::PUSH_FOLDED_CST);
+ PUSH_SLOT (fold_slot);
+ need_after.push_back (CODE_SIZE ());
+ PUSH_CODE_SHORT (-1);
+ }
+
+ tree_expression *op1 = expr.lhs ();
+ tree_expression *op2 = expr.rhs ();
+ CHECK_NONNULL (op1);
+ CHECK_NONNULL (op2);
+
+ if (op1->is_constant () && op2->is_constant () && DATA_SIZE () < 255)
+ {
+ // If both rhs and lhs are constants we want to emit a super op-code
+ // as long as the WIDE op is not going to be used (<255)
+ emit_load_2_cst (op1, op2);
+ }
+ else
+ {
+ op1->accept (*this);
+ op2->accept (*this);
+ }
+
+ switch (expr.op_type ())
+ {
+ case octave_value::binary_op::op_mul:
+ PUSH_CODE (INSTR::MUL);
+ break;
+ case octave_value::binary_op::op_div:
+ PUSH_CODE (INSTR::DIV);
+ break;
+ case octave_value::binary_op::op_add:
+ PUSH_CODE (INSTR::ADD);
+ break;
+ case octave_value::binary_op::op_sub:
+ PUSH_CODE (INSTR::SUB);
+ break;
+ case octave_value::binary_op::op_lt:
+ PUSH_CODE (INSTR::LE);
+ break;
+ case octave_value::binary_op::op_le:
+ PUSH_CODE (INSTR::LE_EQ);
+ break;
+ case octave_value::binary_op::op_gt:
+ PUSH_CODE (INSTR::GR);
+ break;
+ case octave_value::binary_op::op_ge:
+ PUSH_CODE (INSTR::GR_EQ);
+ break;
+ case octave_value::binary_op::op_eq:
+ PUSH_CODE (INSTR::EQ);
+ break;
+ case octave_value::binary_op::op_ne:
+ PUSH_CODE (INSTR::NEQ);
+ break;
+ case octave_value::binary_op::op_pow:
+ PUSH_CODE (INSTR::POW);
+ break;
+ case octave_value::binary_op::op_ldiv:
+ PUSH_CODE (INSTR::LDIV);
+ break;
+ case
octave_value::binary_op::op_el_mul:
+ PUSH_CODE (INSTR::EL_MUL);
+ break;
+ case octave_value::binary_op::op_el_div:
+ PUSH_CODE (INSTR::EL_DIV);
+ break;
+ case octave_value::binary_op::op_el_pow:
+ PUSH_CODE (INSTR::EL_POW);
+ break;
+ case octave_value::binary_op::op_el_and:
+ PUSH_CODE (INSTR::EL_AND);
+ break;
+ case octave_value::binary_op::op_el_or:
+ PUSH_CODE (INSTR::EL_OR);
+ break;
+ case octave_value::binary_op::op_el_ldiv:
+ PUSH_CODE (INSTR::EL_LDIV);
+ break;
+
+ default:
+ TODO ("not covered");
+ }
+
+ if (fold_slot != -1)
+ {
+ m_is_folding = false;
+
+ PUSH_CODE (INSTR::DUP);
+ MAYBE_PUSH_WIDE_OPEXT (fold_slot);
+ PUSH_CODE (INSTR::SET_FOLDED_CST);
+ PUSH_SLOT (fold_slot);
+ }
+
+ for (int offset : need_after)
+ SET_CODE_SHORT (offset, CODE_SIZE ());
+
+ maybe_emit_bind_ans_and_disp (expr);
+
+ POP_NARGOUT ();
+
+ DEC_DEPTH ();
+}
+
+void
+bytecode_walker::
+emit_load_2_cst (tree_expression *lhs, tree_expression *rhs)
+{
+ INC_DEPTH();
+
+ CHECK (DEPTH () > 1);
+
+ CHECK (lhs); CHECK (rhs);
+ CHECK (lhs->is_constant ());
+ CHECK (rhs->is_constant ());
+
+ tree_constant *lhs_cst = static_cast<tree_constant *> (lhs);
+ tree_constant *rhs_cst = static_cast<tree_constant *> (rhs);
+
+ octave_value ov_lhs = lhs_cst->value ();
+ octave_value ov_rhs = rhs_cst->value ();
+
+ PUSH_DATA (ov_lhs);
+ PUSH_DATA (ov_rhs);
+
+ unsigned cst_offset = DATA_SIZE () - 1;
+ CHECK (cst_offset < 256);
+
+ PUSH_CODE (INSTR::LOAD_2_CST);
+ PUSH_CODE (cst_offset - 1); // Offset of lhs
+
+ DEC_DEPTH();
+}
+
+void
+bytecode_walker::
+visit_constant (tree_constant& cst)
+{
+ INC_DEPTH();
+
+ octave_value ov_cst = cst.value ();
+
+ bool specialized = false;
+ if (ov_cst.type_id () == octave_scalar::static_type_id ())
+ {
+ double val = ov_cst.double_value ();
+ if (val == 0)
+ {
+ specialized = true;
+ PUSH_CODE (INSTR::PUSH_DBL_0);
+ }
+ else if (val == 1)
+ {
+ specialized = true;
+ PUSH_CODE (INSTR::PUSH_DBL_1);
+ }
+ else if (val == 2)
+ {
+ specialized = true;
+ PUSH_CODE (INSTR::PUSH_DBL_2);
+ }
+ }
+
+ if (!specialized)
+ {
+ PUSH_DATA (ov_cst);
+ PUSH_CODE_LOAD_CST (DATA_SIZE () - 1); // Offset of the constant
+ }
+
+ maybe_emit_bind_ans_and_disp (cst);
+
+ DEC_DEPTH();
+}
+
+void
+bytecode_walker::
+visit_octave_user_function (octave_user_function& fcn)
+{
+ m_code.m_unwind_data.m_name = fcn.name ();
+ m_code.m_unwind_data.m_file = fcn.fcn_file_name ();
+ PUSH_DATA (fcn.name ());
+ PUSH_DATA (std::string {"user-function"});
+ PUSH_DATA (fcn.profiler_name ());
+
+ tree_statement_list *cmd_list = fcn.body ();
+ tree_parameter_list *returns = fcn.return_list();
+ tree_parameter_list *paras = fcn.parameter_list ();
+
+ std::vector<std::string> v_paras;
+ if (paras) // paras is 0 if function args are missing, e.g. "function foo\nend"
+ {
+ for (auto it = paras->begin (); it != paras->end (); it++)
+ {
+ CHECK_NONNULL (*it);
+ CHECK ((*it)->ident ());
+ v_paras.push_back ((*it)->name ());
+ }
+ }
+
+ // Does the function output varargout?
+ m_varargout = returns->takes_varargs ();
+ // "varargout" is not in the 'returns' list (if in the proper last position),
+ // so add one to the size if 'm_varargout' is true
+ int n_returns = returns ? returns->size () + m_varargout : 0;
+
+ // The first instruction is the number of return variables. Negative for varargout.
+ // +1 for native '%nargout' on the stack
+ PUSH_CODE (m_varargout ?
-(n_returns + 1) : (n_returns + 1));
+
+ // Check if the last parameter is "varargin".
+ // If that is the case, we need to mess with the stacks
+ // in the vm, so mark the function as having a negative
+ // number of parameters.
+ bool is_varargin = paras ? paras->takes_varargs () : false;
+
+ // varargin is not among the parameter list's elements, so
+ // add it to the vector of parameter names
+ if (is_varargin)
+ v_paras.push_back("varargin");
+
+ // The second instruction is the number of arguments
+ int n_paras = v_paras.size ();
+ PUSH_CODE (is_varargin ? -n_paras : n_paras);
+
+ // The third is the number of locals, which needs to be set
+ // after compiling the function. So we need to store the offset
+ // to it for later
+ m_offset_n_locals = CODE_SIZE ();
+ PUSH_CODE (-1); // Placeholder
+ PUSH_CODE (-1);
+
+ // The first slot is a native int representation of nargout
+ // so we add a dummy slot object for it
+ add_id_to_table("%nargout");
+
+ // Then the return values
+ for (auto it = returns->begin (); it != returns->end (); it++)
+ {
+ std::string name = (*it)->name();
+ tree_identifier *id = (*it)->ident ();
+ CHECK_NONNULL (id);
+ add_id_to_table (id->name ());
+ }
+ if (m_varargout)
+ add_id_to_table ("varargout"); // Not in the returns list. Needs to be last
+
+ // The function itself is put after the arg outs
+ /* add_id_to_table (fcn.name ()); */
+
+ // Then the arguments
+ for (std::string name : v_paras)
+ {
+ if (m_map_locals_to_slot.find (name) !=
+ m_map_locals_to_slot.end ())
+ {
+ // So the parameter is also a return value
+ // so we need to push it and assign
+ // it to the return value, since the caller
+ // will write the argument to the argument slot.
+ //
+ // We give the parameter a dummy name so it
+ // still occupies a slot, and assign a dummy
+ // object to it after we have copied it to the return
+ // slot.
+
+ std::string dummy_name = "!" + name;
+ int slot_dummy = add_id_to_table (dummy_name);
+
+ // PUSH_SLOT_INDEXED just pushes and does not check
+ // for doing a cmd function call.
+ MAYBE_PUSH_WIDE_OPEXT (slot_dummy);
+ PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+ PUSH_SLOT (slot_dummy);
+ int slot = SLOT (name);
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::FORCE_ASSIGN); // Accepts undefined rhs
+ PUSH_SLOT (slot);
+ PUSH_CODE (INSTR::PUSH_FALSE); // False will do
+ MAYBE_PUSH_WIDE_OPEXT (slot_dummy);
+ PUSH_CODE (INSTR::ASSIGN);
+ PUSH_SLOT (slot_dummy);
+
+ continue;
+ }
+
+ add_id_to_table (name);
+ }
+
+ // We always need the magic id "ans"
+ add_id_to_table ("ans");
+
+ // We add all identifiers in the body to the id-table. We also
+ // make a map mapping the interpreter's frame offset of an id
+ // to the frame offset in the bytecode VM frame.
+ if (cmd_list)
+ {
+ auto v_names_offsets = collect_idnames_walker::collect_id_names (*cmd_list);
+
+ for (auto name_offset : v_names_offsets)
+ {
+ std::string name = name_offset.first;
+ int frame_offset = name_offset.second;
+ add_id_to_table (name_offset.first);
+ int slot = SLOT (name);
+
+ m_code.m_unwind_data.m_external_frame_offset_to_internal[frame_offset] = slot;
+ }
+ }
+ // We need the arguments and return ids in the map too.
+ if (paras)
+ {
+ for (auto it = paras->begin (); it != paras->end (); it++)
+ {
+ CHECK_NONNULL (*it);
+ tree_identifier *id = (*it)->ident ();
+ int frame_offset = id->symbol ().data_offset ();
+ int slot = SLOT (id->name ());
+ m_code.m_unwind_data.m_external_frame_offset_to_internal[frame_offset] = slot;
+
+ // If the parameter has an init expression e.g.
+ // "function foo (a = sin (pi))" + // , we need to search it for id:s too. + tree_expression *init_expr = (*it)->expression (); + if (init_expr) + { + auto v_names_offsets = collect_idnames_walker::collect_id_names (*init_expr); + for (auto name_offset : v_names_offsets) + { + std::string name = name_offset.first; + int frame_offset_i = name_offset.second; + add_id_to_table (name_offset.first); + int slot_i = SLOT (name); + + m_code.m_unwind_data.m_external_frame_offset_to_internal[frame_offset_i] = slot_i; + } + } + } + } + for (auto it = returns->begin (); it != returns->end (); it++) + { + std::string name = (*it)->name(); + tree_identifier *id = (*it)->ident (); + int frame_offset = id->symbol ().data_offset (); + int slot = SLOT (name); + m_code.m_unwind_data.m_external_frame_offset_to_internal[frame_offset] = slot; + } + + // The function name should be in the frame as an id too aswell + // as 'varargin', 'varargout' and 'ans'. + // + // 'ans' is allready added to the id table and 'varargin' and 'varargout' too + // if they are used, but we don't have their external offset. + // + // The function name is not added to the id table yet. + // + // Note that there might be symbols added to the original scope by + // eg. eval ("foo = 3"). We just ignore those. + std::string function_name = fcn.name (); + auto dot_idx = function_name.find_last_of ('.'); // Names might be e.g. "get.Count" but we only want "Count" + if (dot_idx != std::string::npos) + function_name = function_name.substr (dot_idx + 1); + + // We need to keep track of which id is the function name so that + // we can add the id to the id-table and get it's external offset. + // + // Note that the file 'bar.m' can have one function with the id name 'foo' + // which will be added to the scope by the parser, but the function name + // and thus call-name is 'bar'. + std::size_t idx_fn_name = n_returns + 1; // "+1" since 'ans' is always added first + + for (auto p : fcn.scope ().symbols ()) + { + std::string name = p.first; + symbol_record sym = p.second; + std::size_t offset = sym.data_offset (); + + bool is_fn_id = offset == idx_fn_name; // Are we at the function name id? + + auto it = m_map_locals_to_slot.find (name); + if (it == m_map_locals_to_slot.end ()) + { + if (is_fn_id) + { + // Add the function name id to the table and add the correct external offset. + // (The name might not be the call-name of the function.) + int slot = add_id_to_table (name); + m_code.m_unwind_data.m_external_frame_offset_to_internal[offset] = slot; + } + else + continue; + } + + if (name == "varargin") + m_code.m_unwind_data.m_external_frame_offset_to_internal[offset] = SLOT ("varargin"); + else if (name == "varargout") + m_code.m_unwind_data.m_external_frame_offset_to_internal[offset] = SLOT ("varargout"); + else if (name == "ans") + m_code.m_unwind_data.m_external_frame_offset_to_internal[offset] = SLOT ("ans"); + } + + // Add code to handle default arguments. If an argument is undefined or + // "magic colon" it is to get its default value. + if (paras) + { + for (auto it = paras->begin (); it != paras->end (); it++) + { + tree_expression *init_expr = (*it)->expression (); + // TODO: Default init for varargin? + + if (init_expr) + { + // There is a default arg. 
+
+ std::string name = (*it)->name ();
+ int slot = SLOT (name);
+
+ // Push the arg to the operand stack from its slot
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+ PUSH_SLOT (slot);
+ // If it is undefined or "Magic colon", execute the init code,
+ // otherwise jump past it.
+ PUSH_CODE (INSTR::JMP_IFDEF);
+ int need_after = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1); // Placeholder
+
+ INC_DEPTH();
+ PUSH_NARGOUT(1); // nargout is 1 for simple assignments
+
+ // Walk for the rhs code
+ init_expr->accept (*this);
+
+ // The value of rhs is now on the operand stack. Assign it
+ // to the arg.
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::ASSIGN);
+ PUSH_SLOT (slot);
+
+ POP_NARGOUT ();
+ DEC_DEPTH();
+
+ // The jump needs to go here, if the argument is defined, so
+ // set the placeholder from above.
+ SET_CODE_SHORT (need_after, CODE_SIZE ());
+ }
+ }
+ }
+
+ CHECK_NONNULL (cmd_list);
+ cmd_list->accept (*this);
+
+ // Set the number of locals, which has had a placeholder since earlier
+ SET_CODE_SHORT (m_offset_n_locals, m_n_locals);
+
+ // We want to add the locals to the scope in slot order
+ // so we push all the locals' names to a vector by their slot
+ // number
+ unsigned n_slots = m_map_locals_to_slot.size ();
+ CHECK (n_slots == static_cast<unsigned> (m_n_locals));
+ std::vector<std::string> names (n_slots);
+
+ auto iter = m_map_locals_to_slot.begin ();
+ for (unsigned i = 0; i < n_slots; i++)
+ {
+ auto kv = *iter++;
+
+ const std::string& name = kv.first;
+ int slot = kv.second;
+
+ CHECK (slot >= 0 && slot < static_cast<int> (n_slots));
+ CHECK (names[slot] == ""); // Check that no duplicate slot number is used
+
+ names[slot] = name;
+ }
+
+ // Check that the mapping between external offsets and internal slots has no holes in it
+ int i = 0;
+ for (auto it : m_code.m_unwind_data.m_external_frame_offset_to_internal)
+ {
+ int external_offset = it.first;
+ CHECK (external_offset == i);
+ i++;
+ }
+
+ // The profiler needs to know these sizes when copying from pointers.
+ m_code.m_unwind_data.m_code_size = m_code.m_code.size ();
+ m_code.m_unwind_data.m_ids_size = m_code.m_ids.size ();
+}
+
+void
+bytecode_walker::
+visit_multi_assignment (tree_multi_assignment& expr)
+{
+ INC_DEPTH();
+ int outer_nargout = NARGOUT ();
+
+ tree_argument_list *lhs = expr.left_hand_side ();
+
+ CHECK_NONNULL (lhs);
+
+ // Lists are annoying, move lhs elements to a vector
+ std::vector<tree_expression *> v_lhs;
+ for (auto it = lhs->begin (); it != lhs->end (); it++)
+ {
+ CHECK_NONNULL(*it);
+ v_lhs.push_back (*it);
+ }
+
+ // Set nargout
+ size_t n_args = v_lhs.size ();
+ PUSH_NARGOUT (n_args);
+
+ std::vector<std::string> v_arg_names;
+ std::vector<bool> v_is_blackhole;
+
+ // Can't nest ignored outputs as the code here is written. Is it even possible to nest those?
+ CHECK (m_pending_ignore_outputs == false);
+
+ // TODO:
+ // Something smarter is needed to split up cs-lists among different lhs values
+ // This does not work for e.g. [C{1:2}, D] = {1,2,3}{:}
+ // Probably need some opcode ASSIGNNX or something.
+ // See tree_multi_assignment::evaluate_n and octave_lvalue::eval_for_numel
+ //
+ // There probably has to be another tree_walker figuring out how many elements
+ // an lvalue will "ask for". In [C{1:2}] = deal (1,2) deal will have to have nargout 2
+ // which is annoying since we will have to be able to set nargout dynamically.
+ // With a slot maybe?
+ //
+ // Maybe make an octave_lvalue and call numel() for simplicity?
+ //
+ // Meanwhile, just call eval on it ...
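+ // (For example "[C{1:2}, D] = foo ()" takes the eval fallback below,
+ // since C{1:2} is not a plain identifier.)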
+
+ bool any_lhs_not_id = false;
+ for (tree_expression *e : v_lhs)
+ if (! e->is_identifier ())
+ any_lhs_not_id = true;
+
+ if (any_lhs_not_id)
+ {
+ // The VM needs to access the tree expr.
+ // Abuse the dbg info.
+ PUSH_TREE_FOR_EVAL (&expr);
+ int tree_idx = -CODE_SIZE (); // NB: Negative to not collide with debug data
+
+ PUSH_CODE (INSTR::EVAL);
+ PUSH_CODE (outer_nargout);
+ PUSH_CODE_INT (tree_idx);
+
+ if (DEPTH () == 1)
+ PUSH_CODE (INSTR::POP);
+
+ POP_NARGOUT ();
+ DEC_DEPTH ();
+ return;
+ }
+
+ int n_blackholes = 0;
+ int i = 0;
+ for (tree_expression *e : v_lhs)
+ {
+ if (!e->is_identifier ())
+ {
+ v_arg_names.push_back ("");
+ v_is_blackhole.push_back (false);
+ continue;
+ }
+
+ std::string name = e->name ();
+ if (name == "~") // We need to handle the special "ignore id" '~' as in [~, a] = foo ()
+ {
+ m_v_ignored.push_back (i + 1); // Output parameters are one-indexed
+ name = "%~" + std::to_string (n_blackholes++); // We rename it to "%~X"
+ v_is_blackhole.push_back (true);
+ }
+ else
+ v_is_blackhole.push_back (false);
+
+ v_arg_names.push_back (name);
+
+ add_id_to_table (name);
+
+ i++;
+ }
+
+ CHECK (v_arg_names.size () == n_args);
+
+ /* Handle ignored outputs. Since the called function can ask with isargout()
+ * whether an output is ignored or not we need to set a state for this. */
+ if (m_v_ignored.size ())
+ {
+ m_pending_ignore_outputs = 1;
+ m_ignored_of_total = n_args;
+ }
+
+ tree_expression *rhs = expr.right_hand_side ();
+ CHECK_NONNULL(rhs);
+
+ // We want to push NARGOUT elements to the operand stack
+
+ emit_unwind_protect_data D;
+ if (m_pending_ignore_outputs)
+ D = emit_unwind_protect_code_start ();
+
+ rhs->accept (*this); // Walks rhs for NARGOUT elements
+
+ if (DEPTH () != 1)
+ TODO ("Only root multi assignment supported now");
+
+ PUSH_CODE (INSTR::ASSIGNN);
+ // Push the number of slots
+ PUSH_CODE (v_lhs.size ());
+
+ // Push the slots
+ for (std::string &name : v_arg_names)
+ PUSH_WSLOT (SLOT (name));
+
+ // Emit code to disp if no ;
+ for (std::string &name : v_arg_names)
+ maybe_emit_push_and_disp_id (expr, name);
+
+ if (m_pending_ignore_outputs)
+ {
+ emit_unwind_protect_code_before_cleanup (D);
+
+ // As we are ignoring outputs we need to unwind protect to clear the VM state with opcode CLEAR_IGNORE_OUTPUTS.
+ // We need to supply each black hole slot.
+
+ PUSH_CODE (INSTR::CLEAR_IGNORE_OUTPUTS);
+ PUSH_CODE (n_blackholes);
+
+ for (unsigned j = 0; j < n_args; j++)
+ {
+ if (v_is_blackhole.at (j))
+ PUSH_WSLOT (SLOT (v_arg_names.at (j)));
+ }
+
+ emit_unwind_protect_code_end (D);
+ }
+
+ if (m_pending_ignore_outputs)
+ {
+ m_pending_ignore_outputs = 0;
+ m_v_ignored.clear ();
+ }
+
+ POP_NARGOUT ();
+ DEC_DEPTH ();
+}
+
+std::map<std::string, octave_base_value::unary_mapper_t> bytecode_walker::m_name_to_unary_func =
+{
+{"abs", octave_base_value::umap_abs},
+{"acos", octave_base_value::umap_acos},
+{"acosh", octave_base_value::umap_acosh},
+{"angle", octave_base_value::umap_angle},
+{"arg", octave_base_value::umap_arg},
+{"asin", octave_base_value::umap_asin},
+{"asinh", octave_base_value::umap_asinh},
+{"atan", octave_base_value::umap_atan},
+{"atanh", octave_base_value::umap_atanh},
+{"cbrt", octave_base_value::umap_cbrt},
+{"ceil", octave_base_value::umap_ceil},
+{"conj", octave_base_value::umap_conj},
+{"cos", octave_base_value::umap_cos},
+{"cosh", octave_base_value::umap_cosh},
+{"erf", octave_base_value::umap_erf},
+{"erfinv", octave_base_value::umap_erfinv},
+{"erfcinv", octave_base_value::umap_erfcinv},
+{"erfc", octave_base_value::umap_erfc},
+{"erfcx",
octave_base_value::umap_erfcx},
+{"erfi", octave_base_value::umap_erfi},
+{"dawson", octave_base_value::umap_dawson},
+{"exp", octave_base_value::umap_exp},
+{"expm1", octave_base_value::umap_expm1},
+{"isfinite", octave_base_value::umap_isfinite},
+{"fix", octave_base_value::umap_fix},
+{"floor", octave_base_value::umap_floor},
+{"gamma", octave_base_value::umap_gamma},
+{"imag", octave_base_value::umap_imag},
+{"isinf", octave_base_value::umap_isinf},
+{"isna", octave_base_value::umap_isna},
+{"isnan", octave_base_value::umap_isnan},
+{"lgamma", octave_base_value::umap_lgamma},
+{"log", octave_base_value::umap_log},
+{"log2", octave_base_value::umap_log2},
+{"log10", octave_base_value::umap_log10},
+{"log1p", octave_base_value::umap_log1p},
+{"real", octave_base_value::umap_real},
+{"round", octave_base_value::umap_round},
+{"roundb", octave_base_value::umap_roundb},
+{"signum", octave_base_value::umap_signum},
+{"sin", octave_base_value::umap_sin},
+{"sinh", octave_base_value::umap_sinh},
+{"sqrt", octave_base_value::umap_sqrt},
+{"tan", octave_base_value::umap_tan},
+{"tanh", octave_base_value::umap_tanh},
+{"isalnum", octave_base_value::umap_xisalnum},
+{"isalpha", octave_base_value::umap_xisalpha},
+{"isascii", octave_base_value::umap_xisascii},
+{"iscntrl", octave_base_value::umap_xiscntrl},
+{"isdigit", octave_base_value::umap_xisdigit},
+{"isgraph", octave_base_value::umap_xisgraph},
+{"islower", octave_base_value::umap_xislower},
+{"isprint", octave_base_value::umap_xisprint},
+{"ispunct", octave_base_value::umap_xispunct},
+{"isspace", octave_base_value::umap_xisspace},
+{"isupper", octave_base_value::umap_xisupper},
+{"isxdigit", octave_base_value::umap_xisxdigit},
+{"signbit", octave_base_value::umap_xsignbit},
+{"tolower", octave_base_value::umap_xtolower},
+{"toupper", octave_base_value::umap_xtoupper},
+};
+
+void
+bytecode_walker::
+emit_disp_obj (tree_expression &expr)
+{
+ CHECK (expr.print_result ());
+ CHECK (DEPTH () == 1);
+ PUSH_CODE (INSTR::DISP);
+ // Magic slot number 0 (%nargout that is a native int) that
+ // will never be printed corresponds to the "" name tag stashing of
+ // the ovl before calling display.
+ PUSH_SLOT (0);
+ PUSH_WSLOT (0); // never a command function call
+}
+
+void
+bytecode_walker::
+maybe_emit_push_and_disp_id (tree_expression &expr, const std::string &name, const std::string maybe_cmd_name)
+{
+ if (!expr.print_result ())
+ return;
+
+ if (name.size () && name[0] == '%') // Don't print internal variables like black holes
+ return;
+
+ CHECK (DEPTH () == 1);
+ int slot = SLOT (name);
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+ PUSH_SLOT (slot);
+ maybe_emit_disp_id (expr, name, maybe_cmd_name); // Always, not maybe
+}
+
+void
+bytecode_walker::
+maybe_emit_disp_id (tree_expression &expr, const std::string &name, const std::string maybe_cmd_name)
+{
+ if (!expr.print_result ())
+ return;
+
+ if (name.size () && name[0] == '%') // Don't print internal variables like black holes
+ return;
+
+ // The Octave function inputname (i) needs to be able to know the name
+ // of the argument to a function, so we need to make an entry of
+ // the id printed if the user overloads display()
+ arg_name_entry arg_name_entry;
+ arg_name_entry.m_arg_names = string_vector {name};
+
+ arg_name_entry.m_ip_start = CODE_SIZE ();
+
+ CHECK (DEPTH () == 1);
+ int slot = SLOT (name);
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::DISP);
+ PUSH_SLOT (slot);
+ // E.g.
"x" might either be a command call x() that should print + // "ans = ..." or a variable that should print "x = ..." so we + // store the information on whether a certain symbol + // was a variable or command call in a slot. + // Some expressions like "1+1" are never command calls + // ans have maybe_cmd_name as "" + if (maybe_cmd_name != "") + PUSH_WSLOT (SLOT (maybe_cmd_name)); + else + PUSH_WSLOT (0); + + arg_name_entry.m_ip_end = CODE_SIZE (); + PUSH_ARGNAMES_ENTRY (arg_name_entry); +} + +void +bytecode_walker:: +maybe_emit_bind_ans_and_disp (tree_expression &expr, const std::string maybe_cmd_name) +{ + bool print_result = expr.print_result (); + + // If this is an root expression we need to write the return value + // to ans. + if (DEPTH () == 1) + { + if (print_result) + PUSH_CODE (INSTR::DUP); + int slot = SLOT ("ans"); + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::BIND_ANS); + PUSH_SLOT (slot); + } + + if (expr.is_identifier ()) + maybe_emit_disp_id (expr, expr.name (), maybe_cmd_name); + else + maybe_emit_disp_id (expr, "ans", maybe_cmd_name); +} + +void +bytecode_walker:: +emit_return () +{ + // For loops, unwind protect and switches etc have stuff on the stack + // inside them, so we need to pop those before executing the RET opcode. + auto v = NESTING_STATEMENTS(); + // Reverse it backwards (top to bottom) + for (auto it = v.rbegin () ;it != v.rend (); it++) + { + nesting_statement t = *it; + switch (t) + { + case nesting_statement::FOR_LOOP: + // We need to pop the counter and n + PUSH_CODE (INSTR::POP_N_INTS); + PUSH_CODE (2); + // Pop the rhs ov (the range) + PUSH_CODE (INSTR::POP); + break; + case nesting_statement::ONE_OV_ON_STACK: + PUSH_CODE (INSTR::POP); + break; + default: + ERR("Invalid state"); + } + } + + PUSH_CODE (INSTR::RET); +} + +void +bytecode_walker:: +visit_return_command (tree_return_command &cmd) +{ + int loc_id = N_LOC (); + PUSH_LOC (); + LOC (loc_id).m_ip_start = CODE_SIZE (); + + // If we are in a unwind protect and returning we need to + // run the cleanup code before returning. 
+ if (N_UNWIND_RETURN_TARGETS())
+ {
+ PUSH_CODE (INSTR::JMP);
+ int need_unwind = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1); // Placeholder
+ PUSH_A_UNWIND_RETURN_TARGET (need_unwind);
+ }
+ else
+ emit_return ();
+
+ LOC (loc_id).m_ip_end = CODE_SIZE ();
+ LOC (loc_id).m_col = cmd.column ();
+ LOC (loc_id).m_line = cmd.line ();
+}
+
+void
+bytecode_walker::
+visit_simple_assignment (tree_simple_assignment& expr)
+{
+ INC_DEPTH();
+ PUSH_NARGOUT(1); // nargout is 1 for simple assignments
+
+ tree_expression *lhs = expr.left_hand_side ();
+
+ CHECK_NONNULL (lhs);
+
+ if (!lhs->is_identifier() && !lhs->is_index_expression())
+ TODO ("lhs not identifier or index expression");
+
+ octave_value::assign_op op = expr.op_type ();
+
+ // There is a general op-code SUBASSIGN_CHAINED for "complex" index assignments
+ // and some specialized ones for "simple" assignments
+ bool complex_index_assignment = false;
+ bool idx_has_ends = false;
+
+ if (lhs->is_index_expression ())
+ {
+ tree_index_expression *idx = dynamic_cast<tree_index_expression *> (lhs);
+ complex_index_assignment = idx->type_tags ().size () != 1;
+
+ // We want to know if there is any magic end index in the arguments
+ std::list<tree_argument_list *> args_lists = idx->arg_lists ();
+ for (auto it = args_lists.begin (); it != args_lists.end (); it++)
+ {
+ if (!*it)
+ continue;
+ tree_argument_list *args = *it;
+ for (auto it_args = args->begin (); it_args != args->end (); it_args++)
+ {
+ if (!*it_args)
+ continue;
+ if (find_end_walker::has_end (**it_args))
+ idx_has_ends = true;
+ }
+ }
+
+ if (op != octave_value::assign_op::op_asn_eq)
+ complex_index_assignment = true;
+ }
+
+ if (complex_index_assignment)
+ {
+ if (idx_has_ends)
+ {
+ // TODO: Need lvalue walk to figure out how big subexpressions are for end.
+ // Eval as workaround.
+
+ // The VM needs to access the tree expr.
+ // Abuse the dbg info.
+ PUSH_TREE_FOR_EVAL (&expr);
+ int tree_idx = -CODE_SIZE (); // NB: Negative to not collide with debug data
+
+ PUSH_CODE (INSTR::EVAL);
+ PUSH_CODE (0); // nargout
+ PUSH_CODE_INT (tree_idx);
+
+ if (DEPTH () == 1)
+ PUSH_CODE (INSTR::POP);
+
+ POP_NARGOUT ();
+ DEC_DEPTH ();
+ return;
+ }
+
+ tree_index_expression *idx = dynamic_cast<tree_index_expression *> (lhs);
+
+ tree_expression *e = idx->expression ();
+ std::list<tree_argument_list *> args_lists = idx->arg_lists ();
+ std::list<tree_expression *> dyns_fields = idx->dyn_fields ();
+ std::list<string_vector> fields_names = idx->arg_names();
+ std::string type_tags = idx->type_tags ();
+
+ size_t n_chained = type_tags.size ();
+
+ // Begin with rhs
+ tree_expression *rhs = expr.right_hand_side ();
+ CHECK_NONNULL (rhs);
+ rhs->accept (*this);
+
+ // rhs is on the stack now
+
+ if (e->is_identifier ())
+ {
+ // Name of the identifier
+ std::string name = e->name ();
+
+ int slot = add_id_to_table (name);
+
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+ PUSH_SLOT (slot);
+ }
+ else
+ {
+ // Visit the lhs expression. This should put whatever
+ // we are assigning to, on the stack.
+ e->accept (*this);
+ }
+
+ // Subassigns are a bit awkward on a stack VM, since we can't
+ // do this piecewise. We need to construct a list of lists of
+ // arguments to all the chained subassigns and feed them to
+ // ov.assign (). TODO: make a "ref_subsasgn()" call or
+ // whatever in ov.cc for the middle of the chain.
+ //
+ // Also, any inclusion of 'end' gets quite annoying since we
+ // need to save subsrefs to each chained subexpression to be
+ // able to figure out the subexpressions' sizes.
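+ //
+ // E.g. "a(2:end).foo{3} = x" has three chained parts -- '(', '.' and
+ // '{' -- with one argument each, and the 'end' in the first part can
+ // only be resolved while the value of 'a' is still available.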
+
+ auto it_args_lists = args_lists.begin ();
+ auto it_dyns_fields = dyns_fields.begin ();
+ auto it_fields_names = fields_names.begin ();
+
+ int active_idx_slot = -1;
+ if (idx_has_ends)
+ {
+ // We need to store the active subexpression in a slot for end
+ // to be able to access it.
+ std::string name = "%active_idx_" + std::to_string (CODE_SIZE ());
+ add_id_to_table (name);
+ active_idx_slot = SLOT (name);
+ // Write the root value to the slot
+ // i.e. root(2:end)(3,end)
+ PUSH_CODE (INSTR::DUP);
+ MAYBE_PUSH_WIDE_OPEXT (active_idx_slot);
+ PUSH_CODE (INSTR::FORCE_ASSIGN);
+ PUSH_SLOT (active_idx_slot);
+ }
+
+ std::vector<int> n_args_per_part;
+
+ for (size_t i = 0; i < n_chained; i++)
+ {
+ // Number of args in the subexpression
+ // E.g. foo(1,2).bar(1) = ... => 2 and 1
+ int n_args_in_part = 0;
+ char type = type_tags[i];
+
+ tree_argument_list *args = *it_args_lists++;
+ tree_expression *dyn_fields = *it_dyns_fields++;
+ string_vector field_names = *it_fields_names++;
+
+ if (type == '.' && dyn_fields)
+ {
+ INC_DEPTH ();
+ dyn_fields->accept (*this);
+ DEC_DEPTH ();
+ n_args_in_part++; // Dynamic struct fields are always one arg
+ }
+ else if (type == '.')
+ {
+ // We want to push the field name as an octave string to the stack
+ std::string field_name = field_names.elem (0);
+ octave_value ov_field_name{field_name};
+ PUSH_DATA (ov_field_name); // Make a constant
+ // Load the constant
+ PUSH_CODE_LOAD_CST (DATA_SIZE () - 1); // Offset of the constant
+
+ n_args_in_part++;
+ }
+ else
+ {
+ // Push all the args to the stack
+
+ n_args_in_part = args->size ();
+ int j = 0;
+ // We want to push the args to the stack
+ // The order of eval is left to right
+ for (auto it = args->begin (); it != args->end (); it++, j++)
+ {
+ INC_DEPTH ();
+ // Any end will work on the active idx slot's object
+ PUSH_ID_BEGIN_INDEXED (active_idx_slot, j, n_args_in_part, false);
+ (*it)->accept (*this);
+ POP_ID_BEING_INDEXED ();
+ DEC_DEPTH ();
+ }
+ }
+
+ // If we have an end in the assignment we need to write the active subexpression
+ // to the designated slot for end to be able to access it.
+ // Unnecessary for the last in the chain.
+ if (idx_has_ends && i + 1 != n_chained)
+ {
+ // Push the prior active index subexpression
+ MAYBE_PUSH_WIDE_OPEXT (active_idx_slot);
+ PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+ PUSH_SLOT (active_idx_slot);
+ // Duplicate the args
+ PUSH_CODE (INSTR::DUPN);
+ PUSH_CODE (1); // offset, under the object being indexed
+ PUSH_CODE (n_args_in_part); // number of objects to duplicate
+ // Index the prior active index subexpression
+ PUSH_CODE (INSTR::INDEX_OBJ);
+ PUSH_CODE (1); // nargout
+ PUSH_CODE (0); // "has slot"
+ PUSH_WSLOT (0); // The w/e slot
+ PUSH_CODE (n_args_in_part);
+ PUSH_CODE (type);
+ // Write the new active subexpression back to the slot
+ MAYBE_PUSH_WIDE_OPEXT (active_idx_slot);
+ PUSH_CODE (INSTR::FORCE_ASSIGN);
+ PUSH_SLOT (active_idx_slot);
+ }
+
+ n_args_per_part.push_back (n_args_in_part);
+ }
+
+ // So we have a lot of arguments to the different subexpressions evaluated on the
+ // stack now.
+ //
+ // We want to put them in lists and feed them to a subsassgn() call. We use
+ // a special op-code for this.
+
+ PUSH_CODE (INSTR::SUBASSIGN_CHAINED);
+ PUSH_CODE (op); // =, += etc.
+ PUSH_CODE (n_chained);
+ for (unsigned i = 0; i < n_chained; i++)
+ {
+ PUSH_CODE (n_args_per_part[i]); // Number of args, left to right
+ // The type, i.e. '.'
or '(' or '{'
+ PUSH_CODE (type_tags[i]);
+ }
+
+ // Now we have the value that is subassigned to, on the stack
+ if (e->is_identifier ())
+ {
+ if (DEPTH () != 1) // Duplicate value for chained assignments
+ PUSH_CODE (INSTR::DUP);
+
+ // Write the subassigned value back to the slot
+ int slot = SLOT (e->name ());
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::FORCE_ASSIGN);
+ PUSH_SLOT (slot);
+
+ maybe_emit_push_and_disp_id (expr, e->name ());
+ }
+ else
+ {
+ if (expr.print_result ())
+ {
+ PUSH_CODE (INSTR::DUP);
+ emit_disp_obj (expr);
+ }
+ if (DEPTH () == 1)
+ PUSH_CODE (INSTR::POP);
+ }
+ }
+ else if (lhs->is_index_expression()) // e.g. "foo(2) = bar" or "foo.a = bar"?
+ {
+ /* We have different op codes for struct, cell and () index assignment
+ * of ids and another for assignments where the rhs of the index is not
+ * an id, e.g. foo.("bar") = 2 */
+
+ CHECK (op == octave_value::assign_op::op_asn_eq);
+
+ // We want the arguments to the index expression on the
+ // operand stack. They are evaluated before the rhs expression.
+ tree_index_expression *idx = dynamic_cast<tree_index_expression *> (lhs);
+
+ tree_expression *ee = idx->expression ();
+ std::list<tree_argument_list *> arg_lists = idx->arg_lists ();
+ std::list<tree_expression *> dyn_fields = idx->dyn_fields ();
+ std::list<string_vector> field_names = idx->arg_names();
+
+ std::string type_tags = idx->type_tags ();
+ CHECK (type_tags.size () == 1);
+ CHECK (dyn_fields.size () == 1);
+ CHECK (arg_lists.size () == 1);
+ CHECK (field_names.size () == 1);
+
+ char type = type_tags[0];
+
+ bool is_id = ee->is_identifier ();
+ CHECK_NONNULL(ee);
+
+ bool is_dynamic_field = false;
+ if (type == '.')
+ {
+ tree_expression *dyn_field = dyn_fields.front ();
+ if (dyn_field)
+ is_dynamic_field = true;
+ }
+
+ if (!is_id && type != '.')
+ {
+ tree_argument_list *arg = *arg_lists.begin ();
+
+ // TODO: The other branches evaluate rhs after the arguments.
+ // Has to be wrong?
+ tree_expression *rhs = expr.right_hand_side ();
+
+ CHECK_NONNULL (rhs);
+ rhs->accept (*this);
+ // The value of rhs is on the stack now
+
+ // Visit the lhs expression
+ ee->accept (*this);
+ // Pushed the leftmost lhs expression to the stack
+
+ int nargs = 0;
+
+ if (arg)
+ {
+ // If we are indexing an object, and have a magic end index
+ // we need to save the stack depth in a slot
+ bool obj_has_end = false;
+ for (auto it = arg->begin (); it != arg->end (); it++)
+ {
+ CHECK_NONNULL (*it);
+ tree_expression &t = **it;
+ obj_has_end = find_end_walker::has_end (t);
+ if (obj_has_end)
+ break;
+ }
+
+ int obj_stack_depth_slot = -1;
+ if (obj_has_end)
+ {
+ std::string obj_stack_depth_name = "%objsd_" + std::to_string (CODE_SIZE ());
+ obj_stack_depth_slot = add_id_to_table (obj_stack_depth_name);
+
+ MAYBE_PUSH_WIDE_OPEXT (obj_stack_depth_slot);
+ PUSH_CODE (INSTR::SET_SLOT_TO_STACK_DEPTH);
+ PUSH_SLOT (obj_stack_depth_slot);
+ }
+
+ nargs = arg->size ();
+ int i = 0;
+ // We want to push the args to the stack
+ for (auto it = arg->begin (); it != arg->end (); it++, i++)
+ {
+ INC_DEPTH ();
+ PUSH_ID_BEGIN_INDEXED (obj_stack_depth_slot, i, nargs, true);
+ (*it)->accept (*this);
+ POP_ID_BEING_INDEXED ();
+ DEC_DEPTH ();
+ }
+ }
+ // rhs, lhs root expression, lhs's args on the stack now
+
+ PUSH_CODE (INSTR::SUBASSIGN_OBJ);
+ PUSH_CODE (nargs);
+ PUSH_CODE (type);
+
+ if (expr.print_result ())
+ {
+ PUSH_CODE (INSTR::DUP);
+ emit_disp_obj (expr);
+ }
+
+ // SUBASSIGN_OBJ puts the lhs back on the stack
+ // but since lhs is not an id from a slot we just
+ // pop it unless it is used by a chained assign.
+ if (DEPTH () == 1) + PUSH_CODE (INSTR::POP); + } + else if (type == '(') + { + // Name of the identifier + std::string name = ee->name (); + + add_id_to_table (name); + + tree_argument_list *arg = *arg_lists.begin (); + + int nargs = 0; + if (arg) + { + nargs = arg->size (); + int i = 0; + // We want to push the args to the stack + for (auto it = arg->begin (); it != arg->end (); it++, i++) + { + INC_DEPTH (); + PUSH_ID_BEGIN_INDEXED (SLOT (name), i, nargs, false); + (*it)->accept (*this); + POP_ID_BEING_INDEXED (); + DEC_DEPTH (); + } + } + + tree_expression *rhs = expr.right_hand_side (); + + CHECK_NONNULL (rhs); + rhs->accept (*this); + // The value of rhs is on the operand stack now + + // If the assignment is not at root we want to keep the + // value on the stack, e.g. + // a = b(1) = 3; + // Gives: a == 3 + // We use a slot to store the rhs in. + std::string rhs_copy_nm = "%rhs_" + std::to_string (CODE_SIZE ()); + int slot_cpy = -1; + if (DEPTH () != 1) + { + slot_cpy = add_id_to_table (rhs_copy_nm); + PUSH_CODE (INSTR::DUP); + MAYBE_PUSH_WIDE_OPEXT (slot_cpy); + PUSH_CODE (INSTR::FORCE_ASSIGN); + PUSH_SLOT (slot_cpy); + } + + int slot = SLOT (name); + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::SUBASSIGN_ID); + PUSH_SLOT (slot); + PUSH_CODE (nargs); + + if (DEPTH () != 1) + { + MAYBE_PUSH_WIDE_OPEXT (slot_cpy); + PUSH_CODE (INSTR::PUSH_SLOT_INDEXED); + PUSH_SLOT (slot_cpy); + } + + maybe_emit_push_and_disp_id (expr, name); + } + else if (type == '.') + { + tree_expression *e = idx->expression (); + CHECK_NONNULL(e); + + if (is_id && !is_dynamic_field) + { + // Name of the identifier + std::string name = e->name (); + + add_id_to_table (name); + + std::list l_pv_nms = idx->arg_names (); + CHECK (l_pv_nms.size () == 1); + auto pv_nms = l_pv_nms.begin (); + CHECK (pv_nms->numel () == 1); + + std::string field_name = pv_nms->elem (0); + + // We just need the field's name in the VM + int slot_field = add_id_to_table (field_name); + + tree_expression *rhs = expr.right_hand_side (); + + CHECK_NONNULL (rhs); + rhs->accept (*this); + // The value of rhs is on the operand stack now + + std::string rhs_copy_nm = "%rhs_" + std::to_string (CODE_SIZE ()); + int slot_cpy = -1; + if (DEPTH () != 1) // Chained assignments? + { + slot_cpy = add_id_to_table (rhs_copy_nm); + PUSH_CODE (INSTR::DUP); + MAYBE_PUSH_WIDE_OPEXT (slot_cpy); + PUSH_CODE (INSTR::FORCE_ASSIGN); + PUSH_SLOT (slot_cpy); + } + + int slot = SLOT (name); + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::SUBASSIGN_STRUCT); + PUSH_SLOT (slot); + PUSH_WSLOT (slot_field); + + if (DEPTH () != 1) + { + MAYBE_PUSH_WIDE_OPEXT (slot_cpy); + PUSH_CODE (INSTR::PUSH_SLOT_INDEXED); + PUSH_SLOT (slot_cpy); + } + + maybe_emit_push_and_disp_id (expr, name); + } + else if (is_dynamic_field && is_id) + { + // Name of the identifier + std::string name = e->name (); + + add_id_to_table (name); + + tree_expression *rhs = expr.right_hand_side (); + CHECK_NONNULL (rhs); + rhs->accept (*this); + // The value of rhs is on the stack now + + // We want lhs on the stack + int slot = SLOT (name); + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::PUSH_SLOT_INDEXED); + PUSH_SLOT (slot); + + // The argument, foo.(arg) = bar + tree_expression *dyn_expr = dyn_fields.front (); + CHECK_NONNULL (dyn_expr); + + INC_DEPTH (); + PUSH_NARGOUT (1); + dyn_expr->accept (*this); + // The value of the arg on the stack, i.e. 
foo.(arg) = baz + POP_NARGOUT (); + DEC_DEPTH (); + + PUSH_CODE (INSTR::SUBASSIGN_OBJ); + PUSH_CODE (1); // nargout + PUSH_CODE (type); + + if (DEPTH () != 1) // Chained assignments? + PUSH_CODE (INSTR::DUP); + + // Assign the assigned to value back to the slot + // TODO: Neccessary? + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::FORCE_ASSIGN); + PUSH_SLOT (slot); + + maybe_emit_push_and_disp_id (expr, name); + } + else if (!is_dynamic_field && !is_id) + { + tree_expression *rhs = expr.right_hand_side (); + CHECK_NONNULL (rhs); + rhs->accept (*this); + // The value of rhs is on the operand stack now + + // Visit the lhs expression + e->accept (*this); + // Pushed the left most lhs expression to the stack + + string_vector ptr = field_names.front (); + CHECK (ptr.numel() == 1); + std::string field_name = ptr.elem (0); + + /* Make a ov string with the field name in it that + * we store as a constant. */ + octave_value ov_field_name{field_name}; + PUSH_DATA (ov_field_name); + + PUSH_CODE_LOAD_CST (DATA_SIZE () - 1); // Offset of the constant + + PUSH_CODE (INSTR::SUBASSIGN_OBJ); + PUSH_CODE (1); // nargout + PUSH_CODE (type); + + if (expr.print_result ()) + { + PUSH_CODE (INSTR::DUP); + emit_disp_obj (expr); + } + + // SUBASSIGN_OBJ puts the lhs back on the stack + // but since lhs is not an id from a slot we just + // pop it, unless there are chained assignments. + if (DEPTH () == 1) + PUSH_CODE (INSTR::POP); + } + else //(is_dynamic_field && !is_id) + { + tree_expression *rhs = expr.right_hand_side (); + CHECK_NONNULL (rhs); + rhs->accept (*this); + // The value of rhs is on the operand stack now + + // Visit the lhs expression + e->accept (*this); + // Pushed the left most lhs expression to the stack + + // The argument, foo.(arg) = bar + tree_expression *dyn_expr = dyn_fields.front (); + CHECK_NONNULL (dyn_expr); + + INC_DEPTH (); + PUSH_NARGOUT (1); + dyn_expr->accept (*this); + // The value of the arg on the stack, i.e. foo.(arg) = baz + POP_NARGOUT (); + DEC_DEPTH (); + + PUSH_CODE (INSTR::SUBASSIGN_OBJ); + PUSH_CODE (1); // nargout + PUSH_CODE (type); + + if (expr.print_result ()) + { + PUSH_CODE (INSTR::DUP); + emit_disp_obj (expr); + } + + // SUBASSIGN_OBJ puts the lhs back on the stack + // but since lhs is not an id from a slot we just + // pop it, unless there are chained assignments. + if (DEPTH () == 1) + PUSH_CODE (INSTR::POP); + } + } + else if (type == '{') + { + tree_expression *e = idx->expression (); + CHECK_NONNULL(e); + CHECK (e->is_identifier ()); + + // Name of the identifier + std::string name = e->name (); + + add_id_to_table (name); + + CHECK (arg_lists.size ()); + tree_argument_list *arg = *arg_lists.begin (); + + int nargs = 0; + if (arg) + { + nargs = arg->size (); + int i = 0; + // We want to push the args to the stack + for (auto it = arg->begin (); it != arg->end (); it++, i++) + { + INC_DEPTH (); + PUSH_ID_BEGIN_INDEXED (SLOT (name), i, nargs, false); + (*it)->accept (*this); + POP_ID_BEING_INDEXED (); + DEC_DEPTH (); + } + } + + tree_expression *rhs = expr.right_hand_side (); + + CHECK_NONNULL (rhs); + rhs->accept (*this); + // The value of rhs is on the operand stack now + + // If the assignment is not at root we want to keep the + // value on the stack, e.g. + // a = b(1) = 3; + // Gives: a == 3 + // We use a slot to store the rhs in. 
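+ // For example (an illustrative sketch): compiling the chained
+ // assignment
+ //   a = c{2} = 5;
+ // reaches this point with DEPTH () != 1, so the code below DUPs the
+ // rhs into a compiler-generated slot (named "%rhs_<CODE_SIZE>"), lets
+ // SUBASSIGN_CELL_ID consume the original, and then re-pushes the
+ // saved 5 from that slot so the outer assignment to 'a' still has a
+ // value to bind.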
+ std::string rhs_copy_nm = "%rhs_" + std::to_string (CODE_SIZE ());
+ int slot_cpy = -1;
+ if (DEPTH () != 1)
+ {
+ slot_cpy = add_id_to_table (rhs_copy_nm);
+ PUSH_CODE (INSTR::DUP);
+ MAYBE_PUSH_WIDE_OPEXT (slot_cpy);
+ PUSH_CODE (INSTR::FORCE_ASSIGN);
+ PUSH_SLOT (slot_cpy);
+ }
+
+ int slot = SLOT (name);
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::SUBASSIGN_CELL_ID);
+ PUSH_SLOT (slot);
+ PUSH_CODE (nargs);
+
+ if (DEPTH () != 1)
+ {
+ MAYBE_PUSH_WIDE_OPEXT (slot_cpy);
+ PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+ PUSH_SLOT (slot_cpy);
+ }
+
+ maybe_emit_push_and_disp_id (expr, name);
+ }
+ else
+ TODO ("Type of subassignment not done yet");
+ }
+ else if (lhs->is_identifier ())
+ {
+ std::string name = lhs->name ();
+
+ int slot = add_id_to_table (name);
+
+ tree_expression *rhs = expr.right_hand_side ();
+
+ CHECK_NONNULL (rhs);
+ rhs->accept (*this);
+ // The value of rhs is on the operand stack now
+
+ if (op != octave_value::assign_op::op_asn_eq)
+ {
+ // Compound assignments have the type of operation in the code
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::ASSIGN_COMPOUND);
+ PUSH_SLOT (slot);
+ PUSH_CODE (op);
+ }
+ else
+ {
+ // Ordinary assignment has its own opcode.
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::ASSIGN);
+ PUSH_SLOT (slot);
+ }
+
+ // If the assignment is not at root we want to keep the
+ // value on the stack, e.g.
+ // a = (b = 3);
+ if (DEPTH () != 1)
+ {
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+ PUSH_SLOT (slot);
+ }
+
+ maybe_emit_push_and_disp_id (expr, name);
+ }
+
+ POP_NARGOUT ();
+ DEC_DEPTH ();
+}
+
+void
+bytecode_walker::
+visit_matrix (tree_matrix &m)
+{
+ INC_DEPTH ();
+
+ bool is_rectangle = true;
+ std::vector<int> row_lengths;
+
+ /* We want to know if the matrix is rectangular, i.e.
+ * all rows are of equal length.
*/ + size_t first_row_size = static_cast (-1); + for (auto it = m.begin (); it != m.end (); it++) + { + // This is a row + tree_argument_list *row = *it; + size_t row_size = row->size (); + + if (first_row_size == static_cast (-1)) + first_row_size = row_size; + else if (first_row_size != row_size) + is_rectangle = false; + + row_lengths.push_back (row_size); + } + + + auto p = m.begin (); + int n_rows = 0; + int n_cols = 0; + + // Push each row element to operand stack + while (p != m.end ()) + { + // This is a row + tree_argument_list *elt = *p++; + + n_cols = 0; + CHECK_NONNULL (elt); + for (auto it = elt->begin (); it != elt->end (); it++) + { + // This is an element + tree_expression *e = *it; + CHECK_NONNULL (e); + + INC_DEPTH (); + e->accept (*this); + DEC_DEPTH (); + n_cols++; + } + n_rows++; + } + + CHECK (n_cols > 0); + CHECK (n_rows > 0); + + if (is_rectangle && n_cols < 256 && n_rows < 256) // Small rectangle matrix + { + PUSH_CODE (INSTR::MATRIX); + PUSH_CODE (n_rows); + PUSH_CODE (n_cols); + } + else if (is_rectangle) // Big rectangle matrix + { + PUSH_CODE (INSTR::MATRIX_UNEVEN); + PUSH_CODE (1); // Type 1, Big rectangle matrix + PUSH_CODE_INT (n_rows); + PUSH_CODE_INT (n_cols); + } + else // Uneven matrix + { + PUSH_CODE (INSTR::MATRIX_UNEVEN); + PUSH_CODE (0); // Type 0, Uneven matrix + PUSH_CODE_INT (n_rows); + for (int i : row_lengths) + PUSH_CODE_INT (i); + } + + maybe_emit_bind_ans_and_disp (m); + + DEC_DEPTH (); +} + +void +bytecode_walker:: +visit_cell (tree_cell &m) +{ + INC_DEPTH (); + + auto p = m.begin (); + int n_rows = 0; + int n_cols = -1; + + PUSH_CODE (INSTR::PUSH_OV_U64); // number of rows + + // Push each row element to operand stack + while (p != m.end ()) + { + // This is a row + tree_argument_list *elt = *p++; + + PUSH_CODE (INSTR::PUSH_OV_U64); //number of columns + + int n_cols_old = n_cols; + n_cols = 0; + CHECK_NONNULL (elt); + for (auto it = elt->begin (); it != elt->end (); it++) + { + // This is an element + tree_expression *e = *it; + CHECK_NONNULL (e); + + INC_DEPTH (); + e->accept (*this); + DEC_DEPTH (); + n_cols++; + + // We now need to expand the value (if it is an cs list) + // and rotate the counters to the top of the stack. + // + // Expand cslist does that in one opcode. + PUSH_CODE (INSTR::EXPAND_CS_LIST); + } + + if (n_cols > n_cols_old) + n_cols_old = n_cols; + + // The amount of rows is on the second position of the stack, + // rotate it with the amount of columns and increment the rows. + PUSH_CODE (INSTR::ROT); + PUSH_CODE (INSTR::INCR_PREFIX); + + n_rows++; + } + + PUSH_CODE (INSTR::PUSH_CELL); + + maybe_emit_bind_ans_and_disp (m); + + DEC_DEPTH (); +} + +void +bytecode_walker:: +visit_identifier (tree_identifier& id) +{ + INC_DEPTH(); + + std::string name = id.name (); + if (name == "__VM_DBG") + { + PUSH_CODE (INSTR::PUSH_FALSE); // An id need to put something on the stack + PUSH_CODE (INSTR::DEBUG); + } + // The magic end id need special handling + else if (name == "end") + { + CHECK (ID_IS_BEING_INDEXED ()); + + // Since in e.g. "M = [1 2 3]; M (min (10, end))" the 'end' will + // refer to the end of M, not the function min, we need a special + // op-code for nested indexings that can refer to any outer object + int n_ids = N_IDS_BEING_INDEXED (); + + if (n_ids == 1) // Simple case + { + id_being_indexed obj = PEEK_ID_BEING_INDEXED (); + if (obj.type == 0) + { + /* TODO: Is this op-code with slots really needed? 
*/ + MAYBE_PUSH_WIDE_OPEXT (obj.slot); + PUSH_CODE (INSTR::END_ID); + PUSH_SLOT (obj.slot); // The slot variable being indexed + PUSH_CODE (obj.nargs); // The amount of dimensions being indexed + PUSH_CODE (obj.idx); // The offset of the index being indexed right now + } + else if (obj.type == 1) + { + MAYBE_PUSH_WIDE_OPEXT (obj.slot); + PUSH_CODE (INSTR::END_OBJ); + // Slot for keeping the stack depth of the object being indexed + PUSH_SLOT (obj.slot); + PUSH_CODE (obj.nargs); // The amount of dimensions being indexed + PUSH_CODE (obj.idx); // The offset of the index being indexed right now + } + else + panic_impossible (); + } + else // Nested indexing + { + PUSH_CODE (INSTR::END_X_N); + PUSH_CODE (n_ids); + + // Note: Pushing inner to outer. + // foo (bar (baz (1, end))) => 1: baz, 2: bar, 3: foo + for (int i = n_ids - 1; i >= 0; i--) + { + id_being_indexed obj = IDS_BEING_INDEXED (i); + PUSH_CODE (obj.nargs); + PUSH_CODE (obj.idx); + PUSH_CODE (obj.type); + PUSH_WSLOT (obj.slot); + } + } + } + else + { + int slot = add_id_to_table (name); + + int loc_id = N_LOC (); + PUSH_LOC (); + LOC (loc_id).m_ip_start = CODE_SIZE (); + + if (m_pending_ignore_outputs && DEPTH () == 2) + { + PUSH_CODE (INSTR::SET_IGNORE_OUTPUTS); + PUSH_CODE (m_v_ignored.size ()); + PUSH_CODE (m_ignored_of_total); + for (int i : m_v_ignored) + PUSH_CODE (i); + } + + if (id.is_postfix_indexed ()) + { + // "foo.a" and "foo{1}" might be command function calls + // which is checked for in PUSH_SLOT_NARGOUT1_SPECIAL + // Also foo might be a classdef meta object. + MAYBE_PUSH_WIDE_OPEXT (slot); + if (id.postfix_index () != '(') + PUSH_CODE (INSTR::PUSH_SLOT_NARGOUT1_SPECIAL); + else + PUSH_CODE (INSTR::PUSH_SLOT_INDEXED); + PUSH_SLOT (slot); + } + else if (DEPTH () == 1) + { + CHECK (NARGOUT () == 0); + + if (id.print_result ()) + { + // Need to keep track of if this is a command call + // or not for display since "x" will print "x = 3" + // for e.g. variables but "ans = 3" for command calls. + std::string maybe_cmd_name = "%maybe_command"; + int slot_cmd = add_id_to_table (maybe_cmd_name); + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::PUSH_SLOT_DISP); + PUSH_SLOT (slot); + PUSH_WSLOT (slot_cmd); + + maybe_emit_bind_ans_and_disp (id, maybe_cmd_name); + } + else + { + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::PUSH_SLOT_NARGOUT0); + PUSH_SLOT (slot); + + // Write the return value to ans. It is either the variables + // value straight off, or e.g. a cmd function call return value. + maybe_emit_bind_ans_and_disp (id); + } + } + else if (NARGOUT () == 1) + { + // Push the local at its slot number to the stack + MAYBE_PUSH_WIDE_OPEXT (slot); + if (name == "pi") + PUSH_CODE (INSTR::PUSH_PI); + else + PUSH_CODE (INSTR::PUSH_SLOT_NARGOUT1); + PUSH_SLOT (slot); + } + else if (NARGOUT() > 1) + { + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::PUSH_SLOT_NARGOUTN); + PUSH_SLOT (slot); + PUSH_CODE (NARGOUT ()); + } + else + { + // Push the local at its slot number to the stack + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::PUSH_SLOT_NARGOUT0); + PUSH_SLOT (slot); + } + + LOC (loc_id).m_ip_end = CODE_SIZE (); + LOC (loc_id).m_col = id.column (); + LOC (loc_id).m_line = id.line (); + } + DEC_DEPTH(); +} + +int +bytecode_walker:: +add_id_to_table (std::string name) +{ + // Is the id already added to the local table? 
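+ // Roughly, when compiling "y = x + 1" with an otherwise empty slot
+ // table (an illustrative sketch):
+ //   add_id_to_table ("y") -> slot 0   (new entry)
+ //   add_id_to_table ("x") -> slot 1   (new entry)
+ //   add_id_to_table ("x") -> slot 1   (already interned, same slot)
+ // Compiler-internal names such as "%rhs_<n>" go into the same table.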
+ auto it = m_map_locals_to_slot.find (name); + + if (it == m_map_locals_to_slot.end ()) + { + // Push local + m_code.m_ids.push_back(name); + m_map_locals_to_slot[name] = m_n_locals++; + + return m_n_locals - 1; + } + + return it->second; +} + +void +bytecode_walker:: +visit_no_op_command (tree_no_op_command& cmd) +{ + if (cmd.is_end_of_fcn_or_script()) + { + // Put a return in the end so that we don't fall of the edge + // of the world + + int loc_id = N_LOC (); + PUSH_LOC (); + LOC (loc_id).m_ip_start = CODE_SIZE (); + + PUSH_TREE_FOR_DBG (&cmd); + emit_return (); + + LOC (loc_id).m_ip_end = CODE_SIZE (); + LOC (loc_id).m_col = cmd.column (); + LOC (loc_id).m_line = cmd.line (); + } +} + +void +bytecode_walker:: +visit_do_until_command (tree_do_until_command& cmd) +{ + tree_expression *expr = cmd.condition (); + int code_start = CODE_SIZE (); + + tree_statement_list *list = cmd.body (); + + PUSH_CONTINUE_TARGET (); + PUSH_BREAKS (); + + // Push an opcode that checks for signals, e.g. ctrl-c + PUSH_CODE (INSTR::HANDLE_SIGNALS); + + // A empty body will yield a null list pointer + m_n_nested_loops++; + if (list) + list->accept (*this); + m_n_nested_loops--; + + // Any continue jumps to here (before the condition) + for (int offset : POP_CONTINUE_TARGET()) + SET_CODE_SHORT (offset, CODE_SIZE ()); + + CHECK_NONNULL (expr); + INC_DEPTH (); // Since we need the value + PUSH_TREE_FOR_DBG (expr); + expr->accept (*this); + DEC_DEPTH (); + + // The condition value is on the operand stack, do + // a jmp_ifn to the start of the body, on false + PUSH_CODE (INSTR::JMP_IFN); + PUSH_CODE_SHORT (code_start); + + // The breaks jump to here + for (int offset : POP_BREAKS ()) + SET_CODE_SHORT (offset, CODE_SIZE ()); +} + +void +bytecode_walker:: +visit_while_command (tree_while_command& cmd) +{ + tree_expression *expr = cmd.condition (); + + // Location data for the condition + int loc_id = N_LOC (); + PUSH_LOC (); + LOC (loc_id).m_ip_start = CODE_SIZE (); + + int cond_offset = CODE_SIZE (); + + CHECK_NONNULL (expr); + INC_DEPTH (); // Since we need the value + PUSH_TREE_FOR_DBG (expr); + expr->accept (*this); + DEC_DEPTH (); + + // The condition value is on the operand stack, do + // a jmp_ifn to after the body, on false + PUSH_CODE (INSTR::JMP_IFN); + int offset_need_jmp_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); // Placeholder + + LOC (loc_id).m_ip_end = CODE_SIZE (); + LOC (loc_id).m_col = expr->column (); + LOC (loc_id).m_line = expr->line (); + + tree_statement_list *list = cmd.body (); + + PUSH_CONTINUE_TARGET (); + PUSH_BREAKS (); + + // Push an opcode that checks for signals, e.g. ctrl-c + PUSH_CODE (INSTR::HANDLE_SIGNALS); + + // nullptr if body is empty + m_n_nested_loops++; + if (list) + list->accept (*this); + m_n_nested_loops--; + + // The continue targets can now be set, to jump back + // to the condition. + for (int offset : POP_CONTINUE_TARGET()) + SET_CODE_SHORT (offset, cond_offset); + + // Jump back to the condition, TODO: unless all paths are terminated + PUSH_CODE (INSTR::JMP); + PUSH_CODE_SHORT (cond_offset); + + // Now we can set where the condition should jump on false, i.e. 
+ // to here, after the jump back to the condition
+ SET_CODE_SHORT (offset_need_jmp_after, CODE_SIZE ());
+
+ // The breaks jump to the same place
+ for (int offset : POP_BREAKS ())
+ SET_CODE_SHORT (offset, CODE_SIZE ());
+}
+
+void
+bytecode_walker::
+visit_switch_command (tree_switch_command& cmd)
+{
+ tree_expression *expr = cmd.switch_value ();
+ CHECK_NONNULL (expr);
+
+ tree_switch_case_list *lst = cmd.case_list ();
+
+ std::vector<int> need_after_all;
+
+ // First off we need the switch value on the stack
+ INC_DEPTH ();
+ PUSH_NARGOUT(1);
+
+ expr->accept (*this);
+
+ POP_NARGOUT ();
+ DEC_DEPTH ();
+
+ // Since the switch has a value on the stack through the whole switch
+ // statement, we need to track that so returns can pop it.
+ PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK);
+
+ // Any nested continue or break needs to pop the switch value
+ PUSH_CONTINUE_TARGET ();
+ PUSH_BREAKS ();
+
+ // We now have the switch value on the operand stack,
+ // so we need to compare it with the first label and
+ // either execute its code or skip it depending on
+ // whether the switch value and the label are "equal"
+
+ tree_switch_case *default_case = nullptr;
+
+ if (lst)
+ for (tree_switch_case *t : *lst)
+ {
+ // We want to do the default case last
+ if (t->is_default_case ())
+ {
+ default_case = t;
+ continue;
+ }
+
+ // We need to duplicate the switch value on the stack so
+ // each label will have its own
+ PUSH_CODE (INSTR::DUP);
+
+ INC_DEPTH ();
+ PUSH_NARGOUT(1);
+
+ // Walk the case label expression
+ t->case_label()->accept(*this);
+
+ POP_NARGOUT ();
+ DEC_DEPTH ();
+
+ // The case label value is now on the stack
+
+ PUSH_CODE (INSTR::JMP_IFNCASEMATCH);
+ int need_next = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+
+ // Walk the case label body
+
+ tree_statement_list *stmt_lst = t->commands ();
+
+ if (stmt_lst)
+ stmt_lst->accept (*this);
+
+ // TODO: Unless the body is terminated we need to jump past
+ // the rest of the switch bodies
+ PUSH_CODE (INSTR::JMP);
+ need_after_all.push_back (CODE_SIZE ());
+ PUSH_CODE_SHORT (-1); // Placeholder, jump to after all
+
+ // If the label did not match, we jump to here. Below there will be
+ // another case or the end of the switch
+ SET_CODE_SHORT (need_next, CODE_SIZE ()); // The placeholder above
+ }
+
+ // If there was a default case, generate code for it
+ if (default_case)
+ {
+ tree_statement_list *stmt_lst = default_case->commands();
+
+ if (stmt_lst)
+ stmt_lst->accept (*this);
+ }
+
+ // Any nested break or continue needs to jump here to pop an ov
+ // and then jump to an outer break or continue block.
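+ // An illustrative trace for a "break" inside a switch inside a loop
+ // (a sketch, not taken from the original patch text):
+ //
+ //   <case body>  JMP  --------.   (recorded via the switch's PUSH_BREAKS)
+ //                             v
+ //   bridge:      POP              (drop the switch value)
+ //                JMP  -1          (placeholder, later patched to the
+ //                                  enclosing loop's break target via
+ //                                  PUSH_NEED_BREAK)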
+ auto v_breaks = POP_BREAKS ();
+ auto v_continues = POP_CONTINUE_TARGET ();
+
+ if (v_breaks.size () || v_continues.size ())
+ {
+ // Fallthrough from default needs to jump past the break and continue bridges
+ PUSH_CODE (INSTR::JMP);
+ int offset = CODE_SIZE ();
+ need_after_all.push_back (offset);
+ PUSH_CODE_SHORT (-1);
+ }
+
+ if (v_breaks.size ())
+ {
+ for (int offset : v_breaks)
+ SET_CODE_SHORT (offset, CODE_SIZE ());
+ // We need to pop the switch value
+ PUSH_CODE (INSTR::POP);
+ // Jump to the outer break target
+ PUSH_CODE (INSTR::JMP);
+ int offset = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+ PUSH_NEED_BREAK (offset);
+ }
+ if (v_continues.size ())
+ {
+ // Nested continues jump to here
+ int target_offset = CODE_SIZE ();
+ for (int offset : v_continues)
+ SET_CODE_SHORT (offset, target_offset);
+ // We need to pop the switch value
+ PUSH_CODE (INSTR::POP);
+ // Jump to the outer continue target (i.e. the start of whatever loop)
+ PUSH_CODE (INSTR::JMP);
+ int need_continue_target = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1);
+ PUSH_NEED_CONTINUE_TARGET (need_continue_target);
+ }
+
+ // Some code points might need a jump to after the switch statement
+ for (int offset : need_after_all)
+ SET_CODE_SHORT (offset, CODE_SIZE ());
+
+ // We need to pop the switch value
+ PUSH_CODE (INSTR::POP);
+
+ // We are out of the switch statement so pop it from the nesting stack
+ POP_NESTING_STATEMENT ();
+}
+
+void
+bytecode_walker::
+visit_if_command (tree_if_command& cmd)
+{
+ tree_if_command_list *list = cmd.cmd_list ();
+ CHECK_NONNULL (list);
+
+ // Offsets of jump addresses that need to jump past
+ // all the if clauses and bodies, e.g. the end of
+ // each if body when there is more than one clause.
+ std::vector<int> need_after_all;
+
+ // Offset for the jump address of the condition test, which
+ // needs to jump past the body.
+ int need_after_body = -1;
+
+ std::size_t n = list->size ();
+
+ std::size_t idx = 0;
+ for (auto p = list->begin (); p != list->end (); p++, idx++)
+ {
+ bool is_last = idx + 1 == n;
+
+ tree_if_clause *elt = *p;
+ CHECK_NONNULL (elt);
+
+ tree_statement_list *body = elt->commands ();
+
+ bool is_not_else = !
elt->is_else_clause (); + // Condition + if (is_not_else) + { + tree_expression *cond = elt->condition (); + CHECK_NONNULL (cond); + + // Location data for the condition + int loc_id = N_LOC (); + PUSH_LOC (); + LOC (loc_id).m_ip_start = CODE_SIZE (); + + PUSH_TREE_FOR_DBG (elt); // We want the debug hit before the condition + + PUSH_NARGOUT (1); + INC_DEPTH (); + cond->accept (*this); + DEC_DEPTH (); + POP_NARGOUT (); + + // The condition is on the operand stack now + PUSH_CODE (INSTR::JMP_IFN); + need_after_body = CODE_SIZE (); + PUSH_CODE_SHORT (-1); // Placeholder, jump to after all + + LOC (loc_id).m_ip_end = CODE_SIZE (); + LOC (loc_id).m_col = cond->column (); + LOC (loc_id).m_line = cond->line (); + } + + // Body + // nullptr if body is empty + if (body) + body->accept (*this); + + if (!is_last) + { + PUSH_CODE (INSTR::JMP); + need_after_all.push_back (CODE_SIZE ()); + PUSH_CODE_SHORT (-1); // Placeholder, jump to after all + } + + // Now we can set the address to which failed condition + // will jump + if (is_not_else) + SET_CODE_SHORT (need_after_body, CODE_SIZE ()); + } + + for (int offset : need_after_all) + SET_CODE_SHORT (offset, CODE_SIZE ()); +} + +void +bytecode_walker:: +visit_anon_fcn_handle (tree_anon_fcn_handle &expr) +{ + INC_DEPTH (); + + PUSH_TREE_FOR_EVAL (&expr); + int tree_idx = -CODE_SIZE (); + + PUSH_CODE (INSTR::PUSH_ANON_FCN_HANDLE); + PUSH_CODE_INT (tree_idx); + + maybe_emit_bind_ans_and_disp (expr); + + DEC_DEPTH (); +} + +void +bytecode_walker:: +emit_args_for_visit_index_expression (tree_argument_list *arg_list, + tree_expression *root_lhs_id) +{ + int nargs = arg_list->size (); + int idx = 0; + bool lhs_is_id = root_lhs_id ? root_lhs_id->is_identifier () : false; + + // If we are indexing an object, and have a magic end index + // we need to save the stack depth in a slot + bool obj_has_end = false; + if (!lhs_is_id) + { + for (auto it = arg_list->begin (); it != arg_list->end (); it++) + { + CHECK_NONNULL (*it); + obj_has_end = find_end_walker::has_end (**it); + if (obj_has_end) + break; + } + } + + int obj_stack_depth_slot = -1; + if (obj_has_end) + { + std::string obj_stack_depth_name = "%objsd_" + std::to_string (CODE_SIZE ()); + obj_stack_depth_slot = add_id_to_table (obj_stack_depth_name); + + MAYBE_PUSH_WIDE_OPEXT (obj_stack_depth_slot); + PUSH_CODE (INSTR::SET_SLOT_TO_STACK_DEPTH); + PUSH_SLOT (obj_stack_depth_slot); + } + + // We want to push the args to the stack + for (auto it = arg_list->begin (); it != arg_list->end (); it++, idx++) + { + INC_DEPTH (); + if (lhs_is_id) + PUSH_ID_BEGIN_INDEXED (SLOT (root_lhs_id->name ()), idx, nargs, false); + else + PUSH_ID_BEGIN_INDEXED (obj_stack_depth_slot, idx, nargs, true); + + PUSH_NARGOUT (1); + (*it)->accept (*this); + POP_NARGOUT (); + POP_ID_BEING_INDEXED (); + DEC_DEPTH (); + } +} + +void +bytecode_walker:: +emit_fields_for_visit_index_expression (string_vector &field_names, + tree_expression *dyn_expr, + tree_expression *lhs_root, + bool *struct_is_id_dot_id) +{ + if (struct_is_id_dot_id) + *struct_is_id_dot_id = false; + // For struct the "arg" is the field and not executed. + // Just add it as an identifier so that we can get it's + // name as a string in the VM. 
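+ // The three shapes handled below, schematically (an illustrative
+ // sketch, not taken from the original patch text):
+ //   s.foo     with 's' an id    -> "foo" interned as a slot name
+ //                                  (*struct_is_id_dot_id set to true)
+ //   f ().foo  with a non-id root -> "foo" pushed as an octave_value
+ //                                  constant via a LOAD_CST variant
+ //   s.(expr)  (dynamic field)    -> expr is compiled so its value
+ //                                  ends up on the stack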
+ CHECK (field_names.numel() == 1); + + std::string field_name = field_names.elem (0); + + if (lhs_root && lhs_root->is_identifier () && field_name.size ()) + { + if (struct_is_id_dot_id) + *struct_is_id_dot_id = true; + add_id_to_table (field_name); + } + else if (field_name.size ()) + { + octave_value ov_field_name{field_name}; + PUSH_DATA (ov_field_name); + + PUSH_CODE_LOAD_CST (DATA_SIZE () - 1); // Offset of the constant + } + else + { + CHECK_NONNULL (dyn_expr); + + INC_DEPTH (); + PUSH_NARGOUT (1); + dyn_expr->accept (*this); + POP_NARGOUT (); + DEC_DEPTH (); + } +} + +void +bytecode_walker:: +eval_visit_index_expression (tree_index_expression& expr) +{ + INC_DEPTH (); + tree_expression *e = expr.expression (); + CHECK_NONNULL(e); + + PUSH_TREE_FOR_EVAL (&expr); + int tree_idx = -CODE_SIZE (); // NB: Negative to not collide with debug data + + PUSH_CODE (INSTR::EVAL); + PUSH_CODE (NARGOUT ()); + PUSH_CODE_INT (tree_idx); + + maybe_emit_bind_ans_and_disp (expr); + + if (DEPTH () == 1 && NARGOUT () > 1) + TODO ("Silly state"); + + DEC_DEPTH (); +} + +void +bytecode_walker:: +simple_visit_index_expression (tree_index_expression& expr) +{ + INC_DEPTH (); + tree_expression *e = expr.expression (); + CHECK_NONNULL(e); + + // Word commands are on the form: + // foo bar baz; <=> foo('bar', 'baz'); + bool is_wordcmd = expr.is_word_list_cmd (); + + std::string type_tags = expr.type_tags (); + + size_t n_chained = type_tags.size (); + CHECK (n_chained == 1); + + // Put the object to index on the stack + INC_DEPTH (); + e->accept (*this); + DEC_DEPTH (); + + // The Octave function inputname (i) needs to be able to know the name + // of th nth argument to a function, so we need to make an entry of + // the names. + arg_name_entry arg_name_entry; + + std::list arg_lists = expr.arg_lists (); + std::list arg_names = expr.arg_names (); + std::list dyn_fields = expr.dyn_fields (); + + CHECK (arg_lists.size () == n_chained); + CHECK (arg_names.size () == n_chained); + CHECK (dyn_fields.size () == n_chained); + CHECK (type_tags.size () == n_chained); + + auto arg_names_it = arg_names.begin (); + auto arg_lists_it = arg_lists.begin (); + auto arg_lists_dyn_it = dyn_fields.begin (); + auto arg_type_tags_it = type_tags.begin (); + + char type = *arg_type_tags_it; + + int nargout = NARGOUT (); + + bool struct_is_id_dot_id = false; + if (type == '.') + emit_fields_for_visit_index_expression (*arg_names_it, *arg_lists_dyn_it, e, &struct_is_id_dot_id); + else if (*arg_lists_it) + { + emit_args_for_visit_index_expression (*arg_lists_it, e); + // Push the argnames for inputname () + size_t n_args = arg_names_it->numel (); + string_vector names(n_args); + for (int i = 0; i < arg_names_it->numel (); i++) + names.elem (i) = arg_names_it->elem (i); + arg_name_entry.m_arg_names = names; + } + + if (m_pending_ignore_outputs && DEPTH () == 2) + { + PUSH_CODE (INSTR::SET_IGNORE_OUTPUTS); + PUSH_CODE (m_v_ignored.size ()); + PUSH_CODE (m_ignored_of_total); + for (int i : m_v_ignored) + PUSH_CODE (i); + } + + int loc_id = N_LOC (); + PUSH_LOC (); + LOC (loc_id).m_ip_start = CODE_SIZE (); + + arg_name_entry.m_ip_start = CODE_SIZE (); + + tree_argument_list *args = *arg_lists_it; + + if (is_wordcmd) + { + CHECK (e->is_identifier ()); + + std::string id_name = e->name (); + int slot = SLOT (id_name); + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::WORDCMD); + // The vm need the name of the identifier for function lookups + PUSH_SLOT (slot); + PUSH_CODE (nargout); + // Push nargin + PUSH_CODE (args ? 
args->size () : 0); + } + else if (e->is_identifier () && !(type == '.' && !struct_is_id_dot_id)) + { + std::string id_name = e->name (); + int slot = SLOT (id_name); + + if (type == '(') + { + if (nargout == 0) + { + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::INDEX_ID_NARGOUT0); + // The vm need the name of the identifier for function lookups + PUSH_SLOT (slot); + } + else if (nargout == 1) + { + // If the id is "sin", "cos", "round" etc, and there is one argument, + // in the end map(unary_mapper_t) will be called while executing, + // unless the user have overriden those. + // We do a special opcode for those to speed them up. + // Don't do the special opcode if it would need wide slots, i.e. slot nr > 256. + auto umaped_fn_it = m_name_to_unary_func.find (id_name); + if (!args || args->size () != 1 || umaped_fn_it == m_name_to_unary_func.end () || slot > 256) + { + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::INDEX_ID_NARGOUT1); + } + else + { + octave_base_value::unary_mapper_t idx = umaped_fn_it->second; + PUSH_CODE (INSTR::INDEX_ID1_MATHY_UFUN); + PUSH_CODE (static_cast (idx)); + } + + PUSH_SLOT (slot); + } + else + { + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::INDEX_IDN); + PUSH_SLOT (slot); + PUSH_CODE (nargout); + } + + // Push nargin + PUSH_CODE (args ? args->size () : 0); + } + else if (type == '{') + { + if (nargout == 0) + { + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::INDEX_CELL_ID_NARGOUT0); + // The vm need the name of the identifier for function lookups + PUSH_SLOT (slot); + } + else if (nargout == 1) + { + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::INDEX_CELL_ID_NARGOUT1); + PUSH_SLOT (slot); + } + else + { + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::INDEX_CELL_ID_NARGOUTN); + PUSH_SLOT (slot); + PUSH_CODE (nargout); + } + + // Push nargin + PUSH_CODE (args ? args->size () : 0); + } + else if (type == '.') + { + PUSH_CODE (INSTR::INDEX_STRUCT_NARGOUTN); + PUSH_CODE (nargout); + + string_vector field_names = *arg_names_it; + CHECK (field_names.numel ()); + std::string field_name = field_names.elem (0); + + PUSH_WSLOT (slot); // id to index + PUSH_WSLOT (SLOT (field_name)); // VM need name of the field + } + else + TODO ("Not implemeted typetag"); + } + else + { + // We are not indexing an id, but e.g.: + // (foo).() + // I.e. a temporary object. + PUSH_CODE (INSTR::INDEX_OBJ); + PUSH_CODE (nargout); + PUSH_CODE (0); // "has slot" + PUSH_WSLOT (0); // The w/e slot TODO: Remove? + // Push nargin + if (type == '.') + PUSH_CODE (1); // Nargin always one for struct indexing + else + PUSH_CODE (args ? args->size () : 0); + PUSH_CODE (type); + } + + arg_name_entry.m_ip_end = CODE_SIZE (); + PUSH_ARGNAMES_ENTRY (arg_name_entry); + + LOC (loc_id).m_ip_end = CODE_SIZE (); + LOC (loc_id).m_col = expr.column (); + LOC (loc_id).m_line = expr.line (); + + maybe_emit_bind_ans_and_disp (expr); + + if (DEPTH () == 1 && NARGOUT () > 1) + TODO ("Silly state"); + + DEC_DEPTH (); +} + +void +bytecode_walker:: +visit_index_expression (tree_index_expression& expr) +{ + + tree_expression *e = expr.expression (); + CHECK_NONNULL(e); + + std::list arg_lists = expr.arg_lists (); + std::list arg_names = expr.arg_names (); + std::list dyn_fields = expr.dyn_fields (); + std::string type_tags = expr.type_tags (); + + size_t n_chained = type_tags.size (); + CHECK (n_chained); + + // For un-chained index expressions we use specialized + // op-codes that has e.g. nargout and type '(','{' and '.' + // encoded in the op-code it self to speed things up. 
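+ // Dispatch examples for the cases below (a sketch, not taken from
+ // the original patch text):
+ //   foo (x)        n_chained == 1     -> simple_visit_index_expression,
+ //                                        e.g. INDEX_ID_NARGOUT1
+ //   foo.bar{2}     n_chained == 2     -> generic INDEX_STRUCT_CALL path
+ //   foo.bar (end)  struct chain + end -> eval_visit_index_expression
+ //                                        fallback (see below)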
+ if (n_chained == 1) + { + simple_visit_index_expression (expr); + return; + } + + // If there is any struct in the chain and an end, we cheat and use + // eval. We can't use the existing end op-codes since they need a value + // to check the size of, but e.g. "foo.bar(end)" might be a + // class cmd form method call "foo.bar" and we can't know the size before + // that has been checked. + // + // TODO: Solve this. Maybe with some special if? + bool has_end = false; + for (auto outer_it = arg_lists.begin (); outer_it != arg_lists.end (); outer_it++) + { + auto arg_list = *outer_it; + if (!arg_list) + continue; + for (auto it = arg_list->begin (); it != arg_list->end (); it++) + { + CHECK_NONNULL (*it); + has_end = find_end_walker::has_end (**it); + if (has_end) + break; + } + } + if (has_end) + { + eval_visit_index_expression (expr); + return; + } + + INC_DEPTH (); + + // A chained index expression might be: foo.bar(2).baz{1} => n_chained == 4 + + int loc_id = N_LOC (); + PUSH_LOC (); + LOC (loc_id).m_ip_start = CODE_SIZE (); + + // The Octave function inputname (i) needs to be able to know the name + // of th nth argument to a function, so we need to make an entry of + // the names. + arg_name_entry arg_name_entry; + + // We push the first object to index to the stack. + // Subsequent indexings will have the prior index result on the + // stack. + INC_DEPTH (); + e->accept (*this); + DEC_DEPTH (); + + CHECK (arg_lists.size () == n_chained); + CHECK (arg_names.size () == n_chained); + CHECK (dyn_fields.size () == n_chained); + CHECK (type_tags.size () == n_chained); + + auto arg_names_it = arg_names.begin (); + auto arg_lists_it = arg_lists.begin (); + auto arg_lists_dyn_it = dyn_fields.begin (); + auto arg_type_tags_it = type_tags.begin (); + + tree_expression *first_expression = e; + // Iterate over the chained subexpressions + std::vector v_n_args {}; // We pushed one field above + std::vector v_types {};// The type is . + while (arg_lists_it != arg_lists.end ()) + { + tree_argument_list *arg_list = *arg_lists_it++; + string_vector field_names = *arg_names_it++; + tree_expression *dyn_expr = *arg_lists_dyn_it++; + char type = *arg_type_tags_it++; + + v_types.push_back (type); + + if (type == '.') + { + emit_fields_for_visit_index_expression (field_names, dyn_expr, nullptr, nullptr); + v_n_args.push_back (1); + } + else if (arg_list) + { + emit_args_for_visit_index_expression (arg_list, nullptr); + v_n_args.push_back (arg_list->size ()); + // Push the argnames for inputname () + int n_args = field_names.numel (); + string_vector names(n_args); + for (int i = 0; i < n_args; i++) + names.elem (i) = field_names.elem (i); + arg_name_entry.m_arg_names = names; + } + else + v_n_args.push_back (0); // e.g. 
the call to "bar" in "foo.bar ()" + } + + if (m_pending_ignore_outputs && DEPTH () == 2) + { + PUSH_CODE (INSTR::SET_IGNORE_OUTPUTS); + PUSH_CODE (m_v_ignored.size ()); + PUSH_CODE (m_ignored_of_total); + for (int i : m_v_ignored) + PUSH_CODE (i); + } + + int nargout = NARGOUT (); + + arg_name_entry.m_ip_start = CODE_SIZE (); + + if (first_expression && first_expression->is_identifier ()) + { + int slot = SLOT (first_expression->name ()); + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::INDEX_STRUCT_CALL); + PUSH_SLOT (slot); // the slot + PUSH_CODE (1); // has slot + PUSH_CODE (nargout); + } + else + { + PUSH_CODE (INSTR::INDEX_STRUCT_CALL); + PUSH_SLOT (0); // slot + PUSH_CODE (0); // has slot + PUSH_CODE (nargout); + } + + PUSH_CODE (v_n_args.size ()); + for (unsigned i = 0; i < v_n_args.size (); i++) + { + PUSH_CODE (v_n_args[i]); + PUSH_CODE (v_types[i]); + } + + arg_name_entry.m_ip_end = CODE_SIZE (); + PUSH_ARGNAMES_ENTRY (arg_name_entry); + arg_name_entry = {}; // TODO: Remove? + + LOC (loc_id).m_ip_end = CODE_SIZE (); + LOC (loc_id).m_col = expr.column (); + LOC (loc_id).m_line = expr.line (); + + maybe_emit_bind_ans_and_disp (expr); + + if (DEPTH () == 1 && NARGOUT () > 1) + TODO ("Silly state"); + + DEC_DEPTH (); +} + +// For loops are setup like this: +// +// Setup block: +// * The range variable is on the top of the stack +// * Push the amount of iterations to the stack, octave_idx_type +// * Push a counter to the stack initialized to ~0, octave_idx_type, +// so that it wraps to zero after incrementing. +// * Fall through to condition block +// Condition block: +// * Increase counter +// * If there are no iterations left, go to after. +// * Write the iteration's value to the local +// * Fall through to body +// Body block: +// * Execute the body code +// * Jump to condition block +// After block: +// * Pop the type, counter and limit variables +// +// FOR_SETUP = opcode +// FOR_COND = opcode, after address, local slot + +void +bytecode_walker:: +visit_simple_for_command (tree_simple_for_command& cmd) +{ + tree_expression *lhs = cmd.left_hand_side (); + + int loc_id = N_LOC (); + PUSH_LOC (); + LOC (loc_id).m_ip_start = CODE_SIZE (); + + CHECK_NONNULL (lhs); + if (! lhs->is_identifier ()) + TODO ("For loop with lhs not id ???"); + + std::string id_name = lhs->name (); + // We don't want the id pushed to the stack so we + // don't walk it. + int slot = add_id_to_table (id_name); + + tree_expression *expr = cmd.control_expr (); + CHECK_NONNULL (expr); + + PUSH_TREE_FOR_DBG (&cmd); // Debug hit before rhs + + // We want the rhs expression on the stack + INC_DEPTH (); + PUSH_NARGOUT (1); + expr->accept (*this); + POP_NARGOUT (); + DEC_DEPTH (); + + // For loops need a special unwind entry to destroy the + // native ints on the stack properly. + int unwind_idx = N_UNWIND (); + PUSH_UNWIND(); + UNWIND (unwind_idx).m_ip_start = CODE_SIZE (); + + UNWIND (unwind_idx).m_unwind_entry_type = + unwind_entry_type::FOR_LOOP; + + // For loops add two native ints and one ov to the stack, + // and switches add one ov to the stack, so we need to + // record how many things we have added to the stack, + // not counting this for loop. From for loops and + // switches. + int n_things_on_stack = n_on_stack_due_to_stmt(); + + // Store added things on stack (due to for loops and switches) + // in the unwind table. 
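+ // For example, a "for" loop nested inside a switch that is itself
+ // inside another "for" loop sees
+ //   n_things_on_stack = 3 (outer for: two native ints + one ov)
+ //                     + 1 (switch: one ov) = 4,
+ // so unwinding past this loop knows how many stack entries to skip.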
+ UNWIND (unwind_idx).m_stack_depth = n_things_on_stack; + + PUSH_CODE (INSTR::FOR_SETUP); + // FOR_COND need to come after FOR_SETUP + // FOR_SETUP uses FOR_COND's operands the first loop iteration + PUSH_TREE_FOR_DBG (&cmd); // Debug hit at condition + int cond_offset = CODE_SIZE (); + MAYBE_PUSH_WIDE_OPEXT (slot); + PUSH_CODE (INSTR::FOR_COND); + PUSH_SLOT (slot); // The for loop variable + int need_after = CODE_SIZE (); + PUSH_CODE_SHORT (-1); // Placeholder for after address + + LOC (loc_id).m_ip_end = CODE_SIZE (); + LOC (loc_id).m_col = cmd.column (); + LOC (loc_id).m_line = cmd.line (); + + // Walk body + tree_statement_list *list = cmd.body (); + + // The body can be empty + if (list) + { + m_n_nested_loops++; + PUSH_NESTING_STATEMENT (nesting_statement::FOR_LOOP); + PUSH_BREAKS (); + PUSH_CONTINUE_TARGET (); + list->accept (*this); + for (int offset : POP_CONTINUE_TARGET()) + SET_CODE_SHORT (offset, cond_offset); + POP_NESTING_STATEMENT (); + m_n_nested_loops--; + } + + // A new loc for the for loop suffix code, so that any time + // spent there end up by the "for"-row in the profiler + + int loc_id2 = N_LOC (); + PUSH_LOC (); + LOC (loc_id2).m_ip_start = CODE_SIZE (); + + // Jump to condition block, TODO: unless all paths terminated + PUSH_CODE (INSTR::JMP); + PUSH_CODE_SHORT (cond_offset); + + // Now we can set the after jump in cond + SET_CODE_SHORT (need_after, CODE_SIZE ()); + + if (list) + { + // Also all breaks jump to here + for (int need_break : POP_BREAKS ()) + { + SET_CODE_SHORT (need_break, CODE_SIZE ()); + } + } + + // Mark an end to the special for loop unwind entry + UNWIND (unwind_idx).m_ip_end = CODE_SIZE (); + + // We need to pop the counter, n and range + PUSH_CODE (INSTR::POP_N_INTS); + PUSH_CODE (2); + // Pop the rhs ov (the range) + PUSH_CODE (INSTR::POP); + + LOC (loc_id2).m_ip_end = CODE_SIZE (); + LOC (loc_id2).m_col = cmd.column (); + LOC (loc_id2).m_line = cmd.line (); +} + +void +bytecode_walker:: +visit_complex_for_command (tree_complex_for_command& cmd) +{ + tree_argument_list *lhs = cmd.left_hand_side (); + + CHECK (lhs); + CHECK (lhs->size () == 2); + + auto p = lhs->begin (); + tree_expression *val = *p++; + tree_expression *key = *p++; + + CHECK (val); CHECK (key); + + CHECK (val->is_identifier ()); + CHECK (key->is_identifier ()); + + std::string val_name = val->name (); + std::string key_name = key->name (); + + add_id_to_table (val_name); + add_id_to_table (key_name); + + tree_expression *expr = cmd.control_expr (); + CHECK_NONNULL (expr); + + // We want the rhs expression on the stack + INC_DEPTH (); + PUSH_NARGOUT (1); + expr->accept (*this); + POP_NARGOUT (); + DEC_DEPTH (); + + // For loops need a special unwind entry to destroy the + // native ints on the stack properly. + int unwind_idx = N_UNWIND (); + PUSH_UNWIND(); + UNWIND (unwind_idx).m_ip_start = CODE_SIZE (); + + UNWIND (unwind_idx).m_unwind_entry_type = + unwind_entry_type::FOR_LOOP; + + // For loops add two native ints and one ov to the stack, + // and switches add one ov to the stack, so we need to + // record how many things we have added to the stack, + // not counting this for loop. From for loops and + // switches. + int n_things_on_stack = n_on_stack_due_to_stmt(); + + // Store added things on stack (due to for loops and switches) + // in the unwind table. 
+ UNWIND (unwind_idx).m_stack_depth = n_things_on_stack;
+
+ PUSH_CODE (INSTR::FOR_COMPLEX_SETUP);
+ int need_after0 = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1); // Placeholder for after address for a jump if rhs is undefined
+
+ int cond_offset = CODE_SIZE ();
+ PUSH_CODE (INSTR::FOR_COMPLEX_COND);
+ int need_after1 = CODE_SIZE ();
+ PUSH_CODE_SHORT (-1); // Placeholder for after address
+ PUSH_WSLOT (SLOT (key_name));
+ PUSH_WSLOT (SLOT (val_name));
+
+ // Walk body
+ tree_statement_list *list = cmd.body ();
+ // The body can be empty
+ if (list)
+ {
+ m_n_nested_loops++;
+ PUSH_NESTING_STATEMENT (nesting_statement::FOR_LOOP);
+ PUSH_BREAKS ();
+ PUSH_CONTINUE_TARGET ();
+ list->accept (*this);
+ for (int offset : POP_CONTINUE_TARGET())
+ SET_CODE_SHORT (offset, cond_offset);
+ POP_NESTING_STATEMENT ();
+ m_n_nested_loops--;
+ }
+
+ // Jump to condition block, TODO: unless all paths terminated
+ PUSH_CODE (INSTR::JMP);
+ PUSH_CODE_SHORT (cond_offset);
+
+ // Now we can set the after jump in cond and setup
+ SET_CODE_SHORT (need_after0, CODE_SIZE ());
+ SET_CODE_SHORT (need_after1, CODE_SIZE ());
+
+ if (list)
+ {
+ // Also all breaks jump to here
+ for (int need_break : POP_BREAKS ())
+ {
+ SET_CODE_SHORT (need_break, CODE_SIZE ());
+ }
+ }
+
+ // Mark an end to the special for loop unwind entry
+ UNWIND (unwind_idx).m_ip_end = CODE_SIZE ();
+
+ // We need to pop the counter, n, and the rhs struct
+ PUSH_CODE (INSTR::POP_N_INTS);
+ PUSH_CODE (2);
+ // Pop the rhs ov (the struct)
+ PUSH_CODE (INSTR::POP);
+}
+
+void
+bytecode_walker::
+visit_fcn_handle (tree_fcn_handle &handle)
+{
+ INC_DEPTH ();
+ std::string name = handle.name ();
+ // We prepend the handles with @ to not risk collisions with
+ // other identifiers in the id table
+ std::string aname = "@" + name;
+
+ if (name.find ('.') != std::string::npos)
+ TODO ("No support for method fcn handles yet");
+
+ // Slot for the handle function cache
+ int slot = add_id_to_table(aname);
+
+ MAYBE_PUSH_WIDE_OPEXT (slot);
+ PUSH_CODE (INSTR::PUSH_FCN_HANDLE);
+ PUSH_SLOT (slot);
+
+ maybe_emit_bind_ans_and_disp (handle);
+
+ DEC_DEPTH ();
+}
+
+void
+bytecode_walker::
+visit_colon_expression (tree_colon_expression& expr)
+{
+ INC_DEPTH ();
+
+ tree_expression *op1 = expr.base ();
+
+ CHECK_NONNULL (op1);
+ op1->accept (*this);
+
+ tree_expression *op2 = expr.increment ();
+
+ if (op2)
+ op2->accept (*this);
+
+ tree_expression *op3 = expr.limit ();
+
+ CHECK_NONNULL (op3);
+ op3->accept (*this);
+
+ // Colon expressions have somewhat different semantics
+ // in command expressions.
+ if (expr.is_for_cmd_expr ())
+ {
+ if (op2)
+ PUSH_CODE (INSTR::COLON3_CMD);
+ else
+ PUSH_CODE (INSTR::COLON2_CMD);
+ }
+ else
+ {
+ if (op2)
+ PUSH_CODE (INSTR::COLON3);
+ else
+ PUSH_CODE (INSTR::COLON2);
+ }
+
+ maybe_emit_bind_ans_and_disp (expr);
+
+ DEC_DEPTH ();
+}
+
+void
+bytecode_walker::
+visit_break_command (tree_break_command&)
+{
+ PUSH_CODE (INSTR::JMP);
+ // Need to set where to jump to after we know where the loop ends
+ PUSH_NEED_BREAK (CODE_SIZE ());
+ PUSH_CODE_SHORT (-1); // Placeholder
+}
+
+
+
+void
+bytecode_walker::
+visit_continue_command (tree_continue_command&)
+{
+ PUSH_CODE (INSTR::JMP);
+ // The address to jump to needs to be set by the loop
+ // visitor (do-until jumps forward), so push the code
+ // address that needs a target address.
+ PUSH_NEED_CONTINUE_TARGET (CODE_SIZE ());
+ PUSH_CODE_SHORT (-1); // Placeholder
+}
+
diff -r edbe81ee00c5 -r d2de83a80165 libinterp/parse-tree/pt-bytecode-walk.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/libinterp/parse-tree/pt-bytecode-walk.h Mon Apr 24 20:34:39 2023 +0200
@@ -0,0 +1,418 @@
+////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2022-2023 The Octave Project Developers
+//
+// See the file COPYRIGHT.md in the top-level directory of this
+// distribution or <https://octave.org/copyright/>.
+//
+// This file is part of Octave.
+//
+// Octave is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Octave is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Octave; see the file COPYING. If not, see
+// <https://www.gnu.org/licenses/>.
+//
+////////////////////////////////////////////////////////////////////////
+
+#if ! defined (octave_pt_bytecode_walk_h)
+#define octave_pt_bytecode_walk_h 1
+
+#include <map>
+
+#include "octave-config.h"
+#include "pt-walk.h"
+#include "error.h"
+
+#include "pt-bytecode-vm.h"
+
+class octave_user_script;
+class octave_user_function;
+
+#define ERROR_NOT_IMPLEMENTED \
+{ error("Not implemented %s:%d", __FILE__, __LINE__); }
+
+namespace octave
+{
+ void compile_user_function (octave_user_function &fn, bool print);
+
+ // No separate visitor needed
+ // Base classes only, so no need to include them.
+ // + // class tree_array_list + // class tree_unary_expression + // class tree_black_hole + + class tree_anon_fcn_handle; + class tree_arg_size_spec; + class tree_arg_validation; + class tree_arg_validation_fcns; + class tree_args_block_attribute_list; + class tree_args_block_validation_list; + class tree_argument_list; + class tree_arguments_block; + class tree_binary_expression; + class tree_boolean_expression; + class tree_compound_binary_expression; + class tree_break_command; + class tree_colon_expression; + class tree_continue_command; + class tree_decl_command; + class tree_decl_init_list; + class tree_decl_elt; + class tree_simple_for_command; + class tree_complex_for_command; + class tree_spmd_command; + class tree_function_def; + class tree_identifier; + class tree_if_clause; + class tree_if_command; + class tree_if_command_list; + class tree_switch_case; + class tree_switch_case_list; + class tree_switch_command; + class tree_index_expression; + class tree_matrix; + class tree_cell; + class tree_multi_assignment; + class tree_no_op_command; + class tree_constant; + class tree_fcn_handle; + class tree_parameter_list; + class tree_postfix_expression; + class tree_prefix_expression; + class tree_return_command; + class tree_simple_assignment; + //class tree_simple_index_expression; + class tree_statement; + //class tree_statement_cmd; + //class tree_statement_expression; + //class tree_statement_null; + class tree_statement_list; + class tree_try_catch_command; + class tree_unwind_protect_command; + class tree_while_command; + class tree_do_until_command; + + class tree_superclass_ref; + class tree_metaclass_query; + class tree_classdef_attribute; + class tree_classdef_attribute_list; + class tree_classdef_superclass; + class tree_classdef_superclass_list; + class tree_classdef_property; + class tree_classdef_property_list; + class tree_classdef_properties_block; + class tree_classdef_methods_list; + class tree_classdef_methods_block; + class tree_classdef_event; + class tree_classdef_events_list; + class tree_classdef_events_block; + class tree_classdef_enum; + class tree_classdef_enum_list; + class tree_classdef_enum_block; + class tree_classdef_body; + class tree_classdef; + + struct id_being_indexed + { + int slot; + int idx; + int nargs; + int type; + }; + + class bytecode_walker : public tree_walker + { + public: + + enum class nesting_statement + { + INVALID, + FOR_LOOP, + ONE_OV_ON_STACK, + }; + + bytecode_walker (void) { } + + virtual ~bytecode_walker (void) = default; + + // The bytecode will be put in this container + bytecode m_code; + // The bytecode need its own scope object that will + // be written back to the octave_user_function object + symbol_scope m_scope; + + bool m_varargout = false; + + std::vector> m_continue_target; + std::vector> m_need_break_target; + std::vector m_loop_target; + std::vector m_all_paths_terminated; + std::vector m_nargout; + std::vector> m_need_unwind_target; + + std::vector m_nesting_statement; + + // For "end" in indexing expression we need to know what variable is + // being indexed. 
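+ // For example, while compiling "M (min (10, end))" this stack holds
+ // two entries, one per indexing level, so visit_identifier can emit
+ // END_X_N with the slot/argument info of the object that "end"
+ // actually refers to (M, not the call to min).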
+ std::vector m_indexed_id; + + int m_depth = 0; + int m_offset_n_locals = -1; + int m_n_locals = 0; + int m_n_nested_loops = 0; + + // Counter to choose different alternative op-codes in a try to help branch prediction + int m_cnt_alts_cst = 0; + int m_cnt_alts_mul = 0; + int m_cnt_alts_add = 0; + int m_cnt_alts_div = 0; + + // Simple way to keep down amount of temporary slots made to store results + int m_n_multi_assign = 0; + + // Need to keep track of ignored outputs with the '~' + bool m_pending_ignore_outputs = false; + int m_ignored_of_total = 0; + std::vector m_v_ignored; + + // + bool m_is_folding = false; + std::vector m_v_trees_to_fold; + std::vector m_v_offset_of_folds; + int m_n_folds = 0; + + std::map m_map_locals_to_slot; + + std::map m_map_id_is_global; + std::map m_map_id_is_persistent; + + static std::map m_name_to_unary_func; + + int add_id_to_table (std::string name); + + int n_on_stack_due_to_stmt (); + + void emit_return (); + + void emit_alt (int &cntr, std::vector alts); + + void emit_load_2_cst (tree_expression *lhs, tree_expression *rhs); + + void maybe_emit_bind_ans_and_disp (tree_expression &expr, const std::string maybe_cmd_name = ""); + void maybe_emit_disp_id (tree_expression &expr, const std::string &name, const std::string maybe_cmd_name = "" ); + void maybe_emit_push_and_disp_id (tree_expression &expr, const std::string &name, const std::string maybe_cmd_name = ""); + void emit_disp_obj (tree_expression &expr); + + int get_slot (std::string name) + { + auto it = m_map_locals_to_slot.find (name); + if (it == m_map_locals_to_slot.end ()) + error ("VM internal error: Slot %s does not exist", name.c_str ()); + return it->second; + } + + void ctor_unary_map (); + + // No copying! + + bytecode_walker (const bytecode_walker&) = delete; + + bytecode_walker& operator = (const bytecode_walker&) = delete; + + void visit_anon_fcn_handle (tree_anon_fcn_handle&); + + void visit_argument_list (tree_argument_list&) ERROR_NOT_IMPLEMENTED + + void visit_arguments_block (tree_arguments_block&) ERROR_NOT_IMPLEMENTED + + void visit_args_block_attribute_list (tree_args_block_attribute_list&) + ERROR_NOT_IMPLEMENTED + + void visit_args_block_validation_list (tree_args_block_validation_list&) + ERROR_NOT_IMPLEMENTED + + void visit_arg_validation (tree_arg_validation&) ERROR_NOT_IMPLEMENTED + + void visit_arg_size_spec (tree_arg_size_spec&) ERROR_NOT_IMPLEMENTED + + void visit_arg_validation_fcns (tree_arg_validation_fcns&) + ERROR_NOT_IMPLEMENTED + + void visit_binary_expression (tree_binary_expression&); + + void visit_boolean_expression (tree_boolean_expression&); + + void visit_compound_binary_expression (tree_compound_binary_expression&); + + void visit_break_command (tree_break_command&); + + void visit_colon_expression (tree_colon_expression&); + + void visit_continue_command (tree_continue_command&); + + void visit_decl_command (tree_decl_command&); + + void visit_decl_elt (tree_decl_elt&) ERROR_NOT_IMPLEMENTED + + void visit_decl_init_list (tree_decl_init_list&) ERROR_NOT_IMPLEMENTED + + void visit_simple_for_command (tree_simple_for_command&); + + void visit_complex_for_command (tree_complex_for_command&); + + void visit_spmd_command (tree_spmd_command&) ERROR_NOT_IMPLEMENTED + + void visit_octave_user_script (octave_user_script&) ERROR_NOT_IMPLEMENTED + + void visit_octave_user_function (octave_user_function&); + + void visit_function_def (tree_function_def&) ERROR_NOT_IMPLEMENTED + + void visit_identifier (tree_identifier&); + + void visit_if_clause (tree_if_clause&) 
ERROR_NOT_IMPLEMENTED + + void visit_if_command (tree_if_command&); + + void visit_if_command_list (tree_if_command_list&) ERROR_NOT_IMPLEMENTED + + void visit_switch_case (tree_switch_case&) ERROR_NOT_IMPLEMENTED + + void visit_switch_case_list (tree_switch_case_list&) ERROR_NOT_IMPLEMENTED + + void visit_switch_command (tree_switch_command&); + + // Helper functions + void + emit_args_for_visit_index_expression (tree_argument_list *arg_list, + tree_expression *lhs_root); + + void + emit_fields_for_visit_index_expression (string_vector &field_names, + tree_expression *dyn_expr, + tree_expression *lhs_root, + bool *struct_is_id_dot_id); + + void simple_visit_index_expression (tree_index_expression&); + void eval_visit_index_expression (tree_index_expression&); + + void visit_index_expression (tree_index_expression&); + + //void visit_simple_index_expression (tree_simple_index_expression&); + + void visit_matrix (tree_matrix&); + + void visit_cell (tree_cell&); + + void visit_multi_assignment (tree_multi_assignment&); + + void visit_no_op_command (tree_no_op_command&); + + void visit_constant (tree_constant&); + + void visit_fcn_handle (tree_fcn_handle&); + + void visit_parameter_list (tree_parameter_list&) ERROR_NOT_IMPLEMENTED + + void visit_postfix_expression (tree_postfix_expression&); + + void visit_prefix_expression (tree_prefix_expression&); + + void visit_return_command (tree_return_command&); + + void visit_simple_assignment (tree_simple_assignment&); + + void visit_statement (tree_statement&); + + void visit_statement_list (tree_statement_list&); + + void visit_try_catch_command (tree_try_catch_command&); + + void emit_unwind_protect_code (tree_statement_list *body, + tree_statement_list *cleanup_code, + tree_expression *body_expr = nullptr, + tree_expression *cleanup_expr = nullptr, + std::vector cleanup_instructions = {}); + + struct emit_unwind_protect_data + { + int m_idx_unwind; + bool m_break_stack_populated; + std::vector m_v_need_breaks_initial; + int m_n_need_break; + int m_n_need_cleanup; + }; + + emit_unwind_protect_data emit_unwind_protect_code_start (); + void emit_unwind_protect_code_before_cleanup (emit_unwind_protect_data &data); + void emit_unwind_protect_code_end (emit_unwind_protect_data &data); + + void visit_unwind_protect_command (tree_unwind_protect_command&); + + void visit_while_command (tree_while_command&); + + void visit_do_until_command (tree_do_until_command&); + + void visit_superclass_ref (tree_superclass_ref&) ERROR_NOT_IMPLEMENTED + + void visit_metaclass_query (tree_metaclass_query&) ERROR_NOT_IMPLEMENTED + + void visit_classdef_attribute (tree_classdef_attribute&) + ERROR_NOT_IMPLEMENTED + + void visit_classdef_attribute_list (tree_classdef_attribute_list&) + ERROR_NOT_IMPLEMENTED + + void visit_classdef_superclass (tree_classdef_superclass&) + ERROR_NOT_IMPLEMENTED + + void visit_classdef_superclass_list (tree_classdef_superclass_list&) + ERROR_NOT_IMPLEMENTED + + void visit_classdef_property (tree_classdef_property&) ERROR_NOT_IMPLEMENTED + + void visit_classdef_property_list (tree_classdef_property_list&) + ERROR_NOT_IMPLEMENTED + + void visit_classdef_properties_block (tree_classdef_properties_block&) + ERROR_NOT_IMPLEMENTED + + void visit_classdef_methods_list (tree_classdef_methods_list&) + ERROR_NOT_IMPLEMENTED + + void visit_classdef_methods_block (tree_classdef_methods_block&) + ERROR_NOT_IMPLEMENTED + + void visit_classdef_event (tree_classdef_event&) ERROR_NOT_IMPLEMENTED + + void visit_classdef_events_list 
(tree_classdef_events_list&) + ERROR_NOT_IMPLEMENTED + + void visit_classdef_events_block (tree_classdef_events_block&) + ERROR_NOT_IMPLEMENTED + + void visit_classdef_enum (tree_classdef_enum&) ERROR_NOT_IMPLEMENTED + + void visit_classdef_enum_list (tree_classdef_enum_list&) + ERROR_NOT_IMPLEMENTED + + void visit_classdef_enum_block (tree_classdef_enum_block&) + ERROR_NOT_IMPLEMENTED + + void visit_classdef_body (tree_classdef_body&) ERROR_NOT_IMPLEMENTED + + void visit_classdef (tree_classdef&) ERROR_NOT_IMPLEMENTED + }; +} + +#endif diff -r edbe81ee00c5 -r d2de83a80165 libinterp/parse-tree/pt-bytecode.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libinterp/parse-tree/pt-bytecode.h Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,293 @@ +//////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 2022-2023 The Octave Project Developers +// +// See the file COPYRIGHT.md in the top-level directory of this +// distribution or . +// +// This file is part of Octave. +// +// Octave is free software: you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Octave is distributed in the hope that it will be useful, but +// WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Octave; see the file COPYING. If not, see +// . +// +//////////////////////////////////////////////////////////////////////// + +#if ! defined (octave_pt_bytecode_h) +#define octave_pt_bytecode_h 1 + +#include +#include + +#include "octave-config.h" +#include "Cell.h" +#include "ov-vm.h" + +OCTAVE_BEGIN_NAMESPACE(octave) + +class tree; + +enum class INSTR +{ + POP, + DUP, + LOAD_CST, + MUL, + DIV, + ADD, + SUB, + RET, + ASSIGN, + JMP_IF, + JMP, + JMP_IFN, + PUSH_SLOT_NARGOUT0, + LE, + LE_EQ, + GR, + GR_EQ, + EQ, + NEQ, + INDEX_ID_NARGOUT0, + PUSH_SLOT_INDEXED, + POW, + LDIV, + EL_MUL, + EL_DIV, + EL_POW, + EL_AND, + EL_OR, + EL_LDIV, + NOT, + UADD, + USUB, + TRANS, + HERM, + // TODO: These should have an inplace optimization (no push) + INCR_ID_PREFIX, + DECR_ID_PREFIX, + INCR_ID_POSTFIX, + DECR_ID_POSTFIX, + FOR_SETUP, + FOR_COND, + POP_N_INTS, + PUSH_SLOT_NARGOUT1, + INDEX_ID_NARGOUT1, + PUSH_FCN_HANDLE, + COLON3, + COLON2, + COLON3_CMD, + COLON2_CMD, + PUSH_TRUE, + PUSH_FALSE, + UNARY_TRUE, + INDEX_IDN, + ASSIGNN, + PUSH_SLOT_NARGOUTN, + SUBASSIGN_ID, + END_ID, + MATRIX, + TRANS_MUL, + MUL_TRANS, + HERM_MUL, + MUL_HERM, + TRANS_LDIV, + HERM_LDIV, + WORDCMD, + HANDLE_SIGNALS, + PUSH_CELL, + PUSH_OV_U64, + EXPAND_CS_LIST, + INDEX_CELL_ID_NARGOUT0, + INDEX_CELL_ID_NARGOUT1, + INDEX_CELL_ID_NARGOUTN, + INCR_PREFIX, + ROT, + GLOBAL_INIT, + ASSIGN_COMPOUND, + JMP_IFDEF, + JMP_IFNCASEMATCH, + BRAINDEAD_PRECONDITION, + BRAINDEAD_WARNING, + FORCE_ASSIGN, // Accepts undefined rhs + PUSH_NIL, + THROW_IFERROBJ, + INDEX_STRUCT_NARGOUTN, + SUBASSIGN_STRUCT, + SUBASSIGN_CELL_ID, + INDEX_OBJ, + SUBASSIGN_OBJ, + MATRIX_UNEVEN, + LOAD_FAR_CST, + END_OBJ, + SET_IGNORE_OUTPUTS, + CLEAR_IGNORE_OUTPUTS, + SUBASSIGN_CHAINED, + SET_SLOT_TO_STACK_DEPTH, + DUPN, + DEBUG, + INDEX_STRUCT_CALL, + END_X_N, + EVAL, + BIND_ANS, + PUSH_ANON_FCN_HANDLE, + FOR_COMPLEX_SETUP, // opcode + FOR_COMPLEX_COND, + PUSH_SLOT_NARGOUT1_SPECIAL, + DISP, + 
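+  // Note: the LOAD_CST_ALT2..4 opcodes below are alternative encodings of
+  // LOAD_CST between which the compiler rotates (see the m_cnt_alts_*
+  // counters in bytecode_walker) in an attempt to help branch prediction.
+  // The *_DBL opcodes are presumably fast-path specializations for
+  // double scalars.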
PUSH_SLOT_DISP, + LOAD_CST_ALT2, + LOAD_CST_ALT3, + LOAD_CST_ALT4, + LOAD_2_CST, + MUL_DBL, + ADD_DBL, + SUB_DBL, + DIV_DBL, + POW_DBL, + LE_DBL, + LE_EQ_DBL, + GR_DBL, + GR_EQ_DBL, + EQ_DBL, + NEQ_DBL, + INDEX_ID1_MAT_1D, + INDEX_ID1_MAT_2D, + PUSH_PI, + INDEX_ID1_MATHY_UFUN, + SUBASSIGN_ID_MAT_1D, + INCR_ID_PREFIX_DBL, + DECR_ID_PREFIX_DBL, + INCR_ID_POSTFIX_DBL, + DECR_ID_POSTFIX_DBL, + PUSH_DBL_0, + PUSH_DBL_1, + PUSH_DBL_2, + JMP_IF_BOOL, + JMP_IFN_BOOL, + USUB_DBL, + NOT_DBL, + NOT_BOOL, + PUSH_FOLDED_CST, + SET_FOLDED_CST, + WIDE, +}; + +enum class unwind_entry_type +{ + INVALID, + FOR_LOOP, + TRY_CATCH, + UNWIND_PROTECT, +}; + +struct unwind_entry +{ + int m_ip_start; + int m_ip_end; + int m_ip_target; + int m_stack_depth; + unwind_entry_type m_unwind_entry_type; +}; + +struct loc_entry +{ + int m_ip_start = -1; + int m_ip_end = -1; + int m_col = -1; + int m_line = -1; +}; + +struct arg_name_entry +{ + int m_ip_start; + int m_ip_end; + Cell m_arg_names; + std::string m_obj_name; +}; + +struct unwind_data +{ + std::vector m_unwind_entries; + std::vector m_loc_entry; + std::map m_slot_to_persistent_slot; + std::map m_ip_to_tree; + std::vector m_argname_entries; + std::map m_external_frame_offset_to_internal; + + std::string m_name; + std::string m_file; + + unsigned m_code_size; + unsigned m_ids_size; +}; + +struct bytecode +{ + std::vector m_code; + std::vector m_data; + std::vector m_ids; + unwind_data m_unwind_data; +}; + +union stack_element +{ + octave_value ov; + octave_value_vm ov_vm; + octave_base_value *ovb; + uint64_t u; + int64_t i; + double d; + + void *pv; + const char *pcc; + unsigned char *puc; + stack_element *pse; + octave_value *pov; + std::string *ps; + unwind_data *pud; + execution_exception *pee; + + stack_element(){} + ~stack_element(){} +}; + +// Enums to describe what error message to build +enum class error_type +{ + INVALID, + ID_UNDEFINED, + ID_UNDEFINEDN, + IF_UNDEFINED, + INDEX_ERROR, + EXECUTION_EXC, + INTERRUPT_EXC, + INVALID_N_EL_RHS_IN_ASSIGNMENT, + RHS_UNDEF_IN_ASSIGNMENT, + BAD_ALLOC, + EXIT_EXCEPTION, +}; + +enum class global_type +{ + GLOBAL, + PERSISTENT, + GLOBAL_OR_PERSISTENT, +}; + +// If TRUE, use VM evaluator rather than tree walker. 
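+// A usage sketch: at the Octave prompt "__enable_vm_eval__ (1)" switches the
+// VM on, and "old = __enable_vm_eval__ (0)" switches it off again while
+// returning the previous setting (it defaults to off).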
+extern bool V__enable_vm_eval__; + +OCTAVE_END_NAMESPACE(octave) + +#endif \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 libinterp/parse-tree/pt-eval.cc --- a/libinterp/parse-tree/pt-eval.cc Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/parse-tree/pt-eval.cc Mon Apr 24 20:34:39 2023 +0200 @@ -72,6 +72,7 @@ #include "unwind-prot.h" #include "utils.h" #include "variables.h" +#include "pt-bytecode-vm.h" OCTAVE_BEGIN_NAMESPACE(octave) @@ -2180,12 +2181,30 @@ m_call_stack.set_auto_fcn_var (avt, val); } +void +tree_evaluator::set_nargin (int nargin) +{ + m_call_stack.set_nargin (nargin); +} + +void +tree_evaluator::set_nargout (int nargout) +{ + m_call_stack.set_nargout (nargout); +} + octave_value tree_evaluator::get_auto_fcn_var (stack_frame::auto_var_type avt) const { return m_call_stack.get_auto_fcn_var (avt); } +void +tree_evaluator::set_active_bytecode_ip (int ip) +{ + m_call_stack.set_active_bytecode_ip (ip); +} + void tree_evaluator::define_parameter_list_from_arg_vector (tree_parameter_list *param_list, const octave_value_list& args) @@ -2448,11 +2467,22 @@ m_call_stack.push (fcn); } +void tree_evaluator::push_stack_frame (vm &vm, octave_user_function *fcn, int nargout, int nargin) +{ + m_call_stack.push (vm, fcn, nargout, nargin); +} + void tree_evaluator::pop_stack_frame () { m_call_stack.pop (); } +std::shared_ptr +tree_evaluator::pop_return_stack_frame () +{ + return m_call_stack.pop_return (); +} + int tree_evaluator::current_line () const { return m_call_stack.current_line (); } @@ -3467,6 +3497,48 @@ // argument, which must be the partially constructed object instance. octave_value_list args (xargs); + + // FIXME: this probably shouldn't be a double-precision matrix. + Matrix ignored_outputs = ignored_fcn_outputs (); + + // Check if it has been compiled and execute the bytecode if so + if (user_function.is_compiled ()) + { + bytecode &bc = user_function.get_bytecode (); + + vm vm (this, bc); + + bool caller_is_bytecode = get_current_stack_frame ()->is_bytecode_fcn_frame (); + + // Pushes a bytecode stack frame. nargin is set inside the VM. + push_stack_frame (vm, &user_function, nargout, 0); + + // The arg names of the root stack frame in the VM need to be set here, unless the caller is bytecode. + // The caller can be bytecode if evalin("caller", ...) is used in some uncompiled function. + if (!caller_is_bytecode) + set_auto_fcn_var (stack_frame::ARG_NAMES, Cell (xargs.name_tags ())); + set_auto_fcn_var (stack_frame::IGNORED, ignored_outputs); + + octave_value_list ret; + + try { + ret = vm.execute_code (args, nargout); + } catch (std::exception &e) { + if (vm.m_dbg_proper_return == false) + { + std::cout << e.what () << std::endl; + // TODO: Replace with panic once the VM is mature enough + + // Some test code eats error messages, so we print to stderr too. + fprintf (stderr, "VM error %d: " "Exception in function %s escaped the VM\n", __LINE__, user_function.name ().c_str()); + error("VM error %d: " "Exception in function %s escaped the VM\n", __LINE__, user_function.name ().c_str()); + } + throw; + } + + return ret; + } + octave_value_list ret_args; int nargin = args.length (); @@ -3483,9 +3555,6 @@ panic_impossible (); } - // FIXME: this probably shouldn't be a double-precision matrix.
- Matrix ignored_outputs = ignored_fcn_outputs (); - tree_parameter_list *param_list = user_function.parameter_list (); bool takes_varargs = false; @@ -4566,6 +4635,9 @@ { std::string file_name = check_autoload_file (nm); + // Signal to load path that the function cache is invalid + octave::load_path::signal_clear_fcn_cache (); + m_autoload_map[fcn] = file_name; } @@ -4574,6 +4646,9 @@ { check_autoload_file (nm); + // Signal to load path that the function cache is invalid + octave::load_path::signal_clear_fcn_cache (); + // Remove function from symbol table and autoload map. symbol_table& symtab = m_interpreter.get_symbol_table (); @@ -4940,7 +5015,7 @@ m_debugger_stack.top()->dbquit (all); } -static octave_value end_value (const octave_value& value, +octave_value end_value (const octave_value& value, octave_idx_type index_position, octave_idx_type num_indices) { diff -r edbe81ee00c5 -r d2de83a80165 libinterp/parse-tree/pt-eval.h --- a/libinterp/parse-tree/pt-eval.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/parse-tree/pt-eval.h Mon Apr 24 20:34:39 2023 +0200 @@ -61,6 +61,7 @@ class interpreter; class push_parser; class unwind_protect; +class vm; // How to evaluate the code that the parse trees represent. @@ -395,8 +396,13 @@ void set_auto_fcn_var (stack_frame::auto_var_type avt, const octave_value& val = octave_value ()); + void set_nargin (int nargin); + void set_nargout (int nargout); + octave_value get_auto_fcn_var (stack_frame::auto_var_type avt) const; + void set_active_bytecode_ip (int ip); + void define_parameter_list_from_arg_vector (tree_parameter_list *param_list, const octave_value_list& args); @@ -433,8 +439,12 @@ void push_stack_frame (octave_function *fcn); + void push_stack_frame (vm &vm, octave_user_function *fcn, int nargout, int nargin); + void pop_stack_frame (); + std::shared_ptr pop_return_stack_frame (); + std::shared_ptr get_current_stack_frame () const { return m_call_stack.get_current_stack_frame (); @@ -840,6 +850,16 @@ void push_echo_state (int type, const std::string& file_name, int pos = 1); + bool debug_mode_active () const { return m_debug_mode; } + +protected: + friend class vm; + + void do_breakpoint (tree_statement& stmt); + + void do_breakpoint (bool is_breakpoint, + bool is_end_of_fcn_or_script = false); + private: template @@ -855,11 +875,6 @@ bool maybe_push_echo_state_cleanup (); - void do_breakpoint (tree_statement& stmt); - - void do_breakpoint (bool is_breakpoint, - bool is_end_of_fcn_or_script = false); - bool is_logically_true (tree_expression *expr, const char *warn_for); // For unwind-protect. 
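A minimal calling-convention sketch for the evaluator entry points added above (tw is a tree_evaluator, fcn a compiled octave_user_function *, bc its cached bytecode; illustrative names, not a literal excerpt of the patch):

    vm the_vm (&tw, bc);                            // bind the VM to this evaluator
    tw.push_stack_frame (the_vm, fcn, nargout, 0);  // nargin is set inside the VM
    octave_value_list ret = the_vm.execute_code (args, nargout);

Unlike pop_stack_frame (), pop_return_stack_frame () hands the popped frame back as a std::shared_ptr, presumably so a dynamic bytecode frame can be kept alive after it leaves the call stack.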
@@ -994,6 +1009,10 @@ int m_num_indices; }; +octave_value end_value (const octave_value& value, + octave_idx_type index_position, + octave_idx_type num_indices); + OCTAVE_END_NAMESPACE(octave) #endif diff -r edbe81ee00c5 -r d2de83a80165 libinterp/parse-tree/pt-tm-const.cc --- a/libinterp/parse-tree/pt-tm-const.cc Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/parse-tree/pt-tm-const.cc Mon Apr 24 20:34:39 2023 +0200 @@ -214,6 +214,61 @@ } } +void tm_row_const::init (const octave_value *beg, const octave_value *end) +{ + bool first_elem = true; + + for (; beg != end; beg++) + { + octave_quit (); + + octave_value tmp = *beg; + + if (tmp.is_undefined ()) + error ("undefined element in matrix list"); + + if (tmp.is_cs_list ()) + { + octave_value_list tlst = tmp.list_value (); + + for (octave_idx_type i = 0; i < tlst.length (); i++) + { + octave_quit (); + + init_element (tlst(i), first_elem); + } + } + else + init_element (tmp, first_elem); + } + + if (m_any_cell && ! m_any_class && ! m_first_elem_is_struct) + cellify (); + + first_elem = true; + + for (const auto& val : m_values) + { + octave_quit (); + + dim_vector this_elt_dv = val.dims (); + + if (! this_elt_dv.zero_by_zero ()) + { + m_all_empty = false; + + if (first_elem) + { + first_elem = false; + m_dv = this_elt_dv; + } + else if ((! m_any_class) && (! m_dv.hvcat (this_elt_dv, 1))) + eval_error ("horizontal dimensions mismatch", m_dv, this_elt_dv); + } + } +} + + octave_value tm_const::concat (char string_fill_char) const { if (m_tm_rows.empty ()) @@ -410,6 +465,243 @@ } } +// For variable length rows +void tm_const::init (const octave_value *beg, const octave_value *end, + const std::vector &row_lengths) +{ + bool first_elem = true; + bool first_elem_is_struct = false; + + // Just eval and figure out if what we have is complex or all strings. + // We can't check columns until we know that this is a numeric matrix -- + // collections of strings can have elements of different lengths. + + for (int i = 0;beg != end; beg += row_lengths[i++]) + { + octave_quit (); + + if (beg + row_lengths[i] > end) + error ("Invalid call to tm_const::init"); + + tm_row_const row (beg, beg + row_lengths[i]); + + if (first_elem) + { + first_elem_is_struct = row.first_elem_struct_p (); + + first_elem = false; + } + + if (row.empty ()) + continue; + + if (m_all_strings && ! row.all_strings_p ()) + m_all_strings = false; + + if (m_all_sq_strings && ! row.all_sq_strings_p ()) + m_all_sq_strings = false; + + if (m_all_dq_strings && ! row.all_dq_strings_p ()) + m_all_dq_strings = false; + + if (! m_some_strings && row.some_strings_p ()) + m_some_strings = true; + + if (m_all_real && ! row.all_real_p ()) + m_all_real = false; + + if (m_all_complex && ! row.all_complex_p ()) + m_all_complex = false; + + if (m_all_empty && ! row.all_empty_p ()) + m_all_empty = false; + + if (! m_any_cell && row.any_cell_p ()) + m_any_cell = true; + + if (! m_any_sparse && row.any_sparse_p ()) + m_any_sparse = true; + + if (! m_any_class && row.any_class_p ()) + m_any_class = true; + + m_all_1x1 = m_all_1x1 && row.all_1x1_p (); + + m_tm_rows.push_back (row); + } + + if (m_any_cell && ! m_any_class && ! 
first_elem_is_struct) + { + for (auto& elt : m_tm_rows) + { + octave_quit (); + + elt.cellify (); + } + } + + first_elem = true; + + for (const auto& elt : m_tm_rows) + { + octave_quit (); + + octave_idx_type this_elt_nr = elt.rows (); + octave_idx_type this_elt_nc = elt.cols (); + + std::string this_elt_class_name = elt.class_name (); + m_class_name = get_concat_class (m_class_name, this_elt_class_name); + + dim_vector this_elt_dv = elt.dims (); + + m_all_empty = false; + + if (first_elem) + { + first_elem = false; + + m_dv = this_elt_dv; + } + else if (m_all_strings && m_dv.ndims () == 2 + && this_elt_dv.ndims () == 2) + { + // This is Octave's specialty. + // Character matrices support rows of unequal length. + if (m_dv.any_zero ()) + { + // Empty existing element (bug #52542). + // Replace empty element with non-empty one. + m_dv = this_elt_dv; + } + else + { + if (this_elt_nc > cols ()) + m_dv(1) = this_elt_nc; + m_dv(0) += this_elt_nr; + } + } + else if ((! m_any_class) && (! m_dv.hvcat (this_elt_dv, 0))) + eval_error ("vertical dimensions mismatch", m_dv, this_elt_dv); + } +} + +// Fixed row size +void tm_const::init (const octave_value *beg, const octave_value *end, + octave_idx_type row_length) +{ + bool first_elem = true; + bool first_elem_is_struct = false; + + // Just eval and figure out if what we have is complex or all strings. + // We can't check columns until we know that this is a numeric matrix -- + // collections of strings can have elements of different lengths. + + for (;beg != end; beg += row_length) + { + octave_quit (); + + tm_row_const row (beg, beg + row_length); + + if (first_elem) + { + first_elem_is_struct = row.first_elem_struct_p (); + + first_elem = false; + } + + if (row.empty ()) + continue; + + if (m_all_strings && ! row.all_strings_p ()) + m_all_strings = false; + + if (m_all_sq_strings && ! row.all_sq_strings_p ()) + m_all_sq_strings = false; + + if (m_all_dq_strings && ! row.all_dq_strings_p ()) + m_all_dq_strings = false; + + if (! m_some_strings && row.some_strings_p ()) + m_some_strings = true; + + if (m_all_real && ! row.all_real_p ()) + m_all_real = false; + + if (m_all_complex && ! row.all_complex_p ()) + m_all_complex = false; + + if (m_all_empty && ! row.all_empty_p ()) + m_all_empty = false; + + if (! m_any_cell && row.any_cell_p ()) + m_any_cell = true; + + if (! m_any_sparse && row.any_sparse_p ()) + m_any_sparse = true; + + if (! m_any_class && row.any_class_p ()) + m_any_class = true; + + m_all_1x1 = m_all_1x1 && row.all_1x1_p (); + + m_tm_rows.push_back (row); + } + + if (m_any_cell && ! m_any_class && ! first_elem_is_struct) + { + for (auto& elt : m_tm_rows) + { + octave_quit (); + + elt.cellify (); + } + } + + first_elem = true; + + for (const auto& elt : m_tm_rows) + { + octave_quit (); + + octave_idx_type this_elt_nr = elt.rows (); + octave_idx_type this_elt_nc = elt.cols (); + + std::string this_elt_class_name = elt.class_name (); + m_class_name = get_concat_class (m_class_name, this_elt_class_name); + + dim_vector this_elt_dv = elt.dims (); + + m_all_empty = false; + + if (first_elem) + { + first_elem = false; + + m_dv = this_elt_dv; + } + else if (m_all_strings && m_dv.ndims () == 2 + && this_elt_dv.ndims () == 2) + { + // This is Octave's specialty. + // Character matrices support rows of unequal length. + if (m_dv.any_zero ()) + { + // Empty existing element (bug #52542). + // Replace empty element with non-empty one. 
+ m_dv = this_elt_dv; + } + else + { + if (this_elt_nc > cols ()) + m_dv(1) = this_elt_nc; + m_dv(0) += this_elt_nr; + } + } + else if ((! m_any_class) && (! m_dv.hvcat (this_elt_dv, 0))) + eval_error ("vertical dimensions mismatch", m_dv, this_elt_dv); + } +} + octave_value tm_const::char_array_concat (char string_fill_char) const { char type = (m_all_dq_strings ? '"' : '\''); diff -r edbe81ee00c5 -r d2de83a80165 libinterp/parse-tree/pt-tm-const.h --- a/libinterp/parse-tree/pt-tm-const.h Sat Jun 03 20:28:49 2023 -0700 +++ b/libinterp/parse-tree/pt-tm-const.h Mon Apr 24 20:34:39 2023 +0200 @@ -157,6 +157,13 @@ init (row, tw); } + tm_row_const (const octave_value *beg, const octave_value *end) + : tm_info (beg == end), // "empty" + m_values () + { + init (beg, end); + } + tm_row_const (const tm_row_const&) = default; tm_row_const& operator = (const tm_row_const&) = delete; @@ -182,6 +189,7 @@ void init_element (const octave_value&, bool&); void init (const tree_argument_list&, tree_evaluator& tw); + void init (const octave_value *beg, const octave_value *end); }; class tm_const : public tm_info @@ -199,6 +207,24 @@ init (tm); } + tm_const (const octave_value *beg, + const octave_value *end, + octave_idx_type n_rows, + tree_evaluator& tw) + : tm_info (beg == end), m_evaluator (tw), m_tm_rows () + { + init (beg, end, n_rows); + } + + tm_const (const octave_value *beg, + const octave_value *end, + const std::vector &row_lengths, + tree_evaluator& tw) + : tm_info (beg == end), m_evaluator (tw), m_tm_rows () + { + init (beg, end, row_lengths); + } + OCTAVE_DISABLE_COPY_MOVE (tm_const) ~tm_const () = default; @@ -217,6 +243,14 @@ void init (const tree_matrix& tm); + void init (const octave_value *beg, + const octave_value *end, + octave_idx_type row_length); + + void init (const octave_value *beg, + const octave_value *end, + const std::vector &row_lengths); + octave_value char_array_concat (char string_fill_char) const; octave_value class_concat () const; diff -r edbe81ee00c5 -r d2de83a80165 libinterp/parse-tree/pt-vm-eval.cc --- a/libinterp/parse-tree/pt-vm-eval.cc Sat Jun 03 20:28:49 2023 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,72 +0,0 @@ -//////////////////////////////////////////////////////////////////////// -// -// Copyright (C) 2022-2023 The Octave Project Developers -// -// See the file COPYRIGHT.md in the top-level directory of this -// distribution or . -// -// This file is part of Octave. -// -// Octave is free software: you can redistribute it and/or modify it -// under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Octave is distributed in the hope that it will be useful, but -// WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Octave; see the file COPYING. If not, see -// . -// -//////////////////////////////////////////////////////////////////////// - -#if defined (HAVE_CONFIG_H) -# include "config.h" -#endif - -#include "defun.h" -#include "variables.h" - -OCTAVE_BEGIN_NAMESPACE(octave) - -// If TRUE, use VM evaluator rather than tree walker. 
- -static bool V__enable_vm_eval__ = false; - -DEFUN (__enable_vm_eval__, args, nargout, - doc: /* -*- texinfo -*- -@deftypefn {} {@var{val} =} __enable_vm_eval__ () -@deftypefnx {} {@var{old_val} =} __enable_vm_eval__ (@var{new_val}) -@deftypefnx {} {@var{old_val} =} __enable_vm_eval__ (@var{new_val}, "local") -Query or set whether Octave uses a virtual machine (VM) for evaluation of -parsed statements. - -The default value is false. When false, Octave uses a traditional tree walker -to evaluate statements parsed from m-code. When true, Octave translates parsed -statements to an intermediate representation that is then evaluated by a -virtual machine. - -When called from inside a function with the @qcode{"local"} option, the setting -is changed locally for the function and any subroutines it calls. The original -setting is restored when exiting the function. -@end deftypefn */) -{ -#if defined (OCTAVE_ENABLE_VM_EVALUATOR) - - return set_internal_variable (V__enable_vm_eval__, args, nargout, - "__enable_vm_eval__"); - -#else - - octave_unused_parameter (args); - - err_disabled_feature ("vm-evaluator", - "using a Virtual Machine for statement evaluation"); - -#endif -} - -OCTAVE_END_NAMESPACE(octave) diff -r edbe81ee00c5 -r d2de83a80165 liboctave/array/oct-pool.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/liboctave/array/oct-pool.h Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,138 @@ +//////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 2022-2023 The Octave Project Developers +// +// See the file COPYRIGHT.md in the top-level directory of this +// distribution or . +// +// This file is part of Octave. +// +// Octave is free software: you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Octave is distributed in the hope that it will be useful, but +// WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Octave; see the file COPYING. If not, see +// . +// +//////////////////////////////////////////////////////////////////////// + +#if ! defined (octave_oct_pool_h) +#define octave_oct_pool_h 1 + +#include "octave-config.h" + +// thread_local destructors seem a bit buggy/non-standard compliant in +// gcc and clang and seem to only be executed if the object is "used" somehow, +// even though the destructor has side effects. +// +// So we just make objects here that are initialized with the address of the +// cleaner object. +// +// See e.g. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61991 + +#ifdef OCTAVE_ENABLE_OBJECT_POOL + +#define OCT_OBJECT_POOL_PUBLICS(n_pool_size) \ +/* Overloaded new and delete operators that use an object pool.
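+   Allocation pops a recycled pointer when one is available, falling   \
+   back to ::operator new otherwise; deletion pushes the pointer back  \
+   into the pool unless it is full or closed (n_in_obj_pool == -1).    \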
*/ \ +static void * operator new (std::size_t count) \ +{ \ + /* If n_in_obj_pool is negative, the pool is closed */ \ + if (n_in_obj_pool > 0) \ + return obj_pool[--n_in_obj_pool]; \ + \ + return ::operator new (count); \ +} \ + \ +static void * operator new (std::size_t count, void *p) \ +{ \ + /* Placement new just uses default one */ \ + return ::operator new (count, p); \ +} \ + \ +static void operator delete (void* ptr) \ +{ \ + /* If n_in_obj_pool is negative, the pool is closed */ \ + if (n_in_obj_pool >= 0 && n_in_obj_pool < obj_pool_size) \ + { \ + obj_pool[n_in_obj_pool++] = ptr; \ + return; \ + } \ + ::operator delete (ptr); \ +} \ + \ +/* Class that cleans the object pool when the thread terminates */ \ +class object_pool_cleaner \ +{ \ +public: \ + ~object_pool_cleaner (); \ +}; \ + \ +/* For workaround reasons pool_cleaner can't be private. Its address \ + * has to be taken somewhere to fool the compiler into triggering its \ + * destructor. */ \ +thread_local static object_pool_cleaner pool_cleaner \ +__attribute__ ((tls_model ("initial-exec"))); \ + \ +private: \ +static constexpr int obj_pool_size = n_pool_size; \ + \ +/* Counter for amount of objects in the pool */ \ +thread_local static int n_in_obj_pool \ +__attribute__ ((tls_model ("initial-exec"))); \ +/* The pool */ \ +thread_local static void * obj_pool[obj_pool_size] \ +__attribute__ ((tls_model ("initial-exec"))); \ +public: \ + +// The following macro defines the static symbols for an object pool for +// a class. The corresponding class needs to use the OCT_OBJECT_POOL_PUBLICS(n_pool_size) +// macro. +// +// Use OCT_OBJECT_POOL_DEF_STATICS_NO_DUMMY_PTR for classes with nested private +// classes that need an object pool and define the pointer separately as a +// static member in the outer class. + +#define OCT_OBJECT_POOL_DEF_STATICS(type) \ +OCT_OBJECT_POOL_DEF_STATICS_NO_DUMMY_PTR(type) \ +/* Ensure the compiler thinks we are using the pool \ + * so that it actually calls its dtor. */ \ +type::object_pool_cleaner *cleaner_ptr_type::dummy_idx_ ## type = \ + &type::pool_cleaner; \ + +#define OCT_OBJECT_POOL_DEF_STATICS_NO_DUMMY_PTR(type) \ +thread_local type::object_pool_cleaner \ +type::pool_cleaner __attribute__ ((tls_model ("initial-exec"))); \ + \ +/* Live objects in pool */ \ +thread_local int type::n_in_obj_pool \ +__attribute__ ((tls_model ("initial-exec"))); \ + \ +/* The actual pool */ \ +thread_local void * type::obj_pool[type::obj_pool_size] \ + __attribute__ ((tls_model ("initial-exec"))); \ + \ +type::object_pool_cleaner::~object_pool_cleaner () \ + { \ + int n = n_in_obj_pool; \ + /* Set the counter to -1 so that any lingering objects \ + * do not use the pool */ \ + n_in_obj_pool = -1; \ + \ + for (int i = 0; i < n; i++) \ + type::operator delete (obj_pool[i]); \ + } \ + +#else +#define OCT_OBJECT_POOL_PUBLICS(n_pool_size) +#define OCT_OBJECT_POOL_DEF_STATICS(type) +#define OCT_OBJECT_POOL_DEF_STATICS_NO_DUMMY_PTR(type) +#endif + +#endif diff -r edbe81ee00c5 -r d2de83a80165 liboctave/util/lo-array-errwarn.cc --- a/liboctave/util/lo-array-errwarn.cc Sat Jun 03 20:28:49 2023 -0700 +++ b/liboctave/util/lo-array-errwarn.cc Mon Apr 24 20:34:39 2023 +0200 @@ -35,6 +35,7 @@ #include "lo-array-errwarn.h" #include "lo-error.h" +#include "error.h" OCTAVE_BEGIN_NAMESPACE(octave) @@ -119,6 +120,13 @@ is1d ? "I" : "..,I,..", idx, ext); } +index_exception* index_exception::vm_dup () +{ + // Any derived class of index_exception needs to implement vm_dup () for the VM.
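+  // The expected shape of an override is (cf. invalid_index::vm_dup ()
+  // and out_of_range::vm_dup () below; 'derived_exc' is a placeholder):
+  //
+  //   index_exception* vm_dup () { return new derived_exc {*this}; }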
+ // TODO: Make this a build error with a pure virtual definition. + liboctave_fatal ("VM panic: Derived class of index_exception does not implement vm_dup ()"); +} + // Show the expression that caused the error, e.g., "A(-1,_)", // "A(0+1i)", "A(_,3)". Show how many indices come before/after the // offending one, e.g., (), (,_), or (_,,...[x5]...) @@ -179,6 +187,8 @@ update_message (); } + OCTAVE_DEFAULT_COPY_MOVE (invalid_index) + void update_message () { static std::string exp @@ -194,6 +204,13 @@ { return error_id_invalid_index; } + + index_exception* vm_dup () + { + invalid_index *p = new invalid_index {*this}; + p->set_identifier (p->err_id ()); + return p; + } }; // Complain if an index is negative, fractional, or too big. @@ -247,6 +264,8 @@ update_message (); } + OCTAVE_DEFAULT_COPY_MOVE (out_of_range) + void update_message () { set_message (expression () + ": out of bound " @@ -260,6 +279,13 @@ return error_id_index_out_of_bounds; } + index_exception* vm_dup () + { + out_of_range *p = new out_of_range {*this}; + p->set_identifier (p->err_id ()); + return p; + } + private: // Dimension of object being accessed. diff -r edbe81ee00c5 -r d2de83a80165 liboctave/util/lo-array-errwarn.h --- a/liboctave/util/lo-array-errwarn.h Sat Jun 03 20:28:49 2023 -0700 +++ b/liboctave/util/lo-array-errwarn.h Mon Apr 24 20:34:39 2023 +0200 @@ -92,6 +92,9 @@ update_message (); } + // Allocate a copy of the index exception with new on the heap + virtual index_exception* vm_dup (); + private: // Value of invalid index. diff -r edbe81ee00c5 -r d2de83a80165 liboctave/wrappers/time-wrappers.c --- a/liboctave/wrappers/time-wrappers.c Sat Jun 03 20:28:49 2023 -0700 +++ b/liboctave/wrappers/time-wrappers.c Mon Apr 24 20:34:39 2023 +0200 @@ -155,3 +155,12 @@ { return mktime (tp); } + +// Avoid the risk of gnulib overriding anything above by placing this underneath the above fns +#include "gethrxtime.h" + +long long +octave_gettime_ns_wrapper () +{ + return gethrxtime (); +} diff -r edbe81ee00c5 -r d2de83a80165 liboctave/wrappers/time-wrappers.h --- a/liboctave/wrappers/time-wrappers.h Sat Jun 03 20:28:49 2023 -0700 +++ b/liboctave/wrappers/time-wrappers.h Mon Apr 24 20:34:39 2023 +0200 @@ -36,6 +36,9 @@ extern "C" { #endif +extern OCTAVE_API long long +octave_gettime_ns_wrapper (void); + extern OCTAVE_API int octave_gettimeofday_wrapper (time_t *sec, long *usec); extern OCTAVE_API int diff -r edbe81ee00c5 -r d2de83a80165 test/Makefile.am --- a/test/Makefile.am Sat Jun 03 20:28:49 2023 -0700 +++ b/test/Makefile.am Mon Apr 24 20:34:39 2023 +0200 @@ -112,6 +112,7 @@ include classdef-multiple-inheritance/module.mk include classes/module.mk include colon-op/module.mk +include compile/module.mk include ctor-vs-method/module.mk include fcn-handle/module.mk include file-encoding/module.mk diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/bench.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/bench.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,173 @@ +function bench (varargin) + + % The tests to run + % + % {name, {arg_type, n}, ...} + tests = { + {"for_loop_empty", {"n", 206824596}, 1, {}}, + {"for_loop_silly", {"n", 34894840}, 1, {}}, + {"for_loop_binop_1", {"n", 20300088}, 1, {}}, + {"for_loop_sinpi", {"n", 12991066}, 1, {}}, + {"for_loop_ifs", {"n", 5874007}, 1, {}}, + {"while_loop_empty", {"n", 24237997}, 1, {}}, + {"do_until_loop_empty", {"n", 27109647}, 1, {}}, + {"for_loop_subfun_1", {"n", 11930390}, 1, {}}, + {"for_loop_matselfmul", {"rand sq",150}, 3, {}}, 
+ {"for_sum_1", {"rand rowvec", 19267692}, 1, {}}, + {"for_sum_2", {"rand rowvec", 8742659}, 1, {}}, + {"qsort_recursive", {"rand rowvec", 107851}, 1, {}}, % Mostly copies vectors around + {"qsort_iterative", {"rand rowvec", 344418}, 1, {}}, + {"for_loop_fncall", {"n", 2164885}, 1, {}}, + {"bench_median", {"rand rowvec", 1927}, 1, {}}, + {"bench_cov", {"rand rowvec", 15261}, 1, {}}, + {"str_mod", {"n", 2335290}, 1, {}}, + {"fib", {"n", 31}, 1, {}}, + }; + + reg = ''; + calibrate = 0; + do_both = 1; + n_factor = 1; + filter = ""; + i = 1; + while i <= nargin + arg = varargin{i++}; + if strcmp (arg, "reg") + assert (i <= nargin) + reg = varargin{i++}; + elseif strcmp (arg, "calibrate") + calibrate = 1; + elseif strcmp (arg, "n_factor") + assert (i <= nargin) + n_factor = varargin{i++}; + end + end + + % For compatibility with older releases and Matlab + if ~exist("__compile") + __compile = @(varargin) true; + end + if ~exist("__dummy_mark_1") + __dummy_mark_1 = @() true; + end + if ~exist("__dummy_mark_2") + __dummy_mark_2 = @() true; + end + + cal_res = {}; + + for i = 1:length(tests) + + test = tests{i}; + name = test{1}; + complexity = test{3}; + also_compile = test{4}; + j = 1; + + % Skip on not matching regex, if there is one + if length (reg) && isempty (regexp (name, reg)) + continue; + end + + fn = str2func (name); + + printf ("%s:\n", name); + + n = 0; + arg = 0; + conf = test{2}; + conf_type = conf {1}; %"n", "rand sq" etc + n_norm = conf{2}; + + if strcmp (conf_type, "n") + n = round (conf{2} * n_factor); + arg = n; + elseif strcmp (conf_type, "rand sq") + rng (0); % Reset rng + n = round (conf{2} * n_factor); + arg = randn (n); + elseif strcmp (conf_type, "rand rowvec") + rng (0); % Reset rng + n = round (conf{2} * n_factor); + arg = randn (n, 1); + end + n = round (n); + + iters = 1:1; + if calibrate + iters = 1:40; + e_i = 0; + end + + for j = iters + + if strcmp (conf_type, "n") + n = round (n_norm * n_factor); + arg = n; + elseif strcmp (conf_type, "rand sq") + rng (0); % Reset rng + n = round (n_norm * n_factor); + arg = randn (n); + elseif strcmp (conf_type, "rand rowvec") + rng (0); % Reset rng + n = round (n_norm * n_factor); + arg = randn (n, 1); + end + n = round (n); + + + tic; + [ccttot0, cctuser0, cctsys0] = cputime; + assert (__compile (name)); + [ccttot1, cctuser1, cctsys1] = cputime; + cctwall = toc; + + [cttot0, ctuser0, ctsys0] = cputime; + tic; + __dummy_mark_1 (); + fn (arg); + __dummy_mark_2 (); + [cttot1, ctuser1, ctsys1] = cputime; + ctwall = toc; + + printf (" %-16s %-16s %-16s %-16s %-16s\n", "t tic","t cpu", "t usr" , "t sys", "n"); + printf (" Runtime: %-16g %-16g %-16g %-16g %-16g\n", ctwall, cttot1 - cttot0, ctuser1 - ctuser0, ctsys1 - ctsys0, n); + printf (" Compiletime %-16g %-16g %-16g %-16g\n\n", cctwall, ccttot1 - ccttot0, cctuser1 - cctuser0, cctsys1 - cctsys0); + + if calibrate + t_target = 1; + e = ctwall - t_target; + if e > 0.5 + e = 0.5; + elseif e < -0.5 + e = -0.5; + end + + n_norm_new = n_norm * (1 - e); + if j > 30 + n_norm = 0.998 * n_norm + 0.002 * n_norm_new; + elseif j > 20 + n_norm = 0.99 * n_norm + 0.01 * n_norm_new; + elseif j > 10 + n_norm = 0.95 * n_norm + 0.05 * n_norm_new; + else + n_norm = n_norm_new; + end + + printf (" n = %g, e = %g, e_i = %g\n", n_norm, e, e_i); + end + end + + if calibrate + printf (" Calibrated n: %d\n\n", n); + cal_res{end + 1} = {name, n}; + end + end + + if calibrate + printf ("Calibrated n:s for 1s\n\n"); + for e = cal_res + printf ("%s %d\n", e{1}{1}, e{1}{2}); + end + end +end diff -r 
edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/bench_cov.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/bench_cov.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,5 @@ +function bench_cov (v) + for i = 1:10000 + cov (v, v); + end +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/bench_median.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/bench_median.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,5 @@ +function bench_median (v) + for i = 1:10000 + median (v); + end +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/do_until_loop_empty.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/do_until_loop_empty.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,7 @@ +function do_until_loop_empty (n) + i = 0; + do + i++; + until i >= n +end + diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/fib.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/fib.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,8 @@ +function b = fib (n) + if n <= 1 + b = 1; + return; + endif + + b = fib (n - 1) + fib (n - 2); +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/for_loop_binop_1.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/for_loop_binop_1.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,6 @@ +function for_loop_binop_1 (n) + for i = 1:n + j = 1*2*3*4; + end +end + diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/for_loop_binop_2.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/for_loop_binop_2.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,8 @@ +function for_loop_binop_2 (n) + for i = 1:n + j = 1*2*3*4 * i; + g = j / 2 + 1; + f = g + j - 3; + end +end + diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/for_loop_empty.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/for_loop_empty.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,5 @@ +function for_loop_empty (n) + for i = 1:n + end +end + diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/for_loop_fncall.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/for_loop_fncall.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,5 @@ +function for_loop_fncall (n) + for i = 1:n + max (i, 2); + end +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/for_loop_ifs.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/for_loop_ifs.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,30 @@ +function for_loop_ifs (n) + for i = 1:n + if i == 100 + continue; + elseif i == 300 + continue; + end + + if i * 2 == 3002 + continue; + end + + if i < 0 + break; + end + + if i == -1024 + disp ("foooo"); + end + + if i == n + 1 + break; + end + + if ~i + break; + end + end +end + diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/for_loop_matselfmul.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/for_loop_matselfmul.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,18 @@ +function A = for_loop_matselfmul (sq) + A = zeros (size (sq)); + + cols = size (sq, 2); + rows = size (sq, 1); + + assert (cols == rows); + + n = cols; + + for i=1:n + for j=1:n + for k=1:n + A(i,j) = A(i,j)+sq(i,k)*sq(k,j); + end + end + end +end diff -r edbe81ee00c5 -r d2de83a80165 
test/compile-bench/bench-octave/for_loop_silly.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/for_loop_silly.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,6 @@ +function for_loop_silly (n) + for i = 1:n + j = i; + end +end + diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/for_loop_sinpi.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/for_loop_sinpi.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,5 @@ +function for_loop_sinpi (n) + for i = 1:n + j = sin (pi * i); + end +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/for_loop_subfun_1.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/for_loop_subfun_1.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,9 @@ +function for_loop_subfun_1 (n) + + for i = 1:n + suby (); + end +end + +function suby () +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/for_sum_1.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/for_sum_1.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,7 @@ +function for_sum_1 (mat) + sum = 0; + for i = 1:length (mat) + sum = sum + mat (i); + end +end + diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/for_sum_2.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/for_sum_2.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,7 @@ +function sum = for_sum_2 (arg) + sum = 0; + for i = arg' + sum = sum + i; + end +end + diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/qsort_iterative.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/qsort_iterative.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,48 @@ +function A = qsort_iterative (A) + len = length (A); + stack = zeros (64, 1); + + top = 0; + + % Push initial indices to the stack + stack (++top) = 1; + stack (++top) = len; + + while top > 0 + % Pop indices + high = stack (top--); + low = stack (top--); + + % Partition part of the algorithm + p = low - 1; % pivot index + x = A (high);% pivot value + + % Swap so that there are two parts. One less than or equal to the pivot + % value and one higher + for j = low:high - 1 + if A(j) <= x + p++; + tmp = A(j); + A(j) = A(p); + A(p) = tmp; + end + end + % Swap the pivot value with the first value bigger than the pivot + p++; + tmp = A(high); + A(high) = A(p); + A(p) = tmp; + % End partition + + % Push left and right indices (if there are any values to the left or right) + if p - 1 > low + stack(++top) = low; + stack(++top) = p - 1; + end + + if p + 1 < high + stack(++top) = p + 1; + stack(++top) = high; + end + end +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/qsort_recursive.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/qsort_recursive.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,36 @@ +% Most time is spent copying vectors anyway, so this is a terrible test really.
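+% The recursion slices A on every call and rebuilds it with [left A(p) right],
+% so the benchmark mostly measures octave_value copying rather than the VM's
+% loop and call handling.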
+ +function A = qsort_recursive (A) + if isempty (A) || length(A) == 1 + return; + end + + [p, A] = partion (A); + + left = qsort_recursive (A(1:p - 1)); + right = qsort_recursive (A(p + 1:end)); + + A = [left A(p) right]; +end + +function [p, A] = partion (A) + lo = 1; + hi = length (A); + + pivot = A(hi); + + p = lo - 1; + + for j = lo:1:hi-1 + if A(j) <= pivot + p++; + tmp = A(j); + A (j) = A (p); + A (p) = tmp; + end + end + p++; + tmp = A(hi); + A(hi) = A(p); + A(p) = tmp; +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/str_mod.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/str_mod.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,9 @@ +function str_mod (n) + + s1 = "qweasd"; + s2 = "zxccvb"; + for i = 1:n + s1 (2) = "f"; + s2 (1) = s1(3); + end +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-octave/while_loop_empty.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-octave/while_loop_empty.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,7 @@ +function while_loop_empty (n) + i = 0; + while i < n + i++; + end +end + diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/bench-py3/bench.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/bench-py3/bench.py Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,149 @@ +#!/usr/bin/env python3 + +from timeit import default_timer as timer +from array import * +import random +import math + +def fib (n): + if n <= 1: + return 1 + + return fib (n - 1) + fib (n - 2) + +def for_loop_empty (n): + for i in range (1, n + 1): + continue + +def for_loop_silly (n): + for i in range (1, n + 1): + j = i + +def for_loop_binop_1(n): + for i in range (1, n + 1): + j = 1*2*3*4 + +def for_loop_sinpi (n): + for i in range (1, n): + j = math.sin (math.pi * i) + +def for_loop_ifs (n): + for i in range (1, n + 1): + if i == 100.0: + continue + elif i == 300.0: + continue + + if i * 2.0 == 3002.0: + continue + + if i < 0.0: + break + + if i == -1024.0: + print ("fooo") + + if i == n + 1.0: + break + + if not i: + break + +def while_loop_empty (n): + i = 0.0 + while i < n: + i += 1 + +def for_loop_subfun_1 (n): + for i in range (n): + suby () + +def suby (): + return + +def qsort_iterative (A): + l = len (A) + stack = [0] * 128 # Probably big enough + + top = 0 + stack[top] = 0 + top += 1 + stack[top] = l - 1 + top += 1 + + while top > 0: + top -= 1 + high = stack[top] + top -= 1 + low = stack[top] + + p = low - 1 + x = A[high] + + for j in range (low, high): + if A[j] <= x: + p += 1 + tmp = A[j] + A[j] = A[p] + A[p] = tmp + p += 1 + tmp = A[high] + A[high] = A[p] + A[p] = tmp + + if p - 1 > low: + stack[top] = low + top += 1 + stack[top] = p - 1 + top += 1 + if p + 1 < high: + stack[top] = p + 1 + top += 1 + stack[top] = high + top += 1 + + return A + +def time_fn_call (fn, arg): + start = timer() + fn (arg) + end = timer() + + return end - start + +def randn (rows, cols): + if rows == 1: + arr = array('d') + for i in range(cols): + arr.append(random.gauss (0, 1)) + return arr + + arr = []  # plain list of rows; array.array needs a typecode and cannot nest + for i in range (rows): + arr_row = array('d') + for j in range(cols): + arr_row.append(random.gauss (0, 1)) + arr.append (arr_row) + return arr + +tests = \ + [[for_loop_empty, 206824596], + [for_loop_silly, 34894840], + [for_loop_binop_1, 20300088], + [for_loop_sinpi, 12991066], + [for_loop_ifs, 5874007], + [while_loop_empty, 24237997], + [for_loop_subfun_1, 11930390], + [fib, 31], + [qsort_iterative, "rowvec", 344418]] + +def main(): 
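+    # Each entry in 'tests' is [fn, n] or [fn, "rowvec", n]; "rowvec"
+    # entries get a freshly generated random row vector as the argument.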
for t in tests: + if (t[1] == "rowvec"): + dt = time_fn_call (t[0], randn (1, t[2])) + else: + dt = time_fn_call (t[0], t[1]) + print (t[0].__name__ + " in %g s" % dt) + +if __name__ == "__main__": + main () \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile-bench/module.mk --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile-bench/module.mk Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,23 @@ +vm_TEST_FILES = \ + %reldir%/compile-bench/bench-octave/bench.m \ + %reldir%/compile-bench/bench-octave/bench_cov.m\ + %reldir%/compile-bench/bench-octave/bench_median.m\ + %reldir%/compile-bench/bench-octave/do_until_loop_empty.m\ + %reldir%/compile-bench/bench-octave/fib.m\ + %reldir%/compile-bench/bench-octave/for_loop_binop_1.m\ + %reldir%/compile-bench/bench-octave/for_loop_empty.m\ + %reldir%/compile-bench/bench-octave/for_loop_fncall.m\ + %reldir%/compile-bench/bench-octave/for_loop_ifs.m\ + %reldir%/compile-bench/bench-octave/for_loop_matselfmul.m\ + %reldir%/compile-bench/bench-octave/for_loop_silly.m\ + %reldir%/compile-bench/bench-octave/for_loop_sinpi.m\ + %reldir%/compile-bench/bench-octave/for_loop_subfun_1.m\ + %reldir%/compile-bench/bench-octave/for_sum_1.m\ + %reldir%/compile-bench/bench-octave/for_sum_2.m\ + %reldir%/compile-bench/bench-octave/qsort_iterative.m\ + %reldir%/compile-bench/bench-octave/qsort_recursive.m\ + %reldir%/compile-bench/bench-octave/str_mod.m\ + %reldir%/compile-bench/bench-octave/while_loop_empty.m\ + %reldir%/compile-bench/bench-py3/bench.py + +TEST_FILES += $(vm_TEST_FILES) diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode.tst --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode.tst Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,664 @@ +######################################################################## +## +## Copyright (C) 2022-2023 The Octave Project Developers +## +## See the file COPYRIGHT.md in the top-level directory of this +## distribution or . +## +## This file is part of Octave. +## +## Octave is free software: you can redistribute it and/or modify it +## under the terms of the GNU General Public License as published by +## the Free Software Foundation, either version 3 of the License, or +## (at your option) any later version. +## +## Octave is distributed in the hope that it will be useful, but +## WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. +## +## You should have received a copy of the GNU General Public License +## along with Octave; see the file COPYING. If not, see +## . +## +######################################################################## + +## Just clear the cached string in __prog_output_assert__() and clear +## classes method lookups due to a possible bug +%!test +%! __prog_output_assert__ (""); +%! % Overloading of class-methods seems to stick so we need to clear them since we overload +%! % double's display. Is this a bug ??? +%! clear classes + +## Test binary expressions +%!test +%! __enable_vm_eval__ (0, "local"); % Disable the VM for the tree_walker run +%! +%! clear all % We want all compiled functions to be cleared so that we can run the tree_walker +%! +%! key = "10 -10 24 0.041666666666666664 1 -5.0915810909090906 13 1 0 1 0 truthy1 1 falsy3 falsy4 truthy5 1 truthy7 truthy8 1 falsy9 falsy11 0 1 0 1 0 0 1 1 0 1 0 1 1 1 0 1 0 1 0 0 0 0 1 1 1 1 1 1 1 1 "; +%! +%! __compile bytecode_binops clear; +%! bytecode_binops (); +%!
assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! % We want to know that the function compiles, so do an explicit compile +%! assert (__compile ("bytecode_binops")); +%! bytecode_binops (); +%! assert (__prog_output_assert__ (key)); + +## Test subfunctions +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "2 2 2 11 30 10 30 5 0 0 double 1 2 1 2 double 30 11 5 0 0 double 1 2 1 2 double 11 11 12 13 1 1 double 14 1 1 double 11 11 5 13 1 1 double 14 1 1 double 11 3 3 3 2 2 2 313 ret32:1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 ret32:1 ret32:ret32:1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 take32:1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 1 18 59 64 "; +%! a = 313; +%! % Gets called to ensure anonymous function calls with +%! % externally scoped variables work +%! h = @() __printf_assert__ ("%d ", a); +%! +%! __compile bytecode_subfuncs clear; +%! bytecode_subfuncs (h); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_subfuncs")); +%! bytecode_subfuncs (h); +%! assert (__prog_output_assert__ (key)); + +## Test if:s +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "0 1 2 3 4 5 6 7 8 1 2 yay1 3 5 7 8 1 yay1 3 4 yay2 5 6 7 yay3 "; +%! +%! __compile bytecode_if clear; +%! bytecode_if (); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_if")); +%! bytecode_if (); +%! assert (__prog_output_assert__ (key)); + +## Test for:s +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "1 2 3 4 4 1 3 5 5 4 3 2 1 1 1 4 2 2 16 4 3 3 256 3 2 1 double 1 3 size 2 size 1 2 4 size 2 size 1 double q size 1 size 1 w size 1 size 1 e size 1 size 1 char single single 5 1 11 2 12 key:a val:1 1val:1 key:b val:1 3val:2 4val:2 2key:c val:string "; +%! +%! __compile bytecode_for clear; +%! bytecode_for (); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_for")); +%! bytecode_for (); +%! assert (__prog_output_assert__ (key)); + +## Test while +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "5 4 3 2 1 3 5 4 4 3 3 4 1 2 1 3 2 8 3 1 3 "; +%! +%! __compile bytecode_while clear; +%! bytecode_while (); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_while")); +%! bytecode_while (); +%! assert (__prog_output_assert__ (key)); + +## Test assign +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "2 3 1 1 2 3 2 3 2 2 6 18 2.000000 2.000000 3.000000 4.000000 5.000000 1 4 double 729.000000 324.000000 182.250000 116.640000 4 1 double 37.000000 81.000000 54.000000 118.000000 2 2 double "; +%! +%! __compile bytecode_assign clear; +%! bytecode_assign (); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_assign")); +%! bytecode_assign (); +%! assert (__prog_output_assert__ (key)); + +## Test unary +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "-1 4 1 2 3 4 1 3 2 4 0 0 "; +%! +%! __compile bytecode_unary clear; +%! bytecode_unary (); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%!
assert (__compile ("bytecode_unary")); +%! bytecode_unary (); +%! assert (__prog_output_assert__ (key)); + +## Test range +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "1 2 3 1 3 5 1 3 5 1 1.1 1.2 1.3 1.4 1 0.9 0.8 0.7 7 7 1 8 10 8 10 8 9 10 11 8 9 10 11 10 8 10 8 -10 -9 -8 -7 -10 -9 -8 -7 "; +%! +%! __compile bytecode_range clear; +%! bytecode_range (); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_range")); +%! bytecode_range (); +%! assert (__prog_output_assert__ (key)); + +## Test multi assign +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "3 4 2 2 1 2 3 4 1 2 3 4 1 1 3 2 3 4 1 1 1 2 3 4 1 1 1 2 3 4 1 2 3 "; +%! +%! __compile bytecode_multi_assign clear; +%! bytecode_multi_assign (); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_multi_assign")); +%! bytecode_multi_assign (); +%! assert (__prog_output_assert__ (key)); + +## Test subsasgn +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! clear functions % persistent variables in bytecode_subsasgn +%! key = "3 5 9 8 11 13 1 2 3 4 5 6 77 88 99 1010 1 2 3 987 987 6 77 88 99 1010 0 0 0 0 0 13 double 3 2 4 2 3 cell 1 3 6 7 2 3 1 4 5 1 3 5 2 4 6 7 7 7 7 7 7 1 2 3 1 3 3 2 3 2 3 1 3 1 2 3 4 4 4 3 4 5 6 1 5 3 4 1 5 -1 4 1 5 -1 8 "; +%! +%! __compile bytecode_subsasgn clear; +%! bytecode_subsasgn (); +%! assert (__prog_output_assert__ (key), "bytecode_subsasgn failed uncompiled"); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_subsasgn")); +%! bytecode_subsasgn (); +%! assert (__prog_output_assert__ (key), "bytecode_subsasgn failed compiled"); + +## Test end +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "1 3 2 4 1 5 6 7 2 2 5 5 6 6 1 2 3 4 5 2 2 2 3 3 4 fs 2 3 1 foo oo "; +%! +%! __compile bytecode_end clear; +%! bytecode_end (); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_end")); +%! bytecode_end (); +%! assert (__prog_output_assert__ (key)); + +## Test matrix +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "1 2 3 4 1 4 1 2 3 4 4 1 1 3 2 4 2 2 1 3 1 3 2 4 2 4 4 2 0 0 a b c d 7 15 10 22 2 2 30 1 1 1 2 3 4 2 4 6 8 3 6 9 12 4 8 12 16 4 4 1 1 1 0.0333333 0.0666667 0.1 0.133333 0.0666667 0.133333 0.2 0.266667 0.1 0.2 0.3 0.4 0.133333 0.266667 0.4 0.533333 4 4 1 0 0 1 2 2 30 1 1 10 14 14 20 2 2 0.0333333 0.0666667 0.1 0.133333 0.0666667 0.133333 0.2 0.266667 0.1 0.2 0.3 0.4 0.133333 0.266667 0.4 0.533333 4 4 2.5 -0.5 2 0 2 2 2 6 4 8 2 2 2 3 4 5 3 4 5 6 4 5 6 7 5 6 7 8 4 4 3 4 5 6 1 4 3 4 5 6 1 4 -1 0 1 2 1 4 2 4 6 8 1 4 0.5 1 1.5 2 1 4 0.5 1 1.5 2 1 4 1 4 9 16 1 4 1 1 1 1 1 4 1 1 1 1 1 4 1 4 27 256 1 4 1 2 3 4 2 4 6 8 3 6 9 12 4 8 12 16 4 4 1 0.5 0.333333 0.25 2 1 0.666667 0.5 3 1.5 1 0.75 4 2 1.33333 1 4 4 1 2 3 4 0.5 1 1.5 2 0.333333 0.666667 1 1.33333 0.25 0.5 0.75 1 4 4 1 4 27 256 4 1 qzwxeca s d zzxxccz x c 1 258 33264 258 1 33264 "; +%! +%! __compile bytecode_matrix clear; +%! bytecode_matrix (); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_matrix")); +%! bytecode_matrix (); +%! assert (__prog_output_assert__ (key)); + +## Test return +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "2 baaar bääär baaaaz bääääz bååååz booz 1 1 2 1 1 1 2 1 silly silly "; +%! +%! __compile bytecode_return clear; +%! 
bytecode_return (); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_return")); +%! bytecode_return (); +%! assert (__prog_output_assert__ (key)); + +## Test word list command +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "A B C QWE "; +%! +%! __compile bytecode_wordlistcmd clear; +%! bytecode_wordlistcmd (); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_wordlistcmd")); +%! bytecode_wordlistcmd (); +%! assert (__prog_output_assert__ (key)); + +## Test do until +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "5 3 5 5 4 4 3 4 1 2 1 3 2 12 3 0 3 "; +%! +%! __compile bytecode_dountil clear; +%! bytecode_dountil (); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_dountil")); +%! bytecode_dountil (); +%! assert (__prog_output_assert__ (key)); + +## Test cell +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "a b a b 1 2 b c b c 1 2 char b c c d b d c e 2 2 b d f h j l c e g i k m 6 2 1 2 2 3 1 3 2 4 1 3 1 2 1 3 2 4 2 2 double qwe 1 3 char 1 2 "; +%! +%! __compile bytecode_cell clear; +%! bytecode_cell (); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_cell")); +%! bytecode_cell (); +%! assert (__prog_output_assert__ (key)); + +## Test varargin +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "2 3 1 2 1 1 1 1 1 2 3 4 1 4 4 0 0 0 1 2 3 4 1 3 4 2 3 4 1 3 3 2 2 3 4 1 4 4 1 0 0 1 0 0 0 2 1 1 1 1 2 3 4 1 3 4 2 3 4 1 3 3 2 2 3 4 1 4 4 2 1 2 4 1 2 3 4 3 3 2 1 0 "; +%! +%! __compile bytecode_varargin clear; +%! bytecode_varargin (1,2,3); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_varargin")); +%! bytecode_varargin (1,2,3); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "0 0 1 1 1 1 1 2 3 4 1 4 4 0 0 0 1 2 3 4 1 3 4 2 3 4 1 3 3 2 2 3 4 1 4 4 1 0 0 1 0 0 0 2 1 1 1 1 2 3 4 1 3 4 2 3 4 1 3 3 2 2 3 4 1 4 4 2 1 2 4 1 2 3 4 1 3 2 1 0 "; +%! +%! __compile bytecode_varargin clear; +%! bytecode_varargin (1); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_varargin")); +%! bytecode_varargin (1); +%! assert (__prog_output_assert__ (key)); + +## Test global variables +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "double 0 0 1 1 1 1 1 2 2 2 400 100 0 1 3 double 1 1 1 2 double 1 2 1 1 11 eclass:double 1 1 3 4 double 1 2 400 100 1 1 1 1 3 4 1 1 5 6 1 1 1 2 double 1 2 1 2 double 1 2 1 1 3 4 eclass:double 1 2 3 4 double 1 2 0 0 1 1 3 4 1 1 5 6 1 0 2 double 2 double 11 2 6 4 5 double 1 5 11 double 1 1 22 double 1 1 33 double 1 1 3 double 1 1 4 double 1 1 10 double 1 1 2 3 double 1 2 3 double 1 1 2 double 1 1 55 double 1 1 7 double 1 1 0 "; +%! +%! __compile bytecode_global_1 clear; +%! clear global a; +%! clear global b; +%! clear global q +%! global q % Used in test function +%! q = 55; +%! bytecode_global_1 (); +%! assert (__prog_output_assert__ (key)); +%! assert (length(who('global','a'))); +%! assert (length(who('global','b'))); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_global_1")); +%! clear global a; +%! clear global b; +%! clear global q; +%! global q % Used in test function +%! q = 55; +%! 
bytecode_global_1 (); +%! assert (length(who('global','a'))); +%! assert (length(who('global','b'))); +%! assert (__prog_output_assert__ (key)); +%! +%! global a b; +%! assert (a == 5); +%! assert (b == 6); +%! +%! clear global a; +%! clear global b; +%! clear global q; + +## Test switch +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "yay yay2 yay3 yay4 yay5 yay6 yay7 yay8 1 2 3 3 1 3 3 4 4 1 3 3 4 4 2 yoo 2 3 3 1:1 for-end:12:2 3:3 for-end:3breaking:4 "; +%! +%! __compile bytecode_switch clear; +%! bytecode_switch; +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_switch")); +%! bytecode_switch; +%! assert (__prog_output_assert__ (key)); + +## Test eval (dynamic stack frames) +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "3.000000 1.000000 1.000000 double 4.000000 1.000000 1.000000 double 4.000000 1.000000 1.000000 double 5.000000 4.000000 2.000000 3.000000 1.000000 1.000000 double 4.000000 1.000000 1.000000 double 4.000000 1.000000 1.000000 double 5.000000 4.000000 2.000000 1:11.000000 2:22.000000 3:33.000000 4:3.000000 5:22.000000 6:3.000000 7:3.000000 3 3 2 2 3.000000 3.000000 "; +%! +%! __compile bytecode_eval_1 clear; +%! bytecode_eval_1; +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_eval_1")); +%! bytecode_eval_1; +%! assert (__prog_output_assert__ (key)); + +## Test evalin and assignin +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! % We want to test all combinations of compiled and uncompiled evalin_1 and 2. +%! +%! key = "2.000000 yoyo yobase 3.000000 yoyo2 yobase2 123.000000 124.000000 11.000000 33.000000 "; +%! +%! caller_a = 2; +%! +%! +%! __compile bytecode_evalin_1 clear; +%! __compile bytecode_evalin_2 clear; +%! bytecode_evalin_1 (); +%! assert (__prog_output_assert__ (key)); +%! +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_evalin_1")); +%! bytecode_evalin_1 (); +%! assert (__prog_output_assert__ (key)); +%! +%! +%! __compile bytecode_evalin_1 clear; +%! __compile bytecode_evalin_2 clear; +%! assert (__compile ("bytecode_evalin_1")); +%! assert (__compile ("bytecode_evalin_2")); +%! bytecode_evalin_1 (); +%! assert (__prog_output_assert__ (key)); +%! +%! __compile bytecode_evalin_1 clear; +%! __compile bytecode_evalin_2 clear; +%! assert (__compile ("bytecode_evalin_2")); +%! bytecode_evalin_1 (); +%! assert (__prog_output_assert__ (key)); +%! + +## Test error messages +%!test +%! ## Interpreter reference +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! __compile bytecode_errors clear; +%! fail ("bytecode_errors (0)", ... +%! "'qweqwe' undefined near line 9, column 6"); +%! fail ("bytecode_errors (1)", ... +%! "'b' undefined near line 15, column 7"); +%! fail ("bytecode_errors (2)", ... +%! "'b' undefined near line 19, column 7"); +%! fail ("bytecode_errors (3)", ... +%! "'b' undefined near line 23, column 7"); +%! fail ("bytecode_errors (4)", ... +%! "'b' undefined near line 27, column 3"); +%! fail ("bytecode_errors (5)", ... +%! "'b' undefined near line 31, column 3"); +%! fail ("bytecode_errors (6)", ... +%! 'a\(3\): out of bound 2 \(dimensions are 1x2\)'); +%! fail ("bytecode_errors (7)", ... +%! 'a\(-1\): subscripts must be either integers 1 to \(2\^63\)-1 or logicals'); +%! fail ("bytecode_errors (8)", ... +%! 'operator \+: nonconformant arguments \(op1 is 1x3, op2 is 1x2\)'); +%! +%! __enable_vm_eval__ (1, "local"); +%! 
## Bytecode running the same errors +%! __compile bytecode_errors; +%! fail ("bytecode_errors (0)", ... +%! "'qweqwe' undefined near line 9, column 6"); +%! fail ("bytecode_errors (1)", ... +%! "'b' undefined near line 15, column 7"); +%! fail ("bytecode_errors (2)", ... +%! "'b' undefined near line 19, column 7"); +%! fail ("bytecode_errors (3)", ... +%! "'b' undefined near line 23, column 7"); +%! fail ("bytecode_errors (4)", ... +%! "'b' undefined near line 27, column 3"); +%! fail ("bytecode_errors (5)", ... +%! "'b' undefined near line 31, column 3"); +%! fail ("bytecode_errors (6)", ... +%! 'a\(3\): out of bound 2 \(dimensions are 1x2\)'); +%! fail ("bytecode_errors (7)", ... +%! 'a\(-1\): subscripts must be either integers 1 to \(2\^63\)-1 or logicals'); +%! fail ("bytecode_errors (8)", ... +%! 'operator \+: nonconformant arguments \(op1 is 1x3, op2 is 1x2\)'); + +## Test try catch +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "yay yay2 yay3 ooo yay2 yay3 ooo2 ooo2 yay3 yay4 Nested error yay5 yay6 In catch yay7 qwe yay8 Error in subfunction yay9 'asd' undefined near line 87, column 11 yay10 operator *: nonconformant arguments (op1 is 1x2, op2 is 1x3) yay11 yoyo yay12 foo yay12 foo yay12 foo yay13 foo yay13 foo yay13 foo "; +%! +%! __compile bytecode_trycatch clear; +%! bytecode_trycatch; +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_trycatch")); +%! bytecode_trycatch; +%! assert (__prog_output_assert__ (key)); + +## Test unwind protect +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "yay1 yay2 yay3 e1 subyyay1 subyyay2 subyyay3 subyyay4 subyyay5 subyyay6 subyyay7 subyyay8 subyyay9 subyyay10 subyyay11 subyyay12 subyyay13 subyyay14 subyyay15 subyyay16 subyyay17 subyyay18 yay4 yay5 yay6 "; +%! __compile bytecode_unwind clear; +%! bytecode_unwind; +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_unwind")); +%! bytecode_unwind; +%! assert (__prog_output_assert__ (key)); + +## Test persistent +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! clear functions % clear persistent variables in bytecode_persistant +%! key = "a:3 b: double 0 0 0 c:3 c:4 a:4 b:1 double 1 1 0 c:5 c:6 "; +%! +%! __compile bytecode_persistant clear; +%! bytecode_persistant; +%! bytecode_persistant; +%! assert (__prog_output_assert__ (key)); +%! +%! clear all; +%! __enable_vm_eval__ (1, "local"); +%! key = "a:3 b: double 0 0 0 c:3 c:4 a:4 b:1 double 1 1 0 c:5 c:6 "; +%! assert (__compile ("bytecode_persistant")); +%! bytecode_persistant; +%! +%! bytecode_persistant; +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (0, "local"); +%! __compile bytecode_persistant clear; +%! clear all; +%! key = "a:3 b: double 0 0 0 c:3 c:4 a:4 b:1 double 1 1 0 c:5 c:6 "; +%! bytecode_persistant; +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_persistant")); +%! +%! bytecode_persistant; +%! assert (__prog_output_assert__ (key)); + +## Test structs +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "1 2 double 1 1 struct 3 4 "; +%! __compile bytecode_struct clear; +%! bytecode_struct; +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_struct")); +%! bytecode_struct; +%! assert (__prog_output_assert__ (key)); + +## Test indexing chained objects and strange indexing +%!test +%! __enable_vm_eval__ (0, "local"); +%! 
clear all +%! key = "2 2 3 3 2 cell 1 1 3 3 2 3 22 double 33 3 4 matlab.lang.MemoizedFunction 2 "; +%! __compile bytecode_index_obj clear; +%! bytecode_index_obj; +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_index_obj")); +%! bytecode_index_obj; +%! assert (__prog_output_assert__ (key)); + +## Test varargout +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "7 8 1 1 2 1 0 0 0 1 0 1 0 0 0 1 0 "; +%! __compile bytecode_varargout clear; +%! bytecode_varargout; +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_varargout")); +%! bytecode_varargout; +%! assert (__prog_output_assert__ (key)); + +## Test inputname +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "a a b + 1 a a b b aa aa bb bb aa + 1 bb * 3 a + 1 b * 3 aa aa bb bb aa + 1 bb * 3 a a b b a + 1 b * 3 "; +%! __compile bytecode_inputname clear; +%! a = 9; b = 8; +%! bytecode_inputname (a, b + 1); +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_inputname")); +%! bytecode_inputname (a, b + 1); +%! assert (__prog_output_assert__ (key)); + +## Test ans +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "2 5 1 1 1 "; +%! __compile bytecode_ans clear; +%! bytecode_ans; +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_ans")); +%! bytecode_ans; +%! assert (__prog_output_assert__ (key)); + +## Test using classdef +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! global cdef_foo_ctor_cnt; clear global cdef_foo_ctor_cnt; +%! global cdef_foo_dtor_cnt; clear global cdef_foo_dtor_cnt; +%! key = ". 1 f1 . 2 f3 3 f2 . sumf2f3 2 . . call14 f4 . a a_1 . 5 f8 . 6 f10 7 f9 . sumf9f10 2 . . call18 f11 . 2 2 3 4 4 4 3 2 2 3 . 9 sumf9f10 10 f12 11 f13 12 f14 13 sumf2f3 14 f5 15 f6 16 f7 "; +%! __compile bytecode_cdef_use clear; +%! bytecode_cdef_use (); +%! assert (__prog_output_assert__ (key)); +%! global cdef_foo_ctor_cnt; global cdef_foo_dtor_cnt; +%! assert (cdef_foo_ctor_cnt == cdef_foo_dtor_cnt); % Check that ctors and dtors ran equally many times +%! +%! __enable_vm_eval__ (1, "local"); +%! global cdef_foo_ctor_cnt; clear global cdef_foo_ctor_cnt; +%! global cdef_foo_dtor_cnt; clear global cdef_foo_dtor_cnt; +%! assert (__compile ("bytecode_cdef_use")); +%! bytecode_cdef_use (); +%! assert (__prog_output_assert__ (key)); +%! global cdef_foo_ctor_cnt; global cdef_foo_dtor_cnt; +%! assert (cdef_foo_ctor_cnt == cdef_foo_dtor_cnt); +%! +%! global cdef_foo_ctor_cnt; clear global cdef_foo_ctor_cnt; +%! global cdef_foo_dtor_cnt; clear global cdef_foo_dtor_cnt; +%! +%! clear global __assert_printf__ + +## Test anonymous function handles +%!test +%! __enable_vm_eval__ (0, "local"); +%! clear all +%! key = "1 2 12 3 4 1 2 3 1 2 11 12 1 "; +%! __compile bytecode_anon_handles clear; +%! bytecode_anon_handles; +%! assert (__prog_output_assert__ (key)); +%! +%! __enable_vm_eval__ (1, "local"); +%! assert (__compile ("bytecode_anon_handles")); +%! bytecode_anon_handles; +%! assert (__prog_output_assert__ (key)); + +## Test compiling a function named differently from its +## m-file +%!test +%! clear all +%! __enable_vm_eval__ (1, "local"); +%! __compile wrongname_fn clear; +%! assert (__compile ("wrongname_fn")); +%! +%! assert (wrongname_fn (77) == 78); + +## Test some misc stuff +%!test +%! clear all +%! 
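+%! % bytecode_misc exercises VM internals (stack depth limits, the WIDE opcode extension) and asserts internally.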
__enable_vm_eval__ (1, "local"); +%! +%! bytecode_misc; % asserts internally + +## Leak check +%!test +%! clear all +%! __enable_vm_eval__ (1, "local"); +%! +%! c = 2; +%! d = 3; +%! n_c = __ref_count (c); +%! n_d = __ref_count (d); +%! bytecode_leaks (c, d); % asserts internally +%! +%! assert (n_c == __ref_count (c)) +%! assert (n_d == __ref_count (d)) +%! diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_anon_handles.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_anon_handles.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,27 @@ +function bytecode_anon_handles () + h1 = @(x) __printf_assert__ ("%d ", x); + h1 (1); + h1 (2); + h11 = h1; + h11 (12); + + a = 3; + h2 = @() __printf_assert__ ("%d ", a); + h2 (); + + h3 = @(a,b,c) a + b + c; + __printf_assert__ ("%d ", h3 (1, 2, 1)); + + h4 = @() {1,2,3}{:}; + [a b c] = h4(); + __printf_assert__ ("%d %d %d ", a, b, c); + [a b] = h4(); + __printf_assert__ ("%d %d ", a, b); + + h5 = @(x) @(y) __printf_assert__ ("%d %d ", x, y); + h5(11)(12) + + % max not in parent scope + h6 = @(x, y) max (x, y); + __printf_assert__ ("%d ", h6 (-1, 1)); +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_ans.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_ans.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,13 @@ +function bytecode_ans () + max (1, 2); + __printf_assert__ ("%d ", ans); + 1 + 1 + 3; + __printf_assert__ ("%d ", ans); + !false; + __printf_assert__ ("%d ", ans); + true; + __printf_assert__ ("%d ", ans); + + c = 13; % Not written to ans + __printf_assert__ ("%d ", ans); +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_assign.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_assign.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,57 @@ +function bytecode_assign () + + a = 2; + __printf_assert__ ("%d ", a); + + a = 3; + __printf_assert__ ("%d ", a); + + b = a = 1; + __printf_assert__ ("%d ", a); + __printf_assert__ ("%d ", b); + + c = [2 2; 3 3]; + d = c (1,2); + __printf_assert__ ("%d ", c); + __printf_assert__ ("%d ", d); + + % Compound assignment + d = 1; + d += 1; + __printf_assert__ ("%d ", d); + d += d * 2; + __printf_assert__ ("%d ", d); + d *= 3; + __printf_assert__ ("%d ", d); + d /= 9; + __printf_assert__ ("%f ", d); + + b = [1 2 3 4]; + b += 1; + __printf_assert__ ("%f ", b); + __printf_assert__ ("%d ", size(b)); + __printf_assert__ ("%s ", class(b)); + b \= 2; + b -= 2; + b *= 2; + b /= 2; + b += 2; + + b .\= 2; + %b .-= 2; % TODO: Removed in interpreter. Remove in VM too. + b .*= 2; + b ./= 2; + %b .+= 2; % TODO: Removed in interpreter. Remove in VM too. + b .^= 2; + + __printf_assert__ ("%f ", b); + __printf_assert__ ("%d ", size(b)); + __printf_assert__ ("%s ", class(b)); + + b = [1 2; 3 4]; + b ^= 3; + + __printf_assert__ ("%f ", b); + __printf_assert__ ("%d ", size(b)); + __printf_assert__ ("%s ", class(b)); +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_binops.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_binops.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,117 @@ +function bytecode_binops() + % Root level binary expressions + % should not mess up the operand stack + 2 * 3 + 1; + max (3, 2) - min (3, 2); + sin (3) * sin (2); + + % General + a = 1 + 2 + 3 + 4; + __printf_assert__ ("%.17g ", a); + a = -1 - 2 - 3 - 4; + __printf_assert__ ("%.17g ", a); + a = 1 * 2 * 3 * 4; + __printf_assert__ ("%.17g ", a); + a = 1 / 2 / 3 / 4; + __printf_assert__ ("%.17g ", a); + a = 1^2^3^4; + __printf_assert__ ("%.17g ", a); + + % Order + a = 1 + 2 - 3 * 4 / 5 ^ 6 * 7 / 8 - 9 + 10 / 11; + __printf_assert__ ("%.17g ", a); + + % Function calls + a = max (3, 2) * min (2, 1) + max (10, 9); + __printf_assert__ ("%.17g ", a); + + % Logical + a = 1 && 2; + __printf_assert__ ("%d ", a); + a = 1 && 0; + __printf_assert__ ("%d ", a); + a = 1 || 2; + __printf_assert__ ("%d ", a); + a = 0 || 0; + __printf_assert__ ("%d ", a); + + % These must not linger on the stack + 1 && 1; + 1 && 0; + 0 && 1; + 0 || 0; + 0 || 1; + 1 || 0; + + % We want to make sure there actually is a short circuit + % and that the operands are only evaluated once + a = truthy (1) || falsy(2); + __printf_assert__ ("%d ", a); + + a = falsy (3) || falsy (4) || truthy (5) || falsy (6); + __printf_assert__ ("%d ", a); + + a = truthy (7) && truthy (8) || falsy (12); + __printf_assert__ ("%d ", a); + + a = falsy (9) && truthy (10) || falsy (11); + __printf_assert__ ("%d ", a); + + % Compares + + a = 1 == 1; + __printf_assert__ ("%d ", a); + a = 1 == 2; + __printf_assert__ ("%d ", a); + a = 1 < 2; + __printf_assert__ ("%d ", a); + a = 2 < 1; + __printf_assert__ ("%d ", a); + a = 1 > 2; + __printf_assert__ ("%d ", a); + a = 2 > 1; + __printf_assert__ ("%d ", a); + a = 1 <= 2; + __printf_assert__ ("%d ", a); + a = 2 <= 1; + __printf_assert__ ("%d ", a); + a = 1 <= 1; + __printf_assert__ ("%d ", a); + a = 1 >= 2; + __printf_assert__ ("%d ", a); + a = 2 >= 1; + __printf_assert__ ("%d ", a); + a = 1 >= 1; + __printf_assert__ ("%d ", a); + a = 2 ~= 1; + __printf_assert__ ("%d ", a); + a = 1 ~= 1; + __printf_assert__ ("%d ", a); + a = 2 != 1; + __printf_assert__ ("%d ", a); + a = 1 != 1; + __printf_assert__ ("%d ", a); + + a = 1 == 1 && 2 > 1 && 3 > -3 || 1 < 10; + __printf_assert__ ("%d ", a); + + % Elementwise logical + + a = ones (2,2) & zeros (2,2); + __printf_assert__ ("%d ", a); + a = ones (2,2) & ones (2,2); + __printf_assert__ ("%d ", a); + a = ones (2,2) | zeros (2,2); + __printf_assert__ ("%d ", a); + +endfunction + +function out = truthy (i) + __printf_assert__ ("truthy%d ", i); + out = 1; +end + +function out = falsy (i) + __printf_assert__ ("falsy%d ", i); + out = 0; +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_cdef_use.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_cdef_use.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,82 @@ +function bytecode_cdef_use () + % The classdef handle class cdef_foo prints its ctor argument + % in the dtor with __printf_assert__ as well as + % adds one to the globals cdef_foo_ctor_cnt and cdef_foo_dtor_cnt + % in the ctor and dtor. + + h1 = cdef_foo("f1"); + __printf_assert__ (". "); + h1 = 2; % Triggers dtor + __printf_assert__ (". "); + + h4 = make_obj ("f2") + make_obj ("f3"); % Two dtors will trigger here + __printf_assert__ (". "); + __printf_assert__ ("%s %d ", h4.msg, h4.val); + + __printf_assert__ (". "); + make_obj ("f4"); % Saved in ans + __printf_assert__ (". "); % Won't trigger dtor since no defined return value + print_arg_ret_one ("call1"); % ans' dtor executed after rhs eval + __printf_assert__ (". "); + + % Test calling functions in packages. Not really a classdef + u = matlab.lang.makeUniqueStrings ({"a","a"}); + __printf_assert__ ("%s %s ", u{1}, u{2}); + + % Check dtor call order + a = make_obj ("f5"); + b = make_obj ("f6"); + c = make_obj ("f7"); + + suby (); % Same tests in a subfunction +end + +function suby () + h1 = cdef_foo("f8"); + __printf_assert__ (". "); + h1 = 2; % Triggers dtor + __printf_assert__ (". "); + + h4 = make_obj ("f9") + make_obj ("f10"); % Two dtors will trigger here + __printf_assert__ (". "); + __printf_assert__ ("%s %d ", h4.msg, h4.val); + + __printf_assert__ (". "); + make_obj ("f11"); % Saved in ans + __printf_assert__ (". "); % Won't trigger dtor since no defined return value + print_arg_ret_one ("call1"); % ans' dtor executed after rhs eval + __printf_assert__ (". "); + + % Check that the classdef object is called + m = containers.Map; + m("qwe") = 2; + __printf_assert__ ("%d ", m("qwe")); + __printf_assert__ ("%d ", m("qwe")++); % Test ++-- on objects + __printf_assert__ ("%d ", m("qwe")); + __printf_assert__ ("%d ", ++m("qwe")); + __printf_assert__ ("%d ", m("qwe")); + __printf_assert__ ("%d ", m("qwe")--); + __printf_assert__ ("%d ", m("qwe")); + __printf_assert__ ("%d ", --m("qwe")); + __printf_assert__ ("%d ", m("qwe")); + % Different op code than cmd form call + m = containers.Map(); + m("qwe") = 3; + __printf_assert__ ("%d ", m("qwe")); + + __printf_assert__ (". 
"); + % Check dtor call order + a = make_obj ("f12"); + b = make_obj ("f13"); + c = make_obj ("f14"); +end + + +function h = make_obj (msg) + h = cdef_foo (msg); +end + +function a = print_arg_ret_one (msg) + __printf_assert__ (msg); + a = 1; +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_cell.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_cell.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,65 @@ +function bytecode_cell () + a = {'a','b'}; + __printf_assert__ ("%s ", a{1}); + __printf_assert__ ("%s ", a{2}); + __printf_assert__ ("%s ", a{:}); + __printf_assert__ ("%d ", size (a)); + + b = 'b'; + c = 'c'; + + a = {b, c}; + __printf_assert__ ("%s ", a{1}); + __printf_assert__ ("%s ", a{2}); + __printf_assert__ ("%s ", a{:}); + __printf_assert__ ("%d ", size (a)); + __printf_assert__ ("%s ", class (a{1})); + + d = 'd'; + e = 'e'; + + a = {b, c; d, e}; + __printf_assert__ ("%s ", a{1}); + __printf_assert__ ("%s ", a{3}); + __printf_assert__ ("%s ", a{1,2}); + __printf_assert__ ("%s ", a{2,1}); + __printf_assert__ ("%s ", a{:}); + __printf_assert__ ("%d ", size (a)); + + f = 'f'; + g = 'g'; + h = 'h'; + i = 'i'; + a = {b, c; d, e; f g; h, i; 'j', 'k'; 'l', 'm'}; + __printf_assert__ ("%s ", a{:}); + __printf_assert__ ("%d ", size (a)); + + b = 1; + c = 2; + d = 3; + e = 4; + a = {b, c; d, e}; + __printf_assert__ ("%d ", a{1}); + __printf_assert__ ("%d ", a{3}); + __printf_assert__ ("%d ", a{1,2}); + __printf_assert__ ("%d ", a{2,1}); + __printf_assert__ ("%d ", a{:}); + __printf_assert__ ("%d ", a{:, 1}); + __printf_assert__ ("%d ", a{1, :}); + __printf_assert__ ("%d ", a{:, :}); + __printf_assert__ ("%d ", size (a)); + __printf_assert__ ("%s ", class (a{1})); + + a = {'qwe','asd','zxc'}; + f = a{:}; + __printf_assert__ ("%s ", f); + __printf_assert__ ("%d ", size (f)); + __printf_assert__ ("%s ", class (f)); + + % Command form function call subref + __printf_assert__ ("%d ", suby{:}); +end + +function a = suby() + a = {1,2}; +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_dountil.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_dountil.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,115 @@ +function bytecode_dountil () + i = 5; + do + __printf_assert__ ("%d ", i); + i--; + until i + + i = 0; + do + i++; + until i >= 3 + __printf_assert__ ("%d ", i); + + i = 0; + ctr = 0; + do + ctr++; + until i++ >= 4 + __printf_assert__ ("%d ", i); + __printf_assert__ ("%d ", ctr); + + i = 0; + ctr = 0; + do + ctr++; + until ++i >= 4 + __printf_assert__ ("%d ", i); + __printf_assert__ ("%d ", ctr); + + i = 0; + ctr = 0; + do + i++; + if i == 2 + continue + end + ctr++; + until i >= 4 + __printf_assert__ ("%d ", ctr); + __printf_assert__ ("%d ", i); + + i = 0; + ctr = 0; + do + i++; + if i == 2 + break + end + ctr++; + until i >= 4 + __printf_assert__ ("%d ", ctr); + __printf_assert__ ("%d ", i); + + i = 0; + ctr = 0; + do + i++; + if i == 2 + continue + elseif i == 3 + break + end + ctr++; + until i >= 4 + __printf_assert__ ("%d ", ctr); + __printf_assert__ ("%d ", i); + + i = 0; + do + i++; + if i == 1 + continue + else + break + end + until i > 100 + __printf_assert__ ("%d ", i); + + ctr = 0; + j = 0; + do + i = 0; + do + k = 0; + do + k++; + ctr++; + until k >= 2 + i++; + until i > 2 + j++; + until j >= 2 + __printf_assert__ ("%d ", ctr); + + i = 0; + do + i++; + if i == 4 + break; + end + continue; + until i == 3 + __printf_assert__ ("%d ", i); + + i = 0; + do + break + until i++ > 2 + 
__printf_assert__ ("%d ", i); + + i = 0; + do + until i++ == 2 + __printf_assert__ ("%d ", i); +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_end.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_end.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,58 @@ +function bytecode_end () + + A = [1 2; 3 4]; + __printf_assert__ ("%d ", A(1:end)); + + A(2:end) = [5 6 7]; + __printf_assert__ ("%d ", A); + __printf_assert__ ("%d ", size (A)); + + A = [1 2; 3 4]; + A(end, end) = 5; + __printf_assert__ ("%d ", A(2, 2)); + __printf_assert__ ("%d ", A(end, end)); + + A = [1 2; 3 4]; + A(end - 1, end - 1) = 6; + __printf_assert__ ("%d ", A(1, 1)); + __printf_assert__ ("%d ", A(end - 1, end - 1)); + + A = [1:4]; + A(end + 1) = 5; + __printf_assert__ ("%d ", A); + + A = 1; + A(end) = 2; + __printf_assert__ ("%d ", A); + __printf_assert__ ("%d ", A(end)); + + A(end + 1) = 3; + __printf_assert__ ("%d ", A); + __printf_assert__ ("%d ", A(end)); + + __printf_assert__ ("%d ", suby1()(end)); + + % End indexing an object that is not an id + s = {"ifs"}; + a = s{1}(2:end); + __printf_assert__ ("%s ", a); + + % Nested index expressions + M = [1 2 3 4]; + __printf_assert__ ("%d ", M (min (2, end))); % End of M + __printf_assert__ ("%d ", M (max (3, min (2, end)))); % End of M + + min_h = @min; + __printf_assert__ ("%d ", M (min_h (2, end))); % End of min_h + + s = [struct struct struct]; + s(2).name = "foo"; + __printf_assert__ ("%s ", s(min (2, end)).name); + + % end together with struct refs are annoying + __printf_assert__ ("%s ", s(2).name (end - 1: end)); +end + +function a = suby1() + a = [1 2 3 4]; +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_errors.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_errors.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,68 @@ +function bytecode_errors (idx) + % We put test dispatch last, so that we don't have + % to update all columns and rows each time we + % add a test ... 
+  run_test (idx); +end + +function if_undefined_value () + if qweqwe + a = 2; + end +end + +function assign_undef () + a = b; +end + +function subsref_undef_id () + a = b(1,2,3); +end + +function subsref_cell_undef_id () + a = b{1,2,3}; +end + +function wordcmd_undef_id () + b 1 2 3; +end + +function binary_undef () + b * a; +end + +function id_index_oob_error_1 () + a = [1, 2]; + b = a (3); +end + +function id_index_oob_error_2 () + a = [1, 2]; + b = a (-1); +end + +function binary_wrong_size_1 () + a = [1 2 3] + [1 2]; +end + +function run_test (idx) + if idx == 0 + if_undefined_value (); + elseif idx == 1 + assign_undef (); + elseif idx == 2 + subsref_undef_id (); + elseif idx == 3 + subsref_cell_undef_id (); + elseif idx == 4 + wordcmd_undef_id (); + elseif idx == 5 + binary_undef (); + elseif idx == 6 + id_index_oob_error_1 (); + elseif idx == 7 + id_index_oob_error_2 (); + elseif idx == 8 + binary_wrong_size_1 (); + end +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_eval_1.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_eval_1.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,126 @@ +function bytecode_eval_1 () + % Simple + assert (2 == eval ("2")); + assert (2 == eval ("2;")); + + v = eval("11"); + assert (v == 11); + + % ans + eval ("12;"); + assert (ans == 12); + + % Change variable value + a = 2; + eval ("a = 3;"); + + __printf_assert__ ("%f ", a) + __printf_assert__ ("%f ", size (a)) + __printf_assert__ ("%s ", class (a)); + + % Create new variable in an eval + eval ("b = 4;"); + __printf_assert__ ("%f ", b) + __printf_assert__ ("%f ", size (b)) + __printf_assert__ ("%s ", class (b)); + + % Create a new variable in an eval, that is also + % not in a bytecode slot + eval ("c = 4;"); + __printf_assert__ ("%f ", eval("c")) + __printf_assert__ ("%f ", size (eval("c"))) + __printf_assert__ ("%s ", class (eval("c"))); + eval ("c = 5;"); + __printf_assert__ ("%f ", eval("c")) + + % Change a global in an eval + clear global d + global d = 3; + eval ("d = 4;") + __printf_assert__ ("%f ", d); + clear global d + d = 2; + __printf_assert__ ("%f ", d); + + % Create a global in an eval + + %% TODO: Not supported. Does it have to be? 
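+  %% The commented-out lines below document the intended behaviour, should creating a global inside an eval ever be supported.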
+ % eval ("clear global e"); + % eval ("global e = 5;") + % __printf_assert__ ("%f ", e); + % __printf_assert__ ("%d ", length(who('global','e'))); + + % Just test the same thing in a subfunction + sub1 (); + + % Change the value of arguments and returns in an eval + % Also do nargin and nargout in a subfunction + [aa bb] = suby2 (11, 22, 33); + __printf_assert__ ("%f ", aa); + __printf_assert__ ("%f ", bb); +end + +function sub1() + % Simple + assert (2 == eval ("2;")); + assert (2 == eval ("2;")); + + v = eval("11;"); + assert (v == 11); + + % ans + eval ("12;"); + assert (ans == 12); + + % Change variable value + a = 2; + eval ("a = 3;"); + + __printf_assert__ ("%f ", a) + __printf_assert__ ("%f ", size (a)) + __printf_assert__ ("%s ", class (a)); + + % Create new variable in an eval + eval ("b = 4;"); + __printf_assert__ ("%f ", b) + __printf_assert__ ("%f ", size (b)) + __printf_assert__ ("%s ", class (b)); + + % Create new variable in an eval, that is also not + % not in a bytecode slot + eval ("c = 4;"); + __printf_assert__ ("%f ", eval("c")) + __printf_assert__ ("%f ", size (eval("c"))) + __printf_assert__ ("%s ", class (eval("c"))); + eval ("c = 5;"); + __printf_assert__ ("%f ", eval("c")) + + % Change a global in an eval + clear global d + global d = 3; + eval ("d = 4;") + __printf_assert__ ("%f ", d); + clear global d + d = 2; + __printf_assert__ ("%f ", d); +end + +function [c d] = suby2 (a, b, c) + __printf_assert__ ("1:%f ", a); + __printf_assert__ ("2:%f ", b); + __printf_assert__ ("3:%f ", c); + + eval ("c = 3;") + eval ("a = c;") + eval ("d = a;") + __printf_assert__ ("4:%f ", a); + __printf_assert__ ("5:%f ", b); + __printf_assert__ ("6:%f ", c); + __printf_assert__ ("7:%f ", d); + + __printf_assert__ ("%d ", nargin); + __printf_assert__ ("%d ", eval ("nargin")); + + __printf_assert__ ("%d ", nargout); + __printf_assert__ ("%d ", eval ("nargout")); +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_evalin_1.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_evalin_1.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,35 @@ +function bytecode_evalin_1 () + b = evalin ("caller", "caller_a"); + __printf_assert__ ("%f ", b); + + evalin ("caller", "__printf_assert__('yoyo ')"); + + evalin ("base", "__printf_assert__('yobase ')"); + + caller_b = 3; + sub1(); + __printf_assert__ ("%f ", caller_e); + __printf_assert__ ("%f ", eval ("caller_f")); % No slot for caller_f + + caller_c = 11; + bytecode_evalin_2 (); + __printf_assert__ ("%f ", caller_c); % Changes to 33 + + %%% TODO: Can't create a variable with evalin in the treewalker + %%% need to verify it aint working with the VM too. 
+  %% __printf_assert__ ("%f ", caller_d); % Is initialized to 22 + + +end + +function sub1() + b = evalin ("caller", "caller_b"); + __printf_assert__ ("%f ", b); + + evalin ("caller", "__printf_assert__('yoyo2 ')"); + + evalin ("base", "__printf_assert__('yobase2 ')"); + + assignin ("caller", "caller_e", 123); + assignin ("caller", "caller_f", 124); +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_evalin_2.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_evalin_2.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,8 @@ +function bytecode_evalin_2 () + __printf_assert__ ("%f ", evalin ("caller", "caller_c")); + + evalin ("caller", "caller_c = 33;"); + + %% % Can't create a local in the caller in the treewalker + %% evalin ("caller", "caller_d = 22;"); +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_for.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_for.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,188 @@ +function bytecode_for() + ctr = 0; + + for i = 1:4 + __printf_assert__ ("%d ", i); + end + __printf_assert__ ("%d ", i); + + for i = 1:2:5 + __printf_assert__ ("%d ", i); + end + __printf_assert__ ("%d ", i); + + for i = 4:-1:1 + __printf_assert__ ("%d ", i); + end + __printf_assert__ ("%d ", i); + + for j = 1:4 + break + end + __printf_assert__ ("%d ", j); + + for j = 1:4 + continue + end + __printf_assert__ ("%d ", j); + + for j = 1:4 + if j == 2 + break + end + end + __printf_assert__ ("%d ", j); + + for j = 1:4 + if j == 2 + break + else + continue + end + end + __printf_assert__ ("%d ", j); + + ctr = 0; + for i = 1:4 + for j = 1:4 + ctr++; + end + end + __printf_assert__ ("%d ", ctr); + + ctr = 0; + for i = 1:4 + if i == 2 + continue + end + + for j = 1:4 + if j == 2 + continue + end + + ctr++; + + if j == 3 + break + end + end + + if i == 3 + break + end + end + __printf_assert__ ("%d ", ctr); + __printf_assert__ ("%d ", i); + __printf_assert__ ("%d ", j); + + ctr = 0; + for i = 1:2 + for j = 1:2 + for k = 1:2 + for l = 1:2 + for m = 1:2 + for n = 1:2 + for o = 1:2 + for p = 1:2 + ctr++; + end + end + end + end + end + end + end + end + __printf_assert__ ("%d ", ctr); + + for i = 1:3 + end + __printf_assert__ ("%d ", i); + + % scalar range, only executed once + for i = 2 + __printf_assert__ ("%d ", i); + end + + n = 1; + for i = 1:n + __printf_assert__ ("%d ", i); + end + + n = 1; + for i = 2:n + __printf_assert__ ("boo"); + end + __printf_assert__ ("%d ", i); + __printf_assert__ ("%s ", class (i)); + + % Matrix + M = [1 2; 3 4]; + for i = M + __printf_assert__ ("%d ", i); + __printf_assert__ ("size %d ", size (i)); + end + + for i = [] + __printf_assert__ ("boo"); + end + __printf_assert__ ("%d ", i); + __printf_assert__ ("%s ", class (i)); + + + n = 'qwe'; + for i = n + __printf_assert__ ("%s ", i); + __printf_assert__ ("size %d ", size (i)); + end + __printf_assert__ ("%s ", class (i)); + + % The iteration variable is a double + % ??? Changed in someone's patch? TODO: Bug? + for i = single(1):single(3) + if i == 1 + __printf_assert__ ("%s ", class (i)); + end + end + + % ... unless rhs is a scalar ... 
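+  % (so class (i) printed after the loop below is expected to stay single)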
+  for i = single (1) + end + __printf_assert__ ("%s ", class (i)); + + % Test return from for loop (need to pop native integers from stack) + __printf_assert__ ("%d ", foo ()); + + % Iterate over struct + for s = struct ("a", {"1", "2"}, "b", {"11", "12"}) + __printf_assert__ ("%s %s ", s.a, s.b); + end + + % Complex for loop + + x.a = 1; + x.b = [1, 2; 3, 4]; + x.c = "string"; + for [val, key] = x + __printf_assert__ ("key:%s ", key) + if isa(val, "char") + __printf_assert__ ("val:%s ", val) + else + __printf_assert__ ("val:%d %d", val, size(val)) + end + endfor + + for [val, key] = struct () + __printf_assert__ ("boo"); + end +end + + +function i = foo () + for i = 1:10 + if i == 5 + return + end + end +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_global_1.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_global_1.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,201 @@ +% TODO: When clear and dynamic stack work, test +% that behaviour is the same when clearing globals +% in another function while they are on the stack in the +% caller. + +function bytecode_global_1 () + % General test. a and b are also read and verified in the .tst test file + global a b + __printf_assert__ ("%s ", class (a)); + __printf_assert__ ("%d ", size (a)); + + __printf_assert__ ("%d ", length (who ('global','a'))); + __printf_assert__ ("%d ", length (who ('global','b'))); + __printf_assert__ ("%d ", isglobal ('a')); + __printf_assert__ ("%d ", isglobal ('b')); + + a = 1; + __printf_assert__ ("%d ", a); + b = 2; + __printf_assert__ ("%d ", b); + + a = b; + __printf_assert__ ("%d ", a); + __printf_assert__ ("%d ", b); + b = 100; + a = 3 * b + max (a, b); + __printf_assert__ ("%d ", a); + __printf_assert__ ("%d ", b); + + % Test that we can make globals in subfunctions + global e % sub1 needs a global "e" + e = 11; + sub1 (1); + + __printf_assert__ ("%d ", isglobal ('a')); + __printf_assert__ ("%d ", isglobal ('b')); + __printf_assert__ ("%d ", a); + __printf_assert__ ("%d ", b); + + sub1 (0); + + __printf_assert__ ("%d ", isglobal ('a')); + __printf_assert__ ("%d ", isglobal ('b')); + __printf_assert__ ("%d ", a); + __printf_assert__ ("%d ", b); + + % Declare global, clear it, use identifier as local, declare + % it as global ... 
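+  % This exercises re-binding the same identifier between global and local storage within a single frame.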
+  global c + __printf_assert__ ("%d ", length (who ('global','c'))); + clear global c; + __printf_assert__ ("%d ", length (who ('global','c'))); + + c = 2; + __printf_assert__ ("%d ", c); + __printf_assert__ ("%s ", class(c)); + + global c + __printf_assert__ ("%d ", c); + __printf_assert__ ("%s ", class(c)); + + % Subassign + global f + f = [1 2 3 4 5]; + f(3) = 6; + f(1) = 11; + __printf_assert__ ("%d ", f); + __printf_assert__ ("%s ", class(f)); + __printf_assert__ ("%d ", size (f)); + + % Multiassign + global g h + [g h f] = returns3 (); + __printf_assert__ ("%d ", g); + __printf_assert__ ("%s ", class(g)); + __printf_assert__ ("%d ", size (g)); + __printf_assert__ ("%d ", h); + __printf_assert__ ("%s ", class(h)); + __printf_assert__ ("%d ", size (h)); + __printf_assert__ ("%d ", f); + __printf_assert__ ("%s ", class(f)); + __printf_assert__ ("%d ", size (f)); + + % Init expression + global k = 3; + __printf_assert__ ("%d ", k); + __printf_assert__ ("%s ", class(k)); + __printf_assert__ ("%d ", size (k)); + + global l = 4 m = max (10,9) n = [2,3] o = k; + __printf_assert__ ("%d ", l); + __printf_assert__ ("%s ", class(l)); + __printf_assert__ ("%d ", size (l)); + __printf_assert__ ("%d ", m); + __printf_assert__ ("%s ", class(m)); + __printf_assert__ ("%d ", size (m)); + __printf_assert__ ("%d ", n); + __printf_assert__ ("%s ", class(n)); + __printf_assert__ ("%d ", size (n)); + __printf_assert__ ("%d ", o); + __printf_assert__ ("%s ", class(o)); + __printf_assert__ ("%d ", size (o)); + + % Init expression for existing local + p = 2; + global p = 3; + __printf_assert__ ("%d ", p); + __printf_assert__ ("%s ", class(p)); + __printf_assert__ ("%d ", size (p)); + % q created in caller already + global q = 4; + __printf_assert__ ("%d ", q); + __printf_assert__ ("%s ", class(q)); + __printf_assert__ ("%d ", size (q)); + + % Reinit does nothing + global r = 7 + global r = 8 + __printf_assert__ ("%d ", r); + __printf_assert__ ("%s ", class(r)); + __printf_assert__ ("%d ", size (r)); + + clear global c + __printf_assert__ ("%d ", length (who ('global','c'))); + + clear global d + clear global e + clear global f + clear global g + clear global h + clear global k + clear global l + clear global m + clear global n + clear global o + clear global p + clear global r +end + +function [q w e] = returns3() + q = 11; + w = 22; + e = 33; +end + + +function sub1(make_global) + % Already defined local, later declared global + d = 3; + __printf_assert__ ("%d ", length(who('global','d'))); + global d + __printf_assert__ ("%d ", length(who('global','d'))); + __printf_assert__ ("%d ", d); + __printf_assert__ ("%s ", class(d)); + __printf_assert__ ("%d ", size (d)); + d = [1 2]; + __printf_assert__ ("%d ", d); + __printf_assert__ ("%s ", class(d)); + __printf_assert__ ("%d ", size (d)); + + % Already defined local, later declared global, + % but where the global already has a value from the caller + e = 4; + __printf_assert__ ("%d ", length(who('global','e'))); + global e + __printf_assert__ ("%d ", length(who('global','e'))); + __printf_assert__ ("%d ", e); + __printf_assert__ ("eclass:%s ", class(e)); + __printf_assert__ ("%d ", size (e)); + e = [3 4]; + __printf_assert__ ("%d ", e); + __printf_assert__ ("%s ", class(e)); + __printf_assert__ ("%d ", size (e)); + + + % Conditionally global a and b + if make_global + global a + global b + + __printf_assert__ ("%d ", a); + __printf_assert__ ("%d ", b); + end + + __printf_assert__ ("%d ", isglobal ('a')); + __printf_assert__ ("%d ", isglobal ('b')); + 
__printf_assert__ ("%d ", length(who('global','a'))); + __printf_assert__ ("%d ", length(who('global','b'))); + + a = 3; + b = 4; + + __printf_assert__ ("%d ", a); + __printf_assert__ ("%d ", b); + + if make_global + a = 5; + b = 6; + end +end + diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_if.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_if.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,142 @@ +function bytecode_if() + ctr = 0; + a = 1; + b = 0; + + if a + __printf_assert__ ("%d ", ctr++); + end + + if a + __printf_assert__ ("%d ", ctr++); + else + __printf_assert__ ("booo "); + end + + if a + __printf_assert__ ("%d ", ctr++); + elseif a + __printf_assert__ ("booo "); + else + __printf_assert__ ("booo "); + end + + if b + __printf_assert__ ("booo ", ctr++); + end + + if b + __printf_assert__ ("booo "); + else + __printf_assert__ ("%d ", ctr++); + end + + if b + __printf_assert__ ("booo "); + elseif b + __printf_assert__ ("booo "); + else + __printf_assert__ ("%d ", ctr++); + end + + if b + __printf_assert__ ("booo "); + elseif a + __printf_assert__ ("%d ", ctr++); + else + __printf_assert__ ("booo "); + end + + if a + if a + if a + if a + if a + if b + __printf_assert__ ("booo "); + else + if a + if a + if b + __printf_assert__ ("booo "); + elseif a + if b + __printf_assert__ ("booo "); + else + __printf_assert__ ("%d ", ctr++); + end + else + __printf_assert__ ("booo "); + end + end + end + end + end + else + __printf_assert__ ("booo "); + end + end + end + end + + if 3 > 2 + __printf_assert__ ("%d ", ctr++); + end + + if [] + __printf_assert__ ("booo "); + end + + if ~b + __printf_assert__ ("%d ", ctr++); + end + + if b + end + + % "Braindead" short circuit + % + % We also check that there is a proper short circuit + if truthy (1) & truthy (2) + __printf_assert__ ("yay1 "); + end + + if falsy (3) & truthy (4) + __printf_assert__ ("booo "); + end + + if falsy (5) & falsy (6) + __printf_assert__ ("booo "); + end + + if truthy (7) & falsy (8) + __printf_assert__ ("booo "); + end + + if truthy (1)| truthy (2) + __printf_assert__ ("yay1 "); + end + + if falsy (3) | truthy (4) + __printf_assert__ ("yay2 "); + end + + if falsy (5) | falsy (6) + __printf_assert__ ("booo "); + end + + if truthy (7) | falsy (8) + __printf_assert__ ("yay3 "); + end +end + +function a = truthy (b) + __printf_assert__ ("%d ", b); + a = 1; +end + +function a = falsy (b) + __printf_assert__ ("%d ", b); + a = 0; +end + diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_index_obj.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_index_obj.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,63 @@ +function bytecode_index_obj () + + % Chained indexing with struct + __printf_assert__ ("%d ", suby1 ().a); + __printf_assert__ ("%d ", suby1.a); % cmd fn call + __printf_assert__ ("%d ", suby1 ().b.a); + __printf_assert__ ("%d ", suby1.b.a); + + % Chained indexing with matrix + __printf_assert__ ("%d ", suby_mat1 ()(2)); + + % Chained indexing with cells + __printf_assert__ ("%s ", class(suby_cell1 ()(2))); + __printf_assert__ ("%d ", size(suby_cell1 ()(2))); + __printf_assert__ ("%d ", suby_cell1 (){1}{3}); + __printf_assert__ ("%d ", suby_cell1 (){1}{3}(1)); + __printf_assert__ ("%d ", suby_cell1 ()(1){1}{2}); + __printf_assert__ ("%d ", suby_cell1{1}{3}); % cmd fn call + + % Dynamic struct field + s = struct; + s.qwe = 22; + s.asd = struct ("qwe", 33); + + __printf_assert__ ("%d ", s.("qwe")); + __printf_assert__ ("%s ", class (s.("asd").("qwe"))); + 
__printf_assert__ ("%d ", s.("asd").("qwe")); + + % Subassign dynamic field + t.qwe = 3; + t.("asd") = 4; + __printf_assert__ ("%d ", t.qwe); + __printf_assert__ ("%d ", t.asd); + + % : and end for eg. foo()(:,end) etc + + % Check that classdef metas can be used to construct a classdef object + h = @sin; + o = matlab.lang.MemoizedFunction (h); + __printf_assert__ ("%s ", class (o)) + + % Check proper argument order + s = [struct struct ; struct struct]; + M = [1:10 ; 11:20]; + s(1,2).a = M; + __printf_assert__ ("%d ", s(1,2).a(1,2)); +end + +function s = suby1() + s = struct; + s.a = 2; + b = struct; + b.a = 3; + s.b = b; +end + +function m = suby_mat1() + m = [1 2 3]; +end + +function c = suby_cell1() + c = {{1 2 3}, 4, 5}; +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_inputname.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_inputname.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,28 @@ +function bytecode_inputname (arg1, arg2) + __printf_assert__ ("%s ", inputname (1, 0)); + __printf_assert__ ("%s ", inputname (1, 1)); + __printf_assert__ ("%s ", inputname (2, 0)); + __printf_assert__ ("%s ", inputname (2, 1)); + + a = 2; + b = 3; + suby (a, b); + suby (a + 1, b * 3); + + % inputname from non-compiled function + inputname_args (a, b); + inputname_args (a + 1, b * 3); +end + +function suby (arg1, arg2) + __printf_assert__ ("%s ", inputname (1, 0)); + __printf_assert__ ("%s ", inputname (1, 1)); + __printf_assert__ ("%s ", inputname (2, 0)); + __printf_assert__ ("%s ", inputname (2, 1)); + + aa = 22; + bb = 33; + % inputname from non-compiled function + inputname_args (aa, bb); + inputname_args (aa + 1, bb * 3); +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_leaks.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_leaks.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,158 @@ +# +# We are looking for leaks of octave_value:s by checking the refrence counts +# in different code paths of the calling convention. +# +function [a b] = bytecode_leaks (c, d) + cc = c; + b = d; + + e = 1+1; + refs_e = __ref_count (e); + suby1(e); + assert (refs_e, __ref_count (e)) + + aa = suby2(e); + assert (refs_e, __ref_count (e)) + + % Test special code path for assigning argument to return value + aa = suby3(e); + assert (refs_e + 1, __ref_count (e)) + aa = 0; + assert (refs_e, __ref_count (e)) + + % varargin + suby4 (e,e,e,e); + assert (refs_e, __ref_count (e)) + suby5 (e,e,e,e); + assert (refs_e, __ref_count (e)) + + % varargout + suby6(e); + assert (refs_e, __ref_count (e)) + [tmp1, tmp2, tmp3, tmp4, tmp5] = suby6(e); + tmp1 = 0; tmp2 = 0; tmp3 = 0; tmp4 = 0; tmp5 = 0; + assert (refs_e, __ref_count (e)) + + suby7(e); + assert (refs_e, __ref_count (e)) + [tmp1, tmp2, tmp3, tmp4, tmp5] = suby6(e); + tmp1 = 0; tmp2 = 0; tmp3 = 0; tmp4 = 0; tmp5 = 0; + assert (refs_e, __ref_count (e)) + + % Call non-vm function + sin (e); + assert (refs_e, __ref_count (e)) + + % Index matrix + m = [1 2 3 4]; + m (e); + assert (refs_e, __ref_count (e)) + + % Ops + tmp1 = -e + e * e - e / e ^ e; + assert (refs_e, __ref_count (e)) + + % Dynamic matrix + m = [1 2 3 e; e 4 5 6]; + m = 0; + assert (refs_e, __ref_count (e)) + + % Cell + m = {1,2,3, e; 4, 5, e, 6}; + m = 0; + assert (refs_e, __ref_count (e)) + + % "command call" with disp + disp ("The disp of e and pi underneath is on purpose. 
There should be a 'e = 2' and 'ans = 3.14...'") + e % Should print "e = 2" + assert (refs_e + 1, __ref_count (e)) % in ans + ans = 0; + assert (refs_e, __ref_count (e)) + + % This will be a function call and should print "ans = 3.14..." + pi + + % no disp + e; + assert (refs_e + 1, __ref_count (e)) % in ans + ans = 0; + assert (refs_e, __ref_count (e)) + + + % Too many or few args + try + suby1 (e,e,e); + catch + end + assert (refs_e, __ref_count (e)) + + try + suby5 (e); + catch + end + assert (refs_e, __ref_count (e)) + + try + m = []; + m(e) + catch + end + assert (refs_e, __ref_count (e)) + + try + m = []; + m(e) = 123; + catch + end + assert (refs_e, __ref_count (e)) + + % eval dynamic stack + suby8 (e); + assert (refs_e, __ref_count (e)) +end + +function suby1 (a) + aa = 1 + a; + bb = a; +end + +function aa = suby2 (a) + aa = 1 + a; + bb = a; +end + +function a = suby3(a) +end + +function b = suby4(varargin) + b = 3 + varargin{1}; + c = varargin{2}; +end + +function b = suby5(a, b, varargin) + b = 3 + varargin{1}; + c = varargin{2}; + d = a; +end + +function varargout = suby6(a) + varargout{1} = 3; + varargout{2} = a; + varargout{3} = 1; + varargout{4} = 1; + varargout{5} = a; + cc = a; +end + +function [aa bb varargout] = suby7(a) + varargout{1} = 3; + varargout{2} = a; + varargout{3} = 1; + aa = 1; + bb = a; + cc = a; +end + +function suby8(a) + eval ("g = a;"); +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_matrix.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_matrix.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,222 @@ +function bytecode_matrix () + % Dynamically built matrices, i.e. those + % that are not built by the parser, with a dynamic value + % inside the initializer. + + a = 1; + b = 2; + c = 3; + d = 4; + A = [a b c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b c d]'; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b; c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [A; A]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = []; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + % Eval order + A = [hi('a') hi('b'); hi('c') hi('d')]; + + % Matrix multiply + A = [a b; c d] * [a b; c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b c d] * [a; b; c; d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a; b; c; d] * [a b c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + % Matrix div + A = [a b c d] / [a b c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b c d] \ [a b c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b; c d] / [a b; c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + % Compound mul herm + A = [a b c d] * [a b c d]'; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + % Compound herm mul + A = [a b; c d]' * [a b; c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + % Compound trans ldiv + A = [a; b; c; d].' \ [a b c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + % Compound trans ldiv + A = [a b; c d].' 
\ [a b; c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + % Matrix add + A = [a b; c d] + [a b; c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b c d] + [a; b; c; d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + % Scalar + A = [a b c d] + 2; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = 2 + [a b c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b c d] - 2; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = 2 * [a b c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b c d] / 2; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = 2 \ [a b c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + % Dot + + A = [a b c d] .* [a b c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b c d] ./ [a b c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b c d] .\ [a b c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b c d] .^ [a b c d]; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b c d] .* [a b c d]'; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b c d] ./ [a b c d]'; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b c d] .\ [a b c d]'; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + A = [a b c d] .^ [a b c d]'; + __printf_assert__ ("%g ", A); + __printf_assert__ ("%g ", size (A)); + + % Matrices with unequal row length + s = "zxc"; + A = ["qweasd"; s]; + __printf_assert__ ("%s ", A); + A = [s s; s]; + __printf_assert__ ("%s ", A); + + % Matrices with more than 255 elements use + % a different op-code. + a = 111; + % note the variable a here + b = [ a 1 2 3 4 5 6 7 8 9 10 ... + 11 12 13 14 15 16 17 18 19 20 ... + 21 22 23 24 25 26 27 28 29 30 ... + 31 32 33 34 35 36 37 38 39 40 ... + 41 42 43 44 45 46 47 48 49 50 ... + 51 52 53 54 55 56 57 58 59 60 ... + 61 62 63 64 65 66 67 68 69 70 ... + 71 72 73 74 75 76 77 78 79 80 ... + 81 82 83 84 85 86 87 88 89 90 ... + 91 92 93 94 95 96 97 98 99 100 ... + 101 102 103 104 105 106 107 108 109 110 ... + 111 112 113 114 115 116 117 118 119 120 ... + 121 122 123 124 125 126 127 128 129 130 ... + 131 132 133 134 135 136 137 138 139 140 ... + 141 142 143 144 145 146 147 148 149 150 ... + 151 152 153 154 155 156 157 158 159 160 ... + 161 162 163 164 165 166 167 168 169 170 ... + 171 172 173 174 175 176 177 178 179 180 ... + 181 182 183 184 185 186 187 188 189 190 ... + 191 192 193 194 195 196 197 198 199 200 ... + 201 202 203 204 205 206 207 208 209 210 ... + 211 212 213 214 215 216 217 218 219 220 ... + 221 222 223 224 225 226 227 228 229 230 ... + 231 232 233 234 235 236 237 238 239 240 ... + 241 242 243 244 245 246 247 248 249 250 ... + 251 252 253 254 255 256 257]; + __printf_assert__ ("%d ", size (b)); + __printf_assert__ ("%d ", sum (b)); + a = 111; + % note the variable a here + b = [ a; 1; 2; 3; 4; 5; 6; 7; 8; 9; 10; ... + 11; 12; 13; 14; 15; 16; 17; 18; 19; 20; ... + 21; 22; 23; 24; 25; 26; 27; 28; 29; 30; ... + 31; 32; 33; 34; 35; 36; 37; 38; 39; 40; ... + 41; 42; 43; 44; 45; 46; 47; 48; 49; 50; ... + 51; 52; 53; 54; 55; 56; 57; 58; 59; 60; ... + 61; 62; 63; 64; 65; 66; 67; 68; 69; 70; ... + 71; 72; 73; 74; 75; 76; 77; 78; 79; 80; ... + 81; 82; 83; 84; 85; 86; 87; 88; 89; 90; ... + 91; 92; 93; 94; 95; 96; 97; 98; 99; 100; ... + 101; 102; 103; 104; 105; 106; 107; 108; 109; 110; ... + 111; 112; 113; 114; 115; 116; 117; 118; 119; 120; ... + 121; 122; 123; 124; 125; 126; 127; 128; 129; 130; ... + 131; 132; 133; 134; 135; 136; 137; 138; 139; 140; ... + 141; 142; 143; 144; 145; 146; 147; 148; 149; 150; ... + 151; 152; 153; 154; 155; 156; 157; 158; 159; 160; ... + 161; 162; 163; 164; 165; 166; 167; 168; 169; 170; ... + 171; 172; 173; 174; 175; 176; 177; 178; 179; 180; ... + 181; 182; 183; 184; 185; 186; 187; 188; 189; 190; ... + 191; 192; 193; 194; 195; 196; 197; 198; 199; 200; ... + 201; 202; 203; 204; 205; 206; 207; 208; 209; 210; ... + 211; 212; 213; 214; 215; 216; 217; 218; 219; 220; ... + 221; 222; 223; 224; 225; 226; 227; 228; 229; 230; ... + 231; 232; 233; 234; 235; 236; 237; 238; 239; 240; ... + 241; 242; 243; 244; 245; 246; 247; 248; 249; 250; ... + 251; 252; 253; 254; 255; 256; 257]; + __printf_assert__ ("%d ", size (b)); + __printf_assert__ ("%d ", sum (b)); + +end + +function i = hi (s) + __printf_assert__ ("%s ", s); + i = 1; +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_misc.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_misc.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,121 @@ +function bytecode_misc () + % Assure "set_internal_variable" values are reset properly + max_stack = max_stack_depth; + set_max_stack_depth_1p (max_stack + 1); + assert (max_stack_depth == max_stack); + + % Check that the WIDE opcode extension works + wide_sub(); + + % Try to run out of allowed number of stack frames + threw_up = false; + try + stack_overflow (max_stack * 2); % Should hit the limit + catch + threw_up = true; + end + + assert (threw_up); + + % Try to run out of VM stack space + % Assure that the VM is running, since we will disable the tree_evaluator's + % stack limit mechanism. 
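+  % With the frame limit raised far beyond reach, the recursion below should instead exhaust the VM's own stack space.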
+ if __vm_is_executing () + absurd_frame_limit = max_stack_depth * 10000; + max_stack_depth (absurd_frame_limit, "local"); + + threw_up = false; + try + stack_overflow (absurd_frame_limit); % Should hit the VM limit + catch + threw_up = true; + end + assert (threw_up); + end +end + +function set_max_stack_depth_1p (x) + max_stack_depth (x + 1, "local"); + assert (max_stack_depth == x + 1); +end + +function stack_overflow (n) + if n != 0 + stack_overflow (n - 1); + end +end + +function wide_sub () + % 512 locals + a001=1; a002=2; a003=3; a004=4; a005=5; a006=6; a007=7; a008=8; a009=9; a010=10; a011=11; a012=12; a013=13; a014=14; a015=15; a016=16; a017=17; a018=18; a019=19; a020=20; a021=21; a022=22; a023=23; a024=24; a025=25; a026=26; a027=27; a028=28; a029=29; a030=30; a031=31; a032=32; a033=33; a034=34; a035=35; a036=36; a037=37; a038=38; a039=39; a040=40; a041=41; a042=42; a043=43; a044=44; a045=45; a046=46; a047=47; a048=48; a049=49; a050=50; a051=51; a052=52; a053=53; a054=54; a055=55; a056=56; a057=57; a058=58; a059=59; a060=60; a061=61; a062=62; a063=63; a064=64; a065=65; a066=66; a067=67; a068=68; a069=69; a070=70; a071=71; a072=72; a073=73; a074=74; a075=75; a076=76; a077=77; a078=78; a079=79; a080=80; a081=81; a082=82; a083=83; a084=84; a085=85; a086=86; a087=87; a088=88; a089=89; a090=90; a091=91; a092=92; a093=93; a094=94; a095=95; a096=96; a097=97; a098=98; a099=99; a100=100; a101=101; a102=102; a103=103; a104=104; a105=105; a106=106; a107=107; a108=108; a109=109; a110=110; a111=111; a112=112; a113=113; a114=114; a115=115; a116=116; a117=117; a118=118; a119=119; a120=120; a121=121; a122=122; a123=123; a124=124; a125=125; a126=126; a127=127; a128=128; a129=129; a130=130; a131=131; a132=132; a133=133; a134=134; a135=135; a136=136; a137=137; a138=138; a139=139; a140=140; a141=141; a142=142; a143=143; a144=144; a145=145; a146=146; a147=147; a148=148; a149=149; a150=150; a151=151; a152=152; a153=153; a154=154; a155=155; a156=156; a157=157; a158=158; a159=159; a160=160; a161=161; a162=162; a163=163; a164=164; a165=165; a166=166; a167=167; a168=168; a169=169; a170=170; a171=171; a172=172; a173=173; a174=174; a175=175; a176=176; a177=177; a178=178; a179=179; a180=180; a181=181; a182=182; a183=183; a184=184; a185=185; a186=186; a187=187; a188=188; a189=189; a190=190; a191=191; a192=192; a193=193; a194=194; a195=195; a196=196; a197=197; a198=198; a199=199; a200=200; a201=201; a202=202; a203=203; a204=204; a205=205; a206=206; a207=207; a208=208; a209=209; a210=210; a211=211; a212=212; a213=213; a214=214; a215=215; a216=216; a217=217; a218=218; a219=219; a220=220; a221=221; a222=222; a223=223; a224=224; a225=225; a226=226; a227=227; a228=228; a229=229; a230=230; a231=231; a232=232; a233=233; a234=234; a235=235; a236=236; a237=237; a238=238; a239=239; a240=240; a241=241; a242=242; a243=243; a244=244; a245=245; a246=246; a247=247; a248=248; a249=249; a250=250; a251=251; a252=252; a253=253; a254=254; a255=255; a256=256; a257=257; a258=258; a259=259; a260=260; a261=261; a262=262; a263=263; a264=264; a265=265; a266=266; a267=267; a268=268; a269=269; a270=270; a271=271; a272=272; a273=273; a274=274; a275=275; a276=276; a277=277; a278=278; a279=279; a280=280; a281=281; a282=282; a283=283; a284=284; a285=285; a286=286; a287=287; a288=288; a289=289; a290=290; a291=291; a292=292; a293=293; a294=294; a295=295; a296=296; a297=297; a298=298; a299=299; a300=300; a301=301; a302=302; a303=303; a304=304; a305=305; a306=306; a307=307; a308=308; a309=309; a310=310; a311=311; a312=312; a313=313; a314=314; 
a315=315; a316=316; a317=317; a318=318; a319=319; a320=320; a321=321; a322=322; a323=323; a324=324; a325=325; a326=326; a327=327; a328=328; a329=329; a330=330; a331=331; a332=332; a333=333; a334=334; a335=335; a336=336; a337=337; a338=338; a339=339; a340=340; a341=341; a342=342; a343=343; a344=344; a345=345; a346=346; a347=347; a348=348; a349=349; a350=350; a351=351; a352=352; a353=353; a354=354; a355=355; a356=356; a357=357; a358=358; a359=359; a360=360; a361=361; a362=362; a363=363; a364=364; a365=365; a366=366; a367=367; a368=368; a369=369; a370=370; a371=371; a372=372; a373=373; a374=374; a375=375; a376=376; a377=377; a378=378; a379=379; a380=380; a381=381; a382=382; a383=383; a384=384; a385=385; a386=386; a387=387; a388=388; a389=389; a390=390; a391=391; a392=392; a393=393; a394=394; a395=395; a396=396; a397=397; a398=398; a399=399; a400=400; a401=401; a402=402; a403=403; a404=404; a405=405; a406=406; a407=407; a408=408; a409=409; a410=410; a411=411; a412=412; a413=413; a414=414; a415=415; a416=416; a417=417; a418=418; a419=419; a420=420; a421=421; a422=422; a423=423; a424=424; a425=425; a426=426; a427=427; a428=428; a429=429; a430=430; a431=431; a432=432; a433=433; a434=434; a435=435; a436=436; a437=437; a438=438; a439=439; a440=440; a441=441; a442=442; a443=443; a444=444; a445=445; a446=446; a447=447; a448=448; a449=449; a450=450; a451=451; a452=452; a453=453; a454=454; a455=455; a456=456; a457=457; a458=458; a459=459; a460=460; a461=461; a462=462; a463=463; a464=464; a465=465; a466=466; a467=467; a468=468; a469=469; a470=470; a471=471; a472=472; a473=473; a474=474; a475=475; a476=476; a477=477; a478=478; a479=479; a480=480; a481=481; a482=482; a483=483; a484=484; a485=485; a486=486; a487=487; a488=488; a489=489; a490=490; a491=491; a492=492; a493=493; a494=494; a495=495; a496=496; a497=497; a498=498; a499=499; a500=500; a501=501; a502=502; a503=503; a504=504; a505=505; a506=506; a507=507; a508=508; a509=509; a510=510; a511=511; a512=512; + + assert (a511 == 511); + assert (a512 == 512); + assert (a400 + a500 == 900); + + % Do some ops to check that WIDE does not mess things up + + % Loop to test specializations and despecializations + for j = 1:4 + b = 3; + c = 4; + d = b * c; + assert (d == 12); + + e = [1 2 3 4]; + two = 2; + if (j == 3) + e = single (e); % despecialization + two = single (two); + end + + assert (e(2) == 2); + assert (e(2) == two); + e(3) = 11; + assert (e(3) == 11); + + assert (e(end) == 4); + assert (e(end - 1) == 11); + + f = [5 6 7 8; 9 10 11 12]; + six = 6; + if (j == 3) + f = single (f); % despecialization + six = single (six); + end + assert (f(1,2) == 6); + assert (f(1,2) == six); + f(1,2) = 7; + assert (f(1,2) == 7); + + g = 0; + if (j == 3) + g = single (g); % despecialization + end + + g++; + assert (g == 1); + ++g; + assert (g == 2); + g += 3; + assert (g == 5); + eval ("assert (g == 5);") + + sum = 0; + for i = 1:3 % WIDE FOR_COND + sum += i; + end + assert (sum == 6); + + s.s = 2; + assert (s.s == 2); + s.w.s = 3; + assert (s.w.s == 3); + end + + + % Check that a001 to a512 have the correct values + for i = 1:512 + eval (sprintf ("assert (a%03d == %d);", i, i)); + end +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_multi_assign.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_multi_assign.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,56 @@ +function bytecode_multi_assign () + A = [1 2; 3 4]; + [a, b] = max (A); + __printf_assert__ ("%d ", a); + __printf_assert__ ("%d ", b); + +
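+ % (max on a matrix works column-wise, so above a == [3 4] and b == [2 2], + % the row indices of the column maxima.)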
+ [a,b,c,d] = foo (); + __printf_assert__ ("%d ", a); + __printf_assert__ ("%d ", b); + __printf_assert__ ("%d ", c); + __printf_assert__ ("%d ", d); + + % Non-ids in lhs + % Eval is used as a cheat here, since the + % rhs needs to know how many lhs values + % there are. + + [e, f.a, g, h.b] = foo (); + __printf_assert__ ("%d ", e); + __printf_assert__ ("%d ", f.a); + __printf_assert__ ("%d ", g); + __printf_assert__ ("%d ", h.b); + + e = [1 2 3]; + g = {1, 2, 3}; + [e(2), f.a, g{2}, h.b] = foo (); + __printf_assert__ ("%d ", e); + __printf_assert__ ("%d ", f.a); + __printf_assert__ ("%d ", g{2}); + __printf_assert__ ("%d ", h.b); + + [e(end), f.a, g{min (100, end)}, h.b] = foo (); + __printf_assert__ ("%d ", e); + __printf_assert__ ("%d ", f.a); + __printf_assert__ ("%d ", g{min (100, end)}); + __printf_assert__ ("%d ", h.b); + + [e(end), f.a, ~, h.b] = foo (); + __printf_assert__ ("%d ", e); + __printf_assert__ ("%d ", f.a); + __printf_assert__ ("%d ", g{end}); + __printf_assert__ ("%d ", h.b); + + + [C{1:2}, D] = {1,2,3}{:}; + __printf_assert__ ("%d ", C{1}); + __printf_assert__ ("%d ", C{2}); + __printf_assert__ ("%d ", D); +end + +function [a,b,c,d] = foo () + a = 1; + b = 2; + c = 3; + d = 4; +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_persistant.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_persistant.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,23 @@ +function bytecode_persistant () + q = 3; + l = 4; + + persistent a = 3; + __printf_assert__ ("a:%d ", a++); + + persistent b; + __printf_assert__ ("b:%d ", b); + __printf_assert__ ("%s ", class (b)); + __printf_assert__ ("%d ", size (b)); + b = 0; + __printf_assert__ ("%d ", b++); + + suby (); + suby (); +end + +function suby () + persistent c = 2; + c++; + __printf_assert__ ("c:%d ", c); +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_range.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_range.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,68 @@ +function bytecode_range () + + % These should be range constants, but I think + % negative limits are not folded to range constants + + a = 1:3; + __printf_assert__ ("%d ", a); + + a = 1:2:6; + __printf_assert__ ("%d ", a); + + a = 1:2:5; + __printf_assert__ ("%d ", a); + + a = 1:0.1:1.4; + __printf_assert__ ("%d ", a); + + a = 1:-0.1:0.7; + __printf_assert__ ("%d ", a); + + a = 7:7; + __printf_assert__ ("%d ", a); + + a = 7:-1:7; + __printf_assert__ ("%d ", a); + + a = 7:-1:8; + __printf_assert__ ("%d ", isempty (a)); + + % Dynamically created with the COLON2 or COLON3 opcodes. + % Colons behave differently when in a command expression; + % they use the COLONX_CMD opcodes. + % + % ??? I don't think the ranges are allocated as matrices + % when used in commands.
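+ % (COLON2 presumably covers the two-operand form base:lim and COLON3 + % the three-operand form base:inc:lim; both forms are exercised below.)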
+ base = 8; + inc = 2; + lim = 11; + + a = base : inc : lim; + __printf_assert__ ("%d ", a); + for i = base : inc : lim + __printf_assert__ ("%d ", i); + end + + a = base : lim; + __printf_assert__ ("%d ", a); + for i = base : lim + __printf_assert__ ("%d ", i); + end + + base = 10; + inc = -2; + lim = 7; + + a = base : inc : lim; + __printf_assert__ ("%d ", a); + for i = base : inc : lim + __printf_assert__ ("%d ", i); + end + + a = -base : -lim; + __printf_assert__ ("%d ", a); + for i = -base : -lim + __printf_assert__ ("%d ", i); + end + +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_return.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_return.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,93 @@ +function bytecode_return () + a = foo (); + __printf_assert__ ("%d ", a); + + bar (1); + bar (0); + + baz (0); + baz (1); + baz (2); + + boz(); + + meh (); + + a = return_1 (); + __printf_assert__ ("%d ", a); + [a b] = return_2 (); + __printf_assert__ ("%d ", a); + __printf_assert__ ("%d ", b); + + % Drop one output variable + a = return_2 (); + __printf_assert__ ("%d ", a); + + % Drop all output variables + return_2 (); + + % Command form call + a = return_1; + __printf_assert__ ("%d ", a); + + [a b] = return_2; + __printf_assert__ ("%d ", a); + __printf_assert__ ("%d ", b); + + a = return_2; + __printf_assert__ ("%d ", a); + + return_2; + + silly(); + silly(2); +end + +function [a b] = silly(i) + __printf_assert__ ("silly "); +end + +function a = return_1 () + a = 1; +end + +function [a b] = return_2 () + a = 1; + b = 2; +end + +function out = foo () + out = 2; + return +end + +function bar (i) + if i + __printf_assert__ ("baaar "); + return + end + + __printf_assert__ ("bääär "); +end + +function out = baz (i) + if i == 0 + __printf_assert__ ("baaaaz "); + return + elseif i == 1 + __printf_assert__ ("bääääz "); + return + end + + __printf_assert__ ("bååååz "); +end + +function boz () + __printf_assert__ ("booz "); + return + __printf_assert__ ("booo "); +end + +function meh () + return +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_struct.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_struct.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,24 @@ +function bytecode_struct () + s = struct ('a', 1, 'b', 2); + __printf_assert__ ("%d ", s.a); + __printf_assert__ ("%d ", s.b); + __printf_assert__ ("%s ", class (s.a)); + __printf_assert__ ("%d ", size (s.a)); + + % Should not mess up the stack + s.a; + + % Test simple assigns + r.a = 3; + + __printf_assert__ ("%s ", class (r)); + __printf_assert__ ("%d ", r.a); + + % Test struct subref on a word-list command call + + __printf_assert__ ("%d ", suby.b); +end + +function a = suby () + a.b = 4; +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_subfuncs.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_subfuncs.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,128 @@ +function bytecode_subfuncs (h_to_anon_fn) + a = foo (); + __printf_assert__ ("%.17g ", a); + b = bar (); + __printf_assert__ ("%.17g ", b); + c = baz (); + __printf_assert__ ("%.17g ", c); + + meh (); + + a = para_ret_same_name (11); + __printf_assert__ ("%.17g ", a); + + % Test default args + a = defaultarg (); + __printf_assert__ ("%.17g ", a); + + a = defaultarg (10); + __printf_assert__ ("%.17g ", a); + + a = defaultarg2 (); + __printf_assert__ ("%.17g ", a); + + a = defaultarg2 (11); + __printf_assert__ ("%.17g ", a); + + a = defaultarg2 (11, 12, 13, 14); + __printf_assert__ ("%.17g ", a);
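+ + % (defaultarg and defaultarg2 are defined at the end of this file with + % defaults such as b = 30 and b = max (4, 5), so the calls above exercise + % both omitted and supplied parameters, including expression defaults.)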
+ + % Magic colon + a = defaultarg2 (11, :, 13, 14); + __printf_assert__ ("%.17g ", a); + + % Function handles + h = @max; + __printf_assert__ ("%d ", h ([1 3])); + __printf_assert__ ("%d ", just_call_handle_with_arg (h, [1 3])); + __printf_assert__ ("%d ", just_call_handle_with_arg_bytecode (h, [1 3])); + + h = @foo; + __printf_assert__ ("%d ", h ()); + __printf_assert__ ("%d ", just_call_handle_with_arg (h)); + __printf_assert__ ("%d ", just_call_handle_with_arg_bytecode (h)); + + % Call an anonymous function from the tst-file + h_to_anon_fn (); + + % Many args and returns + [a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32] = ret32 (); + __printf_assert__ ("%d ", a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32); + __printf_assert__ ("%d ", ret32 ()); % nargout = 1 + ret32 (); % nargout = 0 + [args32{1:32}] = ret32 (); + __printf_assert__ ("%d ", args32{:}); + + [a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32] = ret32take32 (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32); + __printf_assert__ ("%d ", a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32); + + take32 (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32); + + [a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32, ... + a33, a34, a35, a36, a37, a38, a39, a40, a41, a42, a43, a44, a45, a46, a47, a48, a49, a50, a51, a52, a53, a54, a55, a56, a57, a58, a59, a60, a61, a62, a63, a64] = ... + takeXp32retXp32 (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ...
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64); + __printf_assert__ ("%d ", a01, a18, a59, a64); +end + +function [a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32, varargout] = takeXp32retXp32 (b01, b02, b03, b04, b05, b06, b07, b08, b09, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19, b20, b21, b22, b23, b24, b25, b26, b27, b28, b29, b30, b31, b32, varargin) + for i = 1:32 + eval (sprintf ("a%02.f = b%02.f;", i, i)); + end + varargout = varargin; +end + +function take32 (b01, b02, b03, b04, b05, b06, b07, b08, b09, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19, b20, b21, b22, b23, b24, b25, b26, b27, b28, b29, b30, b31, b32) + __printf_assert__ ("take32:"); + __printf_assert__ ("%d ", b01, b02, b03, b04, b05, b06, b07, b08, b09, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19, b20, b21, b22, b23, b24, b25, b26, b27, b28, b29, b30, b31, b32); +end + +function [a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32] = ret32 () + __printf_assert__ ("ret32:"); + for i = 1:32 + eval (sprintf ("a%02.f = %f;", i, i)); + end +end + +function [a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32] = ret32take32 (b01, b02, b03, b04, b05, b06, b07, b08, b09, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19, b20, b21, b22, b23, b24, b25, b26, b27, b28, b29, b30, b31, b32) + for i = 1:32 + eval (sprintf ("a%02.f = b%02.f;", i, i)); + end +end + +function out = just_call_handle_with_arg_bytecode (h, varargin) + out = h (varargin{:}); +end + +function out = foo () + out = 2; +end + +function out = bar () + out = foo (); +end + +function out = baz () + out = bar (); +end + +function meh() +end + +function i = para_ret_same_name (i) +end + +function a = defaultarg (b = 30) + a = b; +end + +function a = defaultarg2 (a = 30, b = max (4, 5), c = [], d = [1 2]) + __printf_assert__ ("%.17g ", a); + __printf_assert__ ("%.17g ", b); + __printf_assert__ ("%.17g ", c); + __printf_assert__ ("%d ", size(c)); + __printf_assert__ ("%s ", class(c)); + __printf_assert__ ("%.17g ", d); + __printf_assert__ ("%d ", size(d)); + __printf_assert__ ("%s ", class(d)); +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_subsasgn.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_subsasgn.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,95 @@ +function bytecode_subsasgn () + A = [1 2; 3 4]; + A(1) = 3; + __printf_assert__ ("%d ", A(1)); + + A(1,2) = 5; + __printf_assert__ ("%d ", A(1,2)); + + A(:,1) = [9;8]; + __printf_assert__ ("%d ", A(:,1)); + + A(:,:) = [11 12; 13 14]; + __printf_assert__ ("%d ", A(:,1)); + + B = [1:10]; + B(7:end) = [77 88 99 1010]; + __printf_assert__ ("%d ", B); + B(4:min (5, end)) = 987; + __printf_assert__ ("%d ", B); + + % Subassign to an undefined variable + C(3,2) = 13; + __printf_assert__ ("%d ", C); + __printf_assert__ ("%s ", class (C)); + __printf_assert__ ("%d ", size (C)); + + % Subassign cells + D = {1,2,3}; + D{1} = 4; + __printf_assert__ ("%d ", D{:}); + __printf_assert__ ("%s ", class (D)); + __printf_assert__ ("%d ", size (D)); + + D{2,3} = {6,7}; + dd = D{2,3}; + __printf_assert__ ("%d ", dd{:}); + __printf_assert__ ("%d ", size (D)); + + E = {1,2,3}; +
+ E(2:3) = {4,5}; + __printf_assert__ ("%d ", E{:}); + + % Use cells as a subscript + M = [1 2 3; 3 4 5; 5 6 7]; + s = {":", [1;2]}; + __printf_assert__ ("%d ", M(s{:})); + M(s{:}) = 7; + __printf_assert__ ("%d ", M(s{:})); + + % Assure that sources are not modified + x = [1 2 3]; + y = x; + y(2) = 3; + __printf_assert__ ("%d %d ", x, y); + + x = {1 2 3}; + y = x; + y{2} = 3; + __printf_assert__ ("%d %d ", x{2}, y{2}); + + % Chained assigns + a.b.c.d = 2; + __printf_assert__ ("%d ", a.b.c.d); + a.("a").c.d = 3; + __printf_assert__ ("%d ", a.a.c.d); + q.w = {{1},{2}}; + __printf_assert__ ("%d ", q.w{1}{1}); + q.w{1} = {3}; + __printf_assert__ ("%d ", q.w{1}{1}); + + z.x.c = [1 2 3]; + __printf_assert__ ("%d ", z.x.c); + z.x.c(:) = 4; + __printf_assert__ ("%d ", z.x.c); + + x = {[1 2 3], [4 5 6]; [8 9 10], [11 12 13]}; + q.y = {{}, {}; {} {}}; + q.y{1, 2} = x; + q.y{1, 2}{2, 1} = 3; + + __printf_assert__ ("%d ", q.y{1, 2}{2, 1}); + __printf_assert__ ("%d ", q.y{1, 2}{1, 2}); + + % += etc + A = [1 2 3 4]; + A(2) += 3; + __printf_assert__ ("%d ", A); + A(3) -= 4; + __printf_assert__ ("%d ", A); + + C = struct (); + C.A = A; + C.A(4) *= 2; + __printf_assert__ ("%d ", C.A); +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_switch.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_switch.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,164 @@ +function bytecode_switch () + a = 2; + switch (a) + case 1 + __printf_assert__ ("boo "); + case 2 + __printf_assert__ ("yay "); + otherwise + __printf_assert__ ("boo "); + end + + switch (a) + case 1 + __printf_assert__ ("boo "); + case 3 + __printf_assert__ ("boo "); + otherwise + __printf_assert__ ("yay2 "); + end + + b = "yay3 "; + switch (b) + case "boo1" + __printf_assert__ ("boo "); + case "yay3 " + __printf_assert__ ("yay3 "); + otherwise + __printf_assert__ ("boo "); + end + + % Cells + a = 2; + switch (a) + case {1,0} + __printf_assert__ ("boo "); + case {2,3} + __printf_assert__ ("yay4 "); + otherwise + __printf_assert__ ("boo "); + end + + switch (a) + case {1,0} + __printf_assert__ ("boo "); + case {3,2} % Swapped + __printf_assert__ ("yay5 "); + otherwise + __printf_assert__ ("boo "); + end + + switch (a) + case {1,0} + __printf_assert__ ("boo "); + case {3,4} + __printf_assert__ ("boo "); + otherwise + __printf_assert__ ("yay6 "); + end + + % Silly + switch (a) + otherwise + __printf_assert__ ("yay7 "); + end + + % Empty + switch (a) + end + + % No default case + a = 2; + switch (a) + case 1 + __printf_assert__ ("boo "); + case 2 + __printf_assert__ ("yay8 "); + end + + switch (a) + case 1 + __printf_assert__ ("boo "); + case 3 + __printf_assert__ ("boo "); + end + + % Return from switch + __printf_assert__ ("%d ", returnfromswitch (1)); + __printf_assert__ ("%d ", returnfromswitch (2)); + __printf_assert__ ("%d ", returnfromswitch (3)); + __printf_assert__ ("%d ", returnfromswitch2 (1)); + __printf_assert__ ("%d ", returnfromswitch2 (2)); + __printf_assert__ ("%d ", returnfromswitch2 (3)); + + % switch with continue + + for i = 1:4 + switch (i) + case 1 + __printf_assert__ ("1:%d ", i); + case 2 + __printf_assert__ ("2:%d ", i); + continue; + case 3 + __printf_assert__ ("3:%d ", i); + otherwise + __printf_assert__ ("breaking:%d ", i); + break; + endswitch + __printf_assert__ ("for-end:%d", i); + end + +end + + +function a = returnfromswitch (b) + switch (b) + case 1 + a = 1; + return + case 2 + a = 2; + return; + otherwise + a = 3; + return + end + + __printf_assert__ ("boo "); +end + +function a = returnfromswitch2 (b)
+ % switches and fors clean the stack at returns + % in a special way, so test that that works properly + % + % The for loops put native ints on the stack, so we can't just pop + % the stack assuming everything is octave values. + % + for i = [1, 2] % Puts int n, int i and the range on the stack + switch (b) % Puts b on the stack + case 10 + return + otherwise + for j = [3, 4] % Puts int n, int i and the range on the stack + __printf_assert__ ("%d ", j); + switch (b) % Puts b on the stack + case 1 + a = 1; + return % pop, popint popint pop, pop, popint popint pop + case 2 + a = 2; + otherwise + a = 3; + return + end + + __printf_assert__ ("%d ", j); + end + end + + __printf_assert__ ("%d ", i); + end + + __printf_assert__ ("yoo "); +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_trycatch.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_trycatch.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,173 @@ +function bytecode_trycatch () + % TODO: Check identifier in error object too + + try + __printf_assert__ ("yay "); + catch + __printf_assert__ ("boo "); + end + + try + __printf_assert__ ("yay2 "); + error ("ooo"); + __printf_assert__ ("boo "); + catch + __printf_assert__ ("yay3 "); + + __printf_assert__ ("%s ", getfield (lasterror (), 'message')); + end + + % Empty body/catch + try + catch + __printf_assert__ ("boo "); + end + + try + catch + end + + try + catch err + end + + try + error("foo"); + catch + end + + % Error object + try + __printf_assert__ ("yay2 "); + error ("ooo2"); + __printf_assert__ ("boo "); + catch err + __printf_assert__ ("yay3 "); + + __printf_assert__ ("%s ", getfield (lasterror (), 'message')); + __printf_assert__ ("%s ", getfield (err, 'message')); + end + + % Nested + try + __printf_assert__ ("yay3 "); + try + __printf_assert__ ("yay4 "); + error ("Nested error"); + __printf_assert__ ("boo "); + catch + __printf_assert__ ("%s ", getfield (lasterror (), 'message')); + try + __printf_assert__ ("yay5 "); + error ("In catch"); + __printf_assert__ ("boo "); + catch + __printf_assert__ ("yay6 "); + __printf_assert__ ("%s ", getfield (lasterror (), 'message')); + end + end + + error ("qwe"); + __printf_assert__ ("boo "); + catch + __printf_assert__ ("yay7 "); + __printf_assert__ ("%s ", getfield (lasterror (), 'message')); + end + + % Unwind subfunction + try + suby (); + catch + __printf_assert__ ("yay8 "); + __printf_assert__ ("%s ", getfield (lasterror (), 'message')); + end + + % Catch undefined id + try + qwe = asd; + __printf_assert__ ("boo "); + catch + __printf_assert__ ("yay9 "); + __printf_assert__ ("%s ", getfield (lasterror (), 'message')); + end + % Catch nonconformant arguments + try + a = [1 2]; + b = [1 2 3]; + c = a * b; + catch + __printf_assert__ ("yay10 "); + __printf_assert__ ("%s ", getfield (lasterror (), 'message')); + end + + % Rethrow + try + try + error ("yoyo"); + catch err + rethrow (err); + __printf_assert__ ("boo "); + end + catch + __printf_assert__ ("yay11 "); + __printf_assert__ ("%s ", getfield (lasterror (), 'message')); + end + + % There are some shenanigans going on popping native ints belonging + % to for-loops' iteration counters, so test that.
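+ % (That is, when an error unwinds out of a for body, the VM must pop the + % loop's native counter and range entries, not just octave values.)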
+ for i = 1:3 + try + error ("foo"); + catch + __printf_assert__ ("yay12 "); + __printf_assert__ ("%s ", getfield (lasterror (), 'message')); + end + end + + % switch statements save the switch value on the stack, so add some switches + % to check that they are unwound properly when nested in fors etc. + + zxc = '1'; + switch zxc + case '1' + for m = 1:3 + end + switch m + case 2 + end + + for i = 1:3 + try + qwe = '1'; + switch qwe + case '1' + for j = 1:3 + asd = '1'; + switch asd + case '1' + error ("foo"); + end + + for k = 1:3 + end + end + end + catch + __printf_assert__ ("yay13 "); + __printf_assert__ ("%s ", getfield (lasterror (), 'message')); + end + end + + for l = 1:2 + end + end + % TODO: Test more types of errors ... +end + +function suby () + for j = 1:2 + for i = 1:3 + error ("Error in subfunction"); + end + end +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_unary.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_unary.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,22 @@ +function bytecode_unary () + a = 1; + a = -a; + __printf_assert__ ("%d ", a); + + c = +4; + b = +c; + __printf_assert__ ("%d ", c); + + a = [1 2; 3 4]'; + __printf_assert__ ("%d ", a); + a = a'; + __printf_assert__ ("%d ", a); + + b = true; + b = ~b; + __printf_assert__ ("%d ", b); + + b = true; + b = !b; + __printf_assert__ ("%d ", b); +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_unwind.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_unwind.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,170 @@ +function bytecode_unwind () + unwind_protect + __printf_assert__ ("yay1 "); + unwind_protect_cleanup + __printf_assert__ ("yay2 "); + end + + try + unwind_protect + error ("e1"); + __printf_assert__ ("boo "); + unwind_protect_cleanup + __printf_assert__ ("yay3 "); + end + catch e + __printf_assert__ ("%s ", getfield (e, 'message')); + end + + suby (1); + suby (2); + + try + suby (3); + catch + end + + suby (4); + suby (5); + suby (6); + + % Breaks and returns that are not executed + % + % The combinations "break and return", "break" and "return" + % take different codepaths in the walker, so we test all + % combinations here.
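+ % + % (The conditions below are never true for i = 1:3, so the break and + % return are compiled but never taken; only the cleanup code runs.)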
+ + unwind_protect + for i = 1:3 + if i == 4 + break + elseif i == 5 + return + end + end + unwind_protect_cleanup + __printf_assert__ ("yay4 "); + end + + % Break only + unwind_protect + for i = 1:3 + if i == 4 + break + end + end + unwind_protect_cleanup + __printf_assert__ ("yay5 "); + end + + % Return only + unwind_protect + for i = 1:3 + if i == 4 + return + end + end + unwind_protect_cleanup + __printf_assert__ ("yay6 "); + end +end + +function suby (a) + if a == 1 + unwind_protect + __printf_assert__ ("subyyay1 "); + return + __printf_assert__ ("boo "); + unwind_protect_cleanup + __printf_assert__ ("subyyay2 "); + end + __printf_assert__ ("boo "); + elseif a == 2 + % Nested unwind protect with return in body + unwind_protect + unwind_protect + __printf_assert__ ("subyyay3 "); + return + __printf_assert__ ("boo "); + unwind_protect_cleanup + __printf_assert__ ("subyyay4 "); + end + __printf_assert__ ("boo "); + unwind_protect_cleanup + __printf_assert__ ("subyyay5 "); + end + __printf_assert__ ("boo "); + elseif a == 3 + % Nested unwind protect with error in body + unwind_protect + unwind_protect + __printf_assert__ ("subyyay6 "); + error foooo + __printf_assert__ ("boo "); + unwind_protect_cleanup + __printf_assert__ ("subyyay7 "); + end + __printf_assert__ ("boo "); + unwind_protect_cleanup + __printf_assert__ ("subyyay8 "); + end + __printf_assert__ ("boo "); + elseif a == 4 + for i = 1:3 + unwind_protect + __printf_assert__ ("subyyay9 "); + break; + __printf_assert__ ("boo "); + unwind_protect_cleanup + __printf_assert__ ("subyyay10 "); + end + __printf_assert__ ("boo "); + end + elseif a == 5 + for i = 1:3 + unwind_protect + __printf_assert__ ("subyyay11 "); + for j = 1:3 + unwind_protect + __printf_assert__ ("subyyay12 "); + break; + __printf_assert__ ("boo "); + unwind_protect_cleanup + __printf_assert__ ("subyyay13 "); + end + __printf_assert__ ("boo "); + end + break; + __printf_assert__ ("boo "); + unwind_protect_cleanup + __printf_assert__ ("subyyay14 "); + end + __printf_assert__ ("boo "); + end + elseif a == 6 + % Mixing unwind protect with for loops. 
+ % error and break + for i = 1:3 + unwind_protect + __printf_assert__ ("subyyay15 "); + try + for j = 1:3 + unwind_protect + __printf_assert__ ("subyyay16 "); + error ('qwe'); + __printf_assert__ ("boo "); + unwind_protect_cleanup + __printf_assert__ ("subyyay17 "); + end + __printf_assert__ ("boo "); + end + catch + break; + end + __printf_assert__ ("boo "); + unwind_protect_cleanup + __printf_assert__ ("subyyay18 "); + end + __printf_assert__ ("boo "); + end + end +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_varargin.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_varargin.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,64 @@ +function bytecode_varargin (b, varargin) + __printf_assert__ ("%d ", varargin{:}); + __printf_assert__ ("%d ", size (varargin)); + + varg1 (1); + varg1 (1,2,3,4); + varg1 (); + + varg2 (1,2,3,4); + varg2 (1); + + cslist = {1,2,3,4}; + varg2 (cslist{:}); + + justnamenotpos (1, 2); + + out = inout (1,2,3,4); + __printf_assert__ ("%d ", out{:}); + + __printf_assert__ ("%d ", nargin); + + % TODO: Check in caller that the return is the same + % b = varargin{:} + + suby (1,2,3); +end + +function varg1 (varargin) + __printf_assert__ ("%d ", varargin{:}); + __printf_assert__ ("%d ", size (varargin)); + __printf_assert__ ("%d ", nargin); +end + +function varg2 (a, varargin) + __printf_assert__ ("%d ", a); + __printf_assert__ ("%d ", varargin{:}); + __printf_assert__ ("%d ", size (varargin)); + __printf_assert__ ("%d ", nargin); + + varg1 (varargin{:}) + varg1 (2, varargin{:}) +end + +function justnamenotpos (varargin, a) + __printf_assert__ ("%d ", a); + __printf_assert__ ("%d ", varargin); + __printf_assert__ ("%d ", nargin); +end + +function [varargin] = inout (varargin) + __printf_assert__ ("%d ", nargin); +end + +function suby(a,b,c) + __printf_assert__ ("%d ", nargin); + + if nargin == 3 + suby (1,2); + elseif nargin == 2 + suby (1); + elseif nargin == 1 + suby (); + end +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_varargout.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_varargout.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,65 @@ +function bytecode_varargout () + + % Just check this works + [a b] = {7, 8}{:}; + __printf_assert__ ("%d %d ", a, b); + + % n varargout values from suby1 (n) + a = suby1 (1); + __printf_assert__ ("%d ", a); + [a b] = suby1 (2); + __printf_assert__ ("%d %d ", a, b); + + % Test that ignored outputs are set properly + % when calling interpreted functions. + % + % return_isargout (n) returns isargout (n) in its + % first output.
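+ % (So a should print 1 when the queried output is requested, e.g. + % isargout (2) with [a b c] on the lhs, and 0 when that output is ignored + % with ~ or out of range, e.g. isargout (4) with only three outputs.)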
+ [a b c] = return_isargout (2); + __printf_assert__ ("%d ", a); + + [a b c] = return_isargout (4); + __printf_assert__ ("%d ", a); + + [a, ~, c] = return_isargout (2); + __printf_assert__ ("%d ", a); + + [a, ~, ~] = return_isargout (2); + __printf_assert__ ("%d ", a); + [a, ~, ~] = return_isargout (1); + __printf_assert__ ("%d ", a); + [a, ~, ~] = return_isargout (3); + __printf_assert__ ("%d ", a); + + [~, ~, ~] = return_isargout (3); + + % Do the same for a vm function + [a b c] = sub_return_isargout (2); + __printf_assert__ ("%d ", a); + + [a b c] = sub_return_isargout (4); + __printf_assert__ ("%d ", a); + + [a, ~, c] = sub_return_isargout (2); + __printf_assert__ ("%d ", a); + + [a, ~, ~] = sub_return_isargout (2); + __printf_assert__ ("%d ", a); + [a, ~, ~] = sub_return_isargout (1); + __printf_assert__ ("%d ", a); + [a, ~, ~] = sub_return_isargout (3); + __printf_assert__ ("%d ", a); + + [~, ~, ~] = sub_return_isargout (3); +end + +function [a b c ] = sub_return_isargout (n) + b = 0; c = 0; + a = isargout (n); +end + +function varargout = suby1(n) + for i = 1:n + varargout{i} = i; + end +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_while.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_while.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,111 @@ +function bytecode_while () + i = 5; + while i + __printf_assert__ ("%d ", i); + i--; + end + + i = 0; + while i < 3 + i++; + end + __printf_assert__ ("%d ", i); + + i = 0; + ctr = 0; + while i++ < 4 + ctr++; + end + __printf_assert__ ("%d ", i); + __printf_assert__ ("%d ", ctr); + + i = 0; + ctr = 0; + while ++i < 4 + ctr++; + end + __printf_assert__ ("%d ", i); + __printf_assert__ ("%d ", ctr); + + i = 0; + ctr = 0; + while i < 4 + i++; + if i == 2 + continue + end + ctr++; + end + __printf_assert__ ("%d ", ctr); + __printf_assert__ ("%d ", i); + + i = 0; + ctr = 0; + while i < 4 + i++; + if i == 2 + break + end + ctr++; + end + __printf_assert__ ("%d ", ctr); + __printf_assert__ ("%d ", i); + + i = 0; + ctr = 0; + while i < 4 + i++; + if i == 2 + continue + elseif i == 3 + break + end + ctr++; + end + __printf_assert__ ("%d ", ctr); + __printf_assert__ ("%d ", i); + + i = 0; + while i < 4 + i++; + if i == 1 + continue + else + break + end + end + __printf_assert__ ("%d ", i); + + ctr = 0; + j = 0; + while j < 2 + i = 0; + while i < 2 + k = 0; + while k < 2 + k++; + ctr++; + end + i++; + end + j++; + end + __printf_assert__ ("%d ", ctr); + + i = 0; + while i++ < 2 + continue + end + __printf_assert__ ("%d ", i); + + i = 0; + while i++ < 2 + break + end + __printf_assert__ ("%d ", i); + + i = 0; + while i++ < 2 + end + __printf_assert__ ("%d ", i); +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/bytecode_wordlistcmd.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/bytecode_wordlistcmd.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,25 @@ +function bytecode_wordlistcmd () + foo A B C; + + bar QWE; + + boz + boz A + boz A B +end + + +function foo (a,b,c) + __printf_assert__ ("%s ", a); + __printf_assert__ ("%s ", b); + __printf_assert__ ("%s ", c); +end + +function [a b] = bar (c) + __printf_assert__ ("%s ", c); + a = 1; + b = 2; +end + +function boz (a,b,c) +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/cdef_foo.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/cdef_foo.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,21 @@ +classdef cdef_foo < handle + properties + msg = ""; + val = 1; + end + methods + function f = cdef_foo(msg) + 
global cdef_foo_ctor_cnt = 0; + f.msg = msg; + cdef_foo_ctor_cnt++; + end + function delete (self) + global cdef_foo_dtor_cnt = 0; + __printf_assert__ ("%d %s ", ++cdef_foo_dtor_cnt, self.msg); + endfunction + function c = plus (a, b) + c = cdef_foo (strcat("sum", a.msg, b.msg)); + c.val = a.val + b.val; + end + endmethods +end diff -r edbe81ee00c5 -r d2de83a80165 test/compile/inputname_args.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/inputname_args.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,6 @@ +function inputname_args (arg1, arg2) + __printf_assert__ ("%s ", inputname (1, 0)); + __printf_assert__ ("%s ", inputname (1, 1)); + __printf_assert__ ("%s ", inputname (2, 0)); + __printf_assert__ ("%s ", inputname (2, 1)); +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/just_call_handle_with_arg.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/just_call_handle_with_arg.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,3 @@ +function a = just_call_handle_with_arg (h, varargin) + a = h (varargin{:}); +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/module.mk --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/module.mk Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,48 @@ +vm_TEST_FILES = \ + %reldir%/bytecode.tst \ + %reldir%/bytecode_ans.m \ + %reldir%/bytecode_assign.m \ + %reldir%/bytecode_binops.m \ + %reldir%/bytecode_anon_handles.m \ + %reldir%/bytecode_cdef_use.m \ + %reldir%/bytecode_cell.m \ + %reldir%/bytecode_dountil.m \ + %reldir%/bytecode_end.m \ + %reldir%/bytecode_errors.m \ + %reldir%/bytecode_eval_1.m \ + %reldir%/bytecode_evalin_1.m \ + %reldir%/bytecode_evalin_2.m \ + %reldir%/bytecode_for.m \ + %reldir%/bytecode_global_1.m \ + %reldir%/bytecode_if.m \ + %reldir%/bytecode_index_obj.m \ + %reldir%/bytecode_inputname.m \ + %reldir%/bytecode_leaks.m \ + %reldir%/bytecode_matrix.m \ + %reldir%/bytecode_misc.m \ + %reldir%/bytecode_multi_assign.m \ + %reldir%/bytecode_persistant.m \ + %reldir%/bytecode_range.m \ + %reldir%/bytecode_return.m \ + %reldir%/bytecode_struct.m \ + %reldir%/bytecode_subfuncs.m \ + %reldir%/bytecode_subsasgn.m \ + %reldir%/bytecode_switch.m \ + %reldir%/bytecode_trycatch.m \ + %reldir%/bytecode_unary.m \ + %reldir%/bytecode_unwind.m \ + %reldir%/bytecode_varargin.m \ + %reldir%/bytecode_varargout.m \ + %reldir%/bytecode_while.m \ + %reldir%/bytecode_wordlistcmd.m \ + %reldir%/cdef_foo.m \ + %reldir%/inputname_args.m \ + %reldir%/just_call_handle_with_arg.m \ + %reldir%/return_isargout.m \ + %reldir%/shutup_operator_test/@double/display.m \ + %reldir%/shutup_operator_test/@logical/display.m \ + %reldir%/shutup_operator_test/bytecode_disp.m \ + %reldir%/shutup_operator_test/bytecode_disp.tst \ + %reldir%/wrongname_fn.m + +TEST_FILES += $(vm_TEST_FILES) diff -r edbe81ee00c5 -r d2de83a80165 test/compile/return_isargout.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/return_isargout.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,5 @@ +function [a b c] = return_isargout (n) + b = 0; + c = 0; + a = isargout (n); +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/shutup_operator_test/@double/display.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/shutup_operator_test/@double/display.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,11 @@ +% For use by bytecode_disp(). 
+% +% Overload display +% +function display (x) + if (inputname(1)) + __printf_assert__ ("%s = %d ", inputname(1), x); + else + __printf_assert__ ("%d ", x); + end +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/shutup_operator_test/@logical/display.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/shutup_operator_test/@logical/display.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,11 @@ +% For use by bytecode_disp(). +% +% Overload display +% +function display (x) + if (inputname(1)) + __printf_assert__ ("%s = %d ", inputname(1), x); + else + __printf_assert__ ("%d ", x); + end +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/shutup_operator_test/bytecode_disp.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/shutup_operator_test/bytecode_disp.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,38 @@ +function bytecode_disp() + % The double and logical types' display is overloaded with a __printf_assert__ + 1 % "ans = 1" + print_dot; + 2 + 3 % "ans = 5" + print_dot; + 0; % + 0 + 0; % + print_dot; + ~4 % "ans = 0" + print_dot; + 2^3 % "ans = 8" + print_dot % + + if 2 + 3 % "ans = 3" + end + print_dot; + x = 4 - 1 % "x = 3" + + print_dot; + [x, y] = deal (1,2) % "x = 1" "y = 2" + print_dot; + [x, ~] = deal (1,2) % "x = 1" + print_dot; + % If all lvalues are black holes, nothing is printed + [~, ~] = deal (1,2) % + print_dot; + [x, ~, ~] = deal (1,2, 3) % "x = 1" + print_dot; + [~, y, ~] = deal (1,2, 3) % "y = 2" + print_dot; + x +end + +function print_dot() + __printf_assert__(". "); +end \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/shutup_operator_test/bytecode_disp.tst --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/shutup_operator_test/bytecode_disp.tst Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,18 @@ +## Test display due to a missing ";" at eol +## +## We are overloading display for double, so we place this test +## in its own folder to avoid messing up double for the other +## tests. + +%!test +%! % Overloading of class methods seems to stick, so we need to clear them since we overload +%! % double's display. Is this a bug ??? +%! clear classes +%! key = "ans = 1 . ans = 5 . . ans = 0 . ans = 8 . ans = 3 . x = 3 . x = 1 y = 2 . x = 1 . . x = 1 . y = 2 . x = 1 "; +%! __compile bytecode_disp clear; +%! bytecode_disp; +%! assert (__prog_output_assert__ (key)); +%! +%! assert (__compile ("bytecode_disp")); +%! bytecode_disp; +%! assert (__prog_output_assert__ (key)); \ No newline at end of file diff -r edbe81ee00c5 -r d2de83a80165 test/compile/wrongname_fn.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compile/wrongname_fn.m Mon Apr 24 20:34:39 2023 +0200 @@ -0,0 +1,3 @@ +function a = rightname_fn (b) + a = b + 1; +end \ No newline at end of file