From 78647a4e621ff874f4de47c98370a9c3b7e8eb92 Mon Sep 17 00:00:00 2001
From: Angel Ezquerra
Date: Mon, 17 Jun 2024 20:33:45 +0200
Subject: [PATCH] Add "format string" support for tensors

This comes in 2 forms:

1. Add a version of the pretty function that takes a format string as
   its input instead of a precision value. Also add a new "showHeader"
   argument to the original pretty function.
   - This new version of pretty lets you specify the format string that
     must be used to display each element. It also adds some new
     tensor-specific format string "tokens" that are used to control how
     tensors are displayed (beyond the format of each element).
2. Add a formatValue procedure that takes a tensor as its first input.
   This makes it possible to control how tensors are displayed in format
   strings, in the exact same way as if you were using the new
   pretty(specifier) procedure.

The special, tensor-specific tokens added by this change are:

- "[:]": Display the tensor as if it were a nim "array". This makes it
  easy to use the representation of a tensor in your own code. No header
  is shown.
- "[]": Same as "[:]" but displays the tensor in a single line. No
  header is shown.
- "<>": Combined with the 2 above (i.e. "<>[:]" or "<>[]") adds a header
  with basic tensor info (type and shape). "<:>" can be used as a
  shortcut for "<>[:]" while "<>" on its own is equivalent to "<>[]".
  Can also be combined with "<>||" (see below).
- "||": "Pretty-print" the tensor without a header. This can also be
  combined with "<>" (i.e. "<>||") to explicitly enable the default
  mode, which is pretty printing with a header.
- 'j': Formats complex values as (A+Bj) like in mathematics. Ignored
  for non-Complex tensors.

Note that these are only used to control how the tensors themselves are
displayed as a whole, and are removed before displaying the individual
elements.
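For illustration, this is the kind of usage these changes enable. The
outputs shown below are the ones exercised by the new tests in
tests/tensor/test_display.nim:

    import arraymancer
    import std / strformat

    let t = arange(-2, 10).reshape(2, 2, 3)
    echo &"{t:[]}"
    # [[[-2, -1, 0], [1, 2, 3]], [[4, 5, 6], [7, 8, 9]]]
    echo &"{t:<:>}"
    # Tensor[int]<2,2,3>:
    # [[[-2, -1, 0],
    #   [1, 2, 3]],
    #  [[4, 5, 6],
    #   [7, 8, 9]]]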
---
 src/arraymancer/tensor/display.nim           | 166 +++++++++++-
 src/arraymancer/tensor/private/p_display.nim | 248 +++++++++++++++--
 tests/tensor/test_display.nim                | 269 ++++++++++++++++++-
 3 files changed, 644 insertions(+), 39 deletions(-)

diff --git a/src/arraymancer/tensor/display.nim b/src/arraymancer/tensor/display.nim
index 0d35f7654..9ed9b013c 100644
--- a/src/arraymancer/tensor/display.nim
+++ b/src/arraymancer/tensor/display.nim
@@ -13,22 +13,160 @@
 # limitations under the License.
 
 import ./private/p_display,
-       ./data_structure,
-       typetraits
+       ./data_structure
+import std / typetraits
 
-proc pretty*[T](t: Tensor[T], precision = -1): string =
-  ## Pretty-print a Tensor with the option to set a custom `precision`
-  ## for float values.
-  var desc = t.type.name & " of shape \"" & $t.shape & "\" on backend \"" & "Cpu" & "\""
-  if t.storage.isNil: # return useful message for uninit'd tensors instead of crashing
-    return "Uninitialized " & $desc
-  if t.size() == 0:
-    return desc & "\n [] (empty)"
-  elif t.rank == 1: # for rank 1 we want an indentation, because we have no `|`
-    return desc & "\n " & t.prettyImpl(precision = precision)
-  else:
-    return desc & "\n" & t.prettyImpl(precision = precision)
+proc pretty*[T](t: Tensor[T], precision: int = -1, showHeader: static bool = true): string =
+  ## Pretty-print a Tensor as a "table" with a given precision and optional header
+  ##
+  ## Pretty-print a Tensor with options to set a custom `precision` for float
+  ## values and to show or hide a header describing the tensor type and shape.
+  ##
+  ## Inputs:
+  ##   - Input Tensor.
+  ##   - precision: The number of decimals printed (for float tensors),
+  ##                _including_ the decimal point.
+  ##   - showHeader: If true (the default), show a description header
+  ##                 indicating the tensor type and shape.
+  ## Result:
+  ##   - A string containing a "pretty-print" representation of the Tensor.
+  ##
+  ## Examples:
+  ## ```nim
+  ## let t = arange(-2.0, 4.0).reshape(2, 3)
+  ##
+  ## echo t.pretty()
+  ## # Tensor[system.float] of shape "[2, 3]" on backend "Cpu"
+  ## # |-2 -1 0|
+  ## # |1 2 3|
+  ##
+  ## # Note that the precision includes the decimal point
+  ## echo t.pretty(2)
+  ## # Tensor[system.float] of shape "[2, 3]" on backend "Cpu"
+  ## # |-2.0 -1.0 0.0|
+  ## # |1.0 2.0 3.0|
+  ## ```
+  const specifier = if showHeader: "" else: "||"
+  t.prettyImpl(precision = precision, specifier = specifier)
+
+proc pretty*[T](t: Tensor[T], specifier: static string = ""): string =
+  ## Pretty-print a Tensor with the option to set a custom format `specifier`
+  ##
+  ## The "format specifier" is similar to those used in format strings, with
+  ## the addition of a few, tensor specific modifiers (shown below).
+  ##
+  ## Inputs:
+  ##   - Input Tensor
+  ##   - specifier: A format specifier similar to those used in format strings,
+  ##                with the addition of a few, tensor specific modifiers which
+  ##                can be combined to achieve different results:
+  ##                - "[:]": Display the tensor as if it were a nim "array".
+  ##                         This makes it easy to use the representation of a
+  ##                         tensor in your own code. No header is shown.
+  ##                - "[]": Same as "[:]" but displays the tensor in a single
+  ##                        line. No header is shown.
+  ##                - "<>": Combined with the 2 above (i.e. "<>[:]" or "<>[]")
+  ##                        adds a header with basic tensor info (type and
+  ##                        shape). "<:>" can be used as a shortcut for "<>[:]"
+  ##                        while "<>" on its own is equivalent to "<>[]".
+  ##                        Can also be combined with "<>||" (see below).
+  ##                - "||": "Pretty-print" the tensor _without_ a header. This
+  ##                        can also be combined with "<>" (i.e. "<>||") to
+  ##                        explicitly enable the default mode, which is pretty
+  ##                        printing with a header.
+  ## Notes:
+  ##   - Note that in addition to these we support all of the standard format
+  ##     specifiers, such as "f", "g", "+", etc (and including, in nim 2.2 and
+  ##     above, the 'j' specifier for complex tensors). For a list of supported
+  ##     format specifiers please check the documentation of nim's `strformat`
+  ##     module.
+  ##   - This version of this function does not have a `showHeader` argument
+  ##     because the header is controlled by the format specifier itself
+  ##     ("||", "[]" and "[:]" hide it, while "<>" and "<:>" show it).
+  ##
+  ## Examples:
+  ## ```nim
+  ## let t_int = arange(-2, 22, 4).reshape(2, 3)
+  ##
+  ## # You can specify a format for the elements in the tensor
+  ## # Note that the default is "pretty-printing" the tensor
+  ## # _and_ showing a header describing its type and shape
+  ## echo t_int.pretty("+05X")
+  ## # Tensor[system.int] of shape "[2, 3]" on backend "Cpu"
+  ## # |-0002 +0002 +0006|
+  ## # |+000A +000E +0012|
+  ##
+  ## # The header can be disabled by using "||"
+  ## echo t_int.pretty("+05X||")
+  ## # |-0002 +0002 +0006|
+  ## # |+000A +000E +0012|
+  ##
+  ## # Use the "[:]" format specifier to print the tensor as a
+  ## # "multi-line array" _without_ a header
+  ## echo t_int.pretty("[:]")
+  ## # [[-2, 2, 6],
+  ## #  [10, 14, 18]]
+  ##
+  ## # Enable the header by adding "<>" (i.e. "<>[:]") or the shorter "<:>"
+  ## echo t_int.pretty("<:>")
+  ## # Tensor[int]<2,3>:
+  ## # [[-2, 2, 6],
+  ## #  [10, 14, 18]]
+  ##
+  ## # The "[]" specifier is similar to "[:]" but prints on a single line
+  ## echo t_int.pretty("[]")
+  ## # [[-2, 2, 6], [10, 14, 18]]
+  ##
+  ## # You can also enable the header using "<>" or "<>[]"
+  ## echo t_int.pretty("<>")
+  ## # Tensor[int]<2,3>:[[-2, 2, 6], [10, 14, 18]]
+  ##
+  ## # You can combine "[]", "[:]", "<>" and "<:>" with a regular format spec:
+  ## let t_float = arange(-2.0, 22.0, 4.0).reshape(2, 3)
+  ##
+  ## echo t_float.pretty("6.2f<:>")
+  ## # Tensor[float]<2,3>:
+  ## # [[ -2.00,   2.00,   6.00],
+  ## #  [ 10.00,  14.00,  18.00]]
+  ## ```
+  t.prettyImpl(precision = -1, specifier = specifier)
 
 proc `$`*[T](t: Tensor[T]): string =
   ## Pretty-print a tensor (when using ``echo`` for example)
   t.pretty()
+
+proc `$$`*[T](t: Tensor[T]): string =
+  ## Print the "elements" of a tensor as a multi-line array preceded by a
+  ## compact header (i.e. using the "<:>" format specifier)
+  t.pretty(specifier = "<:>")
+
+proc `$<>`*[T](t: Tensor[T]): string =
+  ## Print the "elements" of a tensor as a single-line array preceded by a
+  ## compact header (i.e. using the "<>" format specifier)
+  t.pretty(specifier = "<>")
+
+proc formatValue*[T](result: var string, t: Tensor[T], specifier: static string) =
+  ## Standard format implementation for `Tensor`. It makes little
+  ## sense to call this directly, but it is required to exist
+  ## by the `&` macro.
+  ##
+  ## For Tensors, we add some additional specifiers which can be combined to
+  ## achieve different results:
+  ## - "[:]": Display the tensor as if it were a nim "array".
+  ##          This makes it easy to use the representation of a
+  ##          tensor in your own code. No header is shown.
+  ## - "[]": Same as "[:]" but displays the tensor in a single
+  ##         line. No header is shown.
+  ## - "<>": Combined with the 2 above (i.e. "<>[:]" or "<>[]")
+  ##         adds a header with basic tensor info (type and
+  ##         shape). "<:>" can be used as a shortcut for "<>[:]"
+  ##         while "<>" on its own is equivalent to "<>[]".
+  ##         Can also be combined with "<>||" (see below).
+  ## - "||": "Pretty-print" the tensor _without_ a header. This
+  ##         can also be combined with "<>" (i.e. "<>||") to
+  ##         explicitly enable the default mode, which is pretty
+  ##         printing with a header.
+  ## - 'j': Formats complex values as (A+Bj) like in mathematics.
+  ##        Ignored for non-Complex tensors
+  if specifier.len == 0:
+    result.add $t
+  else:
+    result.add t.pretty(specifier = specifier)
diff --git a/src/arraymancer/tensor/private/p_display.nim b/src/arraymancer/tensor/private/p_display.nim
index 7995b8ecd..cbab161fe 100644
--- a/src/arraymancer/tensor/private/p_display.nim
+++ b/src/arraymancer/tensor/private/p_display.nim
@@ -14,8 +14,64 @@
 import ../../private/functional,
        ../higher_order_applymap,
        ../shapeshifting,
        ../data_structure,
-       ../accessors,
-       sequtils, strutils
+       ../accessors
+import std / [sequtils, strutils, strformat, typetraits]
+
+# We use different specifier string "tokens" to set the tensor display mode:
+# - || or <>||: forces "table mode" (without or with header)
+# - [:] or <>[:] (or the equivalent <:>): multi line array mode (w/o or w/ header)
+# - [] or <>[] (or the equivalent <>): single line array mode (w/o or w/ header)
+# If none of those tokens is found use table mode
+#
+func isTensorHeaderEnabled(specifier: string): bool =
+  ## Return true when a header must be shown based on the format specifier
+  # Show a header when "<>" (i.e. the explicit header marker) or "<:>" (i.e. the
+  # combined <>[:] shortcut) is found
+  # Otherwise do not show a header if "||" or "[]" or "[:]" is found
+  # Show a header in all other cases
+  if "<" in specifier and ">" in specifier:
+    # Note that this also covers the "<>[]" and "<>||" cases
+    return true
+  elif specifier.count('|') == 2 or ("[" in specifier and "]" in specifier):
+    return false
+  return true
+
+type tensorDispMode = enum table, multi_line_array, single_line_array
+
+func getTensorDispMode(specifier: string): tensorDispMode =
+  ## Get the display mode based on the special "tensor specifier tokens"
+  let brackets = "[" in specifier and "]" in specifier
+  let colon_brackets = specifier.count(':') == 2
+  let logic_brackets = "<" in specifier and ">" in specifier
+  let flat_brackets = specifier.count('|') == 2
+  if flat_brackets:
+    # Note that "<>||" also implies "table" mode!
+    table
+  elif colon_brackets or
+      (":" in specifier and (brackets or logic_brackets)):
+    # <:> is a shortcut for <>[:]
+    multi_line_array
+  elif brackets or logic_brackets:
+    # <> is a shortcut for <>[] _when_ not combined with ||, ::, [:] or [::]
+    single_line_array
+  else:
+    # In all other cases, default to "table" mode!
+    table
+
+func removeTensorFormatSpecifiers(specifier: string): string =
+  ## Remove the special "tensor specifier tokens"
+  ##
+  ## This must be called _after_ getting the display mode (using
+  ## `getTensorDispMode`) and the "show header" flag (using
+  ## `isTensorHeaderEnabled`), to avoid an error when the specifier
+  ## is finally passed to the tensor element `formatValue` functions
+  result = specifier.replace("::")
+    .replace("[:]")
+    .replace("<:>")
+    .replace("<>")
+    .replace("[]")
+    .replace("||")
 
 func bounds_display(t: Tensor,
                     idx_data: tuple[val: string, idx: int],
@@ -78,17 +134,24 @@
 # let b = toSeq(1..72).toTensor.reshape(2,3,3,4)
 # echo b
 
+func dispElement*[T](value: T, precision = -1, specifier: static string = ""): string =
+  ## Display a single element with the selected precision _or_ specifier format
+  when specifier.len == 0:
+    when T is SomeFloat:
+      result = formatBiggestFloat(value, precision = precision)
+    else:
+      result = $value
+  else:
+    formatValue(result, value, specifier = specifier)
+
 func disp2d*[T](t: Tensor[T],
                 alignBy = 6, alignSpacing = 3,
-                precision = -1): string =
-  ## Display a 2D-tensor
+                precision = -1, specifier: static string = ""): string =
+  ## Display a 2D-tensor (only used for "table", i.e. non-array, printing)
   # Add a position index to each value in the Tensor.
   var indexed_data: seq[(string,int)] = @[]
   for i, value in t.enumerate:
-    when T is SomeFloat:
-      let val = formatBiggestFloat(value, precision = precision)
-    else:
-      let val = $value
+    let val = dispElement(value, precision = precision, specifier = specifier)
     indexed_data.add((val, i+1)) # TODO Note: the $conversion is unstable if the whole test suite is done.
                                  # it fails at the test_load_openmp.
                                  # if done alone there is no issue
@@ -152,17 +215,19 @@ func genLeftIdx(axIdx: string, s: string): string =
     if i < tmp.high - 1:
       result.add "\n"
 
-proc determineLargestElement[T](t: Tensor[T], precision: int): int =
+proc determineLargestElement[T](t: Tensor[T], precision: int, specifier: static string = ""): int =
   ## Determines the length of the "largest" element in the tensor after
   ## string conversion. This is to align our output table nicely.
- when T is SomeFloat: - result = t.map_inline((x.formatBiggestFloat(precision = precision)).len).max - else: - result = t.map_inline(($x).len).max + # when T is SomeFloat: + # result = t.map_inline((x.formatBiggestFloat(precision = precision)).len).max + # else: + # result = t.map_inline(($x).len).max + result = t.map_inline(x.dispElement(precision = precision, specifier = specifier).len).max -proc prettyImpl*[T](t: Tensor[T], - inputRank = 0, alignBy = 0, alignSpacing = 4, - precision = -1): string = +proc dispTensorAsTable*[T](t: Tensor[T], + inputRank = 0, alignBy = 0, alignSpacing = 4, + precision = -1, + specifier: static string = ""): string = ## Pretty printing implementation that aligns N dimensional tensors as a ## table. Odd dimensions are stacked horizontally and even dimensions ## vertically. @@ -174,11 +239,12 @@ proc prettyImpl*[T](t: Tensor[T], ## and all others right aligned). ## ## `precision` sets the floating point precision. + const specifier = removeTensorFormatSpecifiers(specifier) var alignBy = alignBy var inputRank = inputRank if inputRank == 0: inputRank = t.rank - let largestElement = t.determineLargestElement(precision) + let largestElement = t.determineLargestElement(precision, specifier) alignBy = max(6, largestElement + alignSpacing) # for tensors of rank larger 2, walk axis 0 and stack vertically (even dim) # or stack horizontally (odd dim) @@ -189,8 +255,11 @@ proc prettyImpl*[T](t: Tensor[T], for ax in axis(t, 0): if oddRank: # 1. get next "column" - var toZip = prettyImpl(ax.squeeze, inputRank, alignBy = alignBy, - precision = precision) + var toZip = dispTensorAsTable(ax.squeeze, + inputRank, + alignBy = alignBy, + precision = precision, + specifier = specifier) # 2. center current "column" index to width of `toZip`, put on top toZip = center($axIdx, toZip.splitLines[0].len) & "\n" & toZip # 3. generate separator of "columns" and zip together @@ -198,8 +267,11 @@ proc prettyImpl*[T](t: Tensor[T], res = res.zipStrings(toZip, sep = sep, allowEmpty = false) else: # 1. get next "row" - var toStack = prettyImpl(ax.squeeze, inputRank, alignBy = alignBy, - precision = precision) + var toStack = dispTensorAsTable(ax.squeeze, + inputRank, + alignBy = alignBy, + precision = precision, + specifier = specifier) # 2. center current "row" index to height of `toStack` let leftIdx = genLeftIdx($axIdx, toStack) # 3. zip index and "row" @@ -215,4 +287,136 @@ proc prettyImpl*[T](t: Tensor[T], else: result = t.disp2d(alignBy = alignBy, alignSpacing = alignSpacing, - precision = precision).strip + precision = precision, + specifier = specifier).strip + +proc disp1dAsArray[T](t: Tensor[T], + sep = ", ", + precision = -1, specifier: static string = ""): string = + ## Display a 1D-tensor (only used for "array-style", i.e. 
non-table, printing)
+  if t.len == 0:
+    return "[]"
+  result = "["
+  for value in t:
+    result &= dispElement(value, precision = precision, specifier = specifier)
+    result &= sep
+  # Remove the separator from the last element
+  result = result[0..^(1+sep.len)] & "]"
+  when T is Complex and "j" in specifier:
+    result = result.replace("(").replace(")")
+
+proc compactTensorDescription[T](t: Tensor[T]): string =
+  ## Describe the tensor in terms of its shape and type (in a "compact" way)
+  ## Only used for array-style printing
+  # Most if not all tensor element types are part of the system or complex
+  # modules so we can remove them from the type without much information loss
+  let compactType = t.type.name().replace("system.", "").replace("complex.", "")
+  let compactShape = ($t.shape)[1 ..< ^1].replace(", ", ",")
+  result = compactType & "<" & compactShape & ">"
+
+proc squeezeTopDimension[T](t: Tensor[T]): Tensor[T] =
+  ## Remove the top most dimension if its size is 1
+  if t.shape.len == 0 or t.shape[0] > 1:
+    return t
+  var new_shape = t.shape
+  new_shape.delete(0)
+  result = t.reshape(new_shape)
+
+proc dispTensorAsSingleLineArrayImp[T](t: Tensor[T],
+                                       precision = -1,
+                                       specifier: static string = "",
+                                       indentSpacing = 0,
+                                       sep = ", ", rowSep = ""
+                                       ): string =
+  ## Implementation of the "array-style" tensor printing
+  result = "["
+  if t.rank <= 1:
+    result = disp1dAsArray(t, sep = sep, precision = precision, specifier = specifier)
+  else:
+    var n = 0
+    for ax in axis(t, 0):
+      var axRepr = dispTensorAsSingleLineArrayImp(ax.squeezeTopDimension(),
+                                                  precision = precision,
+                                                  specifier = specifier,
+                                                  indentSpacing = indentSpacing,
+                                                  sep = sep, rowSep = rowSep)
+      result &= axRepr
+      n += 1
+      if n < t.shape[0]:
+        result &= sep & rowSep
+    result &= "]"
+
+proc dispTensorAsSingleLineArray*[T](t: Tensor[T],
+                                     precision = -1,
+                                     indentSpacing = 0,
+                                     specifier: static string = "",
+                                     sep = ", ", rowSep = "",
+                                     showHeader = true
+                                     ): string =
+  ## Display a tensor as a single line "array"
+  # Remove the non-standard tensor specifier tokens ("<>", "[]", "[:]", "||")
+  const specifier = removeTensorFormatSpecifiers(specifier)
+  if showHeader:
+    result = t.compactTensorDescription & ":" & rowSep
+  result &= dispTensorAsSingleLineArrayImp(t, precision, specifier = specifier, rowSep = rowSep)
+  if t.storage.isNil:
+    # Return a useful message for uninit'd tensors instead of crashing
+    # Note that this should only happen when displaying tensors created
+    # by just declaring their type (e.g.
`var t: Tensor[int]`), given that + # even tensors created by calling `newTensorUninit` have their storage + # initialized (with garbage) + result &= " (uninitialized)" + +proc indentTensorReprRows(s: string, indent: int): string = + ## Indent the lines of a multi-line "array-style" tensor representation + ## so that the right-most opening braces align vertically + if indent <= 0: + return s + for line in s.splitLines(): + var numBrackets = 0 + for c in line: + if c != '[': + break + numBrackets += 1 + result &= line.indent(indent - numBrackets) & "\n" + +proc dispTensorAsArray*[T](t: Tensor[T], + precision = -1, + specifier: static string = "", + showHeader = true): string = + ## Display a tensor as a multi-line "array" + result = t.dispTensorAsSingleLineArray( + precision = precision, specifier = specifier, rowSep="\n", showHeader = false) + result = indentTensorReprRows(result, t.rank).strip(leading=false) + if showHeader: + result = t.compactTensorDescription() & ":\n" & result + +proc prettyImpl*[T]( + t: Tensor[T], precision: int, specifier: static string): string = + ## Non public implementation of the pretty function + ## Three modes are supported: table, multi-line array and single-line array + let showHeader = isTensorHeaderEnabled(specifier) + let dispMode = getTensorDispMode(specifier) + const specifier = removeTensorFormatSpecifiers(specifier) + if dispMode == single_line_array: + return t.dispTensorAsSingleLineArray( + precision = precision, specifier = specifier, showHeader = showHeader) + elif dispMode == multi_line_array: + return t.dispTensorAsArray( + precision = precision, specifier = specifier, showHeader = showHeader) + # Represent the tensor as a "pretty" table + var desc = t.type.name & " of shape \"" & $t.shape & "\" on backend \"" & "Cpu" & "\"" + if t.storage.isNil: # return useful message for uninit'd tensors instead of crashing + return "Uninitialized " & $desc + if showHeader: + desc &= "\n" + else: + desc = "" + if t.size() == 0: + return desc & " [] (empty)" + elif t.rank == 1: # for rank 1 we want an indentation, because we have no `|` + return desc & " " & t.dispTensorAsTable( + precision = precision, specifier = specifier) + else: + return desc & t.dispTensorAsTable( + precision = precision, specifier = specifier) diff --git a/tests/tensor/test_display.nim b/tests/tensor/test_display.nim index a1e029ed7..17e179a5e 100644 --- a/tests/tensor/test_display.nim +++ b/tests/tensor/test_display.nim @@ -13,7 +13,7 @@ # limitations under the License. 
import ../../src/arraymancer -import std / [unittest, sequtils, strutils] +import std / [unittest, sequtils, strutils, strformat] template compareStrings(t1, t2: string) = let t1S = t1.splitLines @@ -26,9 +26,15 @@ proc main() = suite "Displaying tensors": test "Display invalid tensor": var t: Tensor[int] - let exp = """ + block: + let exp = """ Uninitialized Tensor[system.int] of shape "[]" on backend "Cpu"""" - check $t == exp + check $t == exp + block: + let exp = """ +Tensor[int]<>: +[] (uninitialized)""" + check t.pretty("<:>") == exp test "Display 1D tensor": block: @@ -36,14 +42,24 @@ Uninitialized Tensor[system.int] of shape "[]" on backend "Cpu"""" compareStrings($t, """ Tensor[system.float] of shape "[2]" on backend "Cpu" 0.953293 0.129458""") + compareStrings(t.pretty("<:>"), """ +Tensor[float]<2>: +[0.953293, 0.129458]""") + block: let t = [1, 2, 3, 4].toTensor compareStrings($t, """Tensor[system.int] of shape "[4]" on backend "Cpu" 1 2 3 4""") + compareStrings(t.pretty("<:>"), """ +Tensor[int]<4>: +[1, 2, 3, 4]""") block: let t = ["foo", "bar", "hello world", "baz"].toTensor compareStrings($t, """Tensor[system.string] of shape "[4]" on backend "Cpu" foo bar hello world baz""") + compareStrings(t.pretty("<:>"), """ +Tensor[string]<4>: +[foo, bar, hello world, baz]""") block: # sequence of tensors still look a bit funky var ts = newSeq[Tensor[float]]() @@ -63,6 +79,9 @@ Tensor[system.float] of shape "[2]" on backend "Cpu" compareStrings($t_single_row, """ Tensor[system.int] of shape "[1, 5]" on backend "Cpu" |1 2 3 4 5|""") + compareStrings(t_single_row.pretty("<:>"), """ +Tensor[int]<1,5>: +[[1, 2, 3, 4, 5]]""") let t_single_column = t_single_row.transpose() compareStrings($t_single_column, """ Tensor[system.int] of shape "[5, 1]" on backend "Cpu" @@ -71,6 +90,13 @@ Tensor[system.int] of shape "[5, 1]" on backend "Cpu" | 3| | 4| | 5|""") + compareStrings(t_single_column.pretty("<:>"), """ +Tensor[int]<5,1>: +[[1], + [2], + [3], + [4], + [5]]""") test "Display 2D tensor (multi-column)": const @@ -102,6 +128,13 @@ Tensor[system.int] of shape "[5, 5]" on backend "Cpu" |3 9 27 81 243| |4 16 64 256 1024| |5 25 125 625 3125|""") + compareStrings(t_van.pretty("<:>"), """ +Tensor[int]<5,5>: +[[1, 1, 1, 1, 1], + [2, 4, 8, 16, 32], + [3, 9, 27, 81, 243], + [4, 16, 64, 256, 1024], + [5, 25, 125, 625, 3125]]""") test "Disp3d + Concat + SlicerMut bug with empty tensors": let a = [4, 3, 2, 1, 8, 7, 6, 5].toTensor.reshape(2, 1, 4) @@ -116,6 +149,14 @@ Tensor[system.int] of shape "[2, 3, 4]" on backend "Cpu" |5 6 7 8| |17 18 19 20| |9 10 11 12| |21 22 23 24| """) + compareStrings(t.pretty("<:>"), """ +Tensor[int]<2,3,4>: +[[[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]], + [[13, 14, 15, 16], + [17, 18, 19, 20], + [21, 22, 23, 24]]]""") test "Display 4D tensor": let t = toSeq(1..72).toTensor().reshape(2,3,4,3) @@ -134,6 +175,32 @@ Tensor[system.int] of shape "[2, 3, 4, 3]" on backend "Cpu" |46 47 48| |58 59 60| |70 71 72| -------------------------------------------------- """) + compareStrings(t.pretty("<:>"), """ +Tensor[int]<2,3,4,3>: +[[[[1, 2, 3], + [4, 5, 6], + [7, 8, 9], + [10, 11, 12]], + [[13, 14, 15], + [16, 17, 18], + [19, 20, 21], + [22, 23, 24]], + [[25, 26, 27], + [28, 29, 30], + [31, 32, 33], + [34, 35, 36]]], + [[[37, 38, 39], + [40, 41, 42], + [43, 44, 45], + [46, 47, 48]], + [[49, 50, 51], + [52, 53, 54], + [55, 56, 57], + [58, 59, 60]], + [[61, 62, 63], + [64, 65, 66], + [67, 68, 69], + [70, 71, 72]]]]""") test "Display 4D tensor with float values": let t = linspace(0.0, 
100.0, 72).reshape(2,3,4,3) @@ -154,6 +221,32 @@ Tensor[system.float] of shape "[2, 3, 4, 3]" on backend "Cpu" |63.3803 64.7887 66.1972| |80.2817 81.6901 83.0986| |97.1831 98.5915 100| ----------------------------------------------------------------------------------------------- """) + compareStrings(t.pretty("<:>"), """ +Tensor[float]<2,3,4,3>: +[[[[0, 1.40845, 2.8169], + [4.22535, 5.6338, 7.04225], + [8.4507, 9.85915, 11.2676], + [12.6761, 14.0845, 15.493]], + [[16.9014, 18.3099, 19.7183], + [21.1268, 22.5352, 23.9437], + [25.3521, 26.7606, 28.169], + [29.5775, 30.9859, 32.3944]], + [[33.8028, 35.2113, 36.6197], + [38.0282, 39.4366, 40.8451], + [42.2535, 43.662, 45.0704], + [46.4789, 47.8873, 49.2958]]], + [[[50.7042, 52.1127, 53.5211], + [54.9296, 56.338, 57.7465], + [59.1549, 60.5634, 61.9718], + [63.3803, 64.7887, 66.1972]], + [[67.6056, 69.0141, 70.4225], + [71.831, 73.2394, 74.6479], + [76.0563, 77.4648, 78.8732], + [80.2817, 81.6901, 83.0986]], + [[84.507, 85.9155, 87.3239], + [88.7324, 90.1408, 91.5493], + [92.9577, 94.3662, 95.7746], + [97.1831, 98.5915, 100]]]]""") test "Display 4D tensor with float values and custom precision": let t = linspace(0.0, 100.0, 72).reshape(2,3,4,3) @@ -173,6 +266,32 @@ Tensor[system.float] of shape "[2, 3, 4, 3]" on backend "Cpu" |63.38 64.79 66.20| |80.28 81.69 83.10| |97.18 98.59 100.0| ----------------------------------------------------------------------------- """) + compareStrings(t.pretty("<:>"), """ +Tensor[float]<2,3,4,3>: +[[[[0, 1.40845, 2.8169], + [4.22535, 5.6338, 7.04225], + [8.4507, 9.85915, 11.2676], + [12.6761, 14.0845, 15.493]], + [[16.9014, 18.3099, 19.7183], + [21.1268, 22.5352, 23.9437], + [25.3521, 26.7606, 28.169], + [29.5775, 30.9859, 32.3944]], + [[33.8028, 35.2113, 36.6197], + [38.0282, 39.4366, 40.8451], + [42.2535, 43.662, 45.0704], + [46.4789, 47.8873, 49.2958]]], + [[[50.7042, 52.1127, 53.5211], + [54.9296, 56.338, 57.7465], + [59.1549, 60.5634, 61.9718], + [63.3803, 64.7887, 66.1972]], + [[67.6056, 69.0141, 70.4225], + [71.831, 73.2394, 74.6479], + [76.0563, 77.4648, 78.8732], + [80.2817, 81.6901, 83.0986]], + [[84.507, 85.9155, 87.3239], + [88.7324, 90.1408, 91.5493], + [92.9577, 94.3662, 95.7746], + [97.1831, 98.5915, 100]]]]""") test "Display 5D tensor": let t1 = toSeq(1..144).toTensor().reshape(2,3,4,3,2) @@ -195,6 +314,20 @@ Tensor[system.int] of shape "[2, 3, 4, 3, 2]" on backend "Cpu" |53 54| |59 60| |65 66| |71 72| | |125 126| |131 132| |137 138| |143 144| --------------------------------------------------- | --------------------------------------------------- """) + compareStrings(t1.reshape(2,2,1,3,12).pretty("<:>"), """ +Tensor[int]<2,2,1,3,12>: +[[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], + [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24], + [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36]]], + [[[37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48], + [49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60], + [61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72]]]], + [[[[73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84], + [85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96], + [97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108]]], + [[[109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120], + [121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132], + [133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144]]]]]""") let t2 = toSeq(1..72*3).toTensor().reshape(3,3,4,3,2) compareStrings($t2, """ @@ -238,6 +371,44 @@ Tensor[system.int] of shape "[3, 3, 4, 3, 2]" on backend "Cpu" |1000000052 1000000053| 
|1000000058 1000000059| |1000000064 1000000065| |1000000070 1000000071| | |1000000124 1000000125| |1000000130 1000000131| |1000000136 1000000137| |1000000142 1000000143| | |1000000196 1000000197| |1000000202 1000000203| |1000000208 1000000209| |1000000214 1000000215| ----------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------- """) + compareStrings(t.reshape(2,3,1,6,6).pretty("<:>"), """ +Tensor[int]<2,3,1,6,6>: +[[[[[1000000000, 1000000001, 1000000002, 1000000003, 1000000004, 1000000005], + [1000000006, 1000000007, 1000000008, 1000000009, 1000000010, 1000000011], + [1000000012, 1000000013, 1000000014, 1000000015, 1000000016, 1000000017], + [1000000018, 1000000019, 1000000020, 1000000021, 1000000022, 1000000023], + [1000000024, 1000000025, 1000000026, 1000000027, 1000000028, 1000000029], + [1000000030, 1000000031, 1000000032, 1000000033, 1000000034, 1000000035]]], + [[[1000000036, 1000000037, 1000000038, 1000000039, 1000000040, 1000000041], + [1000000042, 1000000043, 1000000044, 1000000045, 1000000046, 1000000047], + [1000000048, 1000000049, 1000000050, 1000000051, 1000000052, 1000000053], + [1000000054, 1000000055, 1000000056, 1000000057, 1000000058, 1000000059], + [1000000060, 1000000061, 1000000062, 1000000063, 1000000064, 1000000065], + [1000000066, 1000000067, 1000000068, 1000000069, 1000000070, 1000000071]]], + [[[1000000072, 1000000073, 1000000074, 1000000075, 1000000076, 1000000077], + [1000000078, 1000000079, 1000000080, 1000000081, 1000000082, 1000000083], + [1000000084, 1000000085, 1000000086, 1000000087, 1000000088, 1000000089], + [1000000090, 1000000091, 1000000092, 1000000093, 1000000094, 1000000095], + [1000000096, 1000000097, 1000000098, 1000000099, 1000000100, 1000000101], + [1000000102, 1000000103, 1000000104, 1000000105, 1000000106, 1000000107]]]], + [[[[1000000108, 1000000109, 1000000110, 1000000111, 1000000112, 1000000113], + [1000000114, 1000000115, 1000000116, 1000000117, 1000000118, 1000000119], + [1000000120, 1000000121, 1000000122, 1000000123, 1000000124, 1000000125], + [1000000126, 1000000127, 1000000128, 1000000129, 1000000130, 1000000131], + [1000000132, 1000000133, 1000000134, 1000000135, 1000000136, 1000000137], + [1000000138, 1000000139, 1000000140, 1000000141, 1000000142, 1000000143]]], + [[[1000000144, 1000000145, 1000000146, 1000000147, 1000000148, 1000000149], + [1000000150, 1000000151, 1000000152, 1000000153, 1000000154, 1000000155], + [1000000156, 1000000157, 1000000158, 1000000159, 1000000160, 1000000161], + [1000000162, 1000000163, 1000000164, 1000000165, 1000000166, 1000000167], + [1000000168, 1000000169, 1000000170, 1000000171, 1000000172, 1000000173], + [1000000174, 1000000175, 1000000176, 1000000177, 1000000178, 1000000179]]], + [[[1000000180, 1000000181, 1000000182, 1000000183, 1000000184, 1000000185], + [1000000186, 1000000187, 1000000188, 1000000189, 1000000190, 1000000191], + [1000000192, 1000000193, 1000000194, 1000000195, 1000000196, 1000000197], + [1000000198, 1000000199, 1000000200, 1000000201, 1000000202, 1000000203], + [1000000204, 1000000205, 1000000206, 1000000207, 1000000208, 1000000209], + [1000000210, 1000000211, 1000000212, 1000000213, 1000000214, 1000000215]]]]]""") test "Display 5D tensor with string elements": let t = toSeq(1..72).mapIt("Value: " & 
$it).toTensor.reshape(2,3,3,4) @@ -254,12 +425,104 @@ Tensor[system.string] of shape "[2, 3, 3, 4]" on backend "Cpu" |Value: 45 Value: 46 Value: 47 Value: 48| |Value: 57 Value: 58 Value: 59 Value: 60| |Value: 69 Value: 70 Value: 71 Value: 72| -------------------------------------------------------------------------------------------------------------------------------------------------------- """) + compareStrings(t.pretty("<:>"), """ +Tensor[string]<2,3,3,4>: +[[[[Value: 1, Value: 2, Value: 3, Value: 4], + [Value: 5, Value: 6, Value: 7, Value: 8], + [Value: 9, Value: 10, Value: 11, Value: 12]], + [[Value: 13, Value: 14, Value: 15, Value: 16], + [Value: 17, Value: 18, Value: 19, Value: 20], + [Value: 21, Value: 22, Value: 23, Value: 24]], + [[Value: 25, Value: 26, Value: 27, Value: 28], + [Value: 29, Value: 30, Value: 31, Value: 32], + [Value: 33, Value: 34, Value: 35, Value: 36]]], + [[[Value: 37, Value: 38, Value: 39, Value: 40], + [Value: 41, Value: 42, Value: 43, Value: 44], + [Value: 45, Value: 46, Value: 47, Value: 48]], + [[Value: 49, Value: 50, Value: 51, Value: 52], + [Value: 53, Value: 54, Value: 55, Value: 56], + [Value: 57, Value: 58, Value: 59, Value: 60]], + [[Value: 61, Value: 62, Value: 63, Value: 64], + [Value: 65, Value: 66, Value: 67, Value: 68], + [Value: 69, Value: 70, Value: 71, Value: 72]]]]""") + + + test "Format-strings (1D tensor)": + let t = arange(-2, 10) + + # Table-style tensor format strings + compareStrings(&"{t}", """ +Tensor[system.int] of shape "[12]" on backend "Cpu" + -2 -1 0 1 2 3 4 5 6 7 8 9""") + check &"{t}" == &"{t:<>||}" + check &"{t:||}" == " -2 -1 0 1 2 3 4 5 6 7 8 9" + + # Single-line array-style format strings + check &"{t:[]}" == "[-2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]" + check &"{t:<>}" == "Tensor[int]<12>:[-2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]" + check &"{t:<>}" == &"{t:<>[]}" + check &"{t:<>}" == t.pretty("<>") + + # Multi-line array-style format strings + check &"{t:[:]}" == "[-2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]" + check &"{t:<:>}" == "Tensor[int]<12>:\n[-2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]" + check &"{t:<:>}" == &"{t:<>[:]}" + + + test "Format-strings (3D tensor)": + let t = arange(-2, 10).reshape(2, 2, 3) + + # Table-style tensor format strings + compareStrings(&"{t}", """ +Tensor[system.int] of shape "[2, 2, 3]" on backend "Cpu" + 0 1 +|-2 -1 0| |4 5 6| +|1 2 3| |7 8 9| +""") + check &"{t}" == &"{t:<>||}" + compareStrings(&"{t:||}", """ + 0 1 +|-2 -1 0| |4 5 6| +|1 2 3| |7 8 9| +""") + + # Single-line array-style format strings + check &"{t:[]}" == "[[[-2, -1, 0], [1, 2, 3]], [[4, 5, 6], [7, 8, 9]]]" + check &"{t:<>}" == "Tensor[int]<2,2,3>:[[[-2, -1, 0], [1, 2, 3]], [[4, 5, 6], [7, 8, 9]]]" + check &"{t:<>}" == &"{t:<>[]}" + check &"{t:<>}" == t.pretty("<>") + + # Multi-line array-style format strings + compareStrings(&"{t:[:]}", """ +[[[-2, -1, 0], + [1, 2, 3]], + [[4, 5, 6], + [7, 8, 9]]]""") + compareStrings(&"{t:<:>}", """ +Tensor[int]<2,2,3>: +[[[-2, -1, 0], + [1, 2, 3]], + [[4, 5, 6], + [7, 8, 9]]]""") + check &"{t:<:>}" == &"{t:<>[:]}" + + test "Complex format specifiers": + let t_int = arange(-2, 22, 4).reshape(2, 3) + compareStrings(&"{t_int:X<>}", """ +Tensor[int]<2,3>:[[-2, 2, 6], [A, E, 12]]""") + + let t_float = arange(-2.0, 22.0, 4.0).reshape(2, 3) + compareStrings(&"{t_float:+06.2f[:]}", """ +[[-02.00, +02.00, +06.00], + [+10.00, +14.00, +18.00]]""") test "Displaying of unininitialized tensors works": template checkTypes(typ: untyped): untyped = var x: Tensor[typ] var exp = "Uninitialized Tensor[system." 
& astToStr(typ) & """] of shape "[]" on backend "Cpu"""" + var exp_array = "Tensor[" & astToStr(typ) & "]<>:[] (uninitialized)" check $x == exp + check x.pretty("<>") == exp_array checkTypes(int) checkTypes(char) checkTypes(float)