From ed76f63beaaabd8f2f81ac0c1e67e92ee751b749 Mon Sep 17 00:00:00 2001 From: Vindaar Date: Sat, 17 Feb 2024 15:47:37 +0000 Subject: [PATCH] Deploy to GitHub pages --- accessors.html | 1026 ++++ accessors.idx | 44 + accessors_macros_read.html | 477 ++ accessors_macros_read.idx | 2 + accessors_macros_syntax.html | 862 ++++ accessors_macros_syntax.idx | 27 + accessors_macros_write.html | 506 ++ accessors_macros_write.idx | 3 + accuracy_score.html | 450 ++ accuracy_score.idx | 2 + aggregate.html | 1071 +++++ aggregate.idx | 40 + algebra.html | 469 ++ algebra.idx | 4 + algorithms.html | 489 ++ algorithms.idx | 4 + align_unroller.html | 454 ++ align_unroller.idx | 3 + ast_utils.html | 639 +++ ast_utils.idx | 11 + autograd.html | 420 ++ autograd.idx | 1 + autograd_common.html | 752 +++ autograd_common.idx | 19 + auxiliary_blas.html | 476 ++ auxiliary_blas.idx | 5 + auxiliary_lapack.html | 506 ++ auxiliary_lapack.idx | 4 + blas_l3_gemm.html | 455 ++ blas_l3_gemm.idx | 3 + blis.html | 399 ++ blis.idx | 1 + common_error_functions.html | 598 +++ common_error_functions.idx | 14 + compiler_optim_hints.html | 555 +++ compiler_optim_hints.idx | 7 + complex.html | 568 +++ complex.idx | 11 + conv.html | 522 ++ conv.idx | 5 + conv2D.html | 600 +++ conv2D.idx | 8 + cpuinfo_x86.html | 1652 +++++++ cpuinfo_x86.idx | 66 + cross_entropy_losses.html | 547 +++ cross_entropy_losses.idx | 7 + cublas.html | 624 +++ cublas.idx | 9 + cuda.html | 502 ++ cuda.idx | 5 + cuda_global_state.html | 442 ++ cuda_global_state.idx | 3 + cudnn.html | 514 ++ cudnn.idx | 4 + cudnn_conv_interface.html | 651 +++ cudnn_conv_interface.idx | 10 + data_structure.html | 854 ++++ data_structure.idx | 24 + datatypes.html | 906 ++++ datatypes.idx | 28 + dbscan.html | 463 ++ dbscan.idx | 2 + decomposition.html | 614 +++ decomposition.idx | 9 + decomposition_lapack.html | 607 +++ decomposition_lapack.idx | 7 + decomposition_rand.html | 483 ++ decomposition_rand.idx | 2 + deprecate.html | 445 ++ deprecate.idx | 2 + 
display.html | 463 ++ display.idx | 3 + display_cuda.html | 463 ++ display_cuda.idx | 3 + distances.html | 652 +++ distances.idx | 14 + distributions.html | 627 +++ distributions.idx | 12 + dochack.js | 2476 ++++++++++ docutils.css | 823 ++++ dynamic_stack_arrays.html | 985 ++++ dynamic_stack_arrays.idx | 36 + einsum.html | 508 ++ einsum.idx | 12 + embedding.html | 595 +++ embedding.idx | 8 + exporting.html | 592 +++ exporting.idx | 9 + filling_data.html | 444 ++ filling_data.idx | 2 + flatten.html | 533 ++ flatten.idx | 6 + foreach.html | 572 +++ foreach.idx | 7 + foreach_common.html | 521 ++ foreach_common.idx | 6 + foreach_staged.html | 455 ++ foreach_staged.idx | 2 + functional.html | 436 ++ functional.idx | 2 + gates_basic.html | 503 ++ gates_basic.idx | 5 + gates_blas.html | 472 ++ gates_blas.idx | 3 + gates_hadamard.html | 472 ++ gates_hadamard.idx | 3 + gates_reduce.html | 518 ++ gates_reduce.idx | 7 + gates_shapeshifting_concat_split.html | 503 ++ gates_shapeshifting_concat_split.idx | 4 + gates_shapeshifting_views.html | 588 +++ gates_shapeshifting_views.idx | 9 + gcn.html | 583 +++ gcn.idx | 8 + gemm.html | 493 ++ gemm.idx | 5 + gemm_packing.html | 473 ++ gemm_packing.idx | 3 + gemm_prepacked.html | 586 +++ gemm_prepacked.idx | 8 + gemm_tiling.html | 734 +++ gemm_tiling.idx | 23 + gemm_ukernel_avx.html | 537 +++ gemm_ukernel_avx.idx | 5 + gemm_ukernel_avx2.html | 481 ++ gemm_ukernel_avx2.idx | 3 + gemm_ukernel_avx512.html | 649 +++ gemm_ukernel_avx512.idx | 9 + gemm_ukernel_avx_fma.html | 537 +++ gemm_ukernel_avx_fma.idx | 5 + gemm_ukernel_dispatch.html | 471 ++ gemm_ukernel_dispatch.idx | 3 + gemm_ukernel_generator.html | 516 ++ gemm_ukernel_generator.idx | 4 + gemm_ukernel_generic.html | 550 +++ gemm_ukernel_generic.idx | 6 + gemm_ukernel_sse.html | 481 ++ gemm_ukernel_sse.idx | 3 + gemm_ukernel_sse2.html | 593 +++ gemm_ukernel_sse2.idx | 7 + gemm_ukernel_sse4_1.html | 481 ++ gemm_ukernel_sse4_1.idx | 3 + gemm_utils.html | 586 +++ gemm_utils.idx | 8 + 
global_config.html | 481 ++ global_config.idx | 6 + gru.html | 566 +++ gru.idx | 6 + higher_order_applymap.html | 778 +++ higher_order_applymap.idx | 21 + higher_order_foldreduce.html | 616 +++ higher_order_foldreduce.idx | 11 + howto.perceptron.html | 426 ++ howto.type_conversion.html | 363 ++ howto.ufunc.html | 384 ++ imdb.html | 444 ++ imdb.idx | 2 + incl_accessors_cuda.html | 399 ++ incl_accessors_cuda.idx | 1 + incl_higher_order_cuda.html | 399 ++ incl_higher_order_cuda.idx | 1 + incl_kernels_cuda.html | 399 ++ incl_kernels_cuda.idx | 1 + index.html | 6415 +++++++++++++++++++++++++ init.html | 575 +++ init.idx | 7 + init_colmajor.html | 445 ++ init_colmajor.idx | 2 + init_copy_cpu.html | 448 ++ init_copy_cpu.idx | 2 + init_copy_cuda.html | 444 ++ init_copy_cuda.idx | 2 + init_cpu.html | 936 ++++ init_cpu.idx | 29 + init_cuda.html | 508 ++ init_cuda.idx | 5 + init_opencl.html | 508 ++ init_opencl.idx | 5 + initialization.html | 780 +++ initialization.idx | 23 + io.html | 420 ++ io.idx | 1 + io_csv.html | 485 ++ io_csv.idx | 3 + io_hdf5.html | 546 +++ io_hdf5.idx | 9 + io_image.html | 544 +++ io_image.idx | 8 + io_npy.html | 477 ++ io_npy.idx | 3 + io_stream_readers.html | 676 +++ io_stream_readers.idx | 14 + kde.html | 665 +++ kde.idx | 18 + kdtree.html | 646 +++ kdtree.idx | 12 + kmeans.html | 484 ++ kmeans.idx | 4 + lapack.html | 444 ++ lapack.idx | 2 + least_squares.html | 456 ++ least_squares.idx | 2 + least_squares_lapack.html | 453 ++ least_squares_lapack.idx | 2 + linear.html | 581 +++ linear.idx | 8 + linear_algebra.html | 420 ++ linear_algebra.idx | 1 + linear_systems.html | 485 ++ linear_systems.idx | 10 + math_functions.html | 1342 ++++++ math_functions.idx | 65 + math_ops_fusion.html | 484 ++ math_ops_fusion.idx | 7 + maxpool2D.html | 587 +++ maxpool2D.idx | 8 + mean_square_error_loss.html | 475 ++ mean_square_error_loss.idx | 3 + memory.html | 444 ++ memory.idx | 2 + memory_optimization_hints.html | 454 ++ memory_optimization_hints.idx | 3 + 
ml.html | 420 ++ ml.idx | 1 + mnist.html | 541 +++ mnist.idx | 6 + naive_l2_gemv.html | 447 ++ naive_l2_gemv.idx | 2 + nav.css | 138 + neighbors.html | 456 ++ neighbors.idx | 2 + nested_containers.html | 505 ++ nested_containers.idx | 7 + nimdoc.out.css | 1026 ++++ nlp.html | 420 ++ nlp.idx | 1 + nn.html | 420 ++ nn.idx | 1 + nn_dsl.html | 474 ++ nn_dsl.idx | 2 + nn_primitives.html | 420 ++ nn_primitives.idx | 1 + nnp_activation.html | 599 +++ nnp_activation.idx | 10 + nnp_conv2d_cudnn.html | 498 ++ nnp_conv2d_cudnn.idx | 3 + nnp_convolution.html | 540 +++ nnp_convolution.idx | 6 + nnp_embedding.html | 494 ++ nnp_embedding.idx | 3 + nnp_gru.html | 663 +++ nnp_gru.idx | 7 + nnp_linear.html | 496 ++ nnp_linear.idx | 7 + nnp_maxpooling.html | 475 ++ nnp_maxpooling.idx | 3 + nnp_numerical_gradient.html | 461 ++ nnp_numerical_gradient.idx | 4 + nnp_sigmoid_cross_entropy.html | 487 ++ nnp_sigmoid_cross_entropy.idx | 3 + nnp_softmax.html | 451 ++ nnp_softmax.idx | 2 + nnp_softmax_cross_entropy.html | 556 +++ nnp_softmax_cross_entropy.idx | 5 + nnpack.html | 1243 +++++ nnpack.idx | 27 + nnpack_interface.html | 472 ++ nnpack_interface.idx | 3 + opencl_backend.html | 556 +++ opencl_backend.idx | 9 + opencl_global_state.html | 455 ++ opencl_global_state.idx | 4 + openmp.html | 587 +++ openmp.idx | 9 + operators_blas_l1.html | 773 +++ operators_blas_l1.idx | 26 + operators_blas_l1_cuda.html | 609 +++ operators_blas_l1_cuda.idx | 12 + operators_blas_l1_opencl.html | 657 +++ operators_blas_l1_opencl.idx | 20 + operators_blas_l2l3.html | 553 +++ operators_blas_l2l3.idx | 11 + operators_blas_l2l3_cuda.html | 444 ++ operators_blas_l2l3_cuda.idx | 2 + operators_blas_l2l3_opencl.html | 444 ++ operators_blas_l2l3_opencl.idx | 2 + operators_broadcasted.html | 1128 +++++ operators_broadcasted.idx | 46 + operators_broadcasted_cuda.html | 862 ++++ operators_broadcasted_cuda.idx | 29 + operators_broadcasted_opencl.html | 597 +++ operators_broadcasted_opencl.idx | 9 + 
operators_comparison.html | 994 ++++ operators_comparison.idx | 34 + operators_logical.html | 521 ++ operators_logical.idx | 5 + optim_ops_fusion.html | 533 ++ optim_ops_fusion.idx | 5 + optimizers.html | 778 +++ optimizers.idx | 18 + overload.html | 435 ++ overload.idx | 2 + p_accessors.html | 744 +++ p_accessors.idx | 20 + p_accessors_macros_desugar.html | 444 ++ p_accessors_macros_desugar.idx | 2 + p_accessors_macros_read.html | 651 +++ p_accessors_macros_read.idx | 18 + p_accessors_macros_write.html | 672 +++ p_accessors_macros_write.idx | 16 + p_activation.html | 435 ++ p_activation.idx | 2 + p_checks.html | 772 +++ p_checks.idx | 22 + p_complex.html | 513 ++ p_complex.idx | 7 + p_display.html | 469 ++ p_display.idx | 3 + p_empty_tensors.html | 477 ++ p_empty_tensors.idx | 3 + p_init_cuda.html | 464 ++ p_init_cuda.idx | 4 + p_init_opencl.html | 463 ++ p_init_opencl.idx | 4 + p_kernels_interface_cuda.html | 643 +++ p_kernels_interface_cuda.idx | 11 + p_kernels_interface_opencl.html | 520 ++ p_kernels_interface_opencl.idx | 5 + p_logsumexp.html | 499 ++ p_logsumexp.idx | 6 + p_nnp_checks.html | 444 ++ p_nnp_checks.idx | 2 + p_nnp_types.html | 509 ++ p_nnp_types.idx | 5 + p_operator_blas_l2l3.html | 491 ++ p_operator_blas_l2l3.idx | 4 + p_shapeshifting.html | 640 +++ p_shapeshifting.idx | 13 + pca.html | 597 +++ pca.idx | 5 + relu.html | 474 ++ relu.idx | 3 + selectors.html | 742 +++ selectors.idx | 22 + sequninit.html | 435 ++ sequninit.idx | 2 + shapeshifting.html | 1046 ++++ shapeshifting.idx | 32 + shapeshifting_cuda.html | 604 +++ shapeshifting_cuda.idx | 10 + shapeshifting_opencl.html | 450 ++ shapeshifting_opencl.idx | 2 + sigmoid.html | 474 ++ sigmoid.idx | 3 + simd.html | 3940 +++++++++++++++ simd.idx | 175 + softmax.html | 474 ++ softmax.idx | 3 + solve_lapack.html | 446 ++ solve_lapack.idx | 2 + special_matrices.html | 846 ++++ special_matrices.idx | 22 + stats.html | 449 ++ stats.idx | 2 + std_version_types.html | 399 ++ std_version_types.idx | 1 + 
syntactic_sugar.html | 492 ++ syntactic_sugar.idx | 3 + tanh.html | 474 ++ tanh.idx | 3 + tensor.html | 420 ++ tensor.idx | 1 + tensor_compare_helper.html | 444 ++ tensor_compare_helper.idx | 2 + tensor_cuda.html | 420 ++ tensor_cuda.idx | 1 + tensor_opencl.html | 420 ++ tensor_opencl.idx | 1 + theindex.html | 6415 +++++++++++++++++++++++++ tokenizers.html | 445 ++ tokenizers.idx | 2 + triangular.html | 521 ++ triangular.idx | 5 + tuto.aggregate_stats.html | 364 ++ tuto.broadcasting.html | 376 ++ tuto.first_steps.html | 493 ++ tuto.iterators.html | 400 ++ tuto.linear_algebra.html | 378 ++ tuto.map_reduce.html | 384 ++ tuto.shapeshifting.html | 420 ++ tuto.slicing.html | 557 +++ ufunc.html | 1589 ++++++ ufunc.idx | 96 + uth.copy_semantics.html | 369 ++ uth.opencl_cuda_nim.html | 580 +++ uth.speed.html | 409 ++ util.html | 456 ++ util.idx | 3 + 398 files changed, 135970 insertions(+) create mode 100644 accessors.html create mode 100644 accessors.idx create mode 100644 accessors_macros_read.html create mode 100644 accessors_macros_read.idx create mode 100644 accessors_macros_syntax.html create mode 100644 accessors_macros_syntax.idx create mode 100644 accessors_macros_write.html create mode 100644 accessors_macros_write.idx create mode 100644 accuracy_score.html create mode 100644 accuracy_score.idx create mode 100644 aggregate.html create mode 100644 aggregate.idx create mode 100644 algebra.html create mode 100644 algebra.idx create mode 100644 algorithms.html create mode 100644 algorithms.idx create mode 100644 align_unroller.html create mode 100644 align_unroller.idx create mode 100644 ast_utils.html create mode 100644 ast_utils.idx create mode 100644 autograd.html create mode 100644 autograd.idx create mode 100644 autograd_common.html create mode 100644 autograd_common.idx create mode 100644 auxiliary_blas.html create mode 100644 auxiliary_blas.idx create mode 100644 auxiliary_lapack.html create mode 100644 auxiliary_lapack.idx create mode 100644 blas_l3_gemm.html 
create mode 100644 blas_l3_gemm.idx create mode 100644 blis.html create mode 100644 blis.idx create mode 100644 common_error_functions.html create mode 100644 common_error_functions.idx create mode 100644 compiler_optim_hints.html create mode 100644 compiler_optim_hints.idx create mode 100644 complex.html create mode 100644 complex.idx create mode 100644 conv.html create mode 100644 conv.idx create mode 100644 conv2D.html create mode 100644 conv2D.idx create mode 100644 cpuinfo_x86.html create mode 100644 cpuinfo_x86.idx create mode 100644 cross_entropy_losses.html create mode 100644 cross_entropy_losses.idx create mode 100644 cublas.html create mode 100644 cublas.idx create mode 100644 cuda.html create mode 100644 cuda.idx create mode 100644 cuda_global_state.html create mode 100644 cuda_global_state.idx create mode 100644 cudnn.html create mode 100644 cudnn.idx create mode 100644 cudnn_conv_interface.html create mode 100644 cudnn_conv_interface.idx create mode 100644 data_structure.html create mode 100644 data_structure.idx create mode 100644 datatypes.html create mode 100644 datatypes.idx create mode 100644 dbscan.html create mode 100644 dbscan.idx create mode 100644 decomposition.html create mode 100644 decomposition.idx create mode 100644 decomposition_lapack.html create mode 100644 decomposition_lapack.idx create mode 100644 decomposition_rand.html create mode 100644 decomposition_rand.idx create mode 100644 deprecate.html create mode 100644 deprecate.idx create mode 100644 display.html create mode 100644 display.idx create mode 100644 display_cuda.html create mode 100644 display_cuda.idx create mode 100644 distances.html create mode 100644 distances.idx create mode 100644 distributions.html create mode 100644 distributions.idx create mode 100644 dochack.js create mode 100644 docutils.css create mode 100644 dynamic_stack_arrays.html create mode 100644 dynamic_stack_arrays.idx create mode 100644 einsum.html create mode 100644 einsum.idx create mode 100644 
embedding.html create mode 100644 embedding.idx create mode 100644 exporting.html create mode 100644 exporting.idx create mode 100644 filling_data.html create mode 100644 filling_data.idx create mode 100644 flatten.html create mode 100644 flatten.idx create mode 100644 foreach.html create mode 100644 foreach.idx create mode 100644 foreach_common.html create mode 100644 foreach_common.idx create mode 100644 foreach_staged.html create mode 100644 foreach_staged.idx create mode 100644 functional.html create mode 100644 functional.idx create mode 100644 gates_basic.html create mode 100644 gates_basic.idx create mode 100644 gates_blas.html create mode 100644 gates_blas.idx create mode 100644 gates_hadamard.html create mode 100644 gates_hadamard.idx create mode 100644 gates_reduce.html create mode 100644 gates_reduce.idx create mode 100644 gates_shapeshifting_concat_split.html create mode 100644 gates_shapeshifting_concat_split.idx create mode 100644 gates_shapeshifting_views.html create mode 100644 gates_shapeshifting_views.idx create mode 100644 gcn.html create mode 100644 gcn.idx create mode 100644 gemm.html create mode 100644 gemm.idx create mode 100644 gemm_packing.html create mode 100644 gemm_packing.idx create mode 100644 gemm_prepacked.html create mode 100644 gemm_prepacked.idx create mode 100644 gemm_tiling.html create mode 100644 gemm_tiling.idx create mode 100644 gemm_ukernel_avx.html create mode 100644 gemm_ukernel_avx.idx create mode 100644 gemm_ukernel_avx2.html create mode 100644 gemm_ukernel_avx2.idx create mode 100644 gemm_ukernel_avx512.html create mode 100644 gemm_ukernel_avx512.idx create mode 100644 gemm_ukernel_avx_fma.html create mode 100644 gemm_ukernel_avx_fma.idx create mode 100644 gemm_ukernel_dispatch.html create mode 100644 gemm_ukernel_dispatch.idx create mode 100644 gemm_ukernel_generator.html create mode 100644 gemm_ukernel_generator.idx create mode 100644 gemm_ukernel_generic.html create mode 100644 gemm_ukernel_generic.idx create mode 
100644 gemm_ukernel_sse.html create mode 100644 gemm_ukernel_sse.idx create mode 100644 gemm_ukernel_sse2.html create mode 100644 gemm_ukernel_sse2.idx create mode 100644 gemm_ukernel_sse4_1.html create mode 100644 gemm_ukernel_sse4_1.idx create mode 100644 gemm_utils.html create mode 100644 gemm_utils.idx create mode 100644 global_config.html create mode 100644 global_config.idx create mode 100644 gru.html create mode 100644 gru.idx create mode 100644 higher_order_applymap.html create mode 100644 higher_order_applymap.idx create mode 100644 higher_order_foldreduce.html create mode 100644 higher_order_foldreduce.idx create mode 100644 howto.perceptron.html create mode 100644 howto.type_conversion.html create mode 100644 howto.ufunc.html create mode 100644 imdb.html create mode 100644 imdb.idx create mode 100644 incl_accessors_cuda.html create mode 100644 incl_accessors_cuda.idx create mode 100644 incl_higher_order_cuda.html create mode 100644 incl_higher_order_cuda.idx create mode 100644 incl_kernels_cuda.html create mode 100644 incl_kernels_cuda.idx create mode 100644 index.html create mode 100644 init.html create mode 100644 init.idx create mode 100644 init_colmajor.html create mode 100644 init_colmajor.idx create mode 100644 init_copy_cpu.html create mode 100644 init_copy_cpu.idx create mode 100644 init_copy_cuda.html create mode 100644 init_copy_cuda.idx create mode 100644 init_cpu.html create mode 100644 init_cpu.idx create mode 100644 init_cuda.html create mode 100644 init_cuda.idx create mode 100644 init_opencl.html create mode 100644 init_opencl.idx create mode 100644 initialization.html create mode 100644 initialization.idx create mode 100644 io.html create mode 100644 io.idx create mode 100644 io_csv.html create mode 100644 io_csv.idx create mode 100644 io_hdf5.html create mode 100644 io_hdf5.idx create mode 100644 io_image.html create mode 100644 io_image.idx create mode 100644 io_npy.html create mode 100644 io_npy.idx create mode 100644 
io_stream_readers.html create mode 100644 io_stream_readers.idx create mode 100644 kde.html create mode 100644 kde.idx create mode 100644 kdtree.html create mode 100644 kdtree.idx create mode 100644 kmeans.html create mode 100644 kmeans.idx create mode 100644 lapack.html create mode 100644 lapack.idx create mode 100644 least_squares.html create mode 100644 least_squares.idx create mode 100644 least_squares_lapack.html create mode 100644 least_squares_lapack.idx create mode 100644 linear.html create mode 100644 linear.idx create mode 100644 linear_algebra.html create mode 100644 linear_algebra.idx create mode 100644 linear_systems.html create mode 100644 linear_systems.idx create mode 100644 math_functions.html create mode 100644 math_functions.idx create mode 100644 math_ops_fusion.html create mode 100644 math_ops_fusion.idx create mode 100644 maxpool2D.html create mode 100644 maxpool2D.idx create mode 100644 mean_square_error_loss.html create mode 100644 mean_square_error_loss.idx create mode 100644 memory.html create mode 100644 memory.idx create mode 100644 memory_optimization_hints.html create mode 100644 memory_optimization_hints.idx create mode 100644 ml.html create mode 100644 ml.idx create mode 100644 mnist.html create mode 100644 mnist.idx create mode 100644 naive_l2_gemv.html create mode 100644 naive_l2_gemv.idx create mode 100644 nav.css create mode 100644 neighbors.html create mode 100644 neighbors.idx create mode 100644 nested_containers.html create mode 100644 nested_containers.idx create mode 100644 nimdoc.out.css create mode 100644 nlp.html create mode 100644 nlp.idx create mode 100644 nn.html create mode 100644 nn.idx create mode 100644 nn_dsl.html create mode 100644 nn_dsl.idx create mode 100644 nn_primitives.html create mode 100644 nn_primitives.idx create mode 100644 nnp_activation.html create mode 100644 nnp_activation.idx create mode 100644 nnp_conv2d_cudnn.html create mode 100644 nnp_conv2d_cudnn.idx create mode 100644 nnp_convolution.html 
create mode 100644 nnp_convolution.idx create mode 100644 nnp_embedding.html create mode 100644 nnp_embedding.idx create mode 100644 nnp_gru.html create mode 100644 nnp_gru.idx create mode 100644 nnp_linear.html create mode 100644 nnp_linear.idx create mode 100644 nnp_maxpooling.html create mode 100644 nnp_maxpooling.idx create mode 100644 nnp_numerical_gradient.html create mode 100644 nnp_numerical_gradient.idx create mode 100644 nnp_sigmoid_cross_entropy.html create mode 100644 nnp_sigmoid_cross_entropy.idx create mode 100644 nnp_softmax.html create mode 100644 nnp_softmax.idx create mode 100644 nnp_softmax_cross_entropy.html create mode 100644 nnp_softmax_cross_entropy.idx create mode 100644 nnpack.html create mode 100644 nnpack.idx create mode 100644 nnpack_interface.html create mode 100644 nnpack_interface.idx create mode 100644 opencl_backend.html create mode 100644 opencl_backend.idx create mode 100644 opencl_global_state.html create mode 100644 opencl_global_state.idx create mode 100644 openmp.html create mode 100644 openmp.idx create mode 100644 operators_blas_l1.html create mode 100644 operators_blas_l1.idx create mode 100644 operators_blas_l1_cuda.html create mode 100644 operators_blas_l1_cuda.idx create mode 100644 operators_blas_l1_opencl.html create mode 100644 operators_blas_l1_opencl.idx create mode 100644 operators_blas_l2l3.html create mode 100644 operators_blas_l2l3.idx create mode 100644 operators_blas_l2l3_cuda.html create mode 100644 operators_blas_l2l3_cuda.idx create mode 100644 operators_blas_l2l3_opencl.html create mode 100644 operators_blas_l2l3_opencl.idx create mode 100644 operators_broadcasted.html create mode 100644 operators_broadcasted.idx create mode 100644 operators_broadcasted_cuda.html create mode 100644 operators_broadcasted_cuda.idx create mode 100644 operators_broadcasted_opencl.html create mode 100644 operators_broadcasted_opencl.idx create mode 100644 operators_comparison.html create mode 100644 operators_comparison.idx 
create mode 100644 operators_logical.html create mode 100644 operators_logical.idx create mode 100644 optim_ops_fusion.html create mode 100644 optim_ops_fusion.idx create mode 100644 optimizers.html create mode 100644 optimizers.idx create mode 100644 overload.html create mode 100644 overload.idx create mode 100644 p_accessors.html create mode 100644 p_accessors.idx create mode 100644 p_accessors_macros_desugar.html create mode 100644 p_accessors_macros_desugar.idx create mode 100644 p_accessors_macros_read.html create mode 100644 p_accessors_macros_read.idx create mode 100644 p_accessors_macros_write.html create mode 100644 p_accessors_macros_write.idx create mode 100644 p_activation.html create mode 100644 p_activation.idx create mode 100644 p_checks.html create mode 100644 p_checks.idx create mode 100644 p_complex.html create mode 100644 p_complex.idx create mode 100644 p_display.html create mode 100644 p_display.idx create mode 100644 p_empty_tensors.html create mode 100644 p_empty_tensors.idx create mode 100644 p_init_cuda.html create mode 100644 p_init_cuda.idx create mode 100644 p_init_opencl.html create mode 100644 p_init_opencl.idx create mode 100644 p_kernels_interface_cuda.html create mode 100644 p_kernels_interface_cuda.idx create mode 100644 p_kernels_interface_opencl.html create mode 100644 p_kernels_interface_opencl.idx create mode 100644 p_logsumexp.html create mode 100644 p_logsumexp.idx create mode 100644 p_nnp_checks.html create mode 100644 p_nnp_checks.idx create mode 100644 p_nnp_types.html create mode 100644 p_nnp_types.idx create mode 100644 p_operator_blas_l2l3.html create mode 100644 p_operator_blas_l2l3.idx create mode 100644 p_shapeshifting.html create mode 100644 p_shapeshifting.idx create mode 100644 pca.html create mode 100644 pca.idx create mode 100644 relu.html create mode 100644 relu.idx create mode 100644 selectors.html create mode 100644 selectors.idx create mode 100644 sequninit.html create mode 100644 sequninit.idx create mode 
100644 shapeshifting.html create mode 100644 shapeshifting.idx create mode 100644 shapeshifting_cuda.html create mode 100644 shapeshifting_cuda.idx create mode 100644 shapeshifting_opencl.html create mode 100644 shapeshifting_opencl.idx create mode 100644 sigmoid.html create mode 100644 sigmoid.idx create mode 100644 simd.html create mode 100644 simd.idx create mode 100644 softmax.html create mode 100644 softmax.idx create mode 100644 solve_lapack.html create mode 100644 solve_lapack.idx create mode 100644 special_matrices.html create mode 100644 special_matrices.idx create mode 100644 stats.html create mode 100644 stats.idx create mode 100644 std_version_types.html create mode 100644 std_version_types.idx create mode 100644 syntactic_sugar.html create mode 100644 syntactic_sugar.idx create mode 100644 tanh.html create mode 100644 tanh.idx create mode 100644 tensor.html create mode 100644 tensor.idx create mode 100644 tensor_compare_helper.html create mode 100644 tensor_compare_helper.idx create mode 100644 tensor_cuda.html create mode 100644 tensor_cuda.idx create mode 100644 tensor_opencl.html create mode 100644 tensor_opencl.idx create mode 100644 theindex.html create mode 100644 tokenizers.html create mode 100644 tokenizers.idx create mode 100644 triangular.html create mode 100644 triangular.idx create mode 100644 tuto.aggregate_stats.html create mode 100644 tuto.broadcasting.html create mode 100644 tuto.first_steps.html create mode 100644 tuto.iterators.html create mode 100644 tuto.linear_algebra.html create mode 100644 tuto.map_reduce.html create mode 100644 tuto.shapeshifting.html create mode 100644 tuto.slicing.html create mode 100644 ufunc.html create mode 100644 ufunc.idx create mode 100644 uth.copy_semantics.html create mode 100644 uth.opencl_cuda_nim.html create mode 100644 uth.speed.html create mode 100644 util.html create mode 100644 util.idx diff --git a/accessors.html b/accessors.html new file mode 100644 index 000000000..6ab90b4d1 --- /dev/null +++ 
b/accessors.html @@ -0,0 +1,1026 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/accessors + + + + + + + + + +Arraymancer - src/arraymancer/tensor/accessors + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/accessors

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc atAxisIndex[T](t: Tensor[T]; axis, idx: int; length = 1): Tensor[T] {.
+    noinit, inline.}
+
+ + Returns a sliced tensor in the given axis index +   Source +Edit + +
+
+ +
+
+
+
proc atContiguousIndex[T](t: Tensor[T]; idx: int): T {.noSideEffect, inline.}
+
+ + Return value of tensor at contiguous index i.e. as treat the tensor as flattened +   Source +Edit + +
+
+
+
proc atContiguousIndex[T](t: var Tensor[T]; idx: int): var T {.noSideEffect,
+    inline.}
+
+ + Return value of tensor at contiguous index (mutable) i.e. as treat the tensor as flattened +   Source +Edit + +
+
+ +
+ +
+
+
+

Iterators

+
+
+
+
iterator axis[T](t: Tensor[T]; axis, offset, size: int): Tensor[T] {.inline.}
+
+ + +   Source +Edit + +
+
+
+
iterator axis[T](t: Tensor[T]; axis: int): Tensor[T] {.inline.}
+
+ +

Inline iterator over an axis.

+

Returns:

+
  • A slice along the given axis at each iteration.
  • +
+

Note: The slice dimension is not collapsed by default. You can use squeeze to collapse it.

+

Usage: .. code:: nim for subtensor in t.axis(1): # do stuff

+ +   Source +Edit + +
+
+ +
+
+
+
iterator enumerate[T](t: Tensor[T]): (int, T) {.inline.}
+
+ + Enumerate Tensor values +   Source +Edit + +
+
+
+
iterator enumerate[T](t: Tensor[T]; offset, size: int): (int, T) {.inline,
+    noSideEffect.}
+
+ + Enumerate Tensor values (with offset) +   Source +Edit + +
+
+ +
+
+
+
iterator enumerateAxis[T](t: Tensor[T]; axis, offset, size: int): (int,
+    Tensor[T]) {.inline.}
+
+ + +   Source +Edit + +
+
+
+
iterator enumerateAxis[T](t: Tensor[T]; axis: int): (int, Tensor[T]) {.inline.}
+
+ +

Inline iterator over an axis.

+

Returns a tuple:

+
  • The index along the axis
  • +
  • A slice along the given axis at each iteration.
  • +
+

Note: The slice dimension is not collapsed by default. You can use squeeze to collapse it.

+

Usage: .. code:: nim for subtensor in t.axis(1): # do stuff

+ +   Source +Edit + +
+
+ +
+
+
+
iterator enumerateZip[T, U, V](t1: Tensor[T]; t2: Tensor[U]; t3: Tensor[V]): (
+    int, T, U, V) {.inline, noSideEffect.}
+
+ + Enumerate simultaneously on two tensors returning their elements in a tuple. Note: only tensors of the same shape will be zipped together. +   Source +Edit + +
+
+
+
iterator enumerateZip[T, U, V](t1: Tensor[T]; t2: Tensor[U]; t3: Tensor[V];
+                               offset, size: int): (int, T, U, V) {.inline,
+    noSideEffect.}
+
+ + Enumerate simultaneously on two tensors returning their elements in a tuple. (with offset) Note: only tensors of the same shape will be zipped together. +   Source +Edit + +
+
+
+
iterator enumerateZip[T, U](t1: Tensor[T]; t2: Tensor[U]): (int, T, U) {.inline,
+    noSideEffect.}
+
+ + Enumerate simultaneously on two tensors returning their elements in a tuple. Note: only tensors of the same shape will be zipped together. +   Source +Edit + +
+
+
+
iterator enumerateZip[T, U](t1: Tensor[T]; t2: Tensor[U]; offset, size: int): (
+    int, T, U) {.inline, noSideEffect.}
+
+ + Enumerate simultaneously on two tensors returning their elements in a tuple. (with offset) Note: only tensors of the same shape will be zipped together. +   Source +Edit + +
+
+ +
+
+
+
iterator items[T](t: Tensor[T]): T {.inline, noSideEffect.}
+
+ +

Inline iterator on Tensor values

+

The iterator will iterate in C order regardingless of the tensor properties (Fortran layout, non-contiguous, slice ...). So 0, 0, 0 then 0, 0, 1 then ... then 0, 1, 0 ...

+

Usage: .. code:: nim for val in t: # items is implicitly called val += 42

+ +   Source +Edit + +
+
+
+
iterator items[T](t: Tensor[T]; offset, size: int): T {.inline, noSideEffect.}
+
+ + Inline iterator on Tensor values (with offset) +   Source +Edit + +
+
+ +
+ + +
+
+
iterator mitems[T](t: var Tensor[T]): var T {.inline, noSideEffect.}
+
+ +

Inline iterator on Tensor values (mutable, with offset)

+

Note: due to C++ restrictions and Nim current codegen on mutable iterator, it is not possible to use this iterator with the C++ backend or at the same time as Cuda (that uses C++)

+ +   Source +Edit + +
+
+
+
iterator mitems[T](t: var Tensor[T]; offset, size: int): var T {.inline,
+    noSideEffect.}
+
+ +

Inline iterator on Tensor values (mutable, with offset)

+

Note: due to C++ restrictions and Nim current codegen on mutable iterator, it is not possible to use this iterator with the C++ backend or at the same time as Cuda (that uses C++)

+ +   Source +Edit + +
+
+ +
+
+
+
iterator mpairs[T](t: var Tensor[T]): (seq[int], var T) {.inline, noSideEffect.}
+
+ +

Inline iterator on Tensor (coordinates, values) (mutable)

+

Note: due to C++ restrictions and Nim current codegen on mutable iterator, it is not possible to use this iterator with the C++ backend or at the same time as Cuda (that uses C++)

+ +   Source +Edit + +
+
+ +
+
+
+
iterator mzip[T, U, V](t1: var Tensor[T]; t2: Tensor[U]; t3: Tensor[V]): (var T,
+    U, V) {.inline, noSideEffect.}
+
+ +

Iterates simultaneously on two tensors returning their elements in a tuple. (mutable) Note: only tensors of the same shape will be zipped together.

+

Note: due to C++ restrictions and Nim current codegen on mutable iterator, it is not possible to use this iterator with the C++ backend or at the same time as Cuda (that uses C++)

+ +   Source +Edit + +
+
+
+
iterator mzip[T, U, V](t1: var Tensor[T]; t2: Tensor[U]; t3: Tensor[V];
+                       offset, size: int): (var T, U, V) {.inline, noSideEffect.}
+
+ +

Iterates simultaneously on two tensors returning their elements in a tuple. (mutable, with offset) Note: only tensors of the same shape will be zipped together.

+

Note: due to C++ restrictions and Nim current codegen on mutable iterator, it is not possible to use this iterator with the C++ backend or at the same time as Cuda (that uses C++)

+ +   Source +Edit + +
+
+
+
iterator mzip[T, U](t1: var Tensor[T]; t2: Tensor[U]): (var T, U) {.inline,
+    noSideEffect.}
+
+ +

Iterates simultaneously on two tensors returning their elements in a tuple. (mutable) Note: only tensors of the same shape will be zipped together.

+

Note: due to C++ restrictions and Nim current codegen on mutable iterator, it is not possible to use this iterator with the C++ backend or at the same time as Cuda (that uses C++)

+ +   Source +Edit + +
+
+
+
iterator mzip[T, U](t1: var Tensor[T]; t2: Tensor[U]; offset, size: int): (
+    var T, U) {.inline, noSideEffect.}
+
+ +

Iterates simultaneously on two tensors returning their elements in a tuple. (mutable, with offset) Note: only tensors of the same shape will be zipped together.

+

Note: due to C++ restrictions and Nim current codegen on mutable iterator, it is not possible to use this iterator with the C++ backend or at the same time as Cuda (that uses C++)

+ +   Source +Edit + +
+
+ +
+
+
+
iterator pairs[T](t: Tensor[T]): (seq[int], T) {.inline, noSideEffect.}
+
+ +

Inline iterator on Tensor (coordinates, values)

+

The iterator will iterate in C order regardingless of the tensor properties (Fortran layout, non-contiguous, slice ...). So 0, 0, 0 then 0, 0, 1 then ... then 0, 1, 0 ...

+

It returns a tuple of (coordinates, value) like (@1,0,1, 1337)

+

Usage: .. code:: nim for coord, val in t: echo coord echo val .. code:: nim for coordval in t: echo coordval0 echo coordval1

+ +   Source +Edit + +
+
+ +
+
+
+
iterator zip[T, U, V](t1: Tensor[T]; t2: Tensor[U]; t3: Tensor[V]): (T, U, V) {.
+    inline, noSideEffect.}
+
+ + Iterates simultaneously on two tensors returning their elements in a tuple. Note: only tensors of the same shape will be zipped together. +   Source +Edit + +
+
+
+
iterator zip[T, U, V](t1: Tensor[T]; t2: Tensor[U]; t3: Tensor[V];
+                      offset, size: int): (T, U, V) {.inline, noSideEffect.}
+
+ + Iterates simultaneously on two tensors returning their elements in a tuple. (with offset) Note: only tensors of the same shape will be zipped together. +   Source +Edit + +
+
+
+
iterator zip[T, U](t1: Tensor[T]; t2: Tensor[U]): (T, U) {.inline, noSideEffect.}
+
+ + Iterates simultaneously on two tensors returning their elements in a tuple. Note: only tensors of the same shape will be zipped together. +   Source +Edit + +
+
+
+
iterator zip[T, U](t1: Tensor[T]; t2: Tensor[U]; offset, size: int): (T, U) {.
+    inline, noSideEffect.}
+
+ + Iterates simultaneously on two tensors returning their elements in a tuple. (with offset) Note: only tensors of the same shape will be zipped together. +   Source +Edit + +
+
+ +
+
+
+
iterator zipAxis[T, U](a: Tensor[T]; b: Tensor[U]; axis: int): tuple[
+    a: Tensor[T], b: Tensor[U]] {.inline.}
+
+ +

Inline iterator over 2 tensors over an axis.

+

Returns:

+
  • 2 slices along the given axis at each iteration.
  • +
+

Note: The slice dimension is not collapsed by default. You can use squeeze to collapse it.

+

Usage: .. code:: nim for subtensor in zipAxis(a, b, 1): # do stuff

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/accessors.idx b/accessors.idx new file mode 100644 index 000000000..ecc3cc2db --- /dev/null +++ b/accessors.idx @@ -0,0 +1,44 @@ +nimTitle accessors accessors.html module src/arraymancer/tensor/accessors 0 +nim atContiguousIndex accessors.html#atContiguousIndex,Tensor[T],int proc atContiguousIndex[T](t: Tensor[T]; idx: int): T 19 +nim atContiguousIndex accessors.html#atContiguousIndex,Tensor[T],int_2 proc atContiguousIndex[T](t: var Tensor[T]; idx: int): var T 27 +nim atAxisIndex accessors.html#atAxisIndex,Tensor[T],int,int,int proc atAxisIndex[T](t: Tensor[T]; axis, idx: int; length = 1): Tensor[T] 35 +nim items accessors.html#items.i,Tensor[T] iterator items[T](t: Tensor[T]): T 45 +nim items accessors.html#items.i,Tensor[T],int,int iterator items[T](t: Tensor[T]; offset, size: int): T 57 +nim mitems accessors.html#mitems.i,Tensor[T] iterator mitems[T](t: var Tensor[T]): var T 64 +nim mitems accessors.html#mitems.i,Tensor[T],int,int iterator mitems[T](t: var Tensor[T]; offset, size: int): var T 72 +nim enumerate accessors.html#enumerate.i,Tensor[T] iterator enumerate[T](t: Tensor[T]): (int, T) 83 +nim enumerate accessors.html#enumerate.i,Tensor[T],int,int iterator enumerate[T](t: Tensor[T]; offset, size: int): (int, T) 87 +nim menumerate accessors.html#menumerate.i,Tensor[T] iterator menumerate[T](t: Tensor[T]): (int, var T) 94 +nim menumerate accessors.html#menumerate.i,Tensor[T],int,int iterator menumerate[T](t: Tensor[T]; offset, size: int): (int, var T) 102 +nim pairs accessors.html#pairs.i,Tensor[T] iterator pairs[T](t: Tensor[T]): (seq[int], T) 113 +nim mpairs accessors.html#mpairs.i,Tensor[T] iterator mpairs[T](t: var Tensor[T]): (seq[int], var T) 132 +nim zip accessors.html#zip.i,Tensor[T],Tensor[U] iterator zip[T, U](t1: Tensor[T]; t2: Tensor[U]): (T, U) 140 +nim zip accessors.html#zip.i,Tensor[T],Tensor[U],int,int iterator zip[T, U](t1: Tensor[T]; t2: Tensor[U]; offset, size: int): (T, U) 147 +nim zip 
accessors.html#zip.i,Tensor[T],Tensor[U],Tensor[V] iterator zip[T, U, V](t1: Tensor[T]; t2: Tensor[U]; t3: Tensor[V]): (T, U, V) 156 +nim zip accessors.html#zip.i,Tensor[T],Tensor[U],Tensor[V],int,int iterator zip[T, U, V](t1: Tensor[T]; t2: Tensor[U]; t3: Tensor[V]; offset, size: int): (\n T, U, V) 164 +nim mzip accessors.html#mzip.i,Tensor[T],Tensor[U] iterator mzip[T, U](t1: var Tensor[T]; t2: Tensor[U]): (var T, U) 174 +nim mzip accessors.html#mzip.i,Tensor[T],Tensor[U],int,int iterator mzip[T, U](t1: var Tensor[T]; t2: Tensor[U]; offset, size: int): (var T, U) 185 +nim mzip accessors.html#mzip.i,Tensor[T],Tensor[U],Tensor[V] iterator mzip[T, U, V](t1: var Tensor[T]; t2: Tensor[U]; t3: Tensor[V]): (var T, U, V) 198 +nim mzip accessors.html#mzip.i,Tensor[T],Tensor[U],Tensor[V],int,int iterator mzip[T, U, V](t1: var Tensor[T]; t2: Tensor[U]; t3: Tensor[V]; offset, size: int): (\n var T, U, V) 209 +nim enumerateZip accessors.html#enumerateZip.i,Tensor[T],Tensor[U] iterator enumerateZip[T, U](t1: Tensor[T]; t2: Tensor[U]): (int, T, U) 223 +nim enumerateZip accessors.html#enumerateZip.i,Tensor[T],Tensor[U],int,int iterator enumerateZip[T, U](t1: Tensor[T]; t2: Tensor[U]; offset, size: int): (int, T, U) 230 +nim enumerateZip accessors.html#enumerateZip.i,Tensor[T],Tensor[U],Tensor[V] iterator enumerateZip[T, U, V](t1: Tensor[T]; t2: Tensor[U]; t3: Tensor[V]): (int, T, U,\n V) 239 +nim enumerateZip accessors.html#enumerateZip.i,Tensor[T],Tensor[U],Tensor[V],int,int iterator enumerateZip[T, U, V](t1: Tensor[T]; t2: Tensor[U]; t3: Tensor[V];\n offset, size: int): (int, T, U, V) 247 +nim menumerateZip accessors.html#menumerateZip.i,Tensor[T],Tensor[U] iterator menumerateZip[T, U](t1: var Tensor[T]; t2: Tensor[U]): (int, var T, U) 257 +nim menumerateZip accessors.html#menumerateZip.i,Tensor[T],Tensor[U],int,int iterator menumerateZip[T, U](t1: var Tensor[T]; t2: Tensor[U]; offset, size: int): (int,\n var T, U) 268 +nim axis accessors.html#axis.i,Tensor[T],int iterator 
axis[T](t: Tensor[T]; axis: int): Tensor[T] 304 +nim axis accessors.html#axis.i,Tensor[T],int,int,int iterator axis[T](t: Tensor[T]; axis, offset, size: int): Tensor[T] 319 +nim zipAxis accessors.html#zipAxis.i,Tensor[T],Tensor[U],int iterator zipAxis[T, U](a: Tensor[T]; b: Tensor[U]; axis: int): tuple[a: Tensor[T],\n b: Tensor[U]] 322 +nim enumerateAxis accessors.html#enumerateAxis.i,Tensor[T],int iterator enumerateAxis[T](t: Tensor[T]; axis: int): (int, Tensor[T]) 344 +nim enumerateAxis accessors.html#enumerateAxis.i,Tensor[T],int,int,int iterator enumerateAxis[T](t: Tensor[T]; axis, offset, size: int): (int, Tensor[T]) 360 +nimgrp atcontiguousindex accessors.html#atContiguousIndex-procs-all proc 19 +nimgrp enumerate accessors.html#enumerate-iterators-all iterator 83 +nimgrp enumeratezip accessors.html#enumerateZip-iterators-all iterator 223 +nimgrp mzip accessors.html#mzip-iterators-all iterator 174 +nimgrp menumeratezip accessors.html#menumerateZip-iterators-all iterator 257 +nimgrp items accessors.html#items-iterators-all iterator 45 +nimgrp zip accessors.html#zip-iterators-all iterator 140 +nimgrp axis accessors.html#axis-iterators-all iterator 304 +nimgrp mitems accessors.html#mitems-iterators-all iterator 64 +nimgrp enumerateaxis accessors.html#enumerateAxis-iterators-all iterator 344 +nimgrp menumerate accessors.html#menumerate-iterators-all iterator 94 diff --git a/accessors_macros_read.html b/accessors_macros_read.html new file mode 100644 index 000000000..91b55c23b --- /dev/null +++ b/accessors_macros_read.html @@ -0,0 +1,477 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/accessors_macros_read + + + + + + + + + +Arraymancer - src/arraymancer/tensor/accessors_macros_read + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/accessors_macros_read

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Macros

+
+
+
+
macro `[]`[T](t: AnyTensor[T]; args: varargs[untyped]): untyped
+
+ + Slice a Tensor or a CudaTensor Input:
  • a Tensor or a CudaTensor
  • +
  • and:
    • specific coordinates (varargs[int])
    • +
    • or a slice (cf. tutorial)
    • +
    • or a boolean tensor or openArray mask with the same shape as the tensor
    • +
    +
  • +
+

Returns:

+
  • a value or a tensor corresponding to the slice or to the true elements of the mask
  • +
+

Warning โš  CudaTensor temporary default: For CudaTensor only, this is a no-copy operation, data is shared with the input. This proc does not guarantee that a let value is immutable. Usage:

+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/accessors_macros_read.idx b/accessors_macros_read.idx new file mode 100644 index 000000000..3133b8db3 --- /dev/null +++ b/accessors_macros_read.idx @@ -0,0 +1,2 @@ +nimTitle accessors_macros_read accessors_macros_read.html module src/arraymancer/tensor/accessors_macros_read 0 +nim `[]` accessors_macros_read.html#[].m,AnyTensor[T],varargs[untyped] macro `[]`[T](t: AnyTensor[T]; args: varargs[untyped]): untyped 20 diff --git a/accessors_macros_syntax.html b/accessors_macros_syntax.html new file mode 100644 index 000000000..761f65c32 --- /dev/null +++ b/accessors_macros_syntax.html @@ -0,0 +1,862 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/accessors_macros_syntax + + + + + + + + + +Arraymancer - src/arraymancer/tensor/accessors_macros_syntax + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/accessors_macros_syntax

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+ +
+
Ellipsis = object
+
+ + Dummy type for ellipsis i.e. "Don't slice the rest of dimensions" +   Source +Edit + +
+
+
+
Step = object
+  
+
+ +

Internal: Workaround to build SteppedSlice without using parenthesis.

+

Expected syntax is tensor[0..10|1].

+

Due to operator precedence of | over .. 0..10|1 is interpreted as 0..(10|1)

+ +   Source +Edit + +
+
+
+
SteppedSlice = object
+  a*, b*: int
+  step*: int
+  a_from_end*: bool
+  b_from_end*: bool
+
+
+ + Internal: A slice object related to a tensor single dimension:
  • a, b: Respectively the beginning and the end of the range of the dimension
  • +
  • step: The stepping of the slice (can be negative)
  • +
  • a/b_from_end: Indicates if a/b should be counted from 0 or from the end of the tensor relevant dimension.
  • +
+

Slicing syntax like a2, 1..<5, _ will be converted at compile-time to SteppedSlices

+ +   Source +Edit + +
+
+ +
+
+
+

Consts

+
+
+
... = ()
+
+ + +   Source +Edit + +
+
+
+
_ = (a: 0, b: 1, step: 1, a_from_end: false, b_from_end: true)
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc `..`(a: int; s: Step): SteppedSlice {.noSideEffect, inline, ...raises: [],
+    tags: [], forbids: [].}
+
+ + Internal: Build a SteppedSlice from a .. (b|step) (workaround to operator precedence) Input:
- the beginning of the slice range
+- a ``Step`` workaround object
+

Returns:

+
- a ``SteppedSlice``, end of range will be inclusive
+ +   Source +Edit + +
+
+ +
+
+
+
proc `..<`(a: int; s: Step): SteppedSlice {.noSideEffect, inline, ...raises: [],
+    tags: [], forbids: [].}
+
+ + Internal: Build a SteppedSlice from a ..< (b|step) (workaround to operator precedence) Input:
- the beginning of the slice range
+- a ``Step`` workaround object
+

Returns:

+
- a ``SteppedSlice``, end of range will be exclusive.
+ +   Source +Edit + +
+
+ +
+
+
+
proc `..^`(a: int; s: Step): SteppedSlice {.noSideEffect, inline, ...raises: [],
+    tags: [], forbids: [].}
+
+ + Internal: Build a SteppedSlice from a ..^ (b|step) (workaround to operator precedence and ..^b not being interpreted as .. ^b) Input:
- the beginning of the slice range
+- a ``Step`` workaround object
+

Returns:

+
- a ``SteppedSlice``, end of range will start at "b" away from the end
+ +   Source +Edit + +
+
+ +
+
+
+
proc `^`(s: Slice): SteppedSlice {.noSideEffect, inline.}
+
+ + Internal: Prefix to a to indicate starting the slice at "a" away from the end Note: This does not automatically inverse stepping, what if we want ^5..^1 +   Source +Edit + +
+
+
+
proc `^`(s: SteppedSlice): SteppedSlice {.noSideEffect, inline, ...raises: [],
+    tags: [], forbids: [].}
+
+ + Internal: Prefix to a to indicate starting the slice at "a" away from the end Note: This does not automatically inverse stepping, what if we want ^5..^1 +   Source +Edit + +
+
+ +
+
+
+
proc initSpanSlices(len: int): ArrayOfSlices {.inline, ...raises: [], tags: [],
+    forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc toArrayOfSlices(s: varargs[SteppedSlice]): ArrayOfSlices {.inline,
+    ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `|`(b, step: int): Step {.noSideEffect, inline, ...raises: [], tags: [],
+                               forbids: [].}
+
+ +

Internal: A Step constructor

+

Step is a workaround due to operator precedence.

+

0..10|1 is interpreted as 0..(10|1) Input:

+
- the end of a slice range
+- a step
+

Returns:

+
- a ``Step``
+ +   Source +Edit + +
+
+
+
proc `|`(s: Slice[int]; step: int): SteppedSlice {.noSideEffect, inline,
+    ...raises: [], tags: [], forbids: [].}
+
+ + Internal: A SteppedSlice constructor Input:
- a slice
+- a step
+

Returns:

+
- a ``SteppedSlice``
+ +   Source +Edit + +
+
+
+
proc `|`(ss: SteppedSlice; step: int): SteppedSlice {.noSideEffect, inline,
+    ...raises: [], tags: [], forbids: [].}
+
+ + Internal: Modifies the step of a SteppedSlice Input:
- a ``SteppedSLice``
+- the new stepping
+

Returns:

+
- a ``SteppedSLice``
+ +   Source +Edit + +
+
+ +
+
+
+
proc `|+`(b, step: int): Step {.noSideEffect, inline, ...raises: [], tags: [],
+                                forbids: [].}
+
+ + Internal: Alias for | +   Source +Edit + +
+
+
+
proc `|+`(s: Slice[int]; step: int): SteppedSlice {.noSideEffect, inline,
+    ...raises: [], tags: [], forbids: [].}
+
+ + Internal: Alias for | +   Source +Edit + +
+
+
+
proc `|+`(ss: SteppedSlice; step: int): SteppedSlice {.noSideEffect, inline,
+    ...raises: [], tags: [], forbids: [].}
+
+ + Internal: Alias for | +   Source +Edit + +
+
+ +
+
+
+
proc `|-`(b, step: int): Step {.noSideEffect, inline, ...raises: [], tags: [],
+                                forbids: [].}
+
+ +

Internal: A SteppedSlice constructor

+

Workaround to tensor0..10|-1 being intepreted as 0 .. (10 `|-` 1)

+

Properly create SteppedSLice with negative stepping

+ +   Source +Edit + +
+
+
+
proc `|-`(s: Slice[int]; step: int): SteppedSlice {.noSideEffect, inline,
+    ...raises: [], tags: [], forbids: [].}
+
+ +

Internal: A SteppedSlice constructor

+

Workaround to tensorslice|-1 being interpreted as slice `|-` 1

+

Properly create SteppedSLice with negative stepping

+ +   Source +Edit + +
+
+
+
proc `|-`(ss: SteppedSlice; step: int): SteppedSlice {.noSideEffect, inline,
+    ...raises: [], tags: [], forbids: [].}
+
+ +

Internal: Modifies the step of a SteppedSlice

+

Workaround to tensorslice|-1 being interpreted as slice `|-` 1

+

Properly create SteppedSLice with negative stepping

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/accessors_macros_syntax.idx b/accessors_macros_syntax.idx new file mode 100644 index 000000000..cb5a863df --- /dev/null +++ b/accessors_macros_syntax.idx @@ -0,0 +1,27 @@ +nimTitle accessors_macros_syntax accessors_macros_syntax.html module src/arraymancer/tensor/accessors_macros_syntax 0 +nim SteppedSlice accessors_macros_syntax.html#SteppedSlice object SteppedSlice 56 +nim Step accessors_macros_syntax.html#Step object Step 67 +nim `_` accessors_macros_syntax.html#_ const `_` 78 +nim Ellipsis accessors_macros_syntax.html#Ellipsis object Ellipsis 80 +nim `...` accessors_macros_syntax.html#... const `...` 82 +nim ArrayOfSlices accessors_macros_syntax.html#ArrayOfSlices type ArrayOfSlices 87 +nim toArrayOfSlices accessors_macros_syntax.html#toArrayOfSlices,varargs[SteppedSlice] proc toArrayOfSlices(s: varargs[SteppedSlice]): ArrayOfSlices 89 +nim initSpanSlices accessors_macros_syntax.html#initSpanSlices,int proc initSpanSlices(len: int): ArrayOfSlices 97 +nim `|` accessors_macros_syntax.html#|,Slice[int],int proc `|`(s: Slice[int]; step: int): SteppedSlice 108 +nim `|` accessors_macros_syntax.html#|,int,int proc `|`(b, step: int): Step 117 +nim `|` accessors_macros_syntax.html#|,SteppedSlice,int proc `|`(ss: SteppedSlice; step: int): SteppedSlice 130 +nim `|+` accessors_macros_syntax.html#|+,Slice[int],int proc `|+`(s: Slice[int]; step: int): SteppedSlice 140 +nim `|+` accessors_macros_syntax.html#|+,int,int proc `|+`(b, step: int): Step 144 +nim `|+` accessors_macros_syntax.html#|+,SteppedSlice,int proc `|+`(ss: SteppedSlice; step: int): SteppedSlice 148 +nim `|-` accessors_macros_syntax.html#|-,Slice[int],int proc `|-`(s: Slice[int]; step: int): SteppedSlice 152 +nim `|-` accessors_macros_syntax.html#|-,int,int proc `|-`(b, step: int): Step 160 +nim `|-` accessors_macros_syntax.html#|-,SteppedSlice,int proc `|-`(ss: SteppedSlice; step: int): SteppedSlice 168 +nim `..` accessors_macros_syntax.html#..,int,Step proc `..`(a: int; s: Step): 
SteppedSlice 177 +nim `..<` accessors_macros_syntax.html#..<,int,Step proc `..<`(a: int; s: Step): SteppedSlice 186 +nim `..^` accessors_macros_syntax.html#..^,int,Step proc `..^`(a: int; s: Step): SteppedSlice 195 +nim `^` accessors_macros_syntax.html#^,SteppedSlice proc `^`(s: SteppedSlice): SteppedSlice 204 +nim `^` accessors_macros_syntax.html#^,Slice proc `^`(s: Slice): SteppedSlice 210 +nimgrp | accessors_macros_syntax.html#|-procs-all proc 108 +nimgrp ^ accessors_macros_syntax.html#^-procs-all proc 204 +nimgrp |+ accessors_macros_syntax.html#|+-procs-all proc 140 +nimgrp |- accessors_macros_syntax.html#|--procs-all proc 152 diff --git a/accessors_macros_write.html b/accessors_macros_write.html new file mode 100644 index 000000000..3edfee38d --- /dev/null +++ b/accessors_macros_write.html @@ -0,0 +1,506 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/accessors_macros_write + + + + + + + + + +Arraymancer - src/arraymancer/tensor/accessors_macros_write + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/accessors_macros_write

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Macros

+
+
+
+
macro `[]=`[T](t: var Tensor[T]; args: varargs[untyped]): untyped
+
+ +

Modifies a tensor inplace at the corresponding location or slice

+

Input:

+
  • a var tensor
  • +
  • a location or a boolean mask:
    • specific coordinates (varargs[int])
    • +
    • or a slice (cf. tutorial)
    • +
    • or a boolean tensor or openArray mask with the same shape as the tensor
    • +
    +
  • +
  • a value:
    • a single value that will
      • replace the value at the specific coordinates
      • +
      • or be applied to the whole slice
      • +
      • or be applied to the true elements of the boolean mask
      • +
      +
    • +
    • a tensor or openArray with a shape that matches the slice
    • +
    • a tensor whose values will be applied to the true elements of the boolean mask
    • +
    +
  • +
+

Result:

+
  • Nothing, the tensor is modified in-place
  • +
+

Usage:

+ + +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template `[]=`[T](t: Tensor[T]; args: varargs[untyped]): untyped
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/accessors_macros_write.idx b/accessors_macros_write.idx new file mode 100644 index 000000000..e8354b24d --- /dev/null +++ b/accessors_macros_write.idx @@ -0,0 +1,3 @@ +nimTitle accessors_macros_write accessors_macros_write.html module src/arraymancer/tensor/accessors_macros_write 0 +nim `[]=` accessors_macros_write.html#[]=.m,Tensor[T],varargs[untyped] macro `[]=`[T](t: var Tensor[T]; args: varargs[untyped]): untyped 21 +nim `[]=` accessors_macros_write.html#[]=.t,Tensor[T],varargs[untyped] template `[]=`[T](t: Tensor[T]; args: varargs[untyped]): untyped 82 diff --git a/accuracy_score.html b/accuracy_score.html new file mode 100644 index 000000000..69bd3cc0e --- /dev/null +++ b/accuracy_score.html @@ -0,0 +1,450 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/ml/metrics/accuracy_score + + + + + + + + + +Arraymancer - src/arraymancer/ml/metrics/accuracy_score + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/ml/metrics/accuracy_score

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Procs

+
+
+
+
proc accuracy_score[T](y_pred, y_true: Tensor[T]): float
+
+ + Input:
  • y_true: TensorT containing the ground truth (correct) labels
  • +
  • y_pred: TensorT containing the predicted labels
  • +
+

Returns:

+
  • The proportion of correctly classified samples (as float).
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/accuracy_score.idx b/accuracy_score.idx new file mode 100644 index 000000000..12d9d603d --- /dev/null +++ b/accuracy_score.idx @@ -0,0 +1,2 @@ +nimTitle accuracy_score accuracy_score.html module src/arraymancer/ml/metrics/accuracy_score 0 +nim accuracy_score accuracy_score.html#accuracy_score,Tensor[T],Tensor[T] proc accuracy_score[T](y_pred, y_true: Tensor[T]): float 18 diff --git a/aggregate.html b/aggregate.html new file mode 100644 index 000000000..2d5908684 --- /dev/null +++ b/aggregate.html @@ -0,0 +1,1071 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/aggregate + + + + + + + + + +Arraymancer - src/arraymancer/tensor/aggregate + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/aggregate

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
func all[T](t: Tensor[T]): bool
+
+ +

Returns true if all of the items in the input tensor are true or non-zero

+

Input:

+
  • A tensor
  • +
+

Returns:

+
  • True if at least one element is not zero
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
func any[T](t: Tensor[T]): bool
+
+ +

Returns true if any of the items in the input tensor is true or non-zero

+

Input:

+
  • A tensor
  • +
+

Returns:

+
  • True if at least one element is not zero
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc argmax[T](arg: Tensor[T]; axis: int): Tensor[int] {.inline.}
+
+ +

Returns the index of the maximum along an axis

+

Input:

+
  • A tensor
  • +
  • An axis (int)
  • +
+

Returns:

+
  • A tensor of index of the maximums along this axis
  • +
+

Example: .. code:: nim let a = [0, 4, 7, 1, 9, 5, 3, 4, 1].toTensor assert argmax(a, 0) == [2, 1, 0].toTensor assert argmax(a, 1) == [2, 1, 1].toTensor

+ +   Source +Edit + +
+
+ +
+
+
+
proc argmax_max[T: SomeNumber](arg: Tensor[T]; axis: int): tuple[
+    indices: Tensor[int], maxes: Tensor[T]] {.noinit.}
+
+ +

Returns (indices, maxes) along an axis

+

Input:

+
  • A tensor
  • +
  • An axis (int)
  • +
+

Returns:

+
  • A tuple of tensors (indices, maxes) along this axis
  • +
+

Example: .. code:: nim let a = [0, 4, 7, 1, 9, 5, 3, 4, 1].toTensor assert argmax(a, 0).indices == [2, 1, 0].toTensor assert argmax(a, 1).indices == [2, 1, 1].toTensor

+ +   Source +Edit + +
+
+ +
+
+
+
proc argmin[T](arg: Tensor[T]; axis: int): Tensor[int] {.inline.}
+
+ +

Returns the index of the minimum along an axis

+

Input:

+
  • A tensor
  • +
  • An axis (int)
  • +
+

Returns:

+
  • A tensor of index of the minimums along this axis
  • +
+

Example: .. code:: nim let a = [0, 4, 7, 1, 9, 5, 3, 4, 1].toTensor assert argmin(a, 0) == [2, 1, 0].toTensor assert argmin(a, 1) == [2, 1, 1].toTensor

+ +   Source +Edit + +
+
+ +
+
+
+
proc argmin_min[T: SomeNumber](arg: Tensor[T]; axis: int): tuple[
+    indices: Tensor[int], mins: Tensor[T]] {.noinit.}
+
+ +

Returns (indices, mins) along an axis

+

Input:

+
  • A tensor
  • +
  • An axis (int)
  • +
+

Returns:

+
  • A tuple of tensors (indices, min) along this axis
  • +
+

Example: .. code:: nim let a = [0, 4, 7, 1, 9, 5, 3, 4, 1].toTensor assert argmin(a, 0).indices == [0, 0, 2].toTensor assert argmin(a, 1).indices == [0, 0, 2].toTensor

+ +   Source +Edit + +
+
+ +
+
+
+
proc cumprod[T](arg: Tensor[T]; axis: int = 0): Tensor[T]
+
+ + Calculates the cumulative sum of a rank-n Tensor. Inputs:
  • t: a rank-n tensor to cumulatively sum
  • +
  • axis: int
  • +
+

Returns:

+
  • A tensor cumulatively summed at axis, that is, add each value to
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc cumsum[T](arg: Tensor[T]; axis: int = 0): Tensor[T]
+
+ + Calculates the cumulative sum of a rank-n Tensor. Inputs:
  • t: a rank-n tensor to cumulatively sum
  • +
  • axis: int
  • +
+

Returns:

+
  • A tensor cumulatively summed at axis, that is, add each value to
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc diff_discrete[T](arg: Tensor[T]; n = 1; axis: int = -1): Tensor[T]
+
+ +

Calculate the n-th discrete difference along the given axis.

+

The first difference is given by out[i] = a[i+1] - a[i] along the given axis. Higher differences are calculated by using diff recursively.

+

Input:

+
  • A tensor
  • +
  • n: The number of times values are differenced. If zero, the input is returned as-is.
  • +
  • axis: The axis along which the difference is taken, default is the last axis.
  • +
+

Returns:

+
  • A tensor with the n-th discrete difference along the given axis. It's size along that axis will be reduced by one.
  • +
  • The code in this function is heavily based upon and equivalent
  • +
+

to numpy's diff() function.

+ +   Source +Edit + +
+
+ +
+
+
+
proc iqr[T](arg: Tensor[T]): float
+
+ +

Returns the interquartile range of the 1D tensor t.

+

The interquartile range (IQR) is the distance between the 25th and 75th percentile

+ +   Source +Edit + +
+
+ +
+
+
+
proc max[T](arg: Tensor[T]): T
+
+ + Compute the max of all elements +   Source +Edit + +
+
+
+
proc max[T](arg: Tensor[T]; axis: int): Tensor[T] {.noinit.}
+
+ + Compute the max along an axis +   Source +Edit + +
+
+ +
+
+
+
proc mean[T: Complex[float32] or Complex[float64]](arg: Tensor[T]): T {.inline.}
+
+ + Compute the mean of all elements +   Source +Edit + +
+
+
+
proc mean[T: Complex[float32] or Complex[float64]](arg: Tensor[T]; axis: int): Tensor[
+    T] {.noinit, inline.}
+
+ + Compute the mean along an axis +   Source +Edit + +
+
+
+
proc mean[T: SomeFloat](arg: Tensor[T]): T {.inline.}
+
+ + Compute the mean of all elements +   Source +Edit + +
+
+
+
proc mean[T: SomeFloat](arg: Tensor[T]; axis: int): Tensor[T] {.noinit, inline.}
+
+ + Compute the mean along an axis +   Source +Edit + +
+
+
+
proc mean[T: SomeInteger](arg: Tensor[T]): T {.inline.}
+
+ +

Compute the mean of all elements

+

Warning โš : Since input is integer, output will also be integer (using integer division)

+ +   Source +Edit + +
+
+
+
proc mean[T: SomeInteger](arg: Tensor[T]; axis: int): Tensor[T] {.noinit, inline.}
+
+ +

Compute the mean along an axis

+

Warning โš : Since input is integer, output will also be integer (using integer division)

+ +   Source +Edit + +
+
+ +
+
+
+
proc median[T](arg: Tensor[T]; isSorted = false): float {.inline.}
+
+ + Compute the median of all elements (same as arg.percentile(50)) +   Source +Edit + +
+
+ +
+
+
+
proc min[T](arg: Tensor[T]): T
+
+ + Compute the min of all elements +   Source +Edit + +
+
+
+
proc min[T](arg: Tensor[T]; axis: int): Tensor[T] {.noinit.}
+
+ + Compute the min along an axis +   Source +Edit + +
+
+ +
+
+
+
proc nonzero[T](arg: Tensor[T]): Tensor[int]
+
+ +

Returns the indices, which are non zero as a Tensor[int].

+

The resulting tensor is 2 dimensional and has one element for each dimension in t. Each of those elements contains the indicies along the corresponding axis (element 0 == axis 0), which are non zero.

+

Input:

+
  • A tensor
  • +
+

Returns:

+
  • A 2D tensor with N elements, where N is the rank of t
  • +
+

Example: .. code:: nim let a = [3, 0, 0, 0, 4, 0, 5, 6, 0].toTensor() assert a.nonzero == [0, 1, 2, 2, 0, 1, 0, 1].toTensor # ^-- indices.. ^ ..for axis 0 # โˆŸ-- indices for axis 1 # axis 0: 0, 1, 2, 2 refers to: # - 0 -> 3 in row 0 # - 1 -> 4 in row 1 # - 2 -> 5 in row 2 # - 2 -> 6 in row 2 # axis 1: 0, 1, 0, 1 refers to: # - 0 -> 3 in col 0 # - 1 -> 4 in col 1 # - 0 -> 5 in col 0 # - 1 -> 6 in col 1

+ +   Source +Edit + +
+
+ +
+
+
+
proc percentile[T](arg: Tensor[T]; p: int; isSorted = false): float
+
+ +

statistical percentile value of t, where p percentile value is between 0 and 100 inclusively, and p=0 gives the min value, p=100 gives the max value and p=50 gives the median value.

+

If the input percentile does not match an element of t exactly the result is the linear interpolation between the neighbors.

+

t does not need to be sorted, because percentile sorts a copy of the data itself. If isSorted is true however, no sorting is done.

+ +   Source +Edit + +
+
+ +
+
+
+
proc product[T](arg: Tensor[T]): T
+
+ + Compute the product of all elements +   Source +Edit + +
+
+
+
proc product[T](arg: Tensor[T]; axis: int): Tensor[T] {.noinit.}
+
+ + Compute the product along an axis +   Source +Edit + +
+
+ +
+
+
+
proc std[T: SomeFloat](arg: Tensor[T]): T {.inline.}
+
+ + Compute the standard deviation of all elements The normalization is by the (n-1), like in the formal definition +   Source +Edit + +
+
+
+
proc std[T: SomeFloat](arg: Tensor[T]; axis: int): Tensor[T] {.noinit, inline.}
+
+ + Compute the standard deviation of all elements The normalization is by the (n-1), like in the formal definition +   Source +Edit + +
+
+ +
+
+
+
proc sum[T](arg: Tensor[T]): T
+
+ + Compute the sum of all elements +   Source +Edit + +
+
+
+
proc sum[T](arg: Tensor[T]; axis: int): Tensor[T] {.noinit.}
+
+ + Compute the sum of all elements along an axis +   Source +Edit + +
+
+ +
+
+
+
proc unwrap_period[T: SomeNumber](t: Tensor[T]; discont: T = -1; axis = -1;
+                                  period: T = default(T)): Tensor[T] {.noinit.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc variance[T: SomeFloat](arg: Tensor[T]): T
+
+ + Compute the sample variance of all elements The normalization is by (n-1), also known as Bessel's correction, which partially correct the bias of estimating a population variance from a sample of this population. +   Source +Edit + +
+
+
+
proc variance[T: SomeFloat](arg: Tensor[T]; axis: int): Tensor[T] {.noinit.}
+
+ + Compute the variance of all elements The normalization is by the (n-1), like in the formal definition +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/aggregate.idx b/aggregate.idx new file mode 100644 index 000000000..a63c562b7 --- /dev/null +++ b/aggregate.idx @@ -0,0 +1,40 @@ +nimTitle aggregate aggregate.html module src/arraymancer/tensor/aggregate 0 +nim sum aggregate.html#sum,Tensor[T] proc sum[T](arg: Tensor[T]): T 36 +nim sum aggregate.html#sum,Tensor[T],int proc sum[T](arg: Tensor[T]; axis: int): Tensor[T] 41 +nim product aggregate.html#product,Tensor[T] proc product[T](arg: Tensor[T]): T 47 +nim product aggregate.html#product,Tensor[T],int proc product[T](arg: Tensor[T]; axis: int): Tensor[T] 52 +nim mean aggregate.html#mean,Tensor[T: SomeInteger] proc mean[T: SomeInteger](arg: Tensor[T]): T 58 +nim mean aggregate.html#mean,Tensor[T: SomeInteger],int proc mean[T: SomeInteger](arg: Tensor[T]; axis: int): Tensor[T] 64 +nim mean aggregate.html#mean,Tensor[T: SomeFloat] proc mean[T: SomeFloat](arg: Tensor[T]): T 71 +nim mean aggregate.html#mean,Tensor[T: Complex[system.float32] or Complex[system.float64]] proc mean[T: Complex[float32] or Complex[float64]](arg: Tensor[T]): T 75 +nim mean aggregate.html#mean,Tensor[T: SomeFloat],int proc mean[T: SomeFloat](arg: Tensor[T]; axis: int): Tensor[T] 80 +nim mean aggregate.html#mean,Tensor[T: Complex[system.float32] or Complex[system.float64]],int proc mean[T: Complex[float32] or Complex[float64]](arg: Tensor[T]; axis: int): Tensor[\n T] 85 +nim min aggregate.html#min,Tensor[T] proc min[T](arg: Tensor[T]): T 91 +nim min aggregate.html#min,Tensor[T],int proc min[T](arg: Tensor[T]; axis: int): Tensor[T] 96 +nim max aggregate.html#max,Tensor[T] proc max[T](arg: Tensor[T]): T 103 +nim max aggregate.html#max,Tensor[T],int proc max[T](arg: Tensor[T]; axis: int): Tensor[T] 108 +nim variance aggregate.html#variance,Tensor[T: SomeFloat] proc variance[T: SomeFloat](arg: Tensor[T]): T 115 +nim variance aggregate.html#variance,Tensor[T: SomeFloat],int proc variance[T: SomeFloat](arg: Tensor[T]; axis: int): Tensor[T] 131 +nim std aggregate.html#std,Tensor[T: 
SomeFloat] proc std[T: SomeFloat](arg: Tensor[T]): T 148 +nim std aggregate.html#std,Tensor[T: SomeFloat],int proc std[T: SomeFloat](arg: Tensor[T]; axis: int): Tensor[T] 153 +nim argmax_max aggregate.html#argmax_max,Tensor[T: SomeNumber],int proc argmax_max[T: SomeNumber](arg: Tensor[T]; axis: int): tuple[\n indices: Tensor[int], maxes: Tensor[T]] 159 +nim argmax aggregate.html#argmax,Tensor[T],int proc argmax[T](arg: Tensor[T]; axis: int): Tensor[int] 195 +nim argmin_min aggregate.html#argmin_min,Tensor[T: SomeNumber],int proc argmin_min[T: SomeNumber](arg: Tensor[T]; axis: int): tuple[\n indices: Tensor[int], mins: Tensor[T]] 216 +nim argmin aggregate.html#argmin,Tensor[T],int proc argmin[T](arg: Tensor[T]; axis: int): Tensor[int] 252 +nim percentile aggregate.html#percentile,Tensor[T],int proc percentile[T](arg: Tensor[T]; p: int; isSorted = false): float 273 +nim median aggregate.html#median,Tensor[T] proc median[T](arg: Tensor[T]; isSorted = false): float 300 +nim iqr aggregate.html#iqr,Tensor[T] proc iqr[T](arg: Tensor[T]): float 304 +nim cumsum aggregate.html#cumsum,Tensor[T],int proc cumsum[T](arg: Tensor[T]; axis: int = 0): Tensor[T] 313 +nim cumprod aggregate.html#cumprod,Tensor[T],int proc cumprod[T](arg: Tensor[T]; axis: int = 0): Tensor[T] 329 +nim diff_discrete aggregate.html#diff_discrete,Tensor[T],int,int proc diff_discrete[T](arg: Tensor[T]; n = 1; axis: int = -1): Tensor[T] 345 +nim unwrap_period aggregate.html#unwrap_period,Tensor[T: SomeNumber],T,int,T proc unwrap_period[T: SomeNumber](t: Tensor[T]; discont: T = -1; axis = -1;\n period: T = default(T)): Tensor[T] 386 +nim nonzero aggregate.html#nonzero,Tensor[T] proc nonzero[T](arg: Tensor[T]): Tensor[int] 463 +nim all aggregate.html#all,Tensor[T] proc all[T](t: Tensor[T]): bool 525 +nim any aggregate.html#any,Tensor[T] proc any[T](t: Tensor[T]): bool 543 +nimgrp mean aggregate.html#mean-procs-all proc 58 +nimgrp max aggregate.html#max-procs-all proc 103 +nimgrp product 
aggregate.html#product-procs-all proc 47 +nimgrp std aggregate.html#std-procs-all proc 148 +nimgrp variance aggregate.html#variance-procs-all proc 115 +nimgrp min aggregate.html#min-procs-all proc 91 +nimgrp sum aggregate.html#sum-procs-all proc 36 diff --git a/algebra.html b/algebra.html new file mode 100644 index 000000000..78d4f92fc --- /dev/null +++ b/algebra.html @@ -0,0 +1,469 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra/algebra + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra/algebra + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/linear_algebra/algebra

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Procs

+
+
+
+
proc pinv[T: Complex32 | Complex64](A: Tensor[T]; rcond = 1e-15): Tensor[T]
+
+ +

Compute the (Moore-Penrose) pseudo-inverse of a matrix.

+

Calculate the generalized inverse of a matrix using its singular-value decomposition (SVD) and including all large singular values.

+

Input:

+
  • A: the rank-2 tensor to invert
  • +
  • rcond: Cutoff ratio for small singular values. Singular values less than or equal to rcond * largest_singular_value are set to zero.
  • +
+ +   Source +Edit + +
+
+
+
proc pinv[T: SomeFloat](A: Tensor[T]; rcond = 1e-15): Tensor[T]
+
+ +

Compute the (Moore-Penrose) pseudo-inverse of a matrix.

+

Calculate the generalized inverse of a matrix using its singular-value decomposition (SVD) and including all large singular values.

+

Input:

+
  • A: the rank-2 tensor to invert
  • +
  • rcond: Cutoff ratio for small singular values. Singular values less than or equal to rcond * largest_singular_value are set to zero.
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/algebra.idx b/algebra.idx new file mode 100644 index 000000000..748991a35 --- /dev/null +++ b/algebra.idx @@ -0,0 +1,4 @@ +nimTitle algebra algebra.html module src/arraymancer/linear_algebra/algebra 0 +nim pinv algebra.html#pinv,Tensor[T: SomeFloat],float proc pinv[T: SomeFloat](A: Tensor[T]; rcond = 1e-15): Tensor[T] 9 +nim pinv algebra.html#pinv,Tensor[T: Complex32 or Complex64],float proc pinv[T: Complex32 | Complex64](A: Tensor[T]; rcond = 1e-15): Tensor[T] 30 +nimgrp pinv algebra.html#pinv-procs-all proc 9 diff --git a/algorithms.html b/algorithms.html new file mode 100644 index 000000000..87722d3ec --- /dev/null +++ b/algorithms.html @@ -0,0 +1,489 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/algorithms + + + + + + + + + +Arraymancer - src/arraymancer/tensor/algorithms + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/algorithms

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc argsort[T](t: Tensor[T]; order = SortOrder.Ascending; toCopy = false): Tensor[
+    int]
+
+ +

Returns the indices which would sort t. Useful to apply the same sorting to multiple tensors based on the order of the tensor t.

+

If toCopy is true the input tensor is cloned. Else it is already sorted.

+ +   Source +Edit + +
+
+ +
+
+
+
proc sort[T](t: var Tensor[T]; order = SortOrder.Ascending)
+
+ +

Sorts the given tensor inplace. For the time being this is only supported for 1D tensors!

+

Sorts the raw underlying data!

+ +   Source +Edit + +
+
+ +
+
+
+
proc sorted[T](t: Tensor[T]; order = SortOrder.Ascending): Tensor[T]
+
+ + Returns a sorted version of the given tensor t. Also only supported for 1D tensors for the time being! +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/algorithms.idx b/algorithms.idx new file mode 100644 index 000000000..3f85d6f34 --- /dev/null +++ b/algorithms.idx @@ -0,0 +1,4 @@ +nimTitle algorithms algorithms.html module src/arraymancer/tensor/algorithms 0 +nim sort algorithms.html#sort,Tensor[T] proc sort[T](t: var Tensor[T]; order = SortOrder.Ascending) 22 +nim sorted algorithms.html#sorted,Tensor[T] proc sorted[T](t: Tensor[T]; order = SortOrder.Ascending): Tensor[T] 32 +nim argsort algorithms.html#argsort,Tensor[T] proc argsort[T](t: Tensor[T]; order = SortOrder.Ascending; toCopy = false): Tensor[\n int] 38 diff --git a/align_unroller.html b/align_unroller.html new file mode 100644 index 000000000..d46e805f0 --- /dev/null +++ b/align_unroller.html @@ -0,0 +1,454 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/private/align_unroller + + + + + + + + + +Arraymancer - src/arraymancer/laser/private/align_unroller + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/private/align_unroller

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Procs

+
+
+
+
func round_step_down(x: Natural; step: static Natural): int {.inline.}
+
+ + Round the input to the previous multiple of "step" +   Source +Edit + +
+
+ +
+
+
+
func round_step_up(x: Natural; step: static Natural): int {.inline.}
+
+ + Round the input to the next multiple of "step" +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/align_unroller.idx b/align_unroller.idx new file mode 100644 index 000000000..b0059b8eb --- /dev/null +++ b/align_unroller.idx @@ -0,0 +1,3 @@ +nimTitle align_unroller align_unroller.html module src/arraymancer/laser/private/align_unroller 0 +nim round_step_down align_unroller.html#round_step_down,Natural,staticNatural proc round_step_down(x: Natural; step: static Natural): int 6 +nim round_step_up align_unroller.html#round_step_up,Natural,staticNatural proc round_step_up(x: Natural; step: static Natural): int 14 diff --git a/ast_utils.html b/ast_utils.html new file mode 100644 index 000000000..9d3c1e1be --- /dev/null +++ b/ast_utils.html @@ -0,0 +1,639 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/private/ast_utils + + + + + + + + + +Arraymancer - src/arraymancer/private/ast_utils + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/private/ast_utils

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Procs

+
+
+
+
proc hasType(x: NimNode; t: static[string]): bool {.compileTime.}
+
+ + Compile-time type checking +   Source +Edit + +
+
+ +
+
+
+
proc isAllInt(slice_args: NimNode): bool {.compileTime, ...raises: [], tags: [],
+    forbids: [].}
+
+ + Compile-time type checking +   Source +Edit + +
+
+ +
+
+
+
proc isBool(x: NimNode): bool {.compileTime, ...raises: [], tags: [], forbids: [].}
+
+ + Compile-time type checking +   Source +Edit + +
+
+ +
+
+
+
proc isInt(x: NimNode): bool {.compileTime, ...raises: [], tags: [], forbids: [].}
+
+ + Compile-time type checking +   Source +Edit + +
+
+ +
+
+
+
proc isOpenArray(x: NimNode): bool {.compileTime, ...raises: [], tags: [],
+                                     forbids: [].}
+
+ + Compile-time type checking +   Source +Edit + +
+
+ +
+
+
+
proc pop(tree: var NimNode): NimNode {.compileTime, ...raises: [], tags: [],
+                                       forbids: [].}
+
+ + varargsuntyped consumes all arguments so the actual value should be popped https://github.com/nim-lang/Nim/issues/5855 +   Source +Edit + +
+
+ +
+
+
+
proc replaceNodes(ast: NimNode; replacements: NimNode; to_replace: NimNode): NimNode {.
+    ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc replaceSymsByIdents(ast: NimNode): NimNode {....raises: [], tags: [],
+    forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+
+

Macros

+
+
+
+
macro getSubType(TT: typedesc): untyped
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template letsGoDeeper()
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/ast_utils.idx b/ast_utils.idx new file mode 100644 index 000000000..ad6985b48 --- /dev/null +++ b/ast_utils.idx @@ -0,0 +1,11 @@ +nimTitle ast_utils ast_utils.html module src/arraymancer/private/ast_utils 0 +nim hasType ast_utils.html#hasType,NimNode,static[string] proc hasType(x: NimNode; t: static[string]): bool 19 +nim isInt ast_utils.html#isInt,NimNode proc isInt(x: NimNode): bool 23 +nim isBool ast_utils.html#isBool,NimNode proc isBool(x: NimNode): bool 27 +nim isOpenArray ast_utils.html#isOpenArray,NimNode proc isOpenArray(x: NimNode): bool 31 +nim isAllInt ast_utils.html#isAllInt,NimNode proc isAllInt(slice_args: NimNode): bool 36 +nim pop ast_utils.html#pop,NimNode proc pop(tree: var NimNode): NimNode 44 +nim getSubType ast_utils.html#getSubType.m,typedesc macro getSubType(TT: typedesc): untyped 50 +nim letsGoDeeper ast_utils.html#letsGoDeeper.t template letsGoDeeper() 54 +nim replaceSymsByIdents ast_utils.html#replaceSymsByIdents,NimNode proc replaceSymsByIdents(ast: NimNode): NimNode 60 +nim replaceNodes ast_utils.html#replaceNodes,NimNode,NimNode,NimNode proc replaceNodes(ast: NimNode; replacements: NimNode; to_replace: NimNode): NimNode 76 diff --git a/autograd.html b/autograd.html new file mode 100644 index 000000000..f3061584c --- /dev/null +++ b/autograd.html @@ -0,0 +1,420 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/autograd + + + + + + + + + +Arraymancer - src/arraymancer/autograd + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/autograd

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+ +
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/autograd.idx b/autograd.idx new file mode 100644 index 000000000..6807de1c7 --- /dev/null +++ b/autograd.idx @@ -0,0 +1 @@ +nimTitle autograd autograd.html module src/arraymancer/autograd 0 diff --git a/autograd_common.html b/autograd_common.html new file mode 100644 index 000000000..b10869b8d --- /dev/null +++ b/autograd_common.html @@ -0,0 +1,752 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/autograd/autograd_common + + + + + + + + + +Arraymancer - src/arraymancer/autograd/autograd_common + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/autograd/autograd_common

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Types

+
+
+
Backward[TT] = proc (self: Gate[TT]; payload: Payload[TT]): SmallDiffs[TT] {.
+    nimcall.}
+
+ + โš ๏ธ Warning: make sure the identifier is not overloaded https://github.com/nim-lang/Nim/issues/9997 +   Source +Edit + +
+
+
+
Context[TT] = ref object
+  
+
+ + An autograd context is a record of operations or layers. It holds the following fields:
  • nodes: This records the list of operations(Node) applied in the context
  • +
  • no_grad: This disable tracing the list of operations altogether. This is useful to save memory when you don't need the gradient (for validation or prediction for example)
  • +
+

A context is also called a tape or a Wengert list.

+

Note: backpropagation empties the list of operations.

+ +   Source +Edit + +
+
+
+
Gate[TT] = ref object of RootObj
+  
+
+ + Base operator or layer. You can describe your custom operations or layers by inheriting from Gate and add a forward and optionally a backward method. Each operations should set the number of gradients produced during backpropagation. Additional fields specific to the operations like weights or inputs cache should be added too. +   Source +Edit + +
+
+
+
Payload[TT] = object
+  case kind*: PayloadKind
+  of pkVar:
+    variable*: Variable[TT]
+  of pkSeq:
+    sequence*: seq[Variable[TT]]
+  
+
+ + +   Source +Edit + +
+
+
+
PayloadKind = enum
+  pkVar, pkSeq
+
+ + +   Source +Edit + +
+
+
+
SmallDiffs[TT] = seq[TT]
+
+ + +   Source +Edit + +
+
+
+
Variable[TT] = ref object
+  context*: Context[TT]
+  value*: TT
+  grad*: TT
+  requires_grad*: bool
+
+
+ + A variable is a wrapper for Tensors that tracks operations applied to it. It consists of:
  • A weak reference to a record of operations context
  • +
  • The tensor being tracked value
  • +
  • The gradient of the tensor grad
  • +
  • a flag that indicates if gradient is needed
  • +
+ +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc backprop[TT](v: Variable[TT])
+
+ + Differentiate the chain of operations w.r.t to this variable. Context will be reset +   Source +Edit + +
+
+ +
+
+
+
func check_ctx(a, b: Variable) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func is_grad_needed(v: Variable): bool {.inline.}
+
+ + Depending on the input variable and its context no_grad_mode, returns true if gradient computation is needed and false otherwise +   Source +Edit + +
+
+ +
+
+
+
func newContext(TT: typedesc): Context[TT]
+
+ + Initialize a context +   Source +Edit + +
+
+ +
+
+
+
func newDiffs[TT](num: Natural): SmallDiffs[TT] {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func newParents[TT](num: Natural): Parents[TT] {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func register_node[TT](name: static string; gate: Gate[TT];
+                       backward: Backward[TT];
+                       result: Variable[TT] or seq[Variable[TT]];
+                       parents: varargs[Variable[TT]])
+
+ + Add an operation / gate as a new node in the computation graph +   Source +Edit + +
+
+ +
+
+
+
proc variable[TT](ctx: Context[TT]; value: TT; requires_grad = false): Variable[
+    TT]
+
+ + Wrap a variable to the context T is a Tensor[T, CudaTensorT or scalar T +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template no_grad_mode(ctx: Context; body: untyped): untyped
+
+ +

Within this block, the context will not track the operations applied to each Variable.

+

This should be used for validation or prediction to optimize memory.

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/autograd_common.idx b/autograd_common.idx new file mode 100644 index 000000000..d6e4551ff --- /dev/null +++ b/autograd_common.idx @@ -0,0 +1,19 @@ +nimTitle autograd_common autograd_common.html module src/arraymancer/autograd/autograd_common 0 +nim Context autograd_common.html#Context type Context 43 +nim Variable autograd_common.html#Variable type Variable 57 +nim Gate autograd_common.html#Gate type Gate 70 +nim pkVar autograd_common.html#pkVar PayloadKind.pkVar 76 +nim pkSeq autograd_common.html#pkSeq PayloadKind.pkSeq 76 +nim PayloadKind autograd_common.html#PayloadKind enum PayloadKind 76 +nim Payload autograd_common.html#Payload object Payload 78 +nim Backward autograd_common.html#Backward type Backward 83 +nim SmallDiffs autograd_common.html#SmallDiffs type SmallDiffs 102 +nim newContext autograd_common.html#newContext,typedesc proc newContext(TT: typedesc): Context[TT] 145 +nim variable autograd_common.html#variable,Context[TT],TT proc variable[TT](ctx: Context[TT]; value: TT; requires_grad = false): Variable[TT] 150 +nim register_node autograd_common.html#register_node,staticstring,Gate[TT],Backward[TT],,varargs[Variable[TT]] proc register_node[TT](name: static string; gate: Gate[TT]; backward: Backward[TT];\n result: Variable[TT] or seq[Variable[TT]];\n parents: varargs[Variable[TT]]) 174 +nim no_grad_mode autograd_common.html#no_grad_mode.t,Context,untyped template no_grad_mode(ctx: Context; body: untyped): untyped 197 +nim is_grad_needed autograd_common.html#is_grad_needed,Variable proc is_grad_needed(v: Variable): bool 209 +nim check_ctx autograd_common.html#check_ctx,Variable,Variable proc check_ctx(a, b: Variable) 214 +nim backprop autograd_common.html#backprop,Variable[TT] proc backprop[TT](v: Variable[TT]) 218 +nim newParents autograd_common.html#newParents,Natural proc newParents[TT](num: Natural): Parents[TT] 241 +nim newDiffs autograd_common.html#newDiffs,Natural proc newDiffs[TT](num: Natural): SmallDiffs[TT] 244 diff --git 
a/auxiliary_blas.html b/auxiliary_blas.html new file mode 100644 index 000000000..92c285001 --- /dev/null +++ b/auxiliary_blas.html @@ -0,0 +1,476 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra/helpers/auxiliary_blas + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra/helpers/auxiliary_blas + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/linear_algebra/helpers/auxiliary_blas

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
SyrkKind = enum
+  AAt = "A * A.transpose", AtA = "A.transpose * A"
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc syrk[T: SomeFloat](alpha: T; A: Tensor[T]; mul_order: static SyrkKind;
+                        beta: T; C: var Tensor[T]; uplo: static char)
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/auxiliary_blas.idx b/auxiliary_blas.idx new file mode 100644 index 000000000..b2a7e3f75 --- /dev/null +++ b/auxiliary_blas.idx @@ -0,0 +1,5 @@ +nimTitle auxiliary_blas auxiliary_blas.html module src/arraymancer/linear_algebra/helpers/auxiliary_blas 0 +nim AAt auxiliary_blas.html#AAt SyrkKind.AAt 14 +nim AtA auxiliary_blas.html#AtA SyrkKind.AtA 14 +nim SyrkKind auxiliary_blas.html#SyrkKind enum SyrkKind 14 +nim syrk auxiliary_blas.html#syrk,T,Tensor[T: SomeFloat],staticSyrkKind,T,Tensor[T: SomeFloat],staticchar proc syrk[T: SomeFloat](alpha: T; A: Tensor[T]; mul_order: static SyrkKind; beta: T;\n C: var Tensor[T]; uplo: static char) 18 diff --git a/auxiliary_lapack.html b/auxiliary_lapack.html new file mode 100644 index 000000000..2d93a69e9 --- /dev/null +++ b/auxiliary_lapack.html @@ -0,0 +1,506 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra/helpers/auxiliary_lapack + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra/helpers/auxiliary_lapack + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/linear_algebra/helpers/auxiliary_lapack

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc laswp(a: var Tensor; pivot_indices: openArray[int32];
+           pivot_from: static int32)
+
+ +

Apply A = P * A where P is a permutation matrix, represented pivot_indices of rows.

+

A is a matrix of shape MxN. A is permuted in-place

+ +   Source +Edit + +
+
+ +
+
+
+
proc orgqr[T: SomeFloat](rv_q: var Tensor[T]; tau: openArray[T];
+                         scratchspace: var seq[T])
+
+ +

Wrapper for LAPACK orgqr routine Generates the orthonormal Q matrix from elementary Householder reflectors

+

Inputs must come from a previous geqrf

+
  • rv_q: contains r_v (reflector vector) on input. A column-major vector factors of elementary reflectors
  • +
  • tau: Scalar factors of elementary reflectors
  • +
+

Outputs

+
  • rv_q: overwritten by Q
  • +
+

Note that while rv_q is MxN on input on output the shape is M x min(M,N)

+

โš ๏ธ: Output must be sliced by M, min(M,N) if M>N as the rest contains garbage

+

Spec: https://www.nag.co.uk/numeric/fl/nagdoc_fl24/pdf/f08/f08aff.pdf API: http://www.netlib.org/lapack/explore-html/da/dba/group__double_o_t_h_e_rcomputational_ga14b45f7374dc8654073aa06879c1c459.html

+ +   Source +Edit + +
+
+ +
+
+
+
proc ormqr[T: SomeFloat](C: var Tensor[T]; Q: Tensor[T]; tau: openArray[T];
+                         side, trans: static char; scratchspace: var seq[T])
+
+ +

Wrapper for LAPACK ormqr routine Multiply the orthonormal Q matrix from geqrf with another matrix C without materializing Q

+

C is a matrix of shae M, N and will be overwritten by

+
SIDE = 'L'     SIDE = 'R'
+

TRANS = 'N': Q * C C * Q TRANS = 'T': QT * C C * QT

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/auxiliary_lapack.idx b/auxiliary_lapack.idx new file mode 100644 index 000000000..bcd1a09fd --- /dev/null +++ b/auxiliary_lapack.idx @@ -0,0 +1,4 @@ +nimTitle auxiliary_lapack auxiliary_lapack.html module src/arraymancer/linear_algebra/helpers/auxiliary_lapack 0 +nim laswp auxiliary_lapack.html#laswp,Tensor,openArray[int32],staticint32 proc laswp(a: var Tensor; pivot_indices: openArray[int32]; pivot_from: static int32) 16 +nim orgqr auxiliary_lapack.html#orgqr,Tensor[T: SomeFloat],openArray[T],seq[T] proc orgqr[T: SomeFloat](rv_q: var Tensor[T]; tau: openArray[T];\n scratchspace: var seq[T]) 46 +nim ormqr auxiliary_lapack.html#ormqr,Tensor[T: SomeFloat],Tensor[T: SomeFloat],openArray[T],staticchar,staticchar,seq[T] proc ormqr[T: SomeFloat](C: var Tensor[T]; Q: Tensor[T]; tau: openArray[T];\n side, trans: static char; scratchspace: var seq[T]) 101 diff --git a/blas_l3_gemm.html b/blas_l3_gemm.html new file mode 100644 index 000000000..fb1b1950e --- /dev/null +++ b/blas_l3_gemm.html @@ -0,0 +1,455 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/fallback/legacy/blas_l3_gemm + + + + + + + + + +Arraymancer - src/arraymancer/tensor/fallback/legacy/blas_l3_gemm + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/fallback/legacy/blas_l3_gemm

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+

See blis config: https://github.com/flame/blis/blob/master/config/haswell/bli_kernel.h

+ +
+

Procs

+
+
+
+
proc gemm_nn_fallback[T](m, n, k: int; alpha: T; A: seq[T]; offA: int;
+                         incRowA, incColA: int; B: seq[T]; offB: int;
+                         incRowB, incColB: int; beta: T; C: var seq[T];
+                         offC: int; incRowC, incColc: int)
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/blas_l3_gemm.idx b/blas_l3_gemm.idx new file mode 100644 index 000000000..633f07a78 --- /dev/null +++ b/blas_l3_gemm.idx @@ -0,0 +1,3 @@ +nimTitle blas_l3_gemm blas_l3_gemm.html module src/arraymancer/tensor/fallback/legacy/blas_l3_gemm 0 +nim gemm_nn_fallback blas_l3_gemm.html#gemm_nn_fallback,int,int,int,T,seq[T],int,int,int,seq[T],int,int,int,T,seq[T],int,int,int proc gemm_nn_fallback[T](m, n, k: int; alpha: T; A: seq[T]; offA: int;\n incRowA, incColA: int; B: seq[T]; offB: int;\n incRowB, incColB: int; beta: T; C: var seq[T]; offC: int;\n incRowC, incColc: int) 62 +heading See blis config: https://github.com/flame/blis/blob/master/config/haswell/bli_kernel.h blas_l3_gemm.html#see-blis-configcolon-httpscolonslashslashgithubdotcomslashflameslashblisslashblobslashmasterslashconfigslashhaswellslashbli-kerneldoth See blis config: https://github.com/flame/blis/blob/master/config/haswell/bli_kernel.h 0 diff --git a/blis.html b/blis.html new file mode 100644 index 000000000..29e941f94 --- /dev/null +++ b/blis.html @@ -0,0 +1,399 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/backend/blis + + + + + + + + + +Arraymancer - src/arraymancer/tensor/backend/blis + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/backend/blis

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ +
+
+   Source +Edit + +
+ +

+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/blis.idx b/blis.idx new file mode 100644 index 000000000..1731c8c7f --- /dev/null +++ b/blis.idx @@ -0,0 +1 @@ +nimTitle blis blis.html module src/arraymancer/tensor/backend/blis 0 diff --git a/common_error_functions.html b/common_error_functions.html new file mode 100644 index 000000000..683c81981 --- /dev/null +++ b/common_error_functions.html @@ -0,0 +1,598 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/ml/metrics/common_error_functions + + + + + + + + + +Arraymancer - src/arraymancer/ml/metrics/common_error_functions + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/ml/metrics/common_error_functions

+
+ +
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Procs

+
+
+
+
proc absolute_error[T: SomeFloat](y, y_true: T | Complex[T]): T {.inline.}
+
+ + Absolute error for a single value, |y_true - y| +   Source +Edit + +
+
+
+
proc absolute_error[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): Tensor[
+    T] {.noinit.}
+
+ + Element-wise absolute error for a tensor +   Source +Edit + +
+
+ +
+
+
+
proc mean_absolute_error[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): T
+
+ + Also known as L1 loss, absolute error between elements: sum(|y_true - y|)/m where m is the number of elements +   Source +Edit + +
+
+ +
+
+
+
proc mean_relative_error[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): T
+
+ + Mean relative error for Tensor, mean of the element-wise |y_true - y|/max(|y_true|, |y|) Normally the relative error is defined as |y_true - y| / |y_true|, but here max is used to make it symmetric and to prevent dividing by zero, guaranteed to return zero in the case when both values are zero. +   Source +Edit + +
+
+ +
+
+
+
proc mean_squared_error[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): T
+
+ + Also known as MSE or L2 loss, mean squared error between elements: sum(|y_true - y| ^2)/m where m is the number of elements +   Source +Edit + +
+
+ +
+
+
+
proc relative_error[T: SomeFloat](y, y_true: T | Complex[T]): T {.inline.}
+
+ + Relative error, |y_true - y|/max(|y_true|, |y|) Normally the relative error is defined as |y_true - y| / |y_true|, but here max is used to make it symmetric and to prevent dividing by zero, guaranteed to return zero in the case when both values are zero. +   Source +Edit + +
+
+
+
proc relative_error[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): Tensor[
+    T] {.noinit.}
+
+ + Relative error for Tensor, element-wise |y_true - x|/max(|y_true|, |x|) Normally the relative error is defined as |y_true - x| / |y_true|, but here max is used to make it symmetric and to prevent dividing by zero, guaranteed to return zero in the case when both values are zero. +   Source +Edit + +
+
+ +
+
+
+
proc squared_error[T: SomeFloat](y, y_true: Complex[T]): T {.inline.}
+
+ + Squared error for a single value, |y_true - y| ^2 +   Source +Edit + +
+
+
+
proc squared_error[T: SomeFloat](y, y_true: T): T {.inline.}
+
+ + Squared error for a single value, |y_true - y| ^2 +   Source +Edit + +
+
+
+
proc squared_error[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): Tensor[
+    T] {.noinit.}
+
+ + Element-wise squared error for a tensor, |y_true - y| ^2 +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/common_error_functions.idx b/common_error_functions.idx new file mode 100644 index 000000000..419894fe1 --- /dev/null +++ b/common_error_functions.idx @@ -0,0 +1,14 @@ +nimTitle common_error_functions common_error_functions.html module src/arraymancer/ml/metrics/common_error_functions 0 +nim squared_error common_error_functions.html#squared_error,T,T proc squared_error[T: SomeFloat](y, y_true: T): T 19 +nim squared_error common_error_functions.html#squared_error,Complex[T: SomeFloat],Complex[T: SomeFloat] proc squared_error[T: SomeFloat](y, y_true: Complex[T]): T 23 +nim squared_error common_error_functions.html#squared_error,, proc squared_error[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): Tensor[T] 27 +nim mean_squared_error common_error_functions.html#mean_squared_error,, proc mean_squared_error[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): T 31 +nim relative_error common_error_functions.html#relative_error,, proc relative_error[T: SomeFloat](y, y_true: T | Complex[T]): T 39 +nim relative_error common_error_functions.html#relative_error,,_2 proc relative_error[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): Tensor[\n T] 50 +nim mean_relative_error common_error_functions.html#mean_relative_error,, proc mean_relative_error[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): T 57 +nim absolute_error common_error_functions.html#absolute_error,, proc absolute_error[T: SomeFloat](y, y_true: T | Complex[T]): T 67 +nim absolute_error common_error_functions.html#absolute_error,,_2 proc absolute_error[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): Tensor[\n T] 71 +nim mean_absolute_error common_error_functions.html#mean_absolute_error,, proc mean_absolute_error[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): T 75 +nimgrp absoluteerror common_error_functions.html#absolute_error-procs-all proc 67 +nimgrp squarederror common_error_functions.html#squared_error-procs-all proc 19 +nimgrp 
relativeerror common_error_functions.html#relative_error-procs-all proc 39 diff --git a/compiler_optim_hints.html b/compiler_optim_hints.html new file mode 100644 index 000000000..08d88b7fc --- /dev/null +++ b/compiler_optim_hints.html @@ -0,0 +1,555 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/compiler_optim_hints + + + + + + + + + +Arraymancer - src/arraymancer/laser/compiler_optim_hints + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/compiler_optim_hints

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Types

+
+
+
PrefetchLocality {.size: 4.} = enum
+  NoTemporalLocality = 0, LowTemporalLocality = 1, ModerateTemporalLocality = 2,
+  HighTemporalLocality = 3
+
+ + +   Source +Edit + +
+
+
+
PrefetchRW {.size: 4.} = enum
+  Read = 0, Write = 1
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Consts

+
+
+
LASER_MEM_ALIGN {.intdefine.} = 64
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Templates

+
+
+
+
template assume_aligned[T](data: ptr T; alignment: static int = LASER_MEM_ALIGN): ptr T
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+
template withCompilerOptimHints()
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/compiler_optim_hints.idx b/compiler_optim_hints.idx new file mode 100644 index 000000000..03339c628 --- /dev/null +++ b/compiler_optim_hints.idx @@ -0,0 +1,7 @@ +nimTitle compiler_optim_hints compiler_optim_hints.html module src/arraymancer/laser/compiler_optim_hints 0 +nim LASER_MEM_ALIGN compiler_optim_hints.html#LASER_MEM_ALIGN const LASER_MEM_ALIGN 9 +nim withCompilerOptimHints compiler_optim_hints.html#withCompilerOptimHints.t template withCompilerOptimHints() 14 +nim PrefetchRW compiler_optim_hints.html#PrefetchRW enum PrefetchRW 34 +nim PrefetchLocality compiler_optim_hints.html#PrefetchLocality enum PrefetchLocality 37 +nim assume_aligned compiler_optim_hints.html#assume_aligned.t,ptr.T,staticint template assume_aligned[T](data: ptr T; alignment: static int = LASER_MEM_ALIGN): ptr T 56 +nim prefetch compiler_optim_hints.html#prefetch.t,ptr.,staticPrefetchRW,staticPrefetchLocality template prefetch[T](data: ptr (T or UncheckedArray[T]); rw: static PrefetchRW = Read;\n locality: static PrefetchLocality = HighTemporalLocality) 64 diff --git a/complex.html b/complex.html new file mode 100644 index 000000000..8ef1e4c93 --- /dev/null +++ b/complex.html @@ -0,0 +1,568 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/complex + + + + + + + + + +Arraymancer - src/arraymancer/tensor/complex + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/complex

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc complex[T: SomeFloat](re: Tensor[T]; im: Tensor[T]): Tensor[Complex[T]] {.
+    inline, noinit.}
+
+ +

Create a new, complex Tensor by combining two real Tensors

+

The first input Tensor is copied into the real part of the output Tensor, while the second input Tensor is copied into the imaginary part

+ +   Source +Edit + +
+
+ +
+
+
+
proc conjugate[T: Complex32 | Complex64](A: Tensor[T]): Tensor[T]
+
+ + Return the element-wise complex conjugate of a tensor of complex numbers. The complex conjugate of a complex number is obtained by changing the sign of its imaginary part. +   Source +Edit + +
+
+ +
+
+
+
proc imag[T: SomeFloat](t: Tensor[Complex[T]]): Tensor[T] {.inline, noinit.}
+
+ + Get the imaginary part of a complex Tensor (as a float Tensor) +   Source +Edit + +
+
+ +
+
+
+
proc imag=[T: SomeFloat](t: var Tensor[Complex[T]]; val: T) {.inline.}
+
+ + Set the imaginary part of all the items of a complex Tensor to a certain floating point value +   Source +Edit + +
+
+
+
proc imag=[T: SomeFloat](t: var Tensor[Complex[T]]; val: Tensor[T]) {.inline.}
+
+ + Copy a real Tensor into the imaginary part of an existing complex Tensor The source and target Tensor sizes must match, but the shapes might differ +   Source +Edit + +
+
+ +
+
+
+
proc real[T: SomeFloat](t: Tensor[Complex[T]]): Tensor[T] {.inline, noinit.}
+
+ + Get the real part of a complex Tensor (as a float Tensor) +   Source +Edit + +
+
+ +
+
+
+
proc real=[T: SomeFloat](t: var Tensor[Complex[T]]; val: T) {.inline.}
+
+ + Set the real part of all the items of a complex Tensor to a certain floating point value +   Source +Edit + +
+
+
+
proc real=[T: SomeFloat](t: var Tensor[Complex[T]]; val: Tensor[T]) {.inline.}
+
+ + Copy a real Tensor into the real part of an existing complex Tensor The source and target Tensor sizes must match, but the shapes might differ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/complex.idx b/complex.idx new file mode 100644 index 000000000..6d24c0f3f --- /dev/null +++ b/complex.idx @@ -0,0 +1,11 @@ +nimTitle complex complex.html module src/arraymancer/tensor/complex 0 +nim complex complex.html#complex,Tensor[T: SomeFloat],Tensor[T: SomeFloat] proc complex[T: SomeFloat](re: Tensor[T]; im: Tensor[T]): Tensor[Complex[T]] 11 +nim real complex.html#real,Tensor[Complex[T: SomeFloat]] proc real[T: SomeFloat](t: Tensor[Complex[T]]): Tensor[T] 18 +nim real= complex.html#real=,Tensor[Complex[T: SomeFloat]],T proc real=[T: SomeFloat](t: var Tensor[Complex[T]]; val: T) 22 +nim real= complex.html#real=,Tensor[Complex[T: SomeFloat]],Tensor[T: SomeFloat] proc real=[T: SomeFloat](t: var Tensor[Complex[T]]; val: Tensor[T]) 27 +nim imag complex.html#imag,Tensor[Complex[T: SomeFloat]] proc imag[T: SomeFloat](t: Tensor[Complex[T]]): Tensor[T] 33 +nim imag= complex.html#imag=,Tensor[Complex[T: SomeFloat]],T proc imag=[T: SomeFloat](t: var Tensor[Complex[T]]; val: T) 37 +nim imag= complex.html#imag=,Tensor[Complex[T: SomeFloat]],Tensor[T: SomeFloat] proc imag=[T: SomeFloat](t: var Tensor[Complex[T]]; val: Tensor[T]) 42 +nim conjugate complex.html#conjugate,Tensor[T: Complex32 or Complex64] proc conjugate[T: Complex32 | Complex64](A: Tensor[T]): Tensor[T] 48 +nimgrp real= complex.html#real=-procs-all proc 22 +nimgrp imag= complex.html#imag=-procs-all proc 37 diff --git a/conv.html b/conv.html new file mode 100644 index 000000000..d3078b440 --- /dev/null +++ b/conv.html @@ -0,0 +1,522 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/fallback/conv + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/fallback/conv + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/fallback/conv

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc col2im[T](input: Tensor[T]; channels, height, width: int;
+               kernel_size: Size2D; padding: Size2D = (0, 0);
+               stride: Size2D = (1, 1)): Tensor[T]
+
+ + Convert blocks of an image from columns back to an image, collapsed pixels are summed +   Source +Edit + +
+
+ +
+
+
+
proc im2col[T](input: Tensor[T]; kernel_size: Size2D; padding: Size2D = (0, 0);
+               stride: Size2D = (1, 1); result: var Tensor[T])
+
+ + Convert blocks of an image into columns, useful for preprocessing an image before convolutions +   Source +Edit + +
+
+ +
+
+
+
proc im2colgemm_conv2d[T](input, kernel, bias: Tensor[T];
+                          padding: Size2D = (0, 0); stride: Size2D = (1, 1)): Tensor[
+    T]
+
+ + Compute cross-correlate for image with the given kernel weights +   Source +Edit + +
+
+ +
+
+
+
proc im2colgemm_conv2d_gradient[T](input, kernel: Tensor[T];
+                                   padding: Size2D = (0, 0);
+                                   stride: Size2D = (1, 1);
+                                   grad_output: Tensor[T];
+                                   grad_input, grad_weight: var Tensor[T])
+
+ + Computes gradients w.r.t input and weights for a 2D convolution +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/conv.idx b/conv.idx new file mode 100644 index 000000000..972786c93 --- /dev/null +++ b/conv.idx @@ -0,0 +1,5 @@ +nimTitle conv conv.html module src/arraymancer/nn_primitives/fallback/conv 0 +nim im2col conv.html#im2col,Tensor[T],Size2D,Size2D,Size2D,Tensor[T] proc im2col[T](input: Tensor[T]; kernel_size: Size2D; padding: Size2D = (0, 0);\n stride: Size2D = (1, 1); result: var Tensor[T]) 18 +nim col2im conv.html#col2im,Tensor[T],int,int,int,Size2D,Size2D,Size2D proc col2im[T](input: Tensor[T]; channels, height, width: int; kernel_size: Size2D;\n padding: Size2D = (0, 0); stride: Size2D = (1, 1)): Tensor[T] 56 +nim im2colgemm_conv2d conv.html#im2colgemm_conv2d,Tensor[T],Tensor[T],Tensor[T],Size2D,Size2D proc im2colgemm_conv2d[T](input, kernel, bias: Tensor[T]; padding: Size2D = (0, 0);\n stride: Size2D = (1, 1)): Tensor[T] 81 +nim im2colgemm_conv2d_gradient conv.html#im2colgemm_conv2d_gradient,Tensor[T],Tensor[T],Size2D,Size2D,Tensor[T],Tensor[T],Tensor[T] proc im2colgemm_conv2d_gradient[T](input, kernel: Tensor[T];\n padding: Size2D = (0, 0);\n stride: Size2D = (1, 1); grad_output: Tensor[T];\n grad_input, grad_weight: var Tensor[T]) 108 diff --git a/conv2D.html b/conv2D.html new file mode 100644 index 000000000..e64589260 --- /dev/null +++ b/conv2D.html @@ -0,0 +1,600 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/layers/conv2D + + + + + + + + + +Arraymancer - src/arraymancer/nn/layers/conv2D + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/layers/conv2D

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
Conv2D[T] = object
+  weight*: Variable[Tensor[T]]
+  bias*: Variable[Tensor[T]]
+  padding*: Size2D
+  stride*: Size2D
+  inShape*: seq[int]
+
+
+ + +   Source +Edit + +
+
+
+
Conv2DGate[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc conv2d[TT](input, weight: Variable[TT]; bias: Variable[TT] = nil;
+                padding: Size2D = (0, 0); stride: Size2D = (1, 1)): Variable[TT]
+
+ + Input:
- ``input`` Variable wrapping a 4D Tensor batch of images of the size [N,C_in,H_in,W_in]
+- ``weight`` Variable wrapping a 4D Tensor convolving kernel weights of the size [C_out,C_in,kH,kW]
+- ``bias`` Nil-able Variable wrapping a 3D Tensor bias of the size [C_out,1,1]
+- ``padding`` Size2D tuple with height and width of the padding
+- ``stride`` Size2D tuple with height and width of the stride
+

Returns:

+
- A variable with a convolved 4D Tensor of size [N,C_out,H_out,W_out], where
+   H_out = (H_in + (2*padding.height) - kH) / stride.height + 1
+   W_out = (W_in + (2*padding.width) - kW) / stride.width + 1
+

Future TODO: In the future the conv2D layer will allow different input layout

+

Warning โš :

+
  • Experimental, there is no tests yet for this layer
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc forward[T](self: Conv2D[T]; input: Variable[Tensor[T]]): Variable[Tensor[T]]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc init[T](ctx: Context[Tensor[T]]; layerType: typedesc[Conv2D[T]];
+             inShape: seq[int]; outChannels: int; kernelSize: Size2D;
+             padding: Size2D = (0, 0); stride: Size2D = (1, 1)): Conv2D[T]
+
+ + Creates a 2D convolutional layer. Input:
- ``inShape`` Shape of the expected input tensor in the form of ``[C_in, H_in, W_in]``
+- ``outChannels`` Number of channels in the output
+- ``kernelSize`` Shape of the kernel ``(width, height)``
+- ``padding`` Padding, defaults to ``(0, 0)``
+- ``stride`` Stride, defaults to ``(1, 1)``
+

Returns the created Conv2D.

+ +   Source +Edit + +
+
+ +
+
+
+
func inShape[T](self: Conv2D[T]): seq[int]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func outShape[T](self: Conv2D[T]): seq[int]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/conv2D.idx b/conv2D.idx new file mode 100644 index 000000000..b4d346cf1 --- /dev/null +++ b/conv2D.idx @@ -0,0 +1,8 @@ +nimTitle conv2D conv2D.html module src/arraymancer/nn/layers/conv2D 0 +nim Conv2DGate conv2D.html#Conv2DGate type Conv2DGate 20 +nim conv2d conv2D.html#conv2d,Variable[TT],Variable[TT],Variable[TT],Size2D,Size2D proc conv2d[TT](input, weight: Variable[TT]; bias: Variable[TT] = nil;\n padding: Size2D = (0, 0); stride: Size2D = (1, 1)): Variable[TT] 77 +nim Conv2D conv2D.html#Conv2D object Conv2D 132 +nim init conv2D.html#init,Context[Tensor[T]],typedesc[Conv2D[T]],seq[int],int,Size2D,Size2D,Size2D proc init[T](ctx: Context[Tensor[T]]; layerType: typedesc[Conv2D[T]];\n inShape: seq[int]; outChannels: int; kernelSize: Size2D;\n padding: Size2D = (0, 0); stride: Size2D = (1, 1)): Conv2D[T] 139 +nim forward conv2D.html#forward,Conv2D[T],Variable[Tensor[T]] proc forward[T](self: Conv2D[T]; input: Variable[Tensor[T]]): Variable[Tensor[T]] 175 +nim outShape conv2D.html#outShape,Conv2D[T] proc outShape[T](self: Conv2D[T]): seq[int] 184 +nim inShape conv2D.html#inShape,Conv2D[T] proc inShape[T](self: Conv2D[T]): seq[int] 204 diff --git a/cpuinfo_x86.html b/cpuinfo_x86.html new file mode 100644 index 000000000..44390cbab --- /dev/null +++ b/cpuinfo_x86.html @@ -0,0 +1,1652 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/cpuinfo_x86 + + + + + + + + + +Arraymancer - src/arraymancer/laser/cpuinfo_x86 + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/cpuinfo_x86

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Procs

+
+
+
+
proc has3DNow(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc has3DNowEnhanced(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAbm(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAdx(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAes(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAmdv(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx2(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512bfloat16(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512bitalg(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512bw(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512cd(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512dq(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512er(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512f(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512fmaps4(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512ifma(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512pf(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512vbmi(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512vbmi2(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512vl(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512vnni(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512vnniw4(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512vp2intersect(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasAvx512vpopcntdq(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasBmi1(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasBmi2(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasCas8B(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasCas16B(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasClflush(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasClflushOpt(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasClwb(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasFloat16c(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasFma3(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasFma4(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasGfni(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasIntelVtx(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasMmx(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasMmxExt(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasMovBigEndian(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasMpx(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasNxBit(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasPclmulqdq(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasPopcnt(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasPrefetch(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasPrefetchWT1(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasRdrand(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasRdseed(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasSgx(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasSha(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasSimultaneousMultithreading(): bool {.inline, ...raises: [], tags: [],
+    forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasSse(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasSse2(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasSse3(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasSse4a(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasSse41(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasSse42(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasSsse3(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasTsxHle(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasTsxRtm(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasVaes(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasVpclmulqdq(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasX87fpu(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc hasXop(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc isHypervisorPresent(): bool {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/cpuinfo_x86.idx b/cpuinfo_x86.idx new file mode 100644 index 000000000..29ef4376e --- /dev/null +++ b/cpuinfo_x86.idx @@ -0,0 +1,66 @@ +nimTitle cpuinfo_x86 cpuinfo_x86.html module src/arraymancer/laser/cpuinfo_x86 0 +nim isHypervisorPresent cpuinfo_x86.html#isHypervisorPresent proc isHypervisorPresent(): bool 275 +nim hasSimultaneousMultithreading cpuinfo_x86.html#hasSimultaneousMultithreading proc hasSimultaneousMultithreading(): bool 282 +nim hasIntelVtx cpuinfo_x86.html#hasIntelVtx proc hasIntelVtx(): bool 289 +nim hasAmdv cpuinfo_x86.html#hasAmdv proc hasAmdv(): bool 295 +nim hasX87fpu cpuinfo_x86.html#hasX87fpu proc hasX87fpu(): bool 301 +nim hasMmx cpuinfo_x86.html#hasMmx proc hasMmx(): bool 315 +nim hasMmxExt cpuinfo_x86.html#hasMmxExt proc hasMmxExt(): bool 328 +nim has3DNow cpuinfo_x86.html#has3DNow proc has3DNow(): bool 339 +nim has3DNowEnhanced cpuinfo_x86.html#has3DNowEnhanced proc has3DNowEnhanced(): bool 354 +nim hasPrefetch cpuinfo_x86.html#hasPrefetch proc hasPrefetch(): bool 369 +nim hasSse cpuinfo_x86.html#hasSse proc hasSse(): bool 378 +nim hasSse2 cpuinfo_x86.html#hasSse2 proc hasSse2(): bool 388 +nim hasSse3 cpuinfo_x86.html#hasSse3 proc hasSse3(): bool 398 +nim hasSsse3 cpuinfo_x86.html#hasSsse3 proc hasSsse3(): bool 405 +nim hasSse4a cpuinfo_x86.html#hasSse4a proc hasSse4a(): bool 412 +nim hasSse41 cpuinfo_x86.html#hasSse41 proc hasSse41(): bool 419 +nim hasSse42 cpuinfo_x86.html#hasSse42 proc hasSse42(): bool 426 +nim hasAvx cpuinfo_x86.html#hasAvx proc hasAvx(): bool 433 +nim hasAvx2 cpuinfo_x86.html#hasAvx2 proc hasAvx2(): bool 442 +nim hasAvx512f cpuinfo_x86.html#hasAvx512f proc hasAvx512f(): bool 449 +nim hasAvx512dq cpuinfo_x86.html#hasAvx512dq proc hasAvx512dq(): bool 456 +nim hasAvx512ifma cpuinfo_x86.html#hasAvx512ifma proc hasAvx512ifma(): bool 463 +nim hasAvx512pf cpuinfo_x86.html#hasAvx512pf proc hasAvx512pf(): bool 470 +nim hasAvx512er cpuinfo_x86.html#hasAvx512er proc hasAvx512er(): bool 477 +nim hasAvx512cd 
cpuinfo_x86.html#hasAvx512cd proc hasAvx512cd(): bool 484 +nim hasAvx512bw cpuinfo_x86.html#hasAvx512bw proc hasAvx512bw(): bool 491 +nim hasAvx512vl cpuinfo_x86.html#hasAvx512vl proc hasAvx512vl(): bool 498 +nim hasAvx512vbmi cpuinfo_x86.html#hasAvx512vbmi proc hasAvx512vbmi(): bool 505 +nim hasAvx512vbmi2 cpuinfo_x86.html#hasAvx512vbmi2 proc hasAvx512vbmi2(): bool 512 +nim hasAvx512vpopcntdq cpuinfo_x86.html#hasAvx512vpopcntdq proc hasAvx512vpopcntdq(): bool 519 +nim hasAvx512vnni cpuinfo_x86.html#hasAvx512vnni proc hasAvx512vnni(): bool 527 +nim hasAvx512vnniw4 cpuinfo_x86.html#hasAvx512vnniw4 proc hasAvx512vnniw4(): bool 534 +nim hasAvx512fmaps4 cpuinfo_x86.html#hasAvx512fmaps4 proc hasAvx512fmaps4(): bool 542 +nim hasAvx512bitalg cpuinfo_x86.html#hasAvx512bitalg proc hasAvx512bitalg(): bool 549 +nim hasAvx512bfloat16 cpuinfo_x86.html#hasAvx512bfloat16 proc hasAvx512bfloat16(): bool 556 +nim hasAvx512vp2intersect cpuinfo_x86.html#hasAvx512vp2intersect proc hasAvx512vp2intersect(): bool 563 +nim hasRdrand cpuinfo_x86.html#hasRdrand proc hasRdrand(): bool 571 +nim hasRdseed cpuinfo_x86.html#hasRdseed proc hasRdseed(): bool 578 +nim hasMovBigEndian cpuinfo_x86.html#hasMovBigEndian proc hasMovBigEndian(): bool 586 +nim hasPopcnt cpuinfo_x86.html#hasPopcnt proc hasPopcnt(): bool 593 +nim hasFma3 cpuinfo_x86.html#hasFma3 proc hasFma3(): bool 600 +nim hasFma4 cpuinfo_x86.html#hasFma4 proc hasFma4(): bool 607 +nim hasXop cpuinfo_x86.html#hasXop proc hasXop(): bool 614 +nim hasCas8B cpuinfo_x86.html#hasCas8B proc hasCas8B(): bool 624 +nim hasCas16B cpuinfo_x86.html#hasCas16B proc hasCas16B(): bool 631 +nim hasAbm cpuinfo_x86.html#hasAbm proc hasAbm(): bool 638 +nim hasBmi1 cpuinfo_x86.html#hasBmi1 proc hasBmi1(): bool 646 +nim hasBmi2 cpuinfo_x86.html#hasBmi2 proc hasBmi2(): bool 653 +nim hasTsxHle cpuinfo_x86.html#hasTsxHle proc hasTsxHle(): bool 660 +nim hasTsxRtm cpuinfo_x86.html#hasTsxRtm proc hasTsxRtm(): bool 667 +nim hasAdx cpuinfo_x86.html#hasAdx proc hasAdx(): 
bool 675 +nim hasSgx cpuinfo_x86.html#hasSgx proc hasSgx(): bool 682 +nim hasGfni cpuinfo_x86.html#hasGfni proc hasGfni(): bool 689 +nim hasAes cpuinfo_x86.html#hasAes proc hasAes(): bool 696 +nim hasVaes cpuinfo_x86.html#hasVaes proc hasVaes(): bool 703 +nim hasVpclmulqdq cpuinfo_x86.html#hasVpclmulqdq proc hasVpclmulqdq(): bool 710 +nim hasPclmulqdq cpuinfo_x86.html#hasPclmulqdq proc hasPclmulqdq(): bool 717 +nim hasNxBit cpuinfo_x86.html#hasNxBit proc hasNxBit(): bool 724 +nim hasFloat16c cpuinfo_x86.html#hasFloat16c proc hasFloat16c(): bool 731 +nim hasSha cpuinfo_x86.html#hasSha proc hasSha(): bool 739 +nim hasClflush cpuinfo_x86.html#hasClflush proc hasClflush(): bool 746 +nim hasClflushOpt cpuinfo_x86.html#hasClflushOpt proc hasClflushOpt(): bool 753 +nim hasClwb cpuinfo_x86.html#hasClwb proc hasClwb(): bool 760 +nim hasPrefetchWT1 cpuinfo_x86.html#hasPrefetchWT1 proc hasPrefetchWT1(): bool 767 +nim hasMpx cpuinfo_x86.html#hasMpx proc hasMpx(): bool 774 diff --git a/cross_entropy_losses.html b/cross_entropy_losses.html new file mode 100644 index 000000000..12e3fc88d --- /dev/null +++ b/cross_entropy_losses.html @@ -0,0 +1,547 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/loss/cross_entropy_losses + + + + + + + + + +Arraymancer - src/arraymancer/nn/loss/cross_entropy_losses + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/loss/cross_entropy_losses

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
SigmoidCrossEntropyLoss[TT] {.inject, final.} = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+
+
SoftmaxCrossEntropyLoss[TT] {.inject, final.} = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+
+
SparseSoftmaxCrossEntropyLoss[TT; Idx] {.final.} = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc sigmoid_cross_entropy[TT](a`gensym0: Variable[TT]; target`gensym0: TT): Variable[
+    TT]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc softmax_cross_entropy[TT](a`gensym1: Variable[TT]; target`gensym1: TT): Variable[
+    TT]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc sparse_softmax_cross_entropy[TT; Idx: SomeNumber or byte or char or enum](
+    a: Variable[TT]; target: Tensor[Idx]): Variable[TT]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/cross_entropy_losses.idx b/cross_entropy_losses.idx new file mode 100644 index 000000000..37104b9b6 --- /dev/null +++ b/cross_entropy_losses.idx @@ -0,0 +1,7 @@ +nimTitle cross_entropy_losses cross_entropy_losses.html module src/arraymancer/nn/loss/cross_entropy_losses 0 +nim SigmoidCrossEntropyLoss cross_entropy_losses.html#SigmoidCrossEntropyLoss type SigmoidCrossEntropyLoss 23 +nim sigmoid_cross_entropy cross_entropy_losses.html#sigmoid_cross_entropy,,TT proc sigmoid_cross_entropy[TT](a`gensym0: Variable[TT]; target`gensym0: TT): Variable[\n TT] 65 +nim SoftmaxCrossEntropyLoss cross_entropy_losses.html#SoftmaxCrossEntropyLoss type SoftmaxCrossEntropyLoss 23 +nim softmax_cross_entropy cross_entropy_losses.html#softmax_cross_entropy,,TT proc softmax_cross_entropy[TT](a`gensym1: Variable[TT]; target`gensym1: TT): Variable[\n TT] 66 +nim SparseSoftmaxCrossEntropyLoss cross_entropy_losses.html#SparseSoftmaxCrossEntropyLoss type SparseSoftmaxCrossEntropyLoss 68 +nim sparse_softmax_cross_entropy cross_entropy_losses.html#sparse_softmax_cross_entropy,Variable[TT],Tensor[Idx: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or byte or char or enum] proc sparse_softmax_cross_entropy[TT; Idx: SomeNumber or byte or char or enum](\n a: Variable[TT]; target: Tensor[Idx]): Variable[TT] 102 diff --git a/cublas.html b/cublas.html new file mode 100644 index 000000000..c2db3efbd --- /dev/null +++ b/cublas.html @@ -0,0 +1,624 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/backend/cublas + + + + + + + + + +Arraymancer - src/arraymancer/tensor/backend/cublas + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/backend/cublas

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc cublas_axpy[T: SomeFloat](n: int; alpha: T; x: ptr T; incx: int; y: ptr T;
+                               incy: int) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc cublas_copy[T: SomeFloat](n: int; x: ptr T; incx: int; y: ptr T; incy: int) {.
+    inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc cublas_dot[T: SomeFloat](n: int; x: ptr T; incx: int; y: ptr T; incy: int;
+                              output: ptr T) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc cublas_geam[T: SomeFloat](transa, transb: cublasOperation_t; m, n: int;
+                               alpha: T; A: ptr T; lda: int; beta: T; B: ptr T;
+                               ldb: int; C: ptr T; ldc: int) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc cublas_gemm[T: SomeFloat](transa, transb: cublasOperation_t; m, n, k: int;
+                               alpha: T; A: ptr T; lda: int; B: ptr T; ldb: int;
+                               beta: T; C: ptr T; ldc: int) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc cublas_gemmStridedBatched[T: SomeFloat](transa, transb: cublasOperation_t;
+    m, n, k: int; alpha: T; A: ptr T; lda: int; strideA: int; B: ptr T;
+    ldb: int; strideB: int; beta: T; C: ptr T; ldc: int; strideC: int;
+    batchCount: int) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc cublas_gemv[T: SomeFloat](trans: cublasOperation_t; m, n: int; alpha: T;
+                               A: ptr T; lda: int; x: ptr T; incx: int; beta: T;
+                               y: ptr T; incy: int) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc cublas_scal[T: SomeFloat](n: int; alpha: T; x: ptr T; incx: int) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ + +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/cublas.idx b/cublas.idx new file mode 100644 index 000000000..cc720e38f --- /dev/null +++ b/cublas.idx @@ -0,0 +1,9 @@ +nimTitle cublas cublas.html module src/arraymancer/tensor/backend/cublas 0 +nim cublas_copy cublas.html#cublas_copy,int,ptr.T,int,ptr.T,int proc cublas_copy[T: SomeFloat](n: int; x: ptr T; incx: int; y: ptr T; incy: int) 29 +nim cublas_dot cublas.html#cublas_dot,int,ptr.T,int,ptr.T,int,ptr.T proc cublas_dot[T: SomeFloat](n: int; x: ptr T; incx: int; y: ptr T; incy: int;\n output: ptr T) 41 +nim cublas_axpy cublas.html#cublas_axpy,int,T,ptr.T,int,ptr.T,int proc cublas_axpy[T: SomeFloat](n: int; alpha: T; x: ptr T; incx: int; y: ptr T;\n incy: int) 55 +nim cublas_scal cublas.html#cublas_scal,int,T,ptr.T,int proc cublas_scal[T: SomeFloat](n: int; alpha: T; x: ptr T; incx: int) 73 +nim cublas_geam cublas.html#cublas_geam,cublasOperation_t,cublasOperation_t,int,int,T,ptr.T,int,T,ptr.T,int,ptr.T,int proc cublas_geam[T: SomeFloat](transa, transb: cublasOperation_t; m, n: int;\n alpha: T; A: ptr T; lda: int; beta: T; B: ptr T;\n ldb: int; C: ptr T; ldc: int) 88 +nim cublas_gemv cublas.html#cublas_gemv,cublasOperation_t,int,int,T,ptr.T,int,ptr.T,int,T,ptr.T,int proc cublas_gemv[T: SomeFloat](trans: cublasOperation_t; m, n: int; alpha: T;\n A: ptr T; lda: int; x: ptr T; incx: int; beta: T;\n y: ptr T; incy: int) 115 +nim cublas_gemm cublas.html#cublas_gemm,cublasOperation_t,cublasOperation_t,int,int,int,T,ptr.T,int,ptr.T,int,T,ptr.T,int proc cublas_gemm[T: SomeFloat](transa, transb: cublasOperation_t; m, n, k: int;\n alpha: T; A: ptr T; lda: int; B: ptr T; ldb: int;\n beta: T; C: ptr T; ldc: int) 142 +nim cublas_gemmStridedBatched cublas.html#cublas_gemmStridedBatched,cublasOperation_t,cublasOperation_t,int,int,int,T,ptr.T,int,int,ptr.T,int,int,T,ptr.T,int,int,int proc cublas_gemmStridedBatched[T: SomeFloat](transa, transb: cublasOperation_t;\n m, n, k: int; alpha: T; A: ptr T;\n lda: int; strideA: int; B: ptr T;\n ldb: int; strideB: int; 
beta: T;\n C: ptr T; ldc: int; strideC: int;\n batchCount: int) 172 diff --git a/cuda.html b/cuda.html new file mode 100644 index 000000000..e001e5586 --- /dev/null +++ b/cuda.html @@ -0,0 +1,502 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/backend/cuda + + + + + + + + + +Arraymancer - src/arraymancer/tensor/backend/cuda + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/backend/cuda

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc cudaMalloc[T](size: Natural): ptr T {.noSideEffect, inline.}
+
+ + Internal proc. Wrap CudaMAlloc(var pointer, size) -> Error_code +   Source +Edit + +
+
+ +
+
+
+
proc deallocCuda[T](p: ref [ptr T]) {.noSideEffect.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc layoutOnDevice[T: SomeFloat](t: CudaTensor[T]): CudaTensorLayout[T] {.
+    noSideEffect.}
+
+ + Store a CudaTensor shape, strides, etc information on the GPU +   Source +Edit + +
+
+ +
+
+
+
proc newCudaStorage[T: SomeFloat](length: int): CudaStorage[T] {.noSideEffect.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/cuda.idx b/cuda.idx new file mode 100644 index 000000000..7b166d46f --- /dev/null +++ b/cuda.idx @@ -0,0 +1,5 @@ +nimTitle cuda cuda.html module src/arraymancer/tensor/backend/cuda 0 +nim cudaMalloc cuda.html#cudaMalloc,Natural proc cudaMalloc[T](size: Natural): ptr T 23 +nim deallocCuda cuda.html#deallocCuda,ref. proc deallocCuda[T](p: ref [ptr T]) 29 +nim newCudaStorage cuda.html#newCudaStorage,int proc newCudaStorage[T: SomeFloat](length: int): CudaStorage[T] 37 +nim layoutOnDevice cuda.html#layoutOnDevice,CudaTensor[T: SomeFloat] proc layoutOnDevice[T: SomeFloat](t: CudaTensor[T]): CudaTensorLayout[T] 91 diff --git a/cuda_global_state.html b/cuda_global_state.html new file mode 100644 index 000000000..2e28e565a --- /dev/null +++ b/cuda_global_state.html @@ -0,0 +1,442 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/backend/cuda_global_state + + + + + + + + + +Arraymancer - src/arraymancer/tensor/backend/cuda_global_state + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/backend/cuda_global_state

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Lets

+
+
+
cublasHandle0 = initCublasHandle()
+
+ + +   Source +Edit + +
+
+
+
cudaStream0 = initCudaStream()
+
+ + +   Source +Edit + +
+
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/cuda_global_state.idx b/cuda_global_state.idx new file mode 100644 index 000000000..8c954c50a --- /dev/null +++ b/cuda_global_state.idx @@ -0,0 +1,3 @@ +nimTitle cuda_global_state cuda_global_state.html module src/arraymancer/tensor/backend/cuda_global_state 0 +nim cudaStream0 cuda_global_state.html#cudaStream0 let cudaStream0 37 +nim cublasHandle0 cuda_global_state.html#cublasHandle0 let cublasHandle0 38 diff --git a/cudnn.html b/cudnn.html new file mode 100644 index 000000000..27d28732d --- /dev/null +++ b/cudnn.html @@ -0,0 +1,514 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/backend/cudnn + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/backend/cudnn + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/backend/cudnn

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor, cuda +
+
+
+

Lets

+
+
+
cudnnHandle0 = initCudnnHandle()
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc newCudnn4DTensorDesc[T: SomeFloat](t: CudaTensor[T]): cudnnTensorDescriptor_t {.
+    inline, noinit.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template asCudnnType[T: SomeFloat](typ: typedesc[T]): cudnnDataType_t
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ + +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/cudnn.idx b/cudnn.idx new file mode 100644 index 000000000..a5878b2b3 --- /dev/null +++ b/cudnn.idx @@ -0,0 +1,4 @@ +nimTitle cudnn cudnn.html module src/arraymancer/nn_primitives/backend/cudnn 0 +nim cudnnHandle0 cudnn.html#cudnnHandle0 let cudnnHandle0 34 +nim asCudnnType cudnn.html#asCudnnType.t,typedesc[T] template asCudnnType[T: SomeFloat](typ: typedesc[T]): cudnnDataType_t 39 +nim newCudnn4DTensorDesc cudnn.html#newCudnn4DTensorDesc,CudaTensor[T: SomeFloat] proc newCudnn4DTensorDesc[T: SomeFloat](t: CudaTensor[T]): cudnnTensorDescriptor_t 59 diff --git a/cudnn_conv_interface.html b/cudnn_conv_interface.html new file mode 100644 index 000000000..f1ebff466 --- /dev/null +++ b/cudnn_conv_interface.html @@ -0,0 +1,651 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/backend/cudnn_conv_interface + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/backend/cudnn_conv_interface + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/backend/cudnn_conv_interface

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
ConvAlgoSpace[T; Algo] = object
+  algo*: Algo
+  workspace*: ref [ptr T]
+  sizeInBytes*: csize_t
+
+
+ + +   Source +Edit + +
+
+
+
ConvConfig[N] = object
+  pad*: array[N, cint]
+  strides*: array[N, cint]
+  dilation*: array[N, cint]
+
+
+ + +   Source +Edit + +
+
+
+
SizeHW = array[2, int]
+
+ + height width +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc conv_bwd_data_algo_workspace[T: SomeFloat](
+    srcTensorDesc: cudnnTensorDescriptor_t;
+    gradOutputTensorDesc: cudnnTensorDescriptor_t;
+    kernelDesc: cudnnFilterDescriptor_t; convDesc: cudnnConvolutionDescriptor_t;
+    gradInputTensorDesc: cudnnTensorDescriptor_t): ConvAlgoSpace[T,
+    cudnnConvolutionBwdDataAlgo_t] {.noinit.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc conv_bwd_kernel_algo_workspace[T: SomeFloat](
+    srcTensorDesc: cudnnTensorDescriptor_t;
+    gradOutputTensorDesc: cudnnTensorDescriptor_t;
+    gradKernelDesc: cudnnFilterDescriptor_t;
+    convDesc: cudnnConvolutionDescriptor_t): ConvAlgoSpace[T,
+    cudnnConvolutionBwdFilterAlgo_t] {.noinit.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc convOutDims(input, kernel: CudaTensor; padding, strides, dilation: SizeHW): Metadata {.
+    inline, noinit.}
+
+ + Each dimension of the (nbDims-2)-D images of the output tensor is computed as followed: outputDim = 1 + ( inputDim + 2pad - (((filterDim-1)upscaleA)+1) )/ convolutionStride; +   Source +Edit + +
+
+ +
+
+
+
proc newConv2dDesc[T: SomeFloat](padding, strides, dilation: SizeHW): cudnnConvolutionDescriptor_t {.
+    noinit, inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc newConvAlgoSpace[T: SomeFloat](srcTensorDesc: cudnnTensorDescriptor_t;
+                                    kernelDesc: cudnnFilterDescriptor_t;
+                                    convDesc: cudnnConvolutionDescriptor_t;
+                                    dstTensorDesc: cudnnTensorDescriptor_t): ConvAlgoSpace[
+    T, cudnnConvolutionFwdAlgo_t] {.noinit.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc newCudnnConvKernelDesc[T: SomeFloat](convKernel: CudaTensor[T]): cudnnFilterDescriptor_t {.
+    inline, noinit.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/cudnn_conv_interface.idx b/cudnn_conv_interface.idx new file mode 100644 index 000000000..878a3a07d --- /dev/null +++ b/cudnn_conv_interface.idx @@ -0,0 +1,10 @@ +nimTitle cudnn_conv_interface cudnn_conv_interface.html module src/arraymancer/nn_primitives/backend/cudnn_conv_interface 0 +nim SizeHW cudnn_conv_interface.html#SizeHW type SizeHW 28 +nim ConvConfig cudnn_conv_interface.html#ConvConfig object ConvConfig 32 +nim ConvAlgoSpace cudnn_conv_interface.html#ConvAlgoSpace object ConvAlgoSpace 39 +nim newConv2dDesc cudnn_conv_interface.html#newConv2dDesc,SizeHW,SizeHW,SizeHW proc newConv2dDesc[T: SomeFloat](padding, strides, dilation: SizeHW): cudnnConvolutionDescriptor_t 70 +nim newCudnnConvKernelDesc cudnn_conv_interface.html#newCudnnConvKernelDesc,CudaTensor[T: SomeFloat] proc newCudnnConvKernelDesc[T: SomeFloat](convKernel: CudaTensor[T]): cudnnFilterDescriptor_t 77 +nim convOutDims cudnn_conv_interface.html#convOutDims,CudaTensor,CudaTensor,SizeHW,SizeHW,SizeHW proc convOutDims(input, kernel: CudaTensor; padding, strides, dilation: SizeHW): Metadata 95 +nim newConvAlgoSpace cudnn_conv_interface.html#newConvAlgoSpace,cudnnTensorDescriptor_t,cudnnFilterDescriptor_t,cudnnConvolutionDescriptor_t,cudnnTensorDescriptor_t proc newConvAlgoSpace[T: SomeFloat](srcTensorDesc: cudnnTensorDescriptor_t;\n kernelDesc: cudnnFilterDescriptor_t;\n convDesc: cudnnConvolutionDescriptor_t;\n dstTensorDesc: cudnnTensorDescriptor_t): ConvAlgoSpace[\n T, cudnnConvolutionFwdAlgo_t] 135 +nim conv_bwd_kernel_algo_workspace cudnn_conv_interface.html#conv_bwd_kernel_algo_workspace,cudnnTensorDescriptor_t,cudnnTensorDescriptor_t,cudnnFilterDescriptor_t,cudnnConvolutionDescriptor_t proc conv_bwd_kernel_algo_workspace[T: SomeFloat](\n srcTensorDesc: cudnnTensorDescriptor_t;\n gradOutputTensorDesc: cudnnTensorDescriptor_t;\n gradKernelDesc: cudnnFilterDescriptor_t;\n convDesc: cudnnConvolutionDescriptor_t): ConvAlgoSpace[T,\n cudnnConvolutionBwdFilterAlgo_t] 180 +nim 
conv_bwd_data_algo_workspace cudnn_conv_interface.html#conv_bwd_data_algo_workspace,cudnnTensorDescriptor_t,cudnnTensorDescriptor_t,cudnnFilterDescriptor_t,cudnnConvolutionDescriptor_t,cudnnTensorDescriptor_t proc conv_bwd_data_algo_workspace[T: SomeFloat](\n srcTensorDesc: cudnnTensorDescriptor_t;\n gradOutputTensorDesc: cudnnTensorDescriptor_t;\n kernelDesc: cudnnFilterDescriptor_t; convDesc: cudnnConvolutionDescriptor_t;\n gradInputTensorDesc: cudnnTensorDescriptor_t): ConvAlgoSpace[T,\n cudnnConvolutionBwdDataAlgo_t] 225 diff --git a/data_structure.html b/data_structure.html new file mode 100644 index 000000000..4e8299b28 --- /dev/null +++ b/data_structure.html @@ -0,0 +1,854 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/data_structure + + + + + + + + + +Arraymancer - src/arraymancer/tensor/data_structure + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/data_structure

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
AnyTensor[T] = Tensor[T] or CudaTensor[T] or ClTensor[T]
+
+ + +   Source +Edit + +
+
+
+
ClStorage[T] = object
+  Flen*: int
+  Fdata*: ptr UncheckedArray[T]
+  Fref_tracking*: ref [ptr UncheckedArray[T]]
+
+
+ + Opaque seq-like structure for storage on the OpenCL backend. +   Source +Edit + +
+
+
+
ClTensor[T] = object
+  shape*: Metadata
+  strides*: Metadata
+  offset*: int
+  storage*: ClStorage[T]
+
+
+ + Tensor data structure stored on OpenCL (CPU, GPU, FPGAs or other accelerators)
  • shape: Dimensions of the CudaTensor
  • +
  • strides: Numbers of items to skip to get the next item along a dimension.
  • +
  • offset: Offset to get the first item of the CudaTensor. Note: offset can be negative, in particular for slices.
  • +
  • storage: An opaque data storage for the CudaTensor
  • +
+

Warning โš : Assignment var a = b does not copy the data. Data modification on one CudaTensor will be reflected on the other. However modification on metadata (shape, strides or offset) will not affect the other tensor. Explicit copies can be made with clone: var a = b.clone

+ +   Source +Edit + +
+
+
+
CudaStorage[T] = object
+  Flen*: int
+  Fdata*: ptr UncheckedArray[T]
+  Fref_tracking*: ref [ptr UncheckedArray[T]]
+
+
+ +

Opaque seq-like structure for storage on the Cuda backend.

+

Nim garbage collector will automatically ask cuda to clear GPU memory if data becomes unused.

+ +   Source +Edit + +
+
+
+
CudaTensor[T] = object
+  shape*: Metadata
+  strides*: Metadata
+  offset*: int
+  storage*: CudaStorage[T]
+
+
+ + Tensor data structure stored on Nvidia GPU (Cuda)
  • shape: Dimensions of the CudaTensor
  • +
  • strides: Numbers of items to skip to get the next item along a dimension.
  • +
  • offset: Offset to get the first item of the CudaTensor. Note: offset can be negative, in particular for slices.
  • +
  • storage: An opaque data storage for the CudaTensor
  • +
+

Warning โš : Assignment var a = b does not copy the data. Data modification on one CudaTensor will be reflected on the other. However modification on metadata (shape, strides or offset) will not affect the other tensor. Explicit copies can be made with clone: var a = b.clone

+ +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc data=[T](t: var Tensor[T]; s: seq[T]) {.
+    ...deprecated: "Use copyFromRaw instead".}
+
+
+ Deprecated: Use copyFromRaw instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc dataArray[T: KnownSupportsCopyMem](t: Tensor[T]): ptr UncheckedArray[T] {.
+    noSideEffect, inline, ...deprecated: "Use toUnsafeView instead".}
+
+
+ Deprecated: Use toUnsafeView instead +
+ + Input:
- A tensor
+

Returns:

+
- A pointer to the offset start of the data.
+  Return value supports array indexing.
+ +   Source +Edit + +
+
+
+
proc dataArray[T: not KnownSupportsCopyMem](t: Tensor[T]): ptr UncheckedArray[T] {.error: "`dataArray`  is deprecated for mem copyable types and not supported for GC\'ed types!".}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc get_data_ptr[T: KnownSupportsCopyMem](t: Tensor[T]): ptr T {.noSideEffect,
+    inline.}
+
+ + Input:
- A tensor
+

Returns:

+
- A pointer to the real start of its data (no offset)
+ +   Source +Edit + +
+
+
+
proc get_data_ptr[T: not KnownSupportsCopyMem](t: AnyTensor[T]): ptr T {.
+    error: "`get_data_ptr` cannot be safely used for GC\'ed types!".}
+
+ + +   Source +Edit + +
+
+
+
proc get_data_ptr[T](t: CudaTensor[T] or ClTensor[T]): ptr T {.noSideEffect,
+    inline.}
+
+ + Input:
- A tensor
+

Returns:

+
- A pointer to the real start of its data (no offset)
+ +   Source +Edit + +
+
+ +
+
+
+
proc get_offset_ptr[T: KnownSupportsCopyMem](t: Tensor[T]): ptr T {.
+    noSideEffect, inline.}
+
+ + Input:
- A tensor
+

Returns:

+
- A pointer to the offset start of its data
+ +   Source +Edit + +
+
+
+
proc get_offset_ptr[T: not KnownSupportsCopyMem](t: AnyTensor[T]): ptr T {.
+    error: "`get_offset_ptr` cannot be safely used for GC\'ed types!".}
+
+ + +   Source +Edit + +
+
+
+
proc get_offset_ptr[T](t: CudaTensor[T] or ClTensor[T]): ptr T {.noSideEffect,
+    inline.}
+
+ + Input:
- A tensor
+

Returns:

+
- A pointer to the offset start of its data
+ +   Source +Edit + +
+
+ +
+
+
+
func is_C_contiguous(t: CudaTensor or ClTensor): bool
+
+ + Check if the tensor follows C convention / is row major +   Source +Edit + +
+
+ +
+
+
+
proc is_F_contiguous(t: AnyTensor): bool {.noSideEffect, inline.}
+
+ + Check if the tensor follows Fortran convention / is column major +   Source +Edit + +
+
+ +
+
+
+
proc isContiguous(t: AnyTensor): bool {.noSideEffect, inline.}
+
+ + Check if the tensor is contiguous +   Source +Edit + +
+
+ +
+
+
+
func rank[T](t: CudaTensor[T] or ClTensor[T]): range[0 .. LASER_MAXRANK] {.
+    inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc shape_to_strides(shape: Metadata; layout: OrderType = rowMajor;
+                      result: var Metadata) {.noSideEffect, ...raises: [],
+    tags: [], forbids: [].}
+
+ + Input:
- A shape (Metadata), for example [3,5] for a 3x5 matrix
+- Optionally rowMajor (C layout - default) or colMajor (Fortran)
+

Returns:

+
- The strides in C or Fortran order corresponding to this shape and layout
+

ย Arraymancer defaults to rowMajor. Temporarily, CudaTensors are colMajor by default.

+ +   Source +Edit + +
+
+ +
+
+
+
func size[T](t: CudaTensor[T] or ClTensor[T]): Natural {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ + +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/data_structure.idx b/data_structure.idx new file mode 100644 index 000000000..970125c31 --- /dev/null +++ b/data_structure.idx @@ -0,0 +1,24 @@ +nimTitle data_structure data_structure.html module src/arraymancer/tensor/data_structure 0 +nim CudaStorage data_structure.html#CudaStorage object CudaStorage 26 +nim CudaTensor data_structure.html#CudaTensor object CudaTensor 36 +nim ClStorage data_structure.html#ClStorage object ClStorage 52 +nim ClTensor data_structure.html#ClTensor object ClTensor 58 +nim AnyTensor data_structure.html#AnyTensor type AnyTensor 74 +nim data= data_structure.html#data=,Tensor[T],seq[T] proc data=[T](t: var Tensor[T]; s: seq[T]) 80 +nim rank data_structure.html#rank proc rank[T](t: CudaTensor[T] or ClTensor[T]): range[0 .. LASER_MAXRANK] 93 +nim size data_structure.html#size proc size[T](t: CudaTensor[T] or ClTensor[T]): Natural 96 +nim shape_to_strides data_structure.html#shape_to_strides,Metadata,OrderType,Metadata proc shape_to_strides(shape: Metadata; layout: OrderType = rowMajor;\n result: var Metadata) 99 +nim is_C_contiguous data_structure.html#is_C_contiguous proc is_C_contiguous(t: CudaTensor or ClTensor): bool 122 +nim is_F_contiguous data_structure.html#is_F_contiguous,AnyTensor proc is_F_contiguous(t: AnyTensor): bool 133 +nim isContiguous data_structure.html#isContiguous,AnyTensor proc isContiguous(t: AnyTensor): bool 144 +nim get_data_ptr data_structure.html#get_data_ptr,Tensor[T: KnownSupportsCopyMem] proc get_data_ptr[T: KnownSupportsCopyMem](t: Tensor[T]): ptr T 153 +nim get_data_ptr data_structure.html#get_data_ptr,AnyTensor[T: not KnownSupportsCopyMem] proc get_data_ptr[T: not KnownSupportsCopyMem](t: AnyTensor[T]): ptr T 160 +nim get_offset_ptr data_structure.html#get_offset_ptr,Tensor[T: KnownSupportsCopyMem] proc get_offset_ptr[T: KnownSupportsCopyMem](t: Tensor[T]): ptr T 163 +nim get_offset_ptr data_structure.html#get_offset_ptr,AnyTensor[T: not KnownSupportsCopyMem] proc get_offset_ptr[T: not 
KnownSupportsCopyMem](t: AnyTensor[T]): ptr T 170 +nim get_data_ptr data_structure.html#get_data_ptr proc get_data_ptr[T](t: CudaTensor[T] or ClTensor[T]): ptr T 173 +nim get_offset_ptr data_structure.html#get_offset_ptr proc get_offset_ptr[T](t: CudaTensor[T] or ClTensor[T]): ptr T 180 +nim dataArray data_structure.html#dataArray,Tensor[T: KnownSupportsCopyMem] proc dataArray[T: KnownSupportsCopyMem](t: Tensor[T]): ptr UncheckedArray[T] 187 +nim dataArray data_structure.html#dataArray,Tensor[T: not KnownSupportsCopyMem] proc dataArray[T: not KnownSupportsCopyMem](t: Tensor[T]): ptr UncheckedArray[T] 195 +nimgrp getdataptr data_structure.html#get_data_ptr-procs-all proc 153 +nimgrp dataarray data_structure.html#dataArray-procs-all proc 187 +nimgrp getoffsetptr data_structure.html#get_offset_ptr-procs-all proc 163 diff --git a/datatypes.html b/datatypes.html new file mode 100644 index 000000000..4d4b8f9fc --- /dev/null +++ b/datatypes.html @@ -0,0 +1,906 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/tensor/datatypes + + + + + + + + + +Arraymancer - src/arraymancer/laser/tensor/datatypes + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/tensor/datatypes

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
CpuStorage[T] {.shallow.} = ref CpuStorageObj[T]
+
+ + +   Source +Edit + +
+
+
+
KnownSupportsCopyMem = concept xtypeof(T)
+    supportsCopyMem(T)
+
+ + +   Source +Edit + +
+
+
+
Metadata = DynamicStackArray[int]
+
+ + +   Source +Edit + +
+
+
+
MetadataArray {....deprecated: "Use Metadata instead".} = Metadata
+
+
+ Deprecated: Use Metadata instead +
+ + +   Source +Edit + +
+
+
+
RawImmutableView[T] = distinct ptr UncheckedArray[T]
+
+ + +   Source +Edit + +
+
+
+
RawMutableView[T] = distinct ptr UncheckedArray[T]
+
+ + +   Source +Edit + +
+
+
+
Tensor[T] = object
+  shape*: Metadata
+  strides*: Metadata
+  offset*: int
+  storage*: CpuStorage[T]
+
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc allocCpuStorage[T](storage: var CpuStorage[T]; size: int)
+
+ + Allocate aligned memory to hold size elements of type T. If T does not supports copyMem, it is also zero-initialized. I.e. Tensors of seq, strings, ref types or types with non-trivial destructors are always zero-initialized. This prevents potential GC issues. +   Source +Edit + +
+
+ +
+
+
+
proc cpuStorageFromBuffer[T: KnownSupportsCopyMem](storage: var CpuStorage[T];
+    rawBuffer: pointer; size: int)
+
+ +

Create a CpuStorage, which stores data from a given raw pointer, which it does not own. The destructor/finalizer will be a no-op, because the memory is marked as not owned by the CpuStorage.

+

The input buffer must be a raw pointer.

+ +   Source +Edit + +
+
+ +
+
+
+
proc initMetadataArray(len: int): Metadata {.inline, ...raises: [], tags: [],
+    forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func is_C_contiguous(t: Tensor): bool
+
+ + Check if the tensor follows C convention / is row major +   Source +Edit + +
+
+ +
+
+
+
func rank[T](t: Tensor[T]): range[0 .. LASER_MAXRANK] {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func size[T](t: Tensor[T]): Natural {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc toMetadataArray(s: varargs[int]): Metadata {.inline, ...raises: [], tags: [],
+    forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func unsafe_raw_buf[T: KnownSupportsCopyMem](t: Tensor[T];
+    aligned: static bool = true): RawImmutableView[T] {.inline.}
+
+ +

Returns a view to the start of the data buffer

+

Unsafe: the pointer can outlive the input tensor For optimization purposes, Laser will hint the compiler that while the pointer is valid, all data accesses will be through it (no aliasing) and that the data is aligned by LASER_MEM_ALIGN (default 64).

+ +   Source +Edit + +
+
+
+
func unsafe_raw_buf[T: KnownSupportsCopyMem](t: var Tensor[T];
+    aligned: static bool = true): RawMutableView[T] {.inline.}
+
+ +

Returns a view to the start of the data buffer

+

Unsafe: the pointer can outlive the input tensor For optimization purposes, Laser will hint the compiler that while the pointer is valid, all data accesses will be through it (no aliasing) and that the data is aligned by LASER_MEM_ALIGN (default 64).

+ +   Source +Edit + +
+
+
+
func unsafe_raw_buf[T: not KnownSupportsCopyMem](t: Tensor[T];
+    aligned: static bool = true): ptr UncheckedArray[T] {.
+    error: "Access via raw pointer forbidden for non mem copyable types!".}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func unsafe_raw_offset[T: KnownSupportsCopyMem](t: Tensor[T];
+    aligned: static bool = true): RawImmutableView[T] {.inline.}
+
+ +

Returns a view to the start of the valid data

+

Unsafe: the pointer can outlive the input tensor For optimization purposes, Laser will hint the compiler that while the pointer is valid, all data accesses will be through it (no aliasing) and that the data is aligned by LASER_MEM_ALIGN (default 64).

+ +   Source +Edit + +
+
+
+
func unsafe_raw_offset[T: KnownSupportsCopyMem](t: var Tensor[T];
+    aligned: static bool = true): RawMutableView[T] {.inline.}
+
+ +

Returns a view to the start of the valid data

+

Unsafe: the pointer can outlive the input tensor For optimization purposes, Laser will hint the compiler that while the pointer is valid, all data accesses will be through it (no aliasing) and that the data is aligned by LASER_MEM_ALIGN (default 64).

+ +   Source +Edit + +
+
+
+
func unsafe_raw_offset[T: not KnownSupportsCopyMem](t: Tensor[T];
+    aligned: static bool = true): ptr UncheckedArray[T] {.
+    error: "Access via raw pointer forbidden for non mem copyable types!".}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+
+

Macros

+
+
+
+
macro raw_data_unaligned(body: untyped): untyped
+
+ +

Within this code block, all raw data accesses will not be assumed aligned by default (LASER_MEM_ALIGN is 64 by default). Use this when interfacing with external buffers of unknown alignment.

+

โš ๏ธ Warning: At the moment Nim's builtin term-rewriting macros are not scoped. All processing within the file this is called will be considered unaligned. https://github.com/nim-lang/Nim/issues/7214#issuecomment-431567894.

+ +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template `[]`[T](v: RawImmutableView[T]; idx: int): T
+
+ + +   Source +Edit + +
+
+
+
template `[]`[T](v: RawMutableView[T]; idx: int): var T
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template `[]=`[T](v: RawMutableView[T]; idx: int; val: T)
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/datatypes.idx b/datatypes.idx new file mode 100644 index 000000000..9e02ce2f1 --- /dev/null +++ b/datatypes.idx @@ -0,0 +1,28 @@ +nimTitle datatypes datatypes.html module src/arraymancer/laser/tensor/datatypes 0 +nim KnownSupportsCopyMem datatypes.html#KnownSupportsCopyMem type KnownSupportsCopyMem 21 +nim RawImmutableView datatypes.html#RawImmutableView type RawImmutableView 24 +nim RawMutableView datatypes.html#RawMutableView type RawMutableView 25 +nim Metadata datatypes.html#Metadata type Metadata 27 +nim MetadataArray datatypes.html#MetadataArray type MetadataArray 31 +nim Tensor datatypes.html#Tensor object Tensor 33 +nim CpuStorage datatypes.html#CpuStorage type CpuStorage 39 +nim initMetadataArray datatypes.html#initMetadataArray,int proc initMetadataArray(len: int): Metadata 49 +nim toMetadataArray datatypes.html#toMetadataArray,varargs[int] proc toMetadataArray(s: varargs[int]): Metadata 52 +nim rank datatypes.html#rank,Tensor[T] proc rank[T](t: Tensor[T]): range[0 .. 
LASER_MAXRANK] 60 +nim size datatypes.html#size,Tensor[T] proc size[T](t: Tensor[T]): Natural 63 +nim allocCpuStorage datatypes.html#allocCpuStorage,CpuStorage[T],int proc allocCpuStorage[T](storage: var CpuStorage[T]; size: int) 86 +nim cpuStorageFromBuffer datatypes.html#cpuStorageFromBuffer,CpuStorage[T: KnownSupportsCopyMem],pointer,int proc cpuStorageFromBuffer[T: KnownSupportsCopyMem](storage: var CpuStorage[T];\n rawBuffer: pointer; size: int) 103 +nim is_C_contiguous datatypes.html#is_C_contiguous,Tensor proc is_C_contiguous(t: Tensor): bool 120 +nim unsafe_raw_buf datatypes.html#unsafe_raw_buf,Tensor[T: KnownSupportsCopyMem],staticbool proc unsafe_raw_buf[T: KnownSupportsCopyMem](t: Tensor[T];\n aligned: static bool = true): RawImmutableView[\n T] 157 +nim unsafe_raw_buf datatypes.html#unsafe_raw_buf,Tensor[T: KnownSupportsCopyMem],staticbool_2 proc unsafe_raw_buf[T: KnownSupportsCopyMem](t: var Tensor[T];\n aligned: static bool = true): RawMutableView[\n T] 166 +nim unsafe_raw_offset datatypes.html#unsafe_raw_offset,Tensor[T: KnownSupportsCopyMem],staticbool proc unsafe_raw_offset[T: KnownSupportsCopyMem](t: Tensor[T];\n aligned: static bool = true): RawImmutableView[T] 175 +nim unsafe_raw_offset datatypes.html#unsafe_raw_offset,Tensor[T: KnownSupportsCopyMem],staticbool_2 proc unsafe_raw_offset[T: KnownSupportsCopyMem](t: var Tensor[T];\n aligned: static bool = true): RawMutableView[T] 184 +nim unsafe_raw_buf datatypes.html#unsafe_raw_buf,Tensor[T: not KnownSupportsCopyMem],staticbool proc unsafe_raw_buf[T: not KnownSupportsCopyMem](t: Tensor[T];\n aligned: static bool = true): ptr UncheckedArray[T] 193 +nim unsafe_raw_offset datatypes.html#unsafe_raw_offset,Tensor[T: not KnownSupportsCopyMem],staticbool proc unsafe_raw_offset[T: not KnownSupportsCopyMem](t: Tensor[T];\n aligned: static bool = true): ptr UncheckedArray[T] 195 +nim raw_data_unaligned datatypes.html#raw_data_unaligned.m,untyped macro raw_data_unaligned(body: untyped): untyped 197 +nim `[]` 
datatypes.html#[].t,RawImmutableView[T],int template `[]`[T](v: RawImmutableView[T]; idx: int): T 213 +nim `[]` datatypes.html#[].t,RawMutableView[T],int template `[]`[T](v: RawMutableView[T]; idx: int): var T 217 +nim `[]=` datatypes.html#[]=.t,RawMutableView[T],int,T template `[]=`[T](v: RawMutableView[T]; idx: int; val: T) 221 +nimgrp unsaferawbuf datatypes.html#unsafe_raw_buf-procs-all proc 157 +nimgrp unsaferawoffset datatypes.html#unsafe_raw_offset-procs-all proc 175 +nimgrp [] datatypes.html#[]-templates-all template 213 diff --git a/dbscan.html b/dbscan.html new file mode 100644 index 000000000..680fd716b --- /dev/null +++ b/dbscan.html @@ -0,0 +1,463 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/ml/clustering/dbscan + + + + + + + + + +Arraymancer - src/arraymancer/ml/clustering/dbscan + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/ml/clustering/dbscan

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc dbscan[T: SomeFloat](X: Tensor[T]; eps: float; minSamples: int;
+                          metric: typedesc[AnyMetric] = Euclidean; p = 2.0): seq[
+    int]
+
+ + Performs DBSCAN clustering on the input data X. X needs to be a tensor of rank 2 with the following shape:
  • [n_observations, n_dimensions]
  • +
+

so that we have n_observations points that each have a dimensionality of n_dimensions (or sometimes called number of features).

+

eps is the radius in which we search for neighbors around each point using the give metric.

+

minSamples is the minimum number of elements that need to be in the search radius eps to consider a set of points a proto-cluster (the "core points"), from which to compute the final clusters.

+

If we use the Minkowski metric, p is the power to use in it. Otherwise the value is ignored.

+ +   Source +Edit + +
+
+ +
+ +
+
+ + +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/dbscan.idx b/dbscan.idx new file mode 100644 index 000000000..ab6a1dfb3 --- /dev/null +++ b/dbscan.idx @@ -0,0 +1,2 @@ +nimTitle dbscan dbscan.html module src/arraymancer/ml/clustering/dbscan 0 +nim dbscan dbscan.html#dbscan,Tensor[T: SomeFloat],float,int,typedesc[AnyMetric],float proc dbscan[T: SomeFloat](X: Tensor[T]; eps: float; minSamples: int;\n metric: typedesc[AnyMetric] = Euclidean; p = 2.0): seq[int] 50 diff --git a/decomposition.html b/decomposition.html new file mode 100644 index 000000000..55ce2a427 --- /dev/null +++ b/decomposition.html @@ -0,0 +1,614 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra/decomposition + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra/decomposition + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/linear_algebra/decomposition

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc lu_permuted[T: SupportedDecomposition](a: Tensor[T]): tuple[
+    PL, U: Tensor[T]]
+
+ +

Compute the pivoted LU decomposition of an input matrix a.

+

The decomposition solves the equation: A = P L U

+

where:

+
  • P is a permutation matrix
  • +
  • L is a lower-triangular matrix with unit diagonal
  • +
  • U is an upper-triangular matrix
  • +
+

Input:

+
  • a, a MxN matrix
  • +
+

Output: with K = min(M, N)

+
  • PL, the product of P and L, of shape M, K
  • +
  • U, upper-triangular matrix of shape K, N
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc qr[T: SupportedDecomposition](a: Tensor[T]): tuple[Q, R: Tensor[T]]
+
+ +

Compute the QR decomposition of an input matrix a Decomposition is done through the Householder method without pivoting.

+

Input:

+
  • a, matrix of shape M, N
  • +
+

We note K = min(M, N)

+

Returns:

+
  • Q orthonormal matrix of shape M, K
  • +
  • R upper-triangular matrix of shape K, N
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc svd[T: SupportedDecomposition; U: SomeFloat](A: Tensor[T]; _: typedesc[U]): tuple[
+    U: Tensor[T], S: Tensor[U], Vh: Tensor[T]]
+
+ +

Compute the Singular Value Decomposition of an input matrix a Decomposition is done through recursive divide & conquer.

+

Input:

+
  • A, matrix of shape M, N
  • +
+

Returns: with K = min(M, N)

+
  • U: Unitary matrix of shape M, K with left singular vectors as columns
  • +
  • S: Singular values diagonal of length K in decreasing order
  • +
  • Vh: Unitary matrix of shape K, N with right singular vectors as rows
  • +
+

SVD solves the equation: A = U S V.h

+
  • with S being a diagonal matrix of singular values
  • +
  • with V being the right singular vectors and V.h being the hermitian conjugate of V for real matrices, this is equivalent to V.t (transpose)
  • +
+

โš ๏ธ: Input must not contain NaN

+

Compared to Numpy svd procedure, we default to "full_matrices = false".

+

Exception:

+
  • This can throw if the algorithm did not converge.
  • +
+ +   Source +Edit + +
+
+
+
proc svd[T: SupportedDecomposition](A: Tensor[T]): auto
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc symeig[T: SupportedDecomposition](a: Tensor[T]; return_eigenvectors: static
+    bool = false; uplo: static char = 'U'): tuple[eigenval, eigenvec: Tensor[T]] {.
+    inline.}
+
+ + Compute the eigenvalues and eigen vectors of a symmetric matrix Input:
  • A symmetric matrix of shape n x n
  • +
  • A boolean: true if you also want the eigenvectors, false otherwise
  • +
  • A char U for upper or L for lower This allows you to only fill half of the input symmetric matrix
  • +
+

Returns:

+
  • A tuple with:
    • The eigenvalues sorted from lowest to highest. (shape n)
    • +
    • The corresponding eigenvectors of shape n, n if it was requested. If eigenvectors were not requested, this have to be discarded. Using the result will create a runtime error.
    • +
    +
  • +
+

Implementation is done through the Multiple Relatively Robust Representations

+ +   Source +Edit + +
+
+
+
proc symeig[T: SupportedDecomposition](a: Tensor[T]; return_eigenvectors: static
+    bool = false; uplo: static char = 'U';
+                                       slice: HSlice[[type node], [type node]]): tuple[
+    eigenval, eigenvec: Tensor[T]] {.inline.}
+
+ + Compute the eigenvalues and eigen vectors of a symmetric matrix Input:
  • A symmetric matrix of shape n, n
  • +
  • A boolean: true if you also want the eigenvectors, false otherwise
  • +
  • A char U for upper or L for lower This allows you to only fill half of the input symmetric matrix
  • +
  • A slice of the rankings of eigenvalues you request. For example requesting eigenvalues 2 and 3 would be done with 1..2.
  • +
+

Returns:

+
  • A tuple with:
    • The eigenvalues sorted from lowest to highest. (shape m where m is the slice size)
    • +
    • The corresponding eigenvector if it was requested. (shape n, m) If eigenvectors were not requested, this have to be discarded. Using the result will create a runtime error.
    • +
    +
  • +
+

Implementation is done through the Multiple Relatively Robust Representations

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/decomposition.idx b/decomposition.idx new file mode 100644 index 000000000..265966cc9 --- /dev/null +++ b/decomposition.idx @@ -0,0 +1,9 @@ +nimTitle decomposition decomposition.html module src/arraymancer/linear_algebra/decomposition 0 +nim symeig decomposition.html#symeig,Tensor[T: SupportedDecomposition],staticbool,staticchar proc symeig[T: SupportedDecomposition](a: Tensor[T];\n return_eigenvectors: static bool = false;\n uplo: static char = 'U'): tuple[\n eigenval, eigenvec: Tensor[T]] 19 +nim symeig decomposition.html#symeig,Tensor[T: SupportedDecomposition],staticbool,staticchar,HSlice[,] proc symeig[T: SupportedDecomposition](a: Tensor[T];\n return_eigenvectors: static bool = false;\n uplo: static char = 'U';\n slice: HSlice[[type node], [type node]]): tuple[\n eigenval, eigenvec: Tensor[T]] 39 +nim qr decomposition.html#qr,Tensor[T: SupportedDecomposition] proc qr[T: SupportedDecomposition](a: Tensor[T]): tuple[Q, R: Tensor[T]] 63 +nim lu_permuted decomposition.html#lu_permuted,Tensor[T: SupportedDecomposition] proc lu_permuted[T: SupportedDecomposition](a: Tensor[T]): tuple[PL, U: Tensor[T]] 93 +nim svd decomposition.html#svd,Tensor[T: SupportedDecomposition],typedesc[U] proc svd[T: SupportedDecomposition; U: SomeFloat](A: Tensor[T]; _: typedesc[U]): tuple[\n U: Tensor[T], S: Tensor[U], Vh: Tensor[T]] 124 +nim svd decomposition.html#svd,Tensor[T: SupportedDecomposition] proc svd[T: SupportedDecomposition](A: Tensor[T]): auto 178 +nimgrp svd decomposition.html#svd-procs-all proc 124 +nimgrp symeig decomposition.html#symeig-procs-all proc 19 diff --git a/decomposition_lapack.html b/decomposition_lapack.html new file mode 100644 index 000000000..71fd8f38a --- /dev/null +++ b/decomposition_lapack.html @@ -0,0 +1,607 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra/helpers/decomposition_lapack + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra/helpers/decomposition_lapack + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/linear_algebra/helpers/decomposition_lapack

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
SupportedDecomposition = SomeFloat | Complex64 | Complex32
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc geqrf[T: SupportedDecomposition](Q: var Tensor[T]; tau: var seq[T];
+                                      scratchspace: var seq[T])
+
+ +

Wrapper for LAPACK geqrf routine (GEneral QR Factorization) Decomposition is done through Householder Reflection and without pivoting

+

In-place version, this will overwrite Q and tau

+ +   Source +Edit + +
+
+ +
+
+
+
proc gesdd[T: SupportedDecomposition; X: SupportedDecomposition](
+    a: var Tensor[T]; U: var Tensor[T]; S: var Tensor[X]; Vh: var Tensor[T];
+    scratchspace: var seq[T])
+
+ +

Wrapper for LAPACK gesdd routine (GEneral Singular value Decomposition by Divide & conquer)

+

Parameters:

+
  • a: Input - MxN matrix to factorize, in column major format
  • +
  • U: Output - Unitary matrix containing the left singular vectors as columns
  • +
  • S: Output - Singular values sorted in decreasing order
  • +
  • Vh: Output - Unitary matrix containing the right singular vectors as rows
  • +
+

SVD solves the equation: A = U S V.h

+
  • with S being a diagonal matrix of singular values
  • +
  • with V being the right singular vectors and V.h being the hermitian conjugate of V for real matrices, this is equivalent to V.t (transpose)
  • +
+

โš ๏ธ: Input must not contain NaN

+

Performance note:

+
  • Lapack, especially with the OpenBLAS backend is much more optimized for input M, N where M > N versus N < M (2x - 3x speed difference) Transpose accordingly. Matrices must be column major.
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc getrf[T: SupportedDecomposition](lu: var Tensor[T];
+                                      pivot_indices: var seq[int32])
+
+ +

Wrapper for LAPACK getrf routine (GEneral ??? Pivoted LU Factorization)

+

In-place version, this will overwrite LU and tau

+ +   Source +Edit + +
+
+ +
+
+
+
proc syevr[T: SupportedDecomposition](a: var Tensor[T]; uplo: static char;
+                                      return_eigenvectors: static bool;
+                                      low_idx: int; high_idx: int;
+                                      eigenval, eigenvec: var Tensor[T];
+                                      scratchspace: var seq[T])
+
+ +

Wrapper for LAPACK syevr routine (Symmetric Recursive Eigenvalue Decomposition)

+

eigenvalues are returned in ascending order (from lower to upper)

+

if uplo = 'L', the lower part of A is used it is destroyed on exit (and upper part is untouched) vice-versa if uplo = 'U' for the upper part of A

+ +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template address(x: typed): untyped
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/decomposition_lapack.idx b/decomposition_lapack.idx new file mode 100644 index 000000000..4b8f1ab9d --- /dev/null +++ b/decomposition_lapack.idx @@ -0,0 +1,7 @@ +nimTitle decomposition_lapack decomposition_lapack.html module src/arraymancer/linear_algebra/helpers/decomposition_lapack 0 +nim SupportedDecomposition decomposition_lapack.html#SupportedDecomposition type SupportedDecomposition 30 +nim address decomposition_lapack.html#address.t,typed template address(x: typed): untyped 32 +nim syevr decomposition_lapack.html#syevr,Tensor[T: SupportedDecomposition],staticchar,staticbool,int,int,Tensor[T: SupportedDecomposition],Tensor[T: SupportedDecomposition],seq[T] proc syevr[T: SupportedDecomposition](a: var Tensor[T]; uplo: static char;\n return_eigenvectors: static bool; low_idx: int;\n high_idx: int;\n eigenval, eigenvec: var Tensor[T];\n scratchspace: var seq[T]) 53 +nim geqrf decomposition_lapack.html#geqrf,Tensor[T: SupportedDecomposition],seq[T],seq[T] proc geqrf[T: SupportedDecomposition](Q: var Tensor[T]; tau: var seq[T];\n scratchspace: var seq[T]) 178 +nim gesdd decomposition_lapack.html#gesdd,Tensor[T: SupportedDecomposition],Tensor[T: SupportedDecomposition],Tensor[X: SupportedDecomposition],Tensor[T: SupportedDecomposition],seq[T] proc gesdd[T: SupportedDecomposition; X: SupportedDecomposition](a: var Tensor[T];\n U: var Tensor[T]; S: var Tensor[X]; Vh: var Tensor[T];\n scratchspace: var seq[T]) 220 +nim getrf decomposition_lapack.html#getrf,Tensor[T: SupportedDecomposition],seq[int32] proc getrf[T: SupportedDecomposition](lu: var Tensor[T];\n pivot_indices: var seq[int32]) 349 diff --git a/decomposition_rand.html b/decomposition_rand.html new file mode 100644 index 000000000..e632c29bf --- /dev/null +++ b/decomposition_rand.html @@ -0,0 +1,483 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra/decomposition_rand + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra/decomposition_rand + + + + + + + +Fork me 
on GitHub + + +
+
+

src/arraymancer/linear_algebra/decomposition_rand

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+

+ +
+

Procs

+
+
+
+
proc svd_randomized[T](A: Tensor[T]; n_components = 2; n_oversamples = 5;
+                       n_power_iters = 2): tuple[U, S, Vh: Tensor[T]]
+
+ +

Compute approximate nearly optimal truncated Singular Value Decomposition of an input matrix a.

+

Decomposition is truncated to nb_components.

+

Increasing nb_oversamples or nb_iter increases the accuracy of the approximation

+

Input:

+
  • A, a matrix of shape M, N
  • +
  • nb_components: rank/dimension of the approximation i.e. number of singular values and vectors to extract Must be lower than min(M, N) Default to 2 for 2D visualization
  • +
  • nb_oversamples: Additional number of random projections in the sampling matrix Recommended range 2 .. 10
  • +
  • nb_power_iter: Number of power iterations Power iterations enforce rapid decay of singular values and allow the algorithm to sample dominant singular values and suppress irrelevant information. Useful for noisy problems.
  • +
+

Returns: with K = nb_components

+
  • U: Unitary matrix of shape M, K with rank-K approximation of left singular vectors as columns
  • +
  • S: Rank-k approximation of singular values diagonal of length K in decreasing order
  • +
  • Vh: Unitary matrix of shape K, N with rank-K approximation of right singular vectors as rows
  • +
+

This is an approximate solution of the equation: A = U S V.h

+
  • with S being a diagonal matrix of singular values
  • +
  • with V being the right singular vectors and V.h being the hermitian conjugate of V for real matrices, this is equivalent to V.t (transpose)
  • +
+

โš ๏ธ: Input must not contain NaN

+

Exception:

+
  • This can throw if the algorithm did not converge.
  • +
+

References:

+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/decomposition_rand.idx b/decomposition_rand.idx new file mode 100644 index 000000000..36eef1276 --- /dev/null +++ b/decomposition_rand.idx @@ -0,0 +1,2 @@ +nimTitle decomposition_rand decomposition_rand.html module src/arraymancer/linear_algebra/decomposition_rand 0 +nim svd_randomized decomposition_rand.html#svd_randomized,Tensor[T],int,int,int proc svd_randomized[T](A: Tensor[T]; n_components = 2; n_oversamples = 5;\n n_power_iters = 2): tuple[U, S, Vh: Tensor[T]] 51 diff --git a/deprecate.html b/deprecate.html new file mode 100644 index 000000000..246b02975 --- /dev/null +++ b/deprecate.html @@ -0,0 +1,445 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/private/deprecate + + + + + + + + + +Arraymancer - src/arraymancer/private/deprecate + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/private/deprecate

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ ast_utils +
+
+
+

Macros

+
+
+
+
macro implDeprecatedBy(oldName: untyped; replacement: typed;
+                       exported: static bool): untyped
+
+ + Implement a proc that is deprecated +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/deprecate.idx b/deprecate.idx new file mode 100644 index 000000000..19351d947 --- /dev/null +++ b/deprecate.idx @@ -0,0 +1,2 @@ +nimTitle deprecate deprecate.html module src/arraymancer/private/deprecate 0 +nim implDeprecatedBy deprecate.html#implDeprecatedBy.m,untyped,typed,staticbool macro implDeprecatedBy(oldName: untyped; replacement: typed; exported: static bool): untyped 72 diff --git a/display.html b/display.html new file mode 100644 index 000000000..ba15de068 --- /dev/null +++ b/display.html @@ -0,0 +1,463 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/display + + + + + + + + + +Arraymancer - src/arraymancer/tensor/display + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/display

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc `$`[T](t: Tensor[T]): string
+
+ + Pretty-print a tensor (when using echo for example) +   Source +Edit + +
+
+ +
+
+
+
proc pretty[T](t: Tensor[T]; precision = -1): string
+
+ + Pretty-print a Tensor with the option to set a custom precision for float values. +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/display.idx b/display.idx new file mode 100644 index 000000000..a03b29121 --- /dev/null +++ b/display.idx @@ -0,0 +1,3 @@ +nimTitle display display.html module src/arraymancer/tensor/display 0 +nim pretty display.html#pretty,Tensor[T],int proc pretty[T](t: Tensor[T]; precision = -1): string 19 +nim `$` display.html#$,Tensor[T] proc `$`[T](t: Tensor[T]): string 32 diff --git a/display_cuda.html b/display_cuda.html new file mode 100644 index 000000000..c2f87415f --- /dev/null +++ b/display_cuda.html @@ -0,0 +1,463 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/display_cuda + + + + + + + + + +Arraymancer - src/arraymancer/tensor/display_cuda + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/display_cuda

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc `$`[T](t: CudaTensor[T]): string
+
+ + Pretty-print a CudaTensor (when using echo for example) +   Source +Edit + +
+
+ +
+
+
+
proc pretty[T](t: CudaTensor[T]; precision = -1): string
+
+ + Pretty-print a CudaTensor with the option to set a custom precision for float values. +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/display_cuda.idx b/display_cuda.idx new file mode 100644 index 000000000..a599e860a --- /dev/null +++ b/display_cuda.idx @@ -0,0 +1,3 @@ +nimTitle display_cuda display_cuda.html module src/arraymancer/tensor/display_cuda 0 +nim pretty display_cuda.html#pretty,CudaTensor[T],int proc pretty[T](t: CudaTensor[T]; precision = -1): string 19 +nim `$` display_cuda.html#$,CudaTensor[T] proc `$`[T](t: CudaTensor[T]): string 34 diff --git a/distances.html b/distances.html new file mode 100644 index 000000000..69d61f24d --- /dev/null +++ b/distances.html @@ -0,0 +1,652 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/spatial/distances + + + + + + + + + +Arraymancer - src/arraymancer/spatial/distances + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/spatial/distances

+
+ +
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Types

+
+ +
+
CustomMetric = object
+
+ + +   Source +Edit + +
+
+
+
Euclidean = object
+
+ + +   Source +Edit + +
+
+
+
Jaccard = object
+
+ + +   Source +Edit + +
+
+
+
Manhattan = object
+
+ + +   Source +Edit + +
+
+
+
Minkowski = object
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc distance(metric: typedesc[Euclidean]; v, w: Tensor[float];
+              squared: static bool = false): float
+
+ +

Computes the Euclidean distance between points v and w. Both need to be rank 1 tensors with k elements, where k is the dimensionality of the points.

+

The Euclidean metric is defined as:

+

d = ( ฮฃ_i | v_i - w_i |^2 )^(1/2)

+

If squared is true returns the square of the distance

+ +   Source +Edit + +
+
+
+
proc distance(metric: typedesc[Jaccard]; v, w: Tensor[float]): float
+
+ +

Computes the Jaccard distance between points v and w. Both need to be rank 1 tensors with k elements, where k is the dimensionality of the points.

+

The Jaccard distance is defined as:

+

d = 1 - J(A, B) = ( | A โˆช B | - | A โˆฉ B | ) / ( | A โˆช B | )

+ +   Source +Edit + +
+
+
+
proc distance(metric: typedesc[Manhattan]; v, w: Tensor[float]): float
+
+ +

Computes the Manhattan distance between points v and w. Both need to be rank 1 tensors with k elements, where k is the dimensionality of the points.

+

The Manhattan metric is defined as:

+

d = ฮฃ_i | v_i - w_i |

+ +   Source +Edit + +
+
+
+
proc distance(metric: typedesc[Minkowski]; v, w: Tensor[float]; p = 2.0;
+              squared: static bool = false): float
+
+ +

Computes the Minkowski distance between points v and w. Both need to be rank 1 tensors with k elements, where k is the dimensionality of the points.

+

The Minkowski metric is defined as:

+

d = ( ฮฃ_i | v_i - w_i |^p )^(1/p)

+

Thus, it reduces to the Manhattan distance for p = 1 and the Euclidean metric for p = 2.

+

If squared is true returns the p-th power of the metric. For the Euclidean case this is the square of the distance, hence the name.

+ +   Source +Edit + +
+
+ +
+
+
+
proc distanceMatrix(metric: typedesc[AnyMetric]; x, y: Tensor[float]; p = 2.0;
+                    squared: static bool = false): Tensor[float]
+
+ + Computes the distance matrix between all points in x and y. x and y need to be tensors of rank 2 with:
  • [n_observations, n_dimensions]
  • +
+

The first argument is the metric to compute the distance under. If the Minkowski metric is selected the power p is used.

+

If squared is true and we are computing under a Minkowski or Euclidean metric, we return the p-th power of the distances.

+

Result is a tensor of rank 2, a symmetric matrix where element (i, j) is the distance between x_i and y_j.

+ +   Source +Edit + +
+
+ +
+
+
+
proc pairwiseDistances(metric: typedesc[AnyMetric]; x, y: Tensor[float];
+                       p = 2.0; squared: static bool = false): Tensor[float]
+
+ + Computes all distances between all pairs in x and y. That is if x and y are rank 2 tensors of each:
  • [n_observations, n_dimensions]
  • +
+

we compute the distance between each observation x_i and y_i.

+

One of the arguments may have only 1 observation and thus be of shape [1, n_dimensions]. In this case all distances between this point and all in the other input will be computed so that the result is always of shape [n_observations]. If one input has only shape [n_dimensions] it is unsqueezed to [1, n_dimensions].

+

The first argument is the metric to compute the distance under. If the Minkowski metric is selected the power p is used.

+

If squared is true and we are computing under a Minkowski or Euclidean metric, we return the p-th power of the distances.

+

Result is a tensor of rank 1, with one element for each distance.

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/distances.idx b/distances.idx new file mode 100644 index 000000000..e103490d1 --- /dev/null +++ b/distances.idx @@ -0,0 +1,14 @@ +nimTitle distances distances.html module src/arraymancer/spatial/distances 0 +nim Euclidean distances.html#Euclidean object Euclidean 7 +nim Manhattan distances.html#Manhattan object Manhattan 8 +nim Minkowski distances.html#Minkowski object Minkowski 9 +nim Jaccard distances.html#Jaccard object Jaccard 10 +nim CustomMetric distances.html#CustomMetric object CustomMetric 11 +nim AnyMetric distances.html#AnyMetric type AnyMetric 13 +nim distance distances.html#distance,typedesc[Manhattan],Tensor[float],Tensor[float] proc distance(metric: typedesc[Manhattan]; v, w: Tensor[float]): float 24 +nim distance distances.html#distance,typedesc[Minkowski],Tensor[float],Tensor[float],float,staticbool proc distance(metric: typedesc[Minkowski]; v, w: Tensor[float]; p = 2.0;\n squared: static bool = false): float 36 +nim distance distances.html#distance,typedesc[Euclidean],Tensor[float],Tensor[float],staticbool proc distance(metric: typedesc[Euclidean]; v, w: Tensor[float];\n squared: static bool = false): float 63 +nim distance distances.html#distance,typedesc[Jaccard],Tensor[float],Tensor[float] proc distance(metric: typedesc[Jaccard]; v, w: Tensor[float]): float 101 +nim pairwiseDistances distances.html#pairwiseDistances,typedesc[AnyMetric],Tensor[float],Tensor[float],float,staticbool proc pairwiseDistances(metric: typedesc[AnyMetric]; x, y: Tensor[float]; p = 2.0;\n squared: static bool = false): Tensor[float] 118 +nim distanceMatrix distances.html#distanceMatrix,typedesc[AnyMetric],Tensor[float],Tensor[float],float,staticbool proc distanceMatrix(metric: typedesc[AnyMetric]; x, y: Tensor[float]; p = 2.0;\n squared: static bool = false): Tensor[float] 174 +nimgrp distance distances.html#distance-procs-all proc 24 diff --git a/distributions.html b/distributions.html new file mode 100644 index 000000000..af14ee296 --- /dev/null +++ 
b/distributions.html @@ -0,0 +1,627 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/stats/distributions + + + + + + + + + +Arraymancer - src/arraymancer/stats/distributions + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/stats/distributions

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Procs

+
+
+
+
proc box(x: float): float {....raises: [], tags: [], forbids: [].}
+
+ + provides a box distribution +   Source +Edit + +
+
+ +
+
+
+
proc epanechnikov(x: float): float {....raises: [], tags: [], forbids: [].}
+
+ + provides an Epanechnikov distribution +   Source +Edit + +
+
+ +
+
+
+
proc gauss[T](x, mean, sigma: T; norm = false): float
+
+ +

Returns a value of the gaussian distribution described by mean, sigma at position x.

+

If norm is true the value will be normalized by 1 / sqrt(2ฯ€).

+

Based on the ROOT implementation of TMath::Gaus: https://root.cern.ch/root/html524/src/TMath.cxx.html#dKZ4iB

+

Inputs are converted to float.

+ +   Source +Edit + +
+
+
+
proc gauss[T](x: Tensor[T]; mean, sigma: T; norm = false): Tensor[float]
+
+ + Returns a tensor evaluated at all positions of its values on the gaussian distribution described by mean and sigma. +   Source +Edit + +
+
+ +
+
+
+
proc triangular(x: float): float {....raises: [], tags: [], forbids: [].}
+
+ + provides a triangular distribution +   Source +Edit + +
+
+ +
+
+
+
proc trigonometric(x: float): float {....raises: [], tags: [], forbids: [].}
+
+ + provides a trigonometric distribution +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template box[T](t`gensym0: Tensor[T]): Tensor[float]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template epanechnikov[T](t`gensym3: Tensor[T]): Tensor[float]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template triangular[T](t`gensym1: Tensor[T]): Tensor[float]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template trigonometric[T](t`gensym2: Tensor[T]): Tensor[float]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/distributions.idx b/distributions.idx new file mode 100644 index 000000000..9c6e1c566 --- /dev/null +++ b/distributions.idx @@ -0,0 +1,12 @@ +nimTitle distributions distributions.html module src/arraymancer/stats/distributions 0 +nim gauss distributions.html#gauss,T,T,T proc gauss[T](x, mean, sigma: T; norm = false): float 18 +nim gauss distributions.html#gauss,Tensor[T],T,T proc gauss[T](x: Tensor[T]; mean, sigma: T; norm = false): Tensor[float] 38 +nim box distributions.html#box,float proc box(x: float): float 43 +nim triangular distributions.html#triangular,float proc triangular(x: float): float 47 +nim trigonometric distributions.html#trigonometric,float proc trigonometric(x: float): float 52 +nim epanechnikov distributions.html#epanechnikov,float proc epanechnikov(x: float): float 57 +nim box distributions.html#box.t template box[T](t`gensym0: Tensor[T]): Tensor[float] 67 +nim triangular distributions.html#triangular.t template triangular[T](t`gensym1: Tensor[T]): Tensor[float] 68 +nim trigonometric distributions.html#trigonometric.t template trigonometric[T](t`gensym2: Tensor[T]): Tensor[float] 69 +nim epanechnikov distributions.html#epanechnikov.t template epanechnikov[T](t`gensym3: Tensor[T]): Tensor[float] 70 +nimgrp gauss distributions.html#gauss-procs-all proc 18 diff --git a/dochack.js b/dochack.js new file mode 100644 index 000000000..c2de4782a --- /dev/null +++ b/dochack.js @@ -0,0 +1,2476 @@ +/* Generated by the Nim Compiler v2.0.3 */ +var framePtr = null; +var excHandler = 0; +var lastJSError = null; +var NTI33554466 = {size: 0,kind: 1,base: null,node: null,finalizer: null}; +var NTI671088654 = {size: 0, kind: 18, base: null, node: null, finalizer: null}; +var NTI872415311 = {size: 0,kind: 31,base: null,node: null,finalizer: null}; +var NTI872415317 = {size: 0, kind: 18, base: null, node: null, finalizer: null}; +var NTI134217745 = {size: 0, kind: 17, base: null, node: null, finalizer: null}; +var NTI134217749 = {size: 0, kind: 17, 
base: null, node: null, finalizer: null}; +var NTI134217751 = {size: 0, kind: 17, base: null, node: null, finalizer: null}; +var NTI33554435 = {size: 0,kind: 31,base: null,node: null,finalizer: null}; +var NTI33555835 = {size: 0, kind: 18, base: null, node: null, finalizer: null}; +var NTI33555173 = {size: 0, kind: 17, base: null, node: null, finalizer: null}; +var NTI33555181 = {size: 0, kind: 22, base: null, node: null, finalizer: null}; +var NTI33554449 = {size: 0,kind: 28,base: null,node: null,finalizer: null}; +var NTI33554450 = {size: 0,kind: 29,base: null,node: null,finalizer: null}; +var NTI33555180 = {size: 0, kind: 22, base: null, node: null, finalizer: null}; +var NTI33555177 = {size: 0, kind: 17, base: null, node: null, finalizer: null}; +var NTI33555178 = {size: 0, kind: 17, base: null, node: null, finalizer: null}; +var NTI134217741 = {size: 0, kind: 17, base: null, node: null, finalizer: null}; +var NTI134217743 = {size: 0, kind: 17, base: null, node: null, finalizer: null}; +var NNI134217743 = {kind: 2, len: 0, offset: 0, typ: null, name: null, sons: []}; +NTI134217743.node = NNI134217743; +var NNI134217741 = {kind: 2, len: 0, offset: 0, typ: null, name: null, sons: []}; +NTI134217741.node = NNI134217741; +var NNI33555178 = {kind: 2, len: 0, offset: 0, typ: null, name: null, sons: []}; +NTI33555178.node = NNI33555178; +NTI33555180.base = NTI33555177; +NTI33555181.base = NTI33555177; +var NNI33555177 = {kind: 2, len: 5, offset: 0, typ: null, name: null, sons: [{kind: 1, offset: "parent", len: 0, typ: NTI33555180, name: "parent", sons: null}, +{kind: 1, offset: "name", len: 0, typ: NTI33554450, name: "name", sons: null}, +{kind: 1, offset: "message", len: 0, typ: NTI33554449, name: "msg", sons: null}, +{kind: 1, offset: "trace", len: 0, typ: NTI33554449, name: "trace", sons: null}, +{kind: 1, offset: "up", len: 0, typ: NTI33555181, name: "up", sons: null}]}; +NTI33555177.node = NNI33555177; +var NNI33555173 = {kind: 2, len: 0, offset: 0, typ: null, 
name: null, sons: []}; +NTI33555173.node = NNI33555173; +NTI33555177.base = NTI33555173; +NTI33555178.base = NTI33555177; +NTI134217741.base = NTI33555178; +NTI134217743.base = NTI134217741; +var NNI33555835 = {kind: 2, len: 3, offset: 0, typ: null, name: null, sons: [{kind: 1, offset: "Field0", len: 0, typ: NTI33554450, name: "Field0", sons: null}, +{kind: 1, offset: "Field1", len: 0, typ: NTI33554435, name: "Field1", sons: null}, +{kind: 1, offset: "Field2", len: 0, typ: NTI33554450, name: "Field2", sons: null}]}; +NTI33555835.node = NNI33555835; +var NNI134217751 = {kind: 2, len: 0, offset: 0, typ: null, name: null, sons: []}; +NTI134217751.node = NNI134217751; +NTI134217751.base = NTI33555178; +var NNI134217749 = {kind: 2, len: 0, offset: 0, typ: null, name: null, sons: []}; +NTI134217749.node = NNI134217749; +NTI134217749.base = NTI33555178; +var NNI134217745 = {kind: 2, len: 0, offset: 0, typ: null, name: null, sons: []}; +NTI134217745.node = NNI134217745; +NTI134217745.base = NTI33555178; +var NNI872415317 = {kind: 2, len: 2, offset: 0, typ: null, name: null, sons: [{kind: 1, offset: "a", len: 0, typ: NTI872415311, name: "a", sons: null}, +{kind: 1, offset: "b", len: 0, typ: NTI33554435, name: "b", sons: null}]}; +NTI872415317.node = NNI872415317; +var NNI671088654 = {kind: 2, len: 2, offset: 0, typ: null, name: null, sons: [{kind: 1, offset: "Field0", len: 0, typ: NTI33554435, name: "Field0", sons: null}, +{kind: 1, offset: "Field1", len: 0, typ: NTI33554466, name: "Field1", sons: null}]}; +NTI671088654.node = NNI671088654; + +function mnewString(len_33557003) { + var result = new Array(len_33557003); + for (var i = 0; i < len_33557003; i++) {result[i] = 0;} + return result; + + + +} + +function arrayConstr(len_33557314, value_33557315, typ_33557316) { + var result = new Array(len_33557314); + for (var i = 0; i < len_33557314; ++i) result[i] = nimCopy(null, value_33557315, typ_33557316); + return result; + + + +} + +function setConstr() { + var result = {}; 
+ for (var i = 0; i < arguments.length; ++i) { + var x = arguments[i]; + if (typeof(x) == "object") { + for (var j = x[0]; j <= x[1]; ++j) { + result[j] = true; + } + } else { + result[x] = true; + } + } + return result; + + + +} +var ConstSet1 = setConstr(17, 16, 4, 18, 27, 19, 23, 22, 21); + +function nimCopy(dest_33557268, src_33557269, ti_33557270) { + var result_33557279 = null; + + switch (ti_33557270.kind) { + case 21: + case 22: + case 23: + case 5: + if (!(isFatPointer_33557259(ti_33557270))) { + result_33557279 = src_33557269; + } + else { + result_33557279 = [src_33557269[0], src_33557269[1]]; + } + + break; + case 19: + if (dest_33557268 === null || dest_33557268 === undefined) { + dest_33557268 = {}; + } + else { + for (var key in dest_33557268) { delete dest_33557268[key]; } + } + for (var key in src_33557269) { dest_33557268[key] = src_33557269[key]; } + result_33557279 = dest_33557268; + + break; + case 18: + case 17: + if (!((ti_33557270.base == null))) { + result_33557279 = nimCopy(dest_33557268, src_33557269, ti_33557270.base); + } + else { + if ((ti_33557270.kind == 17)) { + result_33557279 = (dest_33557268 === null || dest_33557268 === undefined) ? {m_type: ti_33557270} : dest_33557268; + } + else { + result_33557279 = (dest_33557268 === null || dest_33557268 === undefined) ? 
{} : dest_33557268; + } + } + nimCopyAux(result_33557279, src_33557269, ti_33557270.node); + break; + case 4: + case 16: + if(ArrayBuffer.isView(src_33557269)) { + if(dest_33557268 === null || dest_33557268 === undefined || dest_33557268.length != src_33557269.length) { + dest_33557268 = new src_33557269.constructor(src_33557269); + } else { + dest_33557268.set(src_33557269, 0); + } + result_33557279 = dest_33557268; + } else { + if (src_33557269 === null) { + result_33557279 = null; + } + else { + if (dest_33557268 === null || dest_33557268 === undefined || dest_33557268.length != src_33557269.length) { + dest_33557268 = new Array(src_33557269.length); + } + result_33557279 = dest_33557268; + for (var i = 0; i < src_33557269.length; ++i) { + result_33557279[i] = nimCopy(result_33557279[i], src_33557269[i], ti_33557270.base); + } + } + } + + break; + case 24: + case 27: + if (src_33557269 === null) { + result_33557279 = null; + } + else { + if (dest_33557268 === null || dest_33557268 === undefined || dest_33557268.length != src_33557269.length) { + dest_33557268 = new Array(src_33557269.length); + } + result_33557279 = dest_33557268; + for (var i = 0; i < src_33557269.length; ++i) { + result_33557279[i] = nimCopy(result_33557279[i], src_33557269[i], ti_33557270.base); + } + } + + break; + case 28: + if (src_33557269 !== null) { + result_33557279 = src_33557269.slice(0); + } + + break; + default: + result_33557279 = src_33557269; + break; + } + + return result_33557279; + +} + +function cstrToNimstr(c_33556898) { + var ln = c_33556898.length; + var result = new Array(ln); + var r = 0; + for (var i = 0; i < ln; ++i) { + var ch = c_33556898.charCodeAt(i); + + if (ch < 128) { + result[r] = ch; + } + else { + if (ch < 2048) { + result[r] = (ch >> 6) | 192; + } + else { + if (ch < 55296 || ch >= 57344) { + result[r] = (ch >> 12) | 224; + } + else { + ++i; + ch = 65536 + (((ch & 1023) << 10) | (c_33556898.charCodeAt(i) & 1023)); + result[r] = (ch >> 18) | 240; + ++r; + 
result[r] = ((ch >> 12) & 63) | 128; + } + ++r; + result[r] = ((ch >> 6) & 63) | 128; + } + ++r; + result[r] = (ch & 63) | 128; + } + ++r; + } + return result; + + + +} + +function toJSStr(s_33556901) { + var result_33556902 = null; + + var res_33556943 = newSeq_33556919((s_33556901).length); + var i_33556944 = 0; + var j_33556945 = 0; + Label1: { + Label2: while (true) { + if (!(i_33556944 < (s_33556901).length)) break Label2; + var c_33556946 = s_33556901[i_33556944]; + if ((c_33556946 < 128)) { + res_33556943[j_33556945] = String.fromCharCode(c_33556946); + i_33556944 += 1; + } + else { + var helper_33556959 = newSeq_33556919(0); + Label3: { + Label4: while (true) { + if (!true) break Label4; + var code_33556960 = c_33556946.toString(16); + if ((((code_33556960) == null ? 0 : (code_33556960).length) == 1)) { + helper_33556959.push("%0");; + } + else { + helper_33556959.push("%");; + } + + helper_33556959.push(code_33556960);; + i_33556944 += 1; + if ((((s_33556901).length <= i_33556944) || (s_33556901[i_33556944] < 128))) { + break Label3; + } + + c_33556946 = s_33556901[i_33556944]; + } + }; +++excHandler; + try { + res_33556943[j_33556945] = decodeURIComponent(helper_33556959.join("")); +--excHandler; +} catch (EXCEPTION) { + var prevJSError = lastJSError; + lastJSError = EXCEPTION; + --excHandler; + res_33556943[j_33556945] = helper_33556959.join(""); + lastJSError = prevJSError; + } finally { + } + } + + j_33556945 += 1; + } + }; + if (res_33556943.length < j_33556945) { for (var i = res_33556943.length ; i < j_33556945 ; ++i) res_33556943.push(null); } + else { res_33556943.length = j_33556945; }; + result_33556902 = res_33556943.join(""); + + return result_33556902; + +} + +function raiseException(e_33556653, ename_33556654) { + e_33556653.name = ename_33556654; + if ((excHandler == 0)) { + unhandledException(e_33556653); + } + + e_33556653.trace = nimCopy(null, rawWriteStackTrace_33556604(), NTI33554449); + throw e_33556653; + + +} + +function 
addInt(a_33557050, b_33557051) { + var result = a_33557050 + b_33557051; + checkOverflowInt(result); + return result; + + + +} + +function chckRange(i_33557324, a_33557325, b_33557326) { + var result_33557327 = 0; + + BeforeRet: { + if (((a_33557325 <= i_33557324) && (i_33557324 <= b_33557326))) { + result_33557327 = i_33557324; + break BeforeRet; + } + else { + raiseRangeError(); + } + + }; + + return result_33557327; + +} + +function chckIndx(i_33557319, a_33557320, b_33557321) { + var result_33557322 = 0; + + BeforeRet: { + if (((a_33557320 <= i_33557319) && (i_33557319 <= b_33557321))) { + result_33557322 = i_33557319; + break BeforeRet; + } + else { + raiseIndexError(i_33557319, a_33557320, b_33557321); + } + + }; + + return result_33557322; + +} + +function makeNimstrLit(c_33556895) { + var result = []; + for (var i = 0; i < c_33556895.length; ++i) { + result[i] = c_33556895.charCodeAt(i); + } + return result; + + + +} + +function subInt(a_33557054, b_33557055) { + var result = a_33557054 - b_33557055; + checkOverflowInt(result); + return result; + + + +} +var ConstSet2 = setConstr([65, 90]); +var ConstSet3 = setConstr(95, 32, 46); +var ConstSet4 = setConstr(95, 32, 46); + +function mulInt(a_33557058, b_33557059) { + var result = a_33557058 * b_33557059; + checkOverflowInt(result); + return result; + + + +} +var ConstSet5 = setConstr([97, 122]); +var ConstSet6 = setConstr([65, 90], [97, 122]); +var ConstSet7 = setConstr([97, 122]); +var ConstSet8 = setConstr([65, 90]); +var ConstSet9 = setConstr([65, 90], [97, 122]); + +function nimMax(a_33557108, b_33557109) { + var Temporary1; + + var result_33557110 = 0; + + BeforeRet: { + if ((b_33557109 <= a_33557108)) { + Temporary1 = a_33557108; + } + else { + Temporary1 = b_33557109; + } + + result_33557110 = Temporary1; + break BeforeRet; + }; + + return result_33557110; + +} + +function nimMin(a_33557104, b_33557105) { + var Temporary1; + + var result_33557106 = 0; + + BeforeRet: { + if ((a_33557104 <= b_33557105)) 
{ + Temporary1 = a_33557104; + } + else { + Temporary1 = b_33557105; + } + + result_33557106 = Temporary1; + break BeforeRet; + }; + + return result_33557106; + +} + +function addChar(x_33557415, c_33557416) { + x_33557415.push(c_33557416); + + +} +var F = {procname: "module dom", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/js/dom.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module dom", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/js/dom.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module parseutils", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/parseutils.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module parseutils", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/parseutils.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module macros", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/core/macros.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module macros", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/core/macros.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module bitops_utils", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/private/bitops_utils.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module bitops_utils", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/private/bitops_utils.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module countbits_impl", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/system/countbits_impl.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module countbits_impl", prev: framePtr, filename: 
"/home/runner/work/Arraymancer/Arraymancer/nim/lib/system/countbits_impl.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module bitops", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/bitops.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module bitops", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/bitops.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module fenv", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/fenv.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module fenv", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/fenv.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module math", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/math.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module math", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/math.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module algorithm", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/algorithm.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module algorithm", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/algorithm.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module typetraits", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/typetraits.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module typetraits", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/typetraits.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module enumutils", prev: framePtr, filename: 
"/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/enumutils.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module enumutils", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/enumutils.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module strbasics", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/strbasics.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module strbasics", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/strbasics.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module unicode", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/unicode.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module unicode", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/unicode.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module strimpl", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/private/strimpl.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module strimpl", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/private/strimpl.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module strutils", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/strutils.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module strutils", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/strutils.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module fuzzysearch", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/fuzzysearch.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module fuzzysearch", prev: 
framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/fuzzysearch.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module hashes", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/hashes.nim", line: 0}; +framePtr = F; +F.line = 218; +F.filename = "hashes.nim"; +var objectID_1040187569 = [0]; +framePtr = F.prev; +var F = {procname: "module hashes", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/hashes.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module outparams", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/outparams.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module outparams", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/outparams.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module tables", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/collections/tables.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module tables", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/collections/tables.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module jsffi", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/js/jsffi.nim", line: 0}; +framePtr = F; +F.line = 45; +F.filename = "jsffi.nim"; +F.line = 46; +framePtr = F.prev; +var F = {procname: "module jsffi", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/js/jsffi.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module asyncjs", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/js/asyncjs.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module asyncjs", prev: framePtr, filename: 
"/home/runner/work/Arraymancer/Arraymancer/nim/lib/js/asyncjs.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module jsformdata", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/jsformdata.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module jsformdata", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/jsformdata.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module jsheaders", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/jsheaders.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module jsheaders", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/jsheaders.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module httpcore", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/httpcore.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module httpcore", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/httpcore.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module jsfetch", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/jsfetch.nim", line: 0}; +framePtr = F; +framePtr = F.prev; +var F = {procname: "module jsfetch", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/jsfetch.nim", line: 0}; +framePtr = F; +framePtr = F.prev; + +function setTheme(theme_520093702) { + var F = {procname: "dochack.setTheme", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 7; + F.filename = "dochack.nim"; + document.documentElement.setAttribute("data-theme", theme_520093702); + F.line = 8; + window.localStorage.setItem("theme", theme_520093702); + framePtr = F.prev; + + +} + +function 
add_33556373(x_33556374, x_33556374_Idx, y_33556375) { + if (x_33556374[x_33556374_Idx] === null) { x_33556374[x_33556374_Idx] = []; } + var off = x_33556374[x_33556374_Idx].length; + x_33556374[x_33556374_Idx].length += y_33556375.length; + for (var i = 0; i < y_33556375.length; ++i) { + x_33556374[x_33556374_Idx][off+i] = y_33556375.charCodeAt(i); + } + + + +} + +function isFatPointer_33557259(ti_33557260) { + var result_33557261 = false; + + BeforeRet: { + result_33557261 = !((ConstSet1[ti_33557260.base.kind] != undefined)); + break BeforeRet; + }; + + return result_33557261; + +} + +function nimCopyAux(dest_33557272, src_33557273, n_33557274) { + switch (n_33557274.kind) { + case 0: + break; + case 1: + dest_33557272[n_33557274.offset] = nimCopy(dest_33557272[n_33557274.offset], src_33557273[n_33557274.offset], n_33557274.typ); + + break; + case 2: + for (var i = 0; i < n_33557274.sons.length; i++) { + nimCopyAux(dest_33557272, src_33557273, n_33557274.sons[i]); + } + + break; + case 3: + dest_33557272[n_33557274.offset] = nimCopy(dest_33557272[n_33557274.offset], src_33557273[n_33557274.offset], n_33557274.typ); + for (var i = 0; i < n_33557274.sons.length; ++i) { + nimCopyAux(dest_33557272, src_33557273, n_33557274.sons[i][1]); + } + + break; + } + + +} + +function auxWriteStackTrace_33556508(f_33556509) { + var result_33556510 = [[]]; + + var it_33556518 = f_33556509; + var i_33556519 = 0; + var total_33556520 = 0; + var tempFrames_33556521 = arrayConstr(64, {Field0: null, Field1: 0, Field2: null}, NTI33555835); + Label1: { + Label2: while (true) { + if (!(!((it_33556518 == null)) && (i_33556519 <= 63))) break Label2; + tempFrames_33556521[i_33556519].Field0 = it_33556518.procname; + tempFrames_33556521[i_33556519].Field1 = it_33556518.line; + tempFrames_33556521[i_33556519].Field2 = it_33556518.filename; + i_33556519 += 1; + total_33556520 += 1; + it_33556518 = it_33556518.prev; + } + }; + Label3: { + Label4: while (true) { + if (!!((it_33556518 == null))) 
break Label4; + total_33556520 += 1; + it_33556518 = it_33556518.prev; + } + }; + result_33556510[0] = nimCopy(null, [], NTI33554449); + if (!((total_33556520 == i_33556519))) { + result_33556510[0].push.apply(result_33556510[0], [40]);; + result_33556510[0].push.apply(result_33556510[0], HEX24_369098760((total_33556520 - i_33556519)));; + result_33556510[0].push.apply(result_33556510[0], [32,99,97,108,108,115,32,111,109,105,116,116,101,100,41,32,46,46,46,10]);; + } + + Label5: { + var j_33556554 = 0; + var colontmp__520094900 = 0; + colontmp__520094900 = (i_33556519 - 1); + var res_520094902 = colontmp__520094900; + Label6: { + Label7: while (true) { + if (!(0 <= res_520094902)) break Label7; + j_33556554 = res_520094902; + result_33556510[0].push.apply(result_33556510[0], cstrToNimstr(tempFrames_33556521[j_33556554].Field2));; + if ((0 < tempFrames_33556521[j_33556554].Field1)) { + result_33556510[0].push.apply(result_33556510[0], [40]);; + addInt_301990129(result_33556510, 0, tempFrames_33556521[j_33556554].Field1); + if (false) { + result_33556510[0].push.apply(result_33556510[0], [44,32]);; + addInt_301990129(result_33556510, 0, 0); + } + + result_33556510[0].push.apply(result_33556510[0], [41]);; + } + + result_33556510[0].push.apply(result_33556510[0], [32,97,116,32]);; + add_33556373(result_33556510, 0, tempFrames_33556521[j_33556554].Field0); + result_33556510[0].push.apply(result_33556510[0], [10]);; + res_520094902 -= 1; + } + }; + }; + + return result_33556510[0]; + +} + +function rawWriteStackTrace_33556604() { + var result_33556605 = []; + + if (!((framePtr == null))) { + result_33556605 = nimCopy(null, ([84,114,97,99,101,98,97,99,107,32,40,109,111,115,116,32,114,101,99,101,110,116,32,99,97,108,108,32,108,97,115,116,41,10] || []).concat(auxWriteStackTrace_33556508(framePtr) || []), NTI33554449); + } + else { + result_33556605 = nimCopy(null, [78,111,32,115,116,97,99,107,32,116,114,97,99,101,98,97,99,107,32,97,118,97,105,108,97,98,108,101,10], 
NTI33554449); + } + + + return result_33556605; + +} + +function newSeq_33556919(len_33556921) { + var result_33556922 = []; + + var F = {procname: "newSeq.newSeq", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/system.nim", line: 0}; + framePtr = F; + F.line = 631; + F.filename = "system.nim"; + result_33556922 = new Array(len_33556921); for (var i = 0 ; i < len_33556921 ; ++i) { result_33556922[i] = null; } framePtr = F.prev; + + return result_33556922; + +} + +function unhandledException(e_33556649) { + var buf_33556650 = [[]]; + if (!(((e_33556649.message).length == 0))) { + buf_33556650[0].push.apply(buf_33556650[0], [69,114,114,111,114,58,32,117,110,104,97,110,100,108,101,100,32,101,120,99,101,112,116,105,111,110,58,32]);; + buf_33556650[0].push.apply(buf_33556650[0], e_33556649.message);; + } + else { + buf_33556650[0].push.apply(buf_33556650[0], [69,114,114,111,114,58,32,117,110,104,97,110,100,108,101,100,32,101,120,99,101,112,116,105,111,110]);; + } + + buf_33556650[0].push.apply(buf_33556650[0], [32,91]);; + add_33556373(buf_33556650, 0, e_33556649.name); + buf_33556650[0].push.apply(buf_33556650[0], [93,10]);; + buf_33556650[0].push.apply(buf_33556650[0], rawWriteStackTrace_33556604());; + var cbuf_33556651 = toJSStr(buf_33556650[0]); + framePtr = null; + if (typeof(Error) !== "undefined") { + throw new Error(cbuf_33556651); + } + else { + throw cbuf_33556651; + } + + + +} + +function raiseOverflow() { + raiseException({message: [111,118,101,114,45,32,111,114,32,117,110,100,101,114,102,108,111,119], parent: null, m_type: NTI134217743, name: null, trace: [], up: null}, "OverflowDefect"); + + +} + +function checkOverflowInt(a_33557048) { + if (a_33557048 > 2147483647 || a_33557048 < -2147483648) raiseOverflow(); + + + +} + +function raiseRangeError() { + raiseException({message: [118,97,108,117,101,32,111,117,116,32,111,102,32,114,97,110,103,101], parent: null, m_type: NTI134217751, name: null, trace: [], up: null}, 
"RangeDefect"); + + +} + +function addChars_301990090(result_301990092, result_301990092_Idx, x_301990093, start_301990094, n_301990095) { + var Temporary1; + + var F = {procname: "addChars.addChars", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/private/digitsutils.nim", line: 0}; + framePtr = F; + F.line = 43; + F.filename = "digitsutils.nim"; + var old_301990096 = (result_301990092[result_301990092_Idx]).length; + F.line = 44; + if (result_301990092[result_301990092_Idx].length < (Temporary1 = chckRange(addInt(old_301990096, n_301990095), 0, 2147483647), Temporary1)) { for (var i = result_301990092[result_301990092_Idx].length; i < Temporary1; ++i) result_301990092[result_301990092_Idx].push(0); } + else {result_301990092[result_301990092_Idx].length = Temporary1; }; + Label2: { + F.line = 46; + var iHEX60gensym4_301990110 = 0; + F.line = 119; + F.filename = "iterators_1.nim"; + var i_520094895 = 0; + Label3: { + F.line = 120; + Label4: while (true) { + if (!(i_520094895 < n_301990095)) break Label4; + F.line = 49; + F.filename = "digitsutils.nim"; + iHEX60gensym4_301990110 = i_520094895; + F.line = 49; + result_301990092[result_301990092_Idx][chckIndx(addInt(old_301990096, iHEX60gensym4_301990110), 0, (result_301990092[result_301990092_Idx]).length - 1)] = x_301990093.charCodeAt(chckIndx(addInt(start_301990094, iHEX60gensym4_301990110), 0, (x_301990093).length - 1)); + F.line = 122; + F.filename = "iterators_1.nim"; + i_520094895 = addInt(i_520094895, 1); + } + }; + }; + framePtr = F.prev; + + +} + +function addChars_301990086(result_301990088, result_301990088_Idx, x_301990089) { + var F = {procname: "addChars.addChars", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/private/digitsutils.nim", line: 0}; + framePtr = F; + F.line = 55; + F.filename = "digitsutils.nim"; + addChars_301990090(result_301990088, result_301990088_Idx, x_301990089, 0, ((x_301990089) == null ? 
0 : (x_301990089).length)); + framePtr = F.prev; + + +} + +function addInt_301990111(result_301990112, result_301990112_Idx, x_301990113) { + var F = {procname: "digitsutils.addInt", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/private/digitsutils.nim", line: 0}; + framePtr = F; + F.line = 111; + F.filename = "digitsutils.nim"; + addChars_301990086(result_301990112, result_301990112_Idx, ((x_301990113) + "")); + framePtr = F.prev; + + +} + +function addInt_301990129(result_301990130, result_301990130_Idx, x_301990131) { + var F = {procname: "digitsutils.addInt", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/private/digitsutils.nim", line: 0}; + framePtr = F; + F.line = 115; + F.filename = "digitsutils.nim"; + addInt_301990111(result_301990130, result_301990130_Idx, BigInt(x_301990131)); + framePtr = F.prev; + + +} + +function HEX24_369098760(x_369098761) { + var result_369098762 = [[]]; + + var F = {procname: "dollars.$", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/system/dollars.nim", line: 0}; + framePtr = F; + F.line = 18; + F.filename = "dollars.nim"; + addInt_301990129(result_369098762, 0, x_369098761); + framePtr = F.prev; + + return result_369098762[0]; + +} + +function raiseIndexError(i_33556812, a_33556813, b_33556814) { + var Temporary1; + + if ((b_33556814 < a_33556813)) { + Temporary1 = [105,110,100,101,120,32,111,117,116,32,111,102,32,98,111,117,110,100,115,44,32,116,104,101,32,99,111,110,116,97,105,110,101,114,32,105,115,32,101,109,112,116,121]; + } + else { + Temporary1 = ([105,110,100,101,120,32] || []).concat(HEX24_369098760(i_33556812) || [],[32,110,111,116,32,105,110,32] || [],HEX24_369098760(a_33556813) || [],[32,46,46,32] || [],HEX24_369098760(b_33556814) || []); + } + + raiseException({message: nimCopy(null, Temporary1, NTI33554449), parent: null, m_type: NTI134217749, name: null, trace: [], up: null}, "IndexDefect"); + + +} + +function 
sysFatal_268435501(message_268435504) { + var F = {procname: "sysFatal.sysFatal", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/system/fatal.nim", line: 0}; + framePtr = F; + F.line = 53; + F.filename = "fatal.nim"; + raiseException({message: nimCopy(null, message_268435504, NTI33554449), m_type: NTI134217745, parent: null, name: null, trace: [], up: null}, "AssertionDefect"); + framePtr = F.prev; + + +} + +function raiseAssert_268435499(msg_268435500) { + var F = {procname: "assertions.raiseAssert", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/assertions.nim", line: 0}; + framePtr = F; + F.line = 36; + F.filename = "assertions.nim"; + sysFatal_268435501(msg_268435500); + framePtr = F.prev; + + +} + +function failedAssertImpl_268435541(msg_268435542) { + var F = {procname: "assertions.failedAssertImpl", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/std/assertions.nim", line: 0}; + framePtr = F; + F.line = 41; + F.filename = "assertions.nim"; + raiseAssert_268435499(msg_268435542); + framePtr = F.prev; + + +} + +function onDOMLoaded(e_520093737) { + +function HEX3Aanonymous_520093761(event_520093762) { + var F = {procname: "onDOMLoaded.:anonymous", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 22; + F.filename = "dochack.nim"; + event_520093762.target.parentNode.style.display = "none"; + F.line = 24; + event_520093762.target.parentNode.nextSibling.style.display = "inline"; + framePtr = F.prev; + + + } + + var F = {procname: "dochack.onDOMLoaded", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 17; + F.filename = "dochack.nim"; + document.getElementById("theme-select").value = window.localStorage.getItem("theme"); + Label1: { + F.line = 19; + var pragmaDots_520093760 = null; + F.line = 239; + 
F.filename = "iterators.nim"; + var colontmp__520094886 = []; + F.line = 19; + F.filename = "dochack.nim"; + colontmp__520094886 = document.getElementsByClassName("pragmadots"); + F.line = 241; + F.filename = "iterators.nim"; + var i_520094888 = 0; + F.line = 242; + var L_520094889 = (colontmp__520094886).length; + Label2: { + F.line = 243; + Label3: while (true) { + if (!(i_520094888 < L_520094889)) break Label3; + F.line = 19; + F.filename = "dochack.nim"; + pragmaDots_520093760 = colontmp__520094886[chckIndx(i_520094888, 0, (colontmp__520094886).length - 1)]; + F.line = 19; + pragmaDots_520093760.onclick = HEX3Aanonymous_520093761; + F.line = 245; + F.filename = "iterators.nim"; + i_520094888 = addInt(i_520094888, 1); + if (!(((colontmp__520094886).length == L_520094889))) { + F.line = 246; + failedAssertImpl_268435541(makeNimstrLit("/home/runner/work/Arraymancer/Arraymancer/nim/lib/system/iterators.nim(246, 11) `len(a) == L` the length of the seq changed while iterating over it")); + } + + } + }; + }; + framePtr = F.prev; + + +} + +function isWhitespace_520094104(x_520094105) { + var result_520094106 = false; + + var F = {procname: "dochack.isWhitespace", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 119; + F.filename = "dochack.nim"; + result_520094106 = (((x_520094105.nodeName == "#text") && !/\S/.test(x_520094105.textContent)) || (x_520094105.nodeName == "#comment")); + framePtr = F.prev; + + return result_520094106; + +} + +function toToc_520094107(x_520094108, father_520094109) { + var Temporary5; + var Temporary6; + var Temporary7; + var Temporary8; + var Temporary15; + + var F = {procname: "dochack.toToc", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + if ((x_520094108.nodeName == "UL")) { + F.line = 123; + F.filename = "dochack.nim"; + var f_520094117 = {heading: null, kids: [], 
sortId: (father_520094109.kids).length, doSort: false}; + F.line = 124; + var i_520094118 = 0; + Label1: { + F.line = 125; + Label2: while (true) { + if (!(i_520094118 < x_520094108.childNodes.length)) break Label2; + F.line = 126; + var nxt_520094119 = addInt(i_520094118, 1); + Label3: { + F.line = 127; + Label4: while (true) { + if (!(nxt_520094119 < x_520094108.childNodes.length)) Temporary5 = false; else { Temporary5 = isWhitespace_520094104(x_520094108.childNodes[nxt_520094119]); } if (!Temporary5) break Label4; + F.line = 128; + nxt_520094119 = addInt(nxt_520094119, 1); + } + }; + if (!(nxt_520094119 < x_520094108.childNodes.length)) Temporary8 = false; else { Temporary8 = (x_520094108.childNodes[i_520094118].nodeName == "LI"); } if (!Temporary8) Temporary7 = false; else { Temporary7 = (x_520094108.childNodes[i_520094118].childNodes.length == 1); } if (!Temporary7) Temporary6 = false; else { Temporary6 = (x_520094108.childNodes[nxt_520094119].nodeName == "UL"); } if (Temporary6) { + F.line = 131; + var e_520094132 = {heading: x_520094108.childNodes[i_520094118].childNodes[0], kids: [], sortId: (f_520094117.kids).length, doSort: false}; + F.line = 132; + var it_520094133 = x_520094108.childNodes[nxt_520094119]; + Label9: { + F.line = 133; + var j_520094138 = 0; + F.line = 117; + F.filename = "iterators_1.nim"; + var colontmp__520094909 = 0; + F.line = 133; + F.filename = "dochack.nim"; + colontmp__520094909 = it_520094133.childNodes.length; + F.line = 119; + F.filename = "iterators_1.nim"; + var i_520094910 = 0; + Label10: { + F.line = 120; + Label11: while (true) { + if (!(i_520094910 < colontmp__520094909)) break Label11; + F.line = 133; + F.filename = "dochack.nim"; + j_520094138 = i_520094910; + F.line = 133; + toToc_520094107(it_520094133.childNodes[j_520094138], e_520094132); + F.line = 122; + F.filename = "iterators_1.nim"; + i_520094910 = addInt(i_520094910, 1); + } + }; + }; + F.line = 135; + F.filename = "dochack.nim"; + 
f_520094117.kids.push(e_520094132);; + F.line = 136; + i_520094118 = addInt(nxt_520094119, 1); + } + else { + F.line = 138; + toToc_520094107(x_520094108.childNodes[i_520094118], f_520094117); + F.line = 139; + i_520094118 = addInt(i_520094118, 1); + } + + } + }; + F.line = 140; + father_520094109.kids.push(f_520094117);; + } + else { + if (isWhitespace_520094104(x_520094108)) { + } + else { + if ((x_520094108.nodeName == "LI")) { + F.line = 144; + var idx_520094156 = []; + Label12: { + F.line = 145; + var i_520094161 = 0; + F.line = 117; + F.filename = "iterators_1.nim"; + var colontmp__520094913 = 0; + F.line = 145; + F.filename = "dochack.nim"; + colontmp__520094913 = x_520094108.childNodes.length; + F.line = 119; + F.filename = "iterators_1.nim"; + var i_520094914 = 0; + Label13: { + F.line = 120; + Label14: while (true) { + if (!(i_520094914 < colontmp__520094913)) break Label14; + F.line = 145; + F.filename = "dochack.nim"; + i_520094161 = i_520094914; + if (!(isWhitespace_520094104(x_520094108.childNodes[i_520094161]))) { + F.line = 146; + idx_520094156.push(i_520094161);; + } + + F.line = 122; + F.filename = "iterators_1.nim"; + i_520094914 = addInt(i_520094914, 1); + } + }; + }; + if (!((idx_520094156).length == 2)) Temporary15 = false; else { Temporary15 = (x_520094108.childNodes[idx_520094156[chckIndx(1, 0, (idx_520094156).length - 1)]].nodeName == "UL"); } if (Temporary15) { + F.line = 148; + F.filename = "dochack.nim"; + var e_520094177 = {heading: x_520094108.childNodes[idx_520094156[chckIndx(0, 0, (idx_520094156).length - 1)]], kids: [], sortId: (father_520094109.kids).length, doSort: false}; + F.line = 149; + var it_520094178 = x_520094108.childNodes[idx_520094156[chckIndx(1, 0, (idx_520094156).length - 1)]]; + Label16: { + F.line = 150; + var j_520094183 = 0; + F.line = 117; + F.filename = "iterators_1.nim"; + var colontmp__520094917 = 0; + F.line = 150; + F.filename = "dochack.nim"; + colontmp__520094917 = it_520094178.childNodes.length; + F.line 
= 119; + F.filename = "iterators_1.nim"; + var i_520094918 = 0; + Label17: { + F.line = 120; + Label18: while (true) { + if (!(i_520094918 < colontmp__520094917)) break Label18; + F.line = 150; + F.filename = "dochack.nim"; + j_520094183 = i_520094918; + F.line = 150; + toToc_520094107(it_520094178.childNodes[j_520094183], e_520094177); + F.line = 122; + F.filename = "iterators_1.nim"; + i_520094918 = addInt(i_520094918, 1); + } + }; + }; + F.line = 152; + F.filename = "dochack.nim"; + father_520094109.kids.push(e_520094177);; + } + else { + Label19: { + F.line = 154; + var i_520094192 = 0; + F.line = 117; + F.filename = "iterators_1.nim"; + var colontmp__520094921 = 0; + F.line = 154; + F.filename = "dochack.nim"; + colontmp__520094921 = x_520094108.childNodes.length; + F.line = 119; + F.filename = "iterators_1.nim"; + var i_520094922 = 0; + Label20: { + F.line = 120; + Label21: while (true) { + if (!(i_520094922 < colontmp__520094921)) break Label21; + F.line = 154; + F.filename = "dochack.nim"; + i_520094192 = i_520094922; + F.line = 154; + toToc_520094107(x_520094108.childNodes[i_520094192], father_520094109); + F.line = 122; + F.filename = "iterators_1.nim"; + i_520094922 = addInt(i_520094922, 1); + } + }; + }; + } + + } + else { + F.line = 157; + F.filename = "dochack.nim"; + father_520094109.kids.push({heading: x_520094108, kids: [], sortId: (father_520094109.kids).length, doSort: false});; + } + }} + framePtr = F.prev; + + +} + +function extractItems_520093895(x_520093896, heading_520093897, items_520093898, items_520093898_Idx) { + var F = {procname: "dochack.extractItems", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + BeforeRet: { + if ((x_520093896 == null)) { + F.line = 76; + F.filename = "dochack.nim"; + break BeforeRet; + } + + if ((!((x_520093896.heading == null)) && (x_520093896.heading.textContent == heading_520093897))) { + Label1: { + F.line = 78; + var i_520093915 
= 0; + F.line = 117; + F.filename = "iterators_1.nim"; + var colontmp__520094925 = 0; + F.line = 78; + F.filename = "dochack.nim"; + colontmp__520094925 = (x_520093896.kids).length; + F.line = 119; + F.filename = "iterators_1.nim"; + var i_520094926 = 0; + Label2: { + F.line = 120; + Label3: while (true) { + if (!(i_520094926 < colontmp__520094925)) break Label3; + F.line = 78; + F.filename = "dochack.nim"; + i_520093915 = i_520094926; + F.line = 78; + items_520093898[items_520093898_Idx].push(x_520093896.kids[chckIndx(i_520093915, 0, (x_520093896.kids).length - 1)].heading);; + F.line = 122; + F.filename = "iterators_1.nim"; + i_520094926 = addInt(i_520094926, 1); + } + }; + }; + } + else { + Label4: { + F.line = 81; + F.filename = "dochack.nim"; + var k_520093941 = null; + F.line = 241; + F.filename = "iterators.nim"; + var i_520094930 = 0; + F.line = 242; + var L_520094931 = (x_520093896.kids).length; + Label5: { + F.line = 243; + Label6: while (true) { + if (!(i_520094930 < L_520094931)) break Label6; + F.line = 81; + F.filename = "dochack.nim"; + k_520093941 = x_520093896.kids[chckIndx(i_520094930, 0, (x_520093896.kids).length - 1)]; + F.line = 81; + extractItems_520093895(k_520093941, heading_520093897, items_520093898, items_520093898_Idx); + F.line = 245; + F.filename = "iterators.nim"; + i_520094930 = addInt(i_520094930, 1); + if (!(((x_520093896.kids).length == L_520094931))) { + F.line = 246; + failedAssertImpl_268435541(makeNimstrLit("/home/runner/work/Arraymancer/Arraymancer/nim/lib/system/iterators.nim(246, 11) `len(a) == L` the length of the seq changed while iterating over it")); + } + + } + }; + }; + } + + }; + framePtr = F.prev; + + +} + +function tree_520093826(tag_520093827, kids_520093828) { + var result_520093829 = null; + + var F = {procname: "dochack.tree", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 28; + F.filename = "dochack.nim"; + 
result_520093829 = document.createElement(tag_520093827); + Label1: { + F.line = 29; + var k_520093843 = null; + F.line = 13; + F.filename = "iterators.nim"; + var i_520094943 = 0; + Label2: { + F.line = 14; + Label3: while (true) { + if (!(i_520094943 < (kids_520093828).length)) break Label3; + F.line = 29; + F.filename = "dochack.nim"; + k_520093843 = kids_520093828[chckIndx(i_520094943, 0, (kids_520093828).length - 1)]; + F.line = 29; + result_520093829.appendChild(k_520093843); + F.line = 16; + F.filename = "iterators.nim"; + i_520094943 = addInt(i_520094943, 1); + } + }; + }; + framePtr = F.prev; + + return result_520093829; + +} + +function text_520093851(s_520093852) { + var result_520093853 = null; + + var F = {procname: "dochack.text", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 42; + F.filename = "dochack.nim"; + result_520093853 = document.createTextNode(s_520093852); + framePtr = F.prev; + + return result_520093853; + +} + +function uncovered_520094296(x_520094297) { + var Temporary1; + + var result_520094298 = null; + + var F = {procname: "dochack.uncovered", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + BeforeRet: { + if ((((x_520094297.kids).length == 0) && !((x_520094297.heading == null)))) { + F.line = 171; + F.filename = "dochack.nim"; + if (!(x_520094297.heading.hasOwnProperty('__karaxMarker__'))) { + Temporary1 = x_520094297; + } + else { + Temporary1 = null; + } + + result_520094298 = Temporary1; + break BeforeRet; + } + + F.line = 172; + result_520094298 = {heading: x_520094297.heading, kids: [], sortId: x_520094297.sortId, doSort: x_520094297.doSort}; + Label2: { + F.line = 174; + var k_520094313 = null; + F.line = 241; + F.filename = "iterators.nim"; + var i_520094950 = 0; + F.line = 242; + var L_520094951 = (x_520094297.kids).length; + Label3: { + F.line = 243; + 
Label4: while (true) { + if (!(i_520094950 < L_520094951)) break Label4; + F.line = 174; + F.filename = "dochack.nim"; + k_520094313 = x_520094297.kids[chckIndx(i_520094950, 0, (x_520094297.kids).length - 1)]; + F.line = 175; + var y_520094314 = uncovered_520094296(k_520094313); + if (!((y_520094314 == null))) { + F.line = 176; + result_520094298.kids.push(y_520094314);; + } + + F.line = 245; + F.filename = "iterators.nim"; + i_520094950 = addInt(i_520094950, 1); + if (!(((x_520094297.kids).length == L_520094951))) { + F.line = 246; + failedAssertImpl_268435541(makeNimstrLit("/home/runner/work/Arraymancer/Arraymancer/nim/lib/system/iterators.nim(246, 11) `len(a) == L` the length of the seq changed while iterating over it")); + } + + } + }; + }; + if (((result_520094298.kids).length == 0)) { + F.line = 177; + F.filename = "dochack.nim"; + result_520094298 = null; + } + + }; + framePtr = F.prev; + + return result_520094298; + +} + +function mergeTocs_520094326(orig_520094327, news_520094328) { + var result_520094329 = null; + + var F = {procname: "dochack.mergeTocs", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 180; + F.filename = "dochack.nim"; + result_520094329 = uncovered_520094296(orig_520094327); + if ((result_520094329 == null)) { + F.line = 182; + result_520094329 = news_520094328; + } + else { + Label1: { + F.line = 184; + var i_520094341 = 0; + F.line = 117; + F.filename = "iterators_1.nim"; + var colontmp__520094946 = 0; + F.line = 184; + F.filename = "dochack.nim"; + colontmp__520094946 = (news_520094328.kids).length; + F.line = 119; + F.filename = "iterators_1.nim"; + var i_520094947 = 0; + Label2: { + F.line = 120; + Label3: while (true) { + if (!(i_520094947 < colontmp__520094946)) break Label3; + F.line = 184; + F.filename = "dochack.nim"; + i_520094341 = i_520094947; + F.line = 184; + result_520094329.kids.push(news_520094328.kids[chckIndx(i_520094341, 0, 
(news_520094328.kids).length - 1)]);; + F.line = 122; + F.filename = "iterators_1.nim"; + i_520094947 = addInt(i_520094947, 1); + } + }; + }; + } + + framePtr = F.prev; + + return result_520094329; + +} + +function buildToc_520094346(orig_520094347, types_520094348, procs_520094349) { + var result_520094350 = null; + + var F = {procname: "dochack.buildToc", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 188; + F.filename = "dochack.nim"; + var newStuff_520094355 = {heading: null, kids: [], doSort: true, sortId: 0}; + Label1: { + F.line = 189; + var t_520094359 = null; + F.line = 241; + F.filename = "iterators.nim"; + var i_520094938 = 0; + F.line = 242; + var L_520094939 = (types_520094348).length; + Label2: { + F.line = 243; + Label3: while (true) { + if (!(i_520094938 < L_520094939)) break Label3; + F.line = 189; + F.filename = "dochack.nim"; + t_520094359 = types_520094348[chckIndx(i_520094938, 0, (types_520094348).length - 1)]; + F.line = 190; + var c_520094364 = {heading: t_520094359.cloneNode(true), kids: [], doSort: true, sortId: 0}; + F.line = 189; + t_520094359.__karaxMarker__ = true; + Label4: { + F.line = 192; + var p_520094368 = null; + F.line = 241; + F.filename = "iterators.nim"; + var i_520094935 = 0; + F.line = 242; + var L_520094936 = (procs_520094349).length; + Label5: { + F.line = 243; + Label6: while (true) { + if (!(i_520094935 < L_520094936)) break Label6; + F.line = 192; + F.filename = "dochack.nim"; + p_520094368 = procs_520094349[chckIndx(i_520094935, 0, (procs_520094349).length - 1)]; + if (!(p_520094368.hasOwnProperty('__karaxMarker__'))) { + F.line = 194; + var xx_520094369 = p_520094368.parentNode.getElementsByClassName("attachedType"); + if ((((xx_520094369).length == 1) && (xx_520094369[chckIndx(0, 0, (xx_520094369).length - 1)].textContent == t_520094359.textContent))) { + F.line = 196; + var q_520094374 = tree_520093826("A", 
[text_520093851(p_520094368.title)]); + F.line = 197; + q_520094374.setAttribute("href", p_520094368.getAttribute("href")); + F.line = 198; + c_520094364.kids.push({heading: q_520094374, kids: [], sortId: 0, doSort: false});; + F.line = 199; + p_520094368.__karaxMarker__ = true; + } + + } + + F.line = 245; + F.filename = "iterators.nim"; + i_520094935 = addInt(i_520094935, 1); + if (!(((procs_520094349).length == L_520094936))) { + F.line = 246; + failedAssertImpl_268435541(makeNimstrLit("/home/runner/work/Arraymancer/Arraymancer/nim/lib/system/iterators.nim(246, 11) `len(a) == L` the length of the seq changed while iterating over it")); + } + + } + }; + }; + F.line = 189; + F.filename = "dochack.nim"; + newStuff_520094355.kids.push(c_520094364);; + F.line = 245; + F.filename = "iterators.nim"; + i_520094938 = addInt(i_520094938, 1); + if (!(((types_520094348).length == L_520094939))) { + F.line = 246; + failedAssertImpl_268435541(makeNimstrLit("/home/runner/work/Arraymancer/Arraymancer/nim/lib/system/iterators.nim(246, 11) `len(a) == L` the length of the seq changed while iterating over it")); + } + + } + }; + }; + F.line = 201; + F.filename = "dochack.nim"; + result_520094350 = mergeTocs_520094326(orig_520094347, newStuff_520094355); + framePtr = F.prev; + + return result_520094350; + +} + +function add_520093844(parent_520093845, kid_520093846) { + var F = {procname: "dochack.add", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + if (((parent_520093845.nodeName == "TR") && ((kid_520093846.nodeName == "TD") || (kid_520093846.nodeName == "TH")))) { + F.line = 34; + F.filename = "dochack.nim"; + var k_520093847 = document.createElement("TD"); + F.line = 35; + k_520093847.appendChild(kid_520093846); + F.line = 36; + parent_520093845.appendChild(k_520093847); + } + else { + F.line = 38; + parent_520093845.appendChild(kid_520093846); + } + + framePtr = F.prev; + + +} + +function 
setClass_520093848(e_520093849, value_520093850) { + var F = {procname: "dochack.setClass", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 41; + F.filename = "dochack.nim"; + e_520093849.setAttribute("class", value_520093850); + framePtr = F.prev; + + +} + +function toHtml_520093974(x_520093975, isRoot_520093976) { + +function HEX3Aanonymous_520093994(a_520093995, b_520093996) { + var result_520093997 = 0; + + var F = {procname: "toHtml.:anonymous", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + BeforeRet: { + if ((!((a_520093995.heading == null)) && !((b_520093996.heading == null)))) { + F.line = 100; + F.filename = "dochack.nim"; + var x_520094006 = a_520093995.heading.textContent; + F.line = 101; + var y_520094007 = b_520093996.heading.textContent; + if ((x_520094006 < y_520094007)) { + F.line = 102; + result_520093997 = (-1); + break BeforeRet; + } + + if ((y_520094007 < x_520094006)) { + F.line = 103; + result_520093997 = 1; + break BeforeRet; + } + + F.line = 104; + result_520093997 = 0; + break BeforeRet; + } + else { + F.line = 107; + result_520093997 = subInt(a_520093995.sortId, b_520093996.sortId); + break BeforeRet; + } + + }; + framePtr = F.prev; + + return result_520093997; + + } + + var result_520093977 = null; + + var F = {procname: "dochack.toHtml", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + BeforeRet: { + if ((x_520093975 == null)) { + F.line = 85; + F.filename = "dochack.nim"; + result_520093977 = null; + break BeforeRet; + } + + if (((x_520093975.kids).length == 0)) { + if ((x_520093975.heading == null)) { + F.line = 87; + result_520093977 = null; + break BeforeRet; + } + + F.line = 88; + result_520093977 = x_520093975.heading.cloneNode(true); + break BeforeRet; + } + + F.line = 89; + 
result_520093977 = tree_520093826("DIV", []); + if ((!((x_520093975.heading == null)) && !(x_520093975.heading.hasOwnProperty('__karaxMarker__')))) { + F.line = 91; + add_520093844(result_520093977, x_520093975.heading.cloneNode(true)); + } + + F.line = 92; + var ul_520093993 = tree_520093826("UL", []); + if (isRoot_520093976) { + F.line = 94; + setClass_520093848(ul_520093993, "simple simple-toc"); + } + else { + F.line = 96; + setClass_520093848(ul_520093993, "simple"); + } + + if (x_520093975.doSort) { + F.line = 98; + x_520093975.kids.sort(HEX3Aanonymous_520093994); + } + + Label1: { + F.line = 109; + var k_520094019 = null; + F.line = 241; + F.filename = "iterators.nim"; + var i_520094954 = 0; + F.line = 242; + var L_520094955 = (x_520093975.kids).length; + Label2: { + F.line = 243; + Label3: while (true) { + if (!(i_520094954 < L_520094955)) break Label3; + F.line = 109; + F.filename = "dochack.nim"; + k_520094019 = x_520093975.kids[chckIndx(i_520094954, 0, (x_520093975.kids).length - 1)]; + F.line = 110; + var y_520094020 = toHtml_520093974(k_520094019, false); + if (!((y_520094020 == null))) { + F.line = 112; + add_520093844(ul_520093993, tree_520093826("LI", [y_520094020])); + } + + F.line = 245; + F.filename = "iterators.nim"; + i_520094954 = addInt(i_520094954, 1); + if (!(((x_520093975.kids).length == L_520094955))) { + F.line = 246; + failedAssertImpl_268435541(makeNimstrLit("/home/runner/work/Arraymancer/Arraymancer/nim/lib/system/iterators.nim(246, 11) `len(a) == L` the length of the seq changed while iterating over it")); + } + + } + }; + }; + if (!((ul_520093993.childNodes.length == 0))) { + F.line = 113; + F.filename = "dochack.nim"; + add_520093844(result_520093977, ul_520093993); + } + + if ((result_520093977.childNodes.length == 0)) { + F.line = 114; + result_520093977 = null; + } + + }; + framePtr = F.prev; + + return result_520093977; + +} + +function replaceById_520093854(id_520093855, newTree_520093856) { + var F = {procname: 
"dochack.replaceById", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 45; + F.filename = "dochack.nim"; + var x_520093857 = document.getElementById(id_520093855); + F.line = 46; + x_520093857.parentNode.replaceChild(newTree_520093856, x_520093857); + F.line = 47; + newTree_520093856.id = id_520093855; + framePtr = F.prev; + + +} + +function togglevis_520094404(d_520094405) { + var F = {procname: "dochack.togglevis", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + if ((d_520094405.style.display == "none")) { + F.line = 207; + F.filename = "dochack.nim"; + d_520094405.style.display = "inline"; + } + else { + F.line = 209; + d_520094405.style.display = "none"; + } + + framePtr = F.prev; + + +} + +function groupBy(value_520094407) { + var F = {procname: "dochack.groupBy", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 212; + F.filename = "dochack.nim"; + var toc_520094408 = document.getElementById("toc-list"); + if ((alternative_520094403[0] == null)) { + F.line = 214; + var tt_520094416 = {heading: null, kids: [], sortId: 0, doSort: false}; + F.line = 215; + toToc_520094107(toc_520094408, tt_520094416); + F.line = 216; + tt_520094416 = tt_520094416.kids[chckIndx(0, 0, (tt_520094416.kids).length - 1)]; + F.line = 218; + var types_520094421 = [[]]; + F.line = 219; + var procs_520094426 = [[]]; + F.line = 221; + extractItems_520093895(tt_520094416, "Types", types_520094421, 0); + F.line = 222; + extractItems_520093895(tt_520094416, "Procs", procs_520094426, 0); + F.line = 223; + extractItems_520093895(tt_520094416, "Converters", procs_520094426, 0); + F.line = 224; + extractItems_520093895(tt_520094416, "Methods", procs_520094426, 0); + F.line = 225; + extractItems_520093895(tt_520094416, "Templates", 
procs_520094426, 0); + F.line = 226; + extractItems_520093895(tt_520094416, "Macros", procs_520094426, 0); + F.line = 227; + extractItems_520093895(tt_520094416, "Iterators", procs_520094426, 0); + F.line = 229; + var ntoc_520094427 = buildToc_520094346(tt_520094416, types_520094421[0], procs_520094426[0]); + F.line = 230; + var x_520094428 = toHtml_520093974(ntoc_520094427, true); + F.line = 231; + alternative_520094403[0] = tree_520093826("DIV", [x_520094428]); + } + + if ((value_520094407 == "type")) { + F.line = 233; + replaceById_520093854("tocRoot", alternative_520094403[0]); + } + else { + F.line = 235; + replaceById_520093854("tocRoot", tree_520093826("DIV", [])); + } + + F.line = 236; + togglevis_520094404(document.getElementById("toc-list")); + framePtr = F.prev; + + +} + +function HEX5BHEX5D_687867163(s_687867166, x_687867167) { + var result_687867168 = []; + + var F = {procname: "[].[]", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/system/indices.nim", line: 0}; + framePtr = F; + F.line = 83; + F.filename = "indices.nim"; + var a_687867170 = x_687867167.a; + F.line = 84; + var L_687867172 = addInt(subInt(subInt((s_687867166).length, x_687867167.b), a_687867170), 1); + F.line = 85; + result_687867168 = nimCopy(null, mnewString(chckRange(L_687867172, 0, 2147483647)), NTI33554449); + Label1: { + F.line = 86; + var i_687867177 = 0; + F.line = 119; + F.filename = "iterators_1.nim"; + var i_520094964 = 0; + Label2: { + F.line = 120; + Label3: while (true) { + if (!(i_520094964 < L_687867172)) break Label3; + F.line = 86; + F.filename = "indices.nim"; + i_687867177 = i_520094964; + F.line = 86; + result_687867168[chckIndx(i_687867177, 0, (result_687867168).length - 1)] = s_687867166[chckIndx(addInt(i_687867177, a_687867170), 0, (s_687867166).length - 1)]; + F.line = 122; + F.filename = "iterators_1.nim"; + i_520094964 = addInt(i_520094964, 1); + } + }; + }; + framePtr = F.prev; + + return result_687867168; + +} + +function 
HEX2EHEX2E_872415336(a_872415339, b_872415340) { + var result_872415343 = ({a: 0, b: 0}); + + var F = {procname: ".....", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/system.nim", line: 0}; + framePtr = F; + F.line = 416; + F.filename = "system.nim"; + result_872415343 = nimCopy(result_872415343, {a: a_872415339, b: b_872415340}, NTI872415317); + framePtr = F.prev; + + return result_872415343; + +} +async function loadIndex_520094622() { + var result_520094624 = null; + + var F = {procname: "dochack.loadIndex", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + BeforeRet: { + F.line = 286; + F.filename = "dochack.nim"; + var indexURL_520094630 = document.getElementById("indexLink").getAttribute("href"); + F.line = 288; + var rootURL_520094656 = HEX5BHEX5D_687867163(cstrToNimstr(indexURL_520094630), HEX2EHEX2E_872415336(0, 14)); + F.line = 289; + var resp_520094668 = (await (await fetch(indexURL_520094630)).text()); + F.line = 291; + var indexElem_520094669 = document.createElement("div"); + F.line = 292; + indexElem_520094669.innerHTML = resp_520094668; + Label1: { + F.line = 294; + var href_520094691 = null; + F.line = 239; + F.filename = "iterators.nim"; + var colontmp__520094958 = []; + F.line = 294; + F.filename = "dochack.nim"; + colontmp__520094958 = indexElem_520094669.getElementsByClassName("reference"); + F.line = 241; + F.filename = "iterators.nim"; + var i_520094960 = 0; + F.line = 242; + var L_520094961 = (colontmp__520094958).length; + Label2: { + F.line = 243; + Label3: while (true) { + if (!(i_520094960 < L_520094961)) break Label3; + F.line = 294; + F.filename = "dochack.nim"; + href_520094691 = colontmp__520094958[chckIndx(i_520094960, 0, (colontmp__520094958).length - 1)]; + F.line = 294; + href_520094691.setAttribute("href", toJSStr((rootURL_520094656 || []).concat(cstrToNimstr(href_520094691.getAttribute("href")) || []))); + F.line = 
297; + db_520094445[0].push(href_520094691);; + F.line = 298; + contents_520094446[0].push(href_520094691.getAttribute("data-doc-search-tag"));; + F.line = 245; + F.filename = "iterators.nim"; + i_520094960 = addInt(i_520094960, 1); + if (!(((colontmp__520094958).length == L_520094961))) { + F.line = 246; + failedAssertImpl_268435541(makeNimstrLit("/home/runner/work/Arraymancer/Arraymancer/nim/lib/system/iterators.nim(246, 11) `len(a) == L` the length of the seq changed while iterating over it")); + } + + } + }; + }; + F.line = 148; + F.filename = "asyncjs.nim"; + result_520094624 = undefined; + break BeforeRet; + }; + framePtr = F.prev; + + return result_520094624; + +} + +function then_520094800(future_520094803, onSuccess_520094804, onReject_520094805) { + var result_520094806 = null; + + var F = {procname: "then.then", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/js/asyncjs.nim", line: 0}; + framePtr = F; + BeforeRet: { + F.line = 243; + F.filename = "asyncjs.nim"; + var ret_520094816 = null; + F.line = 244; + ret_520094816 = future_520094803.then(onSuccess_520094804, onReject_520094805) + F.line = 245; + result_520094806 = ret_520094816; + break BeforeRet; + }; + framePtr = F.prev; + + return result_520094806; + +} + +function nsuToLowerAsciiChar(c_687865941) { + var result_687865942 = 0; + + var F = {procname: "strutils.toLowerAscii", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/lib/pure/strutils.nim", line: 0}; + framePtr = F; + if ((ConstSet2[c_687865941] != undefined)) { + F.line = 218; + F.filename = "strutils.nim"; + result_687865942 = (c_687865941 ^ 32); + } + else { + F.line = 220; + result_687865942 = c_687865941; + } + + framePtr = F.prev; + + return result_687865942; + +} + +function fuzzyMatch_671088656(pattern_671088657, str_671088658) { + var Temporary4; + var Temporary5; + var Temporary6; + var Temporary7; + var Temporary8; + + var result_671088661 = {Field0: 0, Field1: false}; + + var 
F = {procname: "fuzzysearch.fuzzyMatch", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/fuzzysearch.nim", line: 0}; + framePtr = F; + F.line = 36; + F.filename = "fuzzysearch.nim"; + var scoreState_671088662 = (-100); + F.line = 37; + var headerMatched_671088663 = false; + F.line = 38; + var unmatchedLeadingCharCount_671088664 = 0; + F.line = 39; + var consecutiveMatchCount_671088665 = 0; + F.line = 40; + var strIndex_671088666 = 0; + F.line = 41; + var patIndex_671088667 = 0; + F.line = 42; + var score_671088668 = 0; + Label1: { + F.line = 48; + Label2: while (true) { + if (!((strIndex_671088666 < ((str_671088658) == null ? 0 : (str_671088658).length)) && (patIndex_671088667 < ((pattern_671088657) == null ? 0 : (pattern_671088657).length)))) break Label2; + Label3: { + F.line = 50; + var patternChar_671088671 = nsuToLowerAsciiChar(pattern_671088657.charCodeAt(chckIndx(patIndex_671088667, 0, (pattern_671088657).length - 1))); + F.line = 51; + var strChar_671088672 = nsuToLowerAsciiChar(str_671088658.charCodeAt(chckIndx(strIndex_671088666, 0, (str_671088658).length - 1))); + if ((ConstSet3[patternChar_671088671] != undefined)) { + F.line = 55; + patIndex_671088667 = addInt(patIndex_671088667, 1); + F.line = 56; + break Label3; + } + + if ((ConstSet4[strChar_671088672] != undefined)) { + F.line = 58; + strIndex_671088666 = addInt(strIndex_671088666, 1); + F.line = 59; + break Label3; + } + + if ((!(headerMatched_671088663) && (strChar_671088672 == 58))) { + F.line = 64; + headerMatched_671088663 = true; + F.line = 65; + scoreState_671088662 = (-100); + F.line = 66; + score_671088668 = ((Math.floor((0.5 * score_671088668))) | 0); + F.line = 67; + patIndex_671088667 = 0; + F.line = 68; + strIndex_671088666 = addInt(strIndex_671088666, 1); + F.line = 69; + break Label3; + } + + if ((strChar_671088672 == patternChar_671088671)) { + F.line = 72; + switch (scoreState_671088662) { + case (-100): + case 20: + F.line = 74; + 
scoreState_671088662 = 10; + break; + case 0: + F.line = 77; + scoreState_671088662 = 5; + F.line = 77; + score_671088668 = addInt(score_671088668, scoreState_671088662); + break; + case 10: + case 5: + F.line = 80; + consecutiveMatchCount_671088665 = addInt(consecutiveMatchCount_671088665, 1); + F.line = 81; + scoreState_671088662 = 5; + F.line = 82; + score_671088668 = addInt(score_671088668, mulInt(5, consecutiveMatchCount_671088665)); + if ((scoreState_671088662 == 10)) { + F.line = 85; + score_671088668 = addInt(score_671088668, 10); + } + + F.line = 87; + var onBoundary_671088724 = (patIndex_671088667 == ((pattern_671088657) == null ? -1 : (pattern_671088657).length - 1)); + if ((!(onBoundary_671088724) && (strIndex_671088666 < ((str_671088658) == null ? -1 : (str_671088658).length - 1)))) { + F.line = 90; + var nextPatternChar_671088725 = nsuToLowerAsciiChar(pattern_671088657.charCodeAt(chckIndx(addInt(patIndex_671088667, 1), 0, (pattern_671088657).length - 1))); + F.line = 91; + var nextStrChar_671088726 = nsuToLowerAsciiChar(str_671088658.charCodeAt(chckIndx(addInt(strIndex_671088666, 1), 0, (str_671088658).length - 1))); + F.line = 94; + if (!!((ConstSet5[nextStrChar_671088726] != undefined))) Temporary4 = false; else { Temporary4 = !((nextStrChar_671088726 == nextPatternChar_671088725)); } onBoundary_671088724 = Temporary4; + } + + if (onBoundary_671088724) { + F.line = 99; + scoreState_671088662 = 20; + F.line = 99; + score_671088668 = addInt(score_671088668, scoreState_671088662); + } + + break; + case (-1): + case (-3): + F.line = 102; + if (!((ConstSet6[str_671088658.charCodeAt(chckIndx(subInt(strIndex_671088666, 1), 0, (str_671088658).length - 1))] != undefined))) Temporary5 = true; else { if (!(ConstSet7[str_671088658.charCodeAt(chckIndx(subInt(strIndex_671088666, 1), 0, (str_671088658).length - 1))] != undefined)) Temporary6 = false; else { Temporary6 = (ConstSet8[str_671088658.charCodeAt(chckIndx(strIndex_671088666, 0, (str_671088658).length - 
1))] != undefined); } Temporary5 = Temporary6; } var isLeadingChar_671088750 = Temporary5; + if (isLeadingChar_671088750) { + F.line = 109; + scoreState_671088662 = 10; + } + else { + F.line = 113; + scoreState_671088662 = 0; + F.line = 113; + score_671088668 = addInt(score_671088668, scoreState_671088662); + } + + break; + } + F.line = 114; + patIndex_671088667 = addInt(patIndex_671088667, 1); + } + else { + F.line = 117; + switch (scoreState_671088662) { + case (-100): + F.line = 119; + scoreState_671088662 = (-3); + F.line = 119; + score_671088668 = addInt(score_671088668, scoreState_671088662); + break; + case 5: + F.line = 122; + scoreState_671088662 = (-1); + F.line = 122; + score_671088668 = addInt(score_671088668, scoreState_671088662); + F.line = 123; + consecutiveMatchCount_671088665 = 0; + break; + case (-3): + if ((unmatchedLeadingCharCount_671088664 < 3)) { + F.line = 127; + scoreState_671088662 = (-3); + F.line = 127; + score_671088668 = addInt(score_671088668, scoreState_671088662); + } + + F.line = 128; + unmatchedLeadingCharCount_671088664 = addInt(unmatchedLeadingCharCount_671088664, 1); + break; + default: + F.line = 131; + scoreState_671088662 = (-1); + F.line = 131; + score_671088668 = addInt(score_671088668, scoreState_671088662); + break; + } + } + + F.line = 133; + strIndex_671088666 = addInt(strIndex_671088666, 1); + }; + } + }; + if (!(patIndex_671088667 == ((pattern_671088657) == null ? 0 : (pattern_671088657).length))) Temporary7 = false; else { if ((strIndex_671088666 == ((str_671088658) == null ? 
0 : (str_671088658).length))) Temporary8 = true; else { Temporary8 = !((ConstSet9[str_671088658.charCodeAt(chckIndx(strIndex_671088666, 0, (str_671088658).length - 1))] != undefined)); } Temporary7 = Temporary8; } if (Temporary7) { + F.line = 136; + score_671088668 = addInt(score_671088668, 10); + } + + F.line = 139; + var colontmp__520094977 = nimMax(0, score_671088668); + F.line = 140; + var colontmp__520094978 = (0 < score_671088668); + F.line = 138; + result_671088661 = nimCopy(result_671088661, {Field0: colontmp__520094977, Field1: colontmp__520094978}, NTI671088654); + framePtr = F.prev; + + return result_671088661; + +} + +function escapeCString_520094447(x_520094448, x_520094448_Idx) { + var F = {procname: "dochack.escapeCString", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 246; + F.filename = "dochack.nim"; + var s_520094449 = []; + Label1: { + F.line = 247; + var c_520094450 = 0; + F.line = 75; + F.filename = "iterators.nim"; + var iHEX60gensym6_520094981 = 0; + F.line = 76; + var nHEX60gensym6_520094982 = ((x_520094448[x_520094448_Idx]) == null ? 
0 : (x_520094448[x_520094448_Idx]).length); + Label2: { + F.line = 80; + Label3: while (true) { + if (!(iHEX60gensym6_520094981 < nHEX60gensym6_520094982)) break Label3; + F.line = 247; + F.filename = "dochack.nim"; + c_520094450 = x_520094448[x_520094448_Idx].charCodeAt(chckIndx(iHEX60gensym6_520094981, 0, (x_520094448[x_520094448_Idx]).length - 1)); + F.line = 247; + switch (c_520094450) { + case 60: + F.line = 249; + s_520094449.push.apply(s_520094449, [38,108,116,59]);; + break; + case 62: + F.line = 250; + s_520094449.push.apply(s_520094449, [38,103,116,59]);; + break; + default: + F.line = 251; + addChar(s_520094449, c_520094450);; + break; + } + F.line = 79; + F.filename = "iterators.nim"; + iHEX60gensym6_520094981 = addInt(iHEX60gensym6_520094981, 1); + } + }; + }; + F.line = 252; + F.filename = "dochack.nim"; + x_520094448[x_520094448_Idx] = toJSStr(s_520094449); + framePtr = F.prev; + + +} + +function dosearch_520094451(value_520094452) { + +function HEX3Aanonymous_520094479(a_520094484, b_520094485) { + var result_520094490 = 0; + + var F = {procname: "dosearch.:anonymous", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 272; + F.filename = "dochack.nim"; + result_520094490 = subInt(b_520094485["Field1"], a_520094484["Field1"]); + framePtr = F.prev; + + return result_520094490; + + } + + var result_520094453 = null; + + var F = {procname: "dochack.dosearch", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + BeforeRet: { + if (((db_520094445[0]).length == 0)) { + F.line = 256; + F.filename = "dochack.nim"; + break BeforeRet; + } + + F.line = 257; + var ul_520094457 = tree_520093826("UL", []); + F.line = 258; + result_520094453 = tree_520093826("DIV", []); + F.line = 259; + setClass_520093848(result_520094453, "search_results"); + F.line = 260; + var matches_520094462 = []; + Label1: { + 
F.line = 261; + var i_520094470 = 0; + F.line = 117; + F.filename = "iterators_1.nim"; + var colontmp__520094968 = 0; + F.line = 261; + F.filename = "dochack.nim"; + colontmp__520094968 = (db_520094445[0]).length; + F.line = 119; + F.filename = "iterators_1.nim"; + var i_520094969 = 0; + Label2: { + F.line = 120; + Label3: while (true) { + if (!(i_520094969 < colontmp__520094968)) break Label3; + F.line = 261; + F.filename = "dochack.nim"; + i_520094470 = i_520094969; + Label4: { + F.line = 262; + var c_520094471 = contents_520094446[0][chckIndx(i_520094470, 0, (contents_520094446[0]).length - 1)]; + if (((c_520094471 == "Examples") || (c_520094471 == "PEG construction"))) { + F.line = 267; + break Label4; + } + + F.line = 268; + var tmpTuple_520094472 = fuzzyMatch_671088656(value_520094452, c_520094471); + F.line = 268; + var score_520094473 = tmpTuple_520094472["Field0"]; + F.line = 268; + var matched_520094474 = tmpTuple_520094472["Field1"]; + if (matched_520094474) { + F.line = 270; + matches_520094462.push({Field0: db_520094445[0][chckIndx(i_520094470, 0, (db_520094445[0]).length - 1)], Field1: score_520094473});; + } + + }; + F.line = 122; + F.filename = "iterators_1.nim"; + i_520094969 = addInt(i_520094969, 1); + } + }; + }; + F.line = 272; + F.filename = "dochack.nim"; + matches_520094462.sort(HEX3Aanonymous_520094479); + Label5: { + F.line = 273; + var i_520094507 = 0; + F.line = 117; + F.filename = "iterators_1.nim"; + var colontmp__520094972 = 0; + F.line = 273; + F.filename = "dochack.nim"; + colontmp__520094972 = nimMin((matches_520094462).length, 29); + F.line = 119; + F.filename = "iterators_1.nim"; + var i_520094973 = 0; + Label6: { + F.line = 120; + Label7: while (true) { + if (!(i_520094973 < colontmp__520094972)) break Label7; + F.line = 273; + F.filename = "dochack.nim"; + i_520094507 = i_520094973; + F.line = 274; + matches_520094462[chckIndx(i_520094507, 0, (matches_520094462).length - 1)]["Field0"].innerHTML = 
matches_520094462[chckIndx(i_520094507, 0, (matches_520094462).length - 1)]["Field0"].getAttribute("data-doc-search-tag"); + F.line = 273; + escapeCString_520094447(matches_520094462[chckIndx(i_520094507, 0, (matches_520094462).length - 1)]["Field0"], "innerHTML"); + F.line = 273; + add_520093844(ul_520094457, tree_520093826("LI", [matches_520094462[chckIndx(i_520094507, 0, (matches_520094462).length - 1)]["Field0"]])); + F.line = 122; + F.filename = "iterators_1.nim"; + i_520094973 = addInt(i_520094973, 1); + } + }; + }; + if ((ul_520094457.childNodes.length == 0)) { + F.line = 278; + F.filename = "dochack.nim"; + add_520093844(result_520094453, tree_520093826("B", [text_520093851("no search results")])); + } + else { + F.line = 280; + add_520093844(result_520094453, tree_520093826("B", [text_520093851("search results")])); + F.line = 281; + add_520093844(result_520094453, ul_520094457); + } + + }; + framePtr = F.prev; + + return result_520094453; + +} + +function search() { + +function wrapper_520094785() { + var F = {procname: "search.wrapper", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 308; + F.filename = "dochack.nim"; + var elem_520094786 = document.getElementById("searchInput"); + F.line = 309; + var value_520094787 = elem_520094786.value; + if (!((((value_520094787) == null ? 
0 : (value_520094787).length) == 0))) { + if ((oldtoc_520094780[0] == null)) { + F.line = 312; + oldtoc_520094780[0] = document.getElementById("tocRoot"); + } + + F.line = 313; + var results_520094791 = dosearch_520094451(value_520094787); + F.line = 314; + replaceById_520093854("tocRoot", results_520094791); + } + else { + if (!((oldtoc_520094780[0] == null))) { + F.line = 316; + replaceById_520093854("tocRoot", oldtoc_520094780[0]); + } + } + framePtr = F.prev; + + + } + + var F = {procname: "dochack.search", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + if ((loadIndexFut_520094783[0] == null)) { + F.line = 320; + F.filename = "dochack.nim"; + loadIndexFut_520094783[0] = loadIndex_520094622(); + F.line = 322; + var _ = then_520094800(loadIndexFut_520094783[0], wrapper_520094785, null); + } + + if (!((timer_520094781[0] == null))) { + F.line = 323; + clearTimeout(timer_520094781[0]); + } + + F.line = 324; + timer_520094781[0] = setTimeout(wrapper_520094785, 400); + framePtr = F.prev; + + +} + +function copyToClipboard() { + var F = {procname: "dochack.copyToClipboard", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; + framePtr = F; + F.line = 327; + F.filename = "dochack.nim"; + + function updatePreTags() { + + const allPreTags = document.querySelectorAll("pre") + + allPreTags.forEach((e) => { + + const div = document.createElement("div") + div.classList.add("copyToClipBoard") + + const preTag = document.createElement("pre") + preTag.innerHTML = e.innerHTML + + const button = document.createElement("button") + button.value = e.textContent.replace('...', '') + button.classList.add("copyToClipBoardBtn") + button.style.cursor = "pointer" + + div.appendChild(preTag) + div.appendChild(button) + + e.outerHTML = div.outerHTML + + }) + } + + + function copyTextToClipboard(e) { + const clipBoardContent = e.target.value + 
navigator.clipboard.writeText(clipBoardContent).then(function() { + e.target.style.setProperty("--clipboard-image", "var(--clipboard-image-selected)") + }, function(err) { + console.error("Could not copy text: ", err); + }); + } + + window.addEventListener("click", (e) => { + if (e.target.classList.contains("copyToClipBoardBtn")) { + copyTextToClipboard(e) + } + }) + + window.addEventListener("mouseover", (e) => { + if (e.target.nodeName === "PRE") { + e.target.nextElementSibling.style.setProperty("--clipboard-image", "var(--clipboard-image-normal)") + } + }) + + window.addEventListener("DOMContentLoaded", updatePreTags) + + + framePtr = F.prev; + + +} +var Temporary1; +var F = {procname: "module dochack", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; +framePtr = F; +F.line = 11; +F.filename = "dochack.nim"; +F.line = 12; +var t_520093735 = window.localStorage.getItem("theme"); +if ((t_520093735 == null)) { +Temporary1 = "auto"; +} +else { +Temporary1 = t_520093735; +} + +setTheme(Temporary1); +F.line = 203; +var alternative_520094403 = [null]; +F.line = 239; +var db_520094445 = [[]]; +F.line = 240; +var contents_520094446 = [[]]; +F.line = 302; +var oldtoc_520094780 = [null]; +F.line = 303; +var timer_520094781 = [null]; +F.line = 304; +var loadIndexFut_520094783 = [null]; +F.line = 381; +copyToClipboard(); +F.line = 382; +window.addEventListener("DOMContentLoaded", onDOMLoaded, false); +framePtr = F.prev; +var F = {procname: "module dochack", prev: framePtr, filename: "/home/runner/work/Arraymancer/Arraymancer/nim/tools/dochack/dochack.nim", line: 0}; +framePtr = F; +framePtr = F.prev; diff --git a/docutils.css b/docutils.css new file mode 100644 index 000000000..bd4fb0260 --- /dev/null +++ b/docutils.css @@ -0,0 +1,823 @@ +/* +Source: https://github.com/nim-lang/Nim/blob/d3f966922ef4ddd05c137f82e5b2329b3d5dc485/config/nimdoc.cfg#L193-L1353 + +Stylesheet for use with Docutils/rst2html. 
+ +See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to +customize this style sheet. + +Modified from Chad Skeeters' rst2html-style +https://bitbucket.org/cskeeters/rst2html-style/ +Modified by Boyd Greenfield +*/ +/* SCSS variables */ +/* Text weights */ +/* Body colors */ +/* Text colors */ +/* Link colors */ +/* Syntax highlighting colors */ +/* Pct changes */ +/* Mixins */ +/* Body/layout */ +html { + font-size: 100%; + -webkit-text-size-adjust: 100%; + -ms-text-size-adjust: 100%; } +/* Where we want fancier font if available */ +h1, h2, h3, h4, h5, h6, p.module-desc, table.docinfo + blockquote p, table.docinfo blockquote p, h1 + blockquote p { + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif !important; } +h1.title { + font-weight: 900; } +body { + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif; + font-weight: 400; + font-size: 16px; + line-height: 20px; + letter-spacing: 0.15px; } +/* Skeleton grid */ +.container { + position: relative; + width: 100%; + max-width: 960px; + margin: 0 auto; + padding: 0 20px; + box-sizing: border-box; } +.column, +.columns { + width: 100%; + float: left; + box-sizing: border-box; } +/* For devices larger than 400px */ +@media (min-width: 400px) { + .container { + width: 100%; + padding: 0; } } +/* For devices larger than 650px */ +@media (min-width: 650px) { + .container { + width: 100%; } + .column, + .columns { + margin-left: 4%; } + .column:first-child, + .columns:first-child { + margin-left: 0; } + .one.column, + .one.columns { + width: 4.66666666667%; } + .two.columns { + width: 13.3333333333%; } + .three.columns { + width: 22%; } + .four.columns { + width: 30.6666666667%; } + .five.columns { + width: 39.3333333333%; } + .six.columns { + width: 48%; } + .seven.columns { + width: 56.6666666667%; } + .eight.columns { + width: 65.3333333333%; } + .nine.columns { + width: 74.0%; } + .ten.columns { + width: 82.6666666667%; } + 
.eleven.columns { + width: 91.3333333333%; } + .twelve.columns { + width: 100%; + margin-left: 0; } + .one-third.column { + width: 30.6666666667%; } + .two-thirds.column { + width: 65.3333333333%; } } +/* Customer Overrides */ +.footer { + text-align: center; + padding-top: 10%; } +p.module-desc { + font-size: 1.1em; +}a.link-seesrc { + font-style: italic; } +#toc-list { + word-wrap: break-word; } +ul.simple-toc { + list-style: none; } +ul.simple-toc a.reference-toplevel { + font-weight: bold; +} +ul.simple-toc-section { + list-style-type: circle; +} +cite { + font-style: italic !important; } +dt > pre { + border-color: rgba(0, 0, 0, 0.1); + margin: 15px 0px 5px; } +dd > pre { + border-color: rgba(0, 0, 0, 0.1); + margin-top: 8px; } +.item > dd { + margin-left: 10px; + margin-bottom: 30px; } +/* Nim line-numbered tables */ +.line-nums-table { + width: 100%; + table-layout: fixed; } +/* Nim search input */ +div#searchInput { + margin-bottom: 8px; +} +div#searchInput input#searchInput { + width: 10em; +} +div.search-groupby { + margin-bottom: 8px; +} +table.line-nums-table { + border-radius: 4px; + border: 1px solid #cccccc; + border-collapse: separate; + margin-top: 15px; + margin-bottom: 25px; } +.line-nums-table tbody { + border: none; } +.line-nums-table td pre { + border: none; + background-color: transparent; } +.line-nums-table td.blob-line-nums { + width: 28px; } +.line-nums-table td.blob-line-nums pre { + -webkit-filter: opacity(75%); + text-align: right; + border-color: transparent; + background-color: transparent; + padding-left: 0px; + margin-left: 0px; + padding-right: 0px; + margin-right: 0px; } +/* Docgen styles */ +/* Links */ +a { + text-decoration: none; } +a:hover, +a:focus { + text-decoration: underline; } +a:visited { + color: #00334d; } +a:focus { + outline: thin dotted #2d2d2d; + outline: 5px auto -webkit-focus-ring-color; + outline-offset: -2px; } +a:hover, +a:active { + outline: 0; } +sub, +sup { + position: relative; + font-size: 75%; + 
line-height: 0; + vertical-align: baseline; } +sup { + top: -0.5em; } +sub { + bottom: -0.25em; } +img { + width: auto; + height: auto; + max-width: 100%; + vertical-align: middle; + border: 0; + -ms-interpolation-mode: bicubic; } +@media print { + * { + text-shadow: none !important; + background: transparent !important; + box-shadow: none !important; } + a, + a:visited { + text-decoration: underline; } + a[href]:after { + content: " (" attr(href) ")"; } + abbr[title]:after { + content: " (" attr(title) ")"; } + .ir a:after, + a[href^="javascript:"]:after, + a[href^="#"]:after { + content: ""; } + pre, + blockquote { + border: 1px solid #999; + page-break-inside: avoid; } + thead { + display: table-header-group; } + tr, + img { + page-break-inside: avoid; } + img { + max-width: 100% !important; } + @page { + margin: 0.5cm; } + h1 { + page-break-before: always; } + h1.title { + page-break-before: avoid; } + p, + h2, + h3 { + orphans: 3; + widows: 3; } + h2, + h3 { + page-break-after: avoid; } } +.img-rounded { + -webkit-border-radius: 6px; + -moz-border-radius: 6px; + border-radius: 6px; } +.img-polaroid { + padding: 4px; + background-color: rgba(252, 248, 244, 0.75); + border: 1px solid #ccc; + border: 1px solid rgba(0, 0, 0, 0.2); + -webkit-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); + -moz-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); + box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); } +p { + margin: 0 0 8px; } +small { + font-size: 85%; } +strong { + font-weight: 600; } +em { + font-style: italic; } +cite { + font-style: normal; } +h1, +h2, +h3, +h4, +h5, +h6 { + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif; + font-weight: 600; + line-height: 20px; + color: inherit; + text-rendering: optimizelegibility; } +h1 { + font-size: 2em; + font-weight: 400; + padding-bottom: .15em; + border-bottom: 1px solid #aaaaaa; + margin-top: 1.0em; + line-height: 1.2em; } +h1.title { + padding-bottom: 1em; + border-bottom: 0px; + font-size: 2.75em; } +h2 
{ + font-size: 1.5em; + margin-top: 1.5em; } +h3 { + font-size: 1.3em; + font-style: italic; + margin-top: 0.75em; } +h4 { + font-size: 1.3em; + margin-top: 0.5em; } +h5 { + font-size: 1.2em; + margin-top: 0.25em; } +h6 { + font-size: 1.1em; } +ul, +ol { + padding: 0; + margin: 0 0 0px 15px; } +ul ul, +ul ol, +ol ol, +ol ul { + margin-bottom: 0; } +li { + line-height: 20px; } +dl { + margin-bottom: 20px; } +dt, +dd { + line-height: 20px; } +dt { + font-weight: bold; } +dd { + margin-left: 10px; + margin-bottom: 26px; } +hr { + margin: 20px 0; + border: 0; + border-top: 1px solid #eeeeee; + border-bottom: 1px solid #ffffff; } +abbr[title], +abbr[data-original-title] { + cursor: help; + border-bottom: 1px dotted #999999; } +abbr.initialism { + font-size: 90%; + text-transform: uppercase; } +blockquote { + padding: 0 0 0 15px; + margin: 0 0 20px; + border-left: 5px solid #EFEBE0; } +table.docinfo + blockquote, table.docinfo blockquote, h1 + blockquote { + border-left: 5px solid #c9c9c9; +} +table.docinfo + blockquote p, table.docinfo blockquote p, h1 + blockquote p { + margin-bottom: 0; + font-size: 15px; + font-weight: 200; + line-height: 1.5; + font-style: italic; } +q:before, +q:after, +blockquote:before, +blockquote:after { + content: ""; } +address { + display: block; + margin-bottom: 20px; + font-style: normal; + line-height: 20px; } +code, +pre { + font-family: "Source Code Pro", Monaco, Menlo, Consolas, "Courier New", monospace; + padding: 0 3px 2px; + font-weight: 500; + font-size: 12px; + -webkit-border-radius: 3px; + -moz-border-radius: 3px; + border-radius: 3px; } +.pre { + font-family: "Source Code Pro", Monaco, Menlo, Consolas, "Courier New", monospace; + font-weight: 600; + /*color: #504da6;*/ +} +code { + padding: 2px 4px; + white-space: nowrap; + border: 1px solid #777777; } +pre { + display: inline-block; + box-sizing: border-box; + min-width: calc(100% - 19.5px); + padding: 9.5px; + margin: 0.25em 10px 10px 10px; + font-size: 15px; + line-height: 
20px; + white-space: pre !important; + overflow-y: hidden; + overflow-x: visible; + border: 1px solid #cccccc; + -webkit-border-radius: 4px; + -moz-border-radius: 4px; + border-radius: 4px; } +pre.prettyprint { + margin-bottom: 20px; } +pre code { + padding: 0; + color: inherit; + white-space: pre; + overflow-x: visible; + background-color: transparent; + border: 0; } +.pre-scrollable { + max-height: 340px; + overflow-y: scroll; } +table { + max-width: 100%; + background-color: transparent; + border-collapse: collapse; + border-spacing: 0; } +table th, table td { + padding: 0px 8px 0px; +} +.table { + width: 100%; + margin-bottom: 20px; } +.table th, +.table td { + padding: 8px; + line-height: 20px; + text-align: left; + vertical-align: top; + border-top: 1px solid #444444; } +.table th { + font-weight: bold; } +.table thead th { + vertical-align: bottom; } +.table caption + thead tr:first-child th, +.table caption + thead tr:first-child td, +.table colgroup + thead tr:first-child th, +.table colgroup + thead tr:first-child td, +.table thead:first-child tr:first-child th, +.table thead:first-child tr:first-child td { + border-top: 0; } +.table tbody + tbody { + border-top: 2px solid #444444; } +.table-condensed th, +.table-condensed td { + padding: 4px 5px; } +.table-bordered { + border: 1px solid #444444; + border-collapse: separate; + *border-collapse: collapse; + border-left: 0; + -webkit-border-radius: 4px; + -moz-border-radius: 4px; + border-radius: 4px; } +.table-bordered th, +.table-bordered td { + border-left: 1px solid #444444; } +.table-bordered caption + thead tr:first-child th, +.table-bordered caption + tbody tr:first-child th, +.table-bordered caption + tbody tr:first-child td, +.table-bordered colgroup + thead tr:first-child th, +.table-bordered colgroup + tbody tr:first-child th, +.table-bordered colgroup + tbody tr:first-child td, +.table-bordered thead:first-child tr:first-child th, +.table-bordered tbody:first-child tr:first-child th, 
+.table-bordered tbody:first-child tr:first-child td { + border-top: 0; } +.table-bordered thead:first-child tr:first-child > th:first-child, +.table-bordered tbody:first-child tr:first-child > td:first-child, +.table-bordered tbody:first-child tr:first-child > th:first-child { + -webkit-border-top-left-radius: 4px; + border-top-left-radius: 4px; + -moz-border-radius-topleft: 4px; } +.table-bordered thead:first-child tr:first-child > th:last-child, +.table-bordered tbody:first-child tr:first-child > td:last-child, +.table-bordered tbody:first-child tr:first-child > th:last-child { + -webkit-border-top-right-radius: 4px; + border-top-right-radius: 4px; + -moz-border-radius-topright: 4px; } +.table-bordered thead:last-child tr:last-child > th:first-child, +.table-bordered tbody:last-child tr:last-child > td:first-child, +.table-bordered tbody:last-child tr:last-child > th:first-child, +.table-bordered tfoot:last-child tr:last-child > td:first-child, +.table-bordered tfoot:last-child tr:last-child > th:first-child { + -webkit-border-bottom-left-radius: 4px; + border-bottom-left-radius: 4px; + -moz-border-radius-bottomleft: 4px; } +.table-bordered thead:last-child tr:last-child > th:last-child, +.table-bordered tbody:last-child tr:last-child > td:last-child, +.table-bordered tbody:last-child tr:last-child > th:last-child, +.table-bordered tfoot:last-child tr:last-child > td:last-child, +.table-bordered tfoot:last-child tr:last-child > th:last-child { + -webkit-border-bottom-right-radius: 4px; + border-bottom-right-radius: 4px; + -moz-border-radius-bottomright: 4px; } +.table-bordered tfoot + tbody:last-child tr:last-child td:first-child { + -webkit-border-bottom-left-radius: 0; + border-bottom-left-radius: 0; + -moz-border-radius-bottomleft: 0; } +.table-bordered tfoot + tbody:last-child tr:last-child td:last-child { + -webkit-border-bottom-right-radius: 0; + border-bottom-right-radius: 0; + -moz-border-radius-bottomright: 0; } +.table-bordered caption + thead 
tr:first-child th:first-child, +.table-bordered caption + tbody tr:first-child td:first-child, +.table-bordered colgroup + thead tr:first-child th:first-child, +.table-bordered colgroup + tbody tr:first-child td:first-child { + -webkit-border-top-left-radius: 4px; + border-top-left-radius: 4px; + -moz-border-radius-topleft: 4px; } +.table-bordered caption + thead tr:first-child th:last-child, +.table-bordered caption + tbody tr:first-child td:last-child, +.table-bordered colgroup + thead tr:first-child th:last-child, +.table-bordered colgroup + tbody tr:first-child td:last-child { + -webkit-border-top-right-radius: 4px; + border-top-right-radius: 4px; + -moz-border-radius-topright: 4px; } +.table-striped tbody > tr:nth-child(odd) > td, +.table-hover tbody tr:hover > td, +table td[class*="span"], +table th[class*="span"], +.row-fluid table td[class*="span"], +.row-fluid table th[class*="span"] { + display: table-cell; + float: none; + margin-left: 0; } +.hero-unit { + padding: 60px; + margin-bottom: 30px; + font-size: 18px; + font-weight: 200; + line-height: 30px; + color: inherit; + -webkit-border-radius: 6px; + -moz-border-radius: 6px; + border-radius: 6px; } +.hero-unit h1 { + margin-bottom: 0; + font-size: 60px; + line-height: 1; + letter-spacing: -1px; + color: inherit; } +.hero-unit li { + line-height: 30px; } +/* rst2html default used to remove borders from tables and images */ +.borderless, table.borderless td, table.borderless th { + border: 0; } +table.borderless td, table.borderless th { + /* Override padding for "table.docutils td" with "! important". + The right padding separates the table cells. */ + padding: 0 0.5em 0 0 !important; } +.first { + /* Override more specific margin styles with "! important". 
*/ + margin-top: 0 !important; } +.last, .with-subtitle { + margin-bottom: 0 !important; } +.hidden { + display: none; } +a.toc-backref { + text-decoration: none; + color: #444444; } +blockquote.epigraph { + margin: 2em 5em; } +dl.docutils dd { + margin-bottom: 0.5em; } +object[type="image/svg+xml"], object[type="application/x-shockwave-flash"] { + overflow: hidden; } +/* Uncomment (and remove this text!) to get bold-faced definition list terms +dl.docutils dt { + font-weight: bold } +*/ +div.abstract { + margin: 2em 5em; } +div.abstract p.topic-title { + font-weight: bold; + text-align: center; } +div.admonition, div.attention, div.caution, div.danger, div.error, +div.hint, div.important, div.note, div.tip, div.warning { + margin: 2em; + border: medium outset; + padding: 1em; } +div.note, div.warning { + margin: 1.5em 0px; + border: none; } +div.note p.admonition-title, +div.warning p.admonition-title { + display: none; } +/* Clearfix + * http://css-tricks.com/snippets/css/clear-fix/ + */ +div.note:after, +div.warning:after { + content: ""; + display: table; + clear: both; } +div.note p:before, +div.warning p:before { + display: block; + float: left; + font-size: 4em; + line-height: 1em; + margin-right: 20px; + margin-left: 0em; + margin-top: -10px; + content: '\0270D'; + /*handwriting*/ } +div.warning p:before { + content: '\026A0'; + /*warning*/ } +div.admonition p.admonition-title, div.hint p.admonition-title, +div.important p.admonition-title, div.note p.admonition-title, +div.tip p.admonition-title { + font-weight: bold; + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif; } +div.attention p.admonition-title, div.caution p.admonition-title, +div.danger p.admonition-title, div.error p.admonition-title, +div.warning p.admonition-title, .code .error { + color: #b30000; + font-weight: bold; + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif; } +/* Uncomment (and remove this text!) 
to get reduced vertical space in + compound paragraphs. +div.compound .compound-first, div.compound .compound-middle { + margin-bottom: 0.5em } +div.compound .compound-last, div.compound .compound-middle { + margin-top: 0.5em } +*/ +div.dedication { + margin: 2em 5em; + text-align: center; + font-style: italic; } +div.dedication p.topic-title { + font-weight: bold; + font-style: normal; } +div.figure { + margin-left: 2em; + margin-right: 2em; } +div.footer, div.header { + clear: both; + font-size: smaller; } +div.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; } +div.line-block div.line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; } +div.sidebar { + margin: 0 0 0.5em 1em; + border: medium outset; + padding: 1em; + width: 40%; + float: right; + clear: right; } +div.sidebar p.rubric { + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif; + font-size: medium; } +div.system-messages { + margin: 5em; } +div.system-messages h1 { + color: #b30000; } +div.system-message { + border: medium outset; + padding: 1em; } +div.system-message p.system-message-title { + color: #b30000; + font-weight: bold; } +div.topic { + margin: 2em; } +h1.section-subtitle, h2.section-subtitle, h3.section-subtitle, +h4.section-subtitle, h5.section-subtitle, h6.section-subtitle { + margin-top: 0.4em; } +h1.title { + text-align: center; } +h2.subtitle { + text-align: center; } +hr.docutils { + width: 75%; } +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; } +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; } +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; } +.align-left { + text-align: left; } +.align-center { + clear: both; + text-align: center; } +.align-right { + text-align: right; } +/* reset inner alignment in figures */ 
+div.align-right { + text-align: inherit; } +/* div.align-center * { */ +/* text-align: left } */ +ul.simple > li { + margin-bottom: 0.5em } +ol.simple, ul.simple { + margin-bottom: 1em; } +ol.arabic { + list-style: decimal; } +ol.loweralpha { + list-style: lower-alpha; } +ol.upperalpha { + list-style: upper-alpha; } +ol.lowerroman { + list-style: lower-roman; } +ol.upperroman { + list-style: upper-roman; } +p.attribution { + text-align: right; + margin-left: 50%; } +p.caption { + font-style: italic; } +p.credits { + font-style: italic; + font-size: smaller; } +p.label { + white-space: nowrap; } +p.rubric { + font-weight: bold; + font-size: larger; + color: maroon; + text-align: center; } +p.sidebar-title { + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif; + font-weight: bold; + font-size: larger; } +p.sidebar-subtitle { + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif; + font-weight: bold; } +p.topic-title { + font-weight: bold; } +pre.address { + margin-bottom: 0; + margin-top: 0; + font: inherit; } +pre.literal-block, pre.doctest-block, pre.math, pre.code { + margin-left: 2em; + margin-right: 2em; } +/* line numbers */ +pre.code .keyword, code .keyword { + font-weight: bold; } +span.classifier { + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif; + font-style: oblique; } +span.classifier-delimiter { + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif; + font-weight: bold; } +span.interpreted { + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif; } +span.option { + white-space: nowrap; } +span.pre { + white-space: pre; } +span.problematic { + color: #b30000; } +span.section-subtitle { + /* font-size relative to parent (h1..h6 element) */ + font-size: 80%; } +table.citation { + border-left: solid 1px #666666; + margin-left: 1px; } +table.docinfo { + margin: 0em; + margin-top: 2em; + 
margin-bottom: 2em; + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif !important; +}table.docutils { + margin-top: 0.5em; + margin-bottom: 0.5em; } +table.footnote { + border-left: solid 1px #2d2d2d; + margin-left: 1px; } +table.docutils td, table.docutils th, +table.docinfo td, table.docinfo th { + padding-left: 0.5em; + padding-right: 0.5em; + vertical-align: top; } +table.docutils th.field-name, table.docinfo th.docinfo-name { + font-weight: 700; + text-align: left; + white-space: nowrap; + padding-left: 0; } +h1 tt.docutils, h2 tt.docutils, h3 tt.docutils, +h4 tt.docutils, h5 tt.docutils, h6 tt.docutils { + font-size: 100%; } +ul.auto-toc { + list-style-type: none; } +span.Keyword { + font-weight: 600;} +span.Comment, span.LongComment { + font-style: italic; + font-weight: 400; +} +/* Pop type, const, proc, and iterator defs in nim def blocks */ +dt pre > span.Identifier, dt pre > span.Operator { + font-weight: 700; } +dt pre > span.Identifier ~ span.Identifier, dt pre > span.Operator ~ span.Identifier { + color: inherit; + font-weight: inherit; } +dt pre > span.Operator ~ span.Identifier, dt pre > span.Operator ~ span.Operator { + color: inherit; + font-weight: inherit; } +/* Nim sprite for the footer (taken from main page favicon) */ +.nim-sprite { + display: inline-block; + height: 12px; + width: 12px; + background-position: 0 0; + background-size: 12px 12px; + -webkit-filter: opacity(50%); + background-repeat: no-repeat; + background-image: 
url("data:image/x-icon;base64,AAABAAEAEBAAAAEAIABoBAAAFgAAACgAAAAQAAAAIAAAAAEAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AAAAAAUAAAAF////AP///wD///8A////AP///wD///8A////AP///wD///8A////AAAAAAIAAABbAAAAlQAAAKIAAACbAAAAmwAAAKIAAACVAAAAWwAAAAL///8A////AP///wD///8A////AAAAABQAAADAAAAAYwAAAA3///8A////AP///wD///8AAAAADQAAAGMAAADAAAAAFP///wD///8A////AP///wAAAACdAAAAOv///wD///8A////AP///wD///8A////AP///wD///8AAAAAOgAAAJ3///8A////AP///wAAAAAnAAAAcP///wAAAAAoAAAASv///wD///8A////AP///wAAAABKAAAAKP///wAAAABwAAAAJ////wD///8AAAAAgQAAABwAAACIAAAAkAAAAJMAAACtAAAAFQAAABUAAACtAAAAkwAAAJAAAACIAAAAHAAAAIH///8A////AAAAAKQAAACrAAAAaP///wD///8AAAAARQAAANIAAADSAAAARf///wD///8AAAAAaAAAAKsAAACk////AAAAADMAAACcAAAAnQAAABj///8A////AP///wAAAAAYAAAAGP///wD///8A////AAAAABgAAACdAAAAnAAAADMAAAB1AAAAwwAAAP8AAADpAAAAsQAAAE4AAAAb////AP///wAAAAAbAAAATgAAALEAAADpAAAA/wAAAMMAAAB1AAAAtwAAAOkAAAD/AAAA/wAAAP8AAADvAAAA3gAAAN4AAADeAAAA3gAAAO8AAAD/AAAA/wAAAP8AAADpAAAAtwAAAGUAAAA/AAAA3wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAAD/AAAA/wAAAP8AAADfAAAAPwAAAGX///8A////AAAAAEgAAADtAAAAvwAAAL0AAADGAAAA7wAAAO8AAADGAAAAvQAAAL8AAADtAAAASP///wD///8A////AP///wD///8AAAAAO////wD///8A////AAAAAIcAAACH////AP///wD///8AAAAAO////wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A////AP///wD///8A//8AAP//AAD4HwAA7/cAAN/7AAD//wAAoYUAAJ55AACf+QAAh+EAAAAAAADAAwAA4AcAAP5/AAD//wAA//8AAA=="); + margin-bottom: -5px; } +div.pragma { + display: none; +} +span.pragmabegin { + cursor: pointer; +} +span.pragmaend { + cursor: pointer; +} +div.search_results { + background-color: antiquewhite; + margin: 3em; + padding: 1em; + border: 1px solid #4d4d4d; +} +div#global-links ul { + margin-left: 0; + list-style-type: none; +} \ No newline at end of file diff --git a/dynamic_stack_arrays.html b/dynamic_stack_arrays.html new 
file mode 100644 index 000000000..3cad7943f --- /dev/null +++ b/dynamic_stack_arrays.html @@ -0,0 +1,985 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/dynamic_stack_arrays + + + + + + + + + +Arraymancer - src/arraymancer/laser/dynamic_stack_arrays + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/dynamic_stack_arrays

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Types

+
+
+
DynamicStackArray[T] = object
+  data*: array[LASER_MAXRANK, T]
+  len*: int
+
+
+ + Custom stack allocated array that behaves like seq. We must avoid seq creation when modifying tensor shapes, strides or slicing in a tight loop. Seq creation are also not possible within an OpenMP loop. +   Source +Edit + +
+
+ +
+
+
+

Consts

+
+
+
LASER_MAXRANK {.intdefine.} = 6
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
func `$`(a: DynamicStackArray): string
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func `&`(a, b: DynamicStackArray): DynamicStackArray
+
+ + +   Source +Edit + +
+
+
+
func `&`[T](a: DynamicStackArray[T]; value: T): DynamicStackArray[T] {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func `==`(a, s: DynamicStackArray): bool
+
+ + +   Source +Edit + +
+
+
+
func `==`[T](a: DynamicStackArray[T]; s: openArray[T]): bool
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func `@`[T](a: DynamicStackArray[T]): seq[T]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func `[]`[T](a: DynamicStackArray[T]; idx: Index): T {.inline.}
+
+ + +   Source +Edit + +
+
+
+
func `[]`[T](a: DynamicStackArray[T]; slice: Slice[int]): DynamicStackArray[T]
+
+ + +   Source +Edit + +
+
+
+
func `[]`[T](a: var DynamicStackArray[T]; idx: Index): var T {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func `[]=`[T](a: var DynamicStackArray[T]; idx: Index; v: T) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func add[T](a: var DynamicStackArray[T]; value: T) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func concat[T](dsas: varargs[DynamicStackArray[T]]): DynamicStackArray[T]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func copyFrom(a: var DynamicStackArray; s: DynamicStackArray) {.inline.}
+
+ + +   Source +Edit + +
+
+
+
func copyFrom(a: var DynamicStackArray; s: varargs[int])
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func delete(a: var DynamicStackArray; index: int)
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func high(a: DynamicStackArray): int {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func insert[T](a: var DynamicStackArray[T]; value: T; index: int = 0)
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func low(a: DynamicStackArray): int {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func max[T](a: DynamicStackArray[T]): T
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func product[T: SomeNumber](a: DynamicStackArray[T]): T
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func reversed(a: DynamicStackArray): DynamicStackArray
+
+ + +   Source +Edit + +
+
+
+
func reversed(a: DynamicStackArray; result: var DynamicStackArray)
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func setLen(a: var DynamicStackArray; len: int) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+
+

Iterators

+
+
+
+
iterator items[T](a: DynamicStackArray[T]): T
+
+ + +   Source +Edit + +
+
+ +
+
+
+
iterator mitems[T](a: var DynamicStackArray[T]): var T
+
+ + +   Source +Edit + +
+
+ +
+
+
+
iterator mpairs[T](a: var DynamicStackArray[T]): (int, var T)
+
+ + +   Source +Edit + +
+
+ +
+
+
+
iterator pairs[T](a: DynamicStackArray[T]): (int, T)
+
+ + +   Source +Edit + +
+
+ +
+
+
+
iterator zip[T, U](a: DynamicStackArray[T]; b: DynamicStackArray[U]): (T, T)
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/dynamic_stack_arrays.idx b/dynamic_stack_arrays.idx new file mode 100644 index 000000000..259b62435 --- /dev/null +++ b/dynamic_stack_arrays.idx @@ -0,0 +1,36 @@ +nimTitle dynamic_stack_arrays dynamic_stack_arrays.html module src/arraymancer/laser/dynamic_stack_arrays 0 +nim LASER_MAXRANK dynamic_stack_arrays.html#LASER_MAXRANK const LASER_MAXRANK 6 +nim DynamicStackArray dynamic_stack_arrays.html#DynamicStackArray object DynamicStackArray 14 +nim copyFrom dynamic_stack_arrays.html#copyFrom,DynamicStackArray,varargs[int] proc copyFrom(a: var DynamicStackArray; s: varargs[int]) 21 +nim copyFrom dynamic_stack_arrays.html#copyFrom,DynamicStackArray,DynamicStackArray proc copyFrom(a: var DynamicStackArray; s: DynamicStackArray) 26 +nim setLen dynamic_stack_arrays.html#setLen,DynamicStackArray,int proc setLen(a: var DynamicStackArray; len: int) 29 +nim low dynamic_stack_arrays.html#low,DynamicStackArray proc low(a: DynamicStackArray): int 33 +nim high dynamic_stack_arrays.html#high,DynamicStackArray proc high(a: DynamicStackArray): int 36 +nim `[]` dynamic_stack_arrays.html#[],DynamicStackArray[T],Index proc `[]`[T](a: DynamicStackArray[T]; idx: Index): T 45 +nim `[]` dynamic_stack_arrays.html#[],DynamicStackArray[T],Index_2 proc `[]`[T](a: var DynamicStackArray[T]; idx: Index): var T 48 +nim `[]=` dynamic_stack_arrays.html#[]=,DynamicStackArray[T],Index,T proc `[]=`[T](a: var DynamicStackArray[T]; idx: Index; v: T) 51 +nim `[]` dynamic_stack_arrays.html#[],DynamicStackArray[T],Slice[int] proc `[]`[T](a: DynamicStackArray[T]; slice: Slice[int]): DynamicStackArray[T] 54 +nim items dynamic_stack_arrays.html#items.i,DynamicStackArray[T] iterator items[T](a: DynamicStackArray[T]): T 63 +nim mitems dynamic_stack_arrays.html#mitems.i,DynamicStackArray[T] iterator mitems[T](a: var DynamicStackArray[T]): var T 67 +nim pairs dynamic_stack_arrays.html#pairs.i,DynamicStackArray[T] iterator pairs[T](a: DynamicStackArray[T]): (int, T) 71 +nim mpairs 
dynamic_stack_arrays.html#mpairs.i,DynamicStackArray[T] iterator mpairs[T](a: var DynamicStackArray[T]): (int, var T) 75 +nim `@` dynamic_stack_arrays.html#@,DynamicStackArray[T] proc `@`[T](a: DynamicStackArray[T]): seq[T] 79 +nim `$` dynamic_stack_arrays.html#$,DynamicStackArray proc `$`(a: DynamicStackArray): string 84 +nim product dynamic_stack_arrays.html#product,DynamicStackArray[T: SomeNumber] proc product[T: SomeNumber](a: DynamicStackArray[T]): T 93 +nim insert dynamic_stack_arrays.html#insert,DynamicStackArray[T],T,int proc insert[T](a: var DynamicStackArray[T]; value: T; index: int = 0) 98 +nim delete dynamic_stack_arrays.html#delete,DynamicStackArray,int proc delete(a: var DynamicStackArray; index: int) 104 +nim add dynamic_stack_arrays.html#add,DynamicStackArray[T],T proc add[T](a: var DynamicStackArray[T]; value: T) 110 +nim `&` dynamic_stack_arrays.html#&,DynamicStackArray[T],T proc `&`[T](a: DynamicStackArray[T]; value: T): DynamicStackArray[T] 114 +nim `&` dynamic_stack_arrays.html#&,DynamicStackArray,DynamicStackArray proc `&`(a, b: DynamicStackArray): DynamicStackArray 118 +nim reversed dynamic_stack_arrays.html#reversed,DynamicStackArray proc reversed(a: DynamicStackArray): DynamicStackArray 124 +nim reversed dynamic_stack_arrays.html#reversed,DynamicStackArray,DynamicStackArray proc reversed(a: DynamicStackArray; result: var DynamicStackArray) 129 +nim `==` dynamic_stack_arrays.html#==,DynamicStackArray[T],openArray[T] proc `==`[T](a: DynamicStackArray[T]; s: openArray[T]): bool 136 +nim `==` dynamic_stack_arrays.html#==,DynamicStackArray,DynamicStackArray proc `==`(a, s: DynamicStackArray): bool 144 +nim zip dynamic_stack_arrays.html#zip.i,DynamicStackArray[T],DynamicStackArray[U] iterator zip[T, U](a: DynamicStackArray[T]; b: DynamicStackArray[U]): (T, T) 152 +nim concat dynamic_stack_arrays.html#concat,varargs[DynamicStackArray[T]] proc concat[T](dsas: varargs[DynamicStackArray[T]]): DynamicStackArray[T] 158 +nim max 
dynamic_stack_arrays.html#max,DynamicStackArray[T] proc max[T](a: DynamicStackArray[T]): T 172 +nimgrp reversed dynamic_stack_arrays.html#reversed-procs-all proc 124 +nimgrp & dynamic_stack_arrays.html#&-procs-all proc 114 +nimgrp copyfrom dynamic_stack_arrays.html#copyFrom-procs-all proc 21 +nimgrp == dynamic_stack_arrays.html#==-procs-all proc 136 +nimgrp [] dynamic_stack_arrays.html#[]-procs-all proc 45 diff --git a/einsum.html b/einsum.html new file mode 100644 index 000000000..e48d68dc8 --- /dev/null +++ b/einsum.html @@ -0,0 +1,508 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/einsum + + + + + + + + + +Arraymancer - src/arraymancer/tensor/einsum + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/einsum

+
+ +
+   Source +Edit + +
+ +

This module provides Einstein summation for an arbitrary number of tensors.

+

Einstein summation describes a special application of index notation in which indices that appear more than once are implicitly summed over. This allows for a concise notation of many vector / matrix / tensor calculations, while exactly representing the required calculation.

+

In general Einstein summation is a subset of Ricci calculus.

+

The implementation of einsum in different languages however, typically goes above and beyond actual Einstein summation, allowing for many aspects of Ricci calculus.

+ +

Simple Einstein summation examples

Typical examples include matrix-vector multiplication, matrix-matrix multiplication or the cross product. The examples below use the einsum / notation for the elements of tensors, namely m[i,j] for element i,j of the matrix m, instead of the more mathematical notation m_ij.

+ +

Matrix-vector multiplication

Let m be an NxM matrix and v a M vector. Then matrix-vector multiplication m * v is defined as: w[i] = \sum_j m[i,j] * v[j]. The result is an N vector w consisting of elements w[i]. Since j appears twice on the RHS of the equation, Einstein summation implies that the sum over j is implicit, hence we can write:

+

w[i] = m[i,j] * v[j].

+ +

Matrix-matrix multiplication

The same can be applied to matrix-matrix multiplication. Let m, n be two compatible matrices (both NxN or NxM and MxN) with elements m[i,j] and n[i,j]. Matrix-matrix multiplication is defined as

+

a[i,k] = \sum_j m[i,j] * n[j,k]

+

and thus in Einstein summation:

+

a[i,k] = m[i,j] * n[j,k].

+ +

Cross-product of two vectors

The cross product of two 3 vectors v, w can be conveniently defined using the Levi-Civita symbol \epsilon_{ijk}:

+

a[i] = \epsilon_{ijk} v[j] * w[k],

+

which implies j and k are summed over, while i is kept for the resulting tensor.

+ +

More complex examples

In this implementation of einsum (similar to other einsum implementations), it's also possible to explicitly keep different dimensions of the multiplied tensors or even perform calculations without a single index appearing mutliple times, for instance to transpose a tensor. For these cases the explicit form of the einsum macro has to be used, see below.

+ +

Transposition of a matrix

Transposition of a matrix can be expressed in index notation simply as an exchange of indices, namely let m be an NxM matrix, the transposed MxN matrix m^T is written as:

+

m[j,i] = m[i,j].

+ +

Hadamard product

The Hadamard product defines the product of two NxM matrices n, m in which the matrices are multiplied element wise. It is a good example of the extension of einsum over standard Einstein summation:

+

a[i,j] = m[i,j] * n[i,j].

+

Naive Einstein summation would demand a sum over both i and j, resulting in a scalar on the LHS instead of another NxM matrix.

+ +

Contracting a whole matrix

Contraction of a full matrix describes summing all elements of a matrix m, resulting in a scalar a. It is expressed by:

+

a = m[i,i].

+ +

The einsum macro

The einsum macro provides two different usage paradigms.

+
  • implicit <- normal Einstein summation
  • +
  • explicit <- potential extended Einstein summation
  • +
+

The macro takes a varargs[Tensor] and a single statement. It returns a Tensor[T], where T is deduced from the subtype of the given tensors, if the result is not a scalar. For a scalar result the return value is of type T. Note that the type of all given tensors must match!

+

The statement given to the macro is just a single line making use of Einstein summation as in all the examples above. As a matter of fact all examples above are valid statements for the einsum macro!

+

Of course only tensors, which are given to the macro in the varargs may be used in the statement.

+

If only the RHS of the examples above are given, the required indices for the resulting tensor are automatically calculated using pure Einstein summation. Assuming a, b are two 2D arraymancer tensors , we could express their matrix mutiplication as

+
let c = einsum(a, b):
+  a[i,j] * b[j,k]

Of course the same can be written in explicit form:

+
let c = einsum(a, b):
+  c[i,k] = a[i,j] * b[j,k]

A few things must be noted here for the explicit case:

+
  • the indices on the LHS are taken as "the truth"! Any index appearing here will not be summed over.
  • +
  • the order on the LHS is taken into account, allowing for transposing dimensions.
  • +
  • the identifier used on the LHS is arbitrary. It can match what the user assigns to, but need not.
  • +
+

For many more examples for typical applications, take a look at the test case ../../tests/tensor/test_einsum.nim.

+ +

Implementation details

The macro calculates, which indices must be contracted and which remain in the final tensor. For each appearing index (of either case) we create a for loop, while the contracting for loops appear within the non contracting indices.

+

The macro creates a block, in which the code is produced and returns the temporary tensor used in it.

+

It also forces the tensors into contiguous, row major form by creating local copies with asContiguous.

+

+ +
+

Macros

+
+
+
+
macro einsum(tensors: varargs[typed]; stmt: untyped): untyped
+
+ +

Performs Einstein summation of the given tensors defined by the stmt. See the top of the module for an explanation on Einstein summation.

+

Let a, b some 2D tensors (matrices), then the usage to perform matrix multiplication of the two might look like: .. code:: nim # implicit Einstein summation let c = einsum(a, b): ai,j * bj,k # explicit Einstein summation. Note that identifier d in statement # is arbitrary and need not match what will be assigned to. let d = einsum(a, b): di,k = ai,j * bj,k # explicit Einstein summation

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/einsum.idx b/einsum.idx new file mode 100644 index 000000000..fcda706d9 --- /dev/null +++ b/einsum.idx @@ -0,0 +1,12 @@ +nimTitle einsum einsum.html module src/arraymancer/tensor/einsum 0 +nim einsum einsum.html#einsum.m,varargs[typed],untyped macro einsum(tensors: varargs[typed]; stmt: untyped): untyped 595 +heading Simple Einstein summation examples einsum.html#simple-einstein-summation-examples Simple Einstein summation examples 0 +heading Matrix-vector multiplication einsum.html#simple-einstein-summation-examples-matrixminusvector-multiplication Matrix-vector multiplication 0 +heading Matrix-matrix multiplication einsum.html#simple-einstein-summation-examples-matrixminusmatrix-multiplication Matrix-matrix multiplication 0 +heading Cross-product of two vectors einsum.html#simple-einstein-summation-examples-crossminusproduct-of-two-vectors Cross-product of two vectors 0 +heading More complex examples einsum.html#more-complex-examples More complex examples 0 +heading Transposition of a matrix einsum.html#more-complex-examples-transposition-of-a-matrix Transposition of a matrix 0 +heading Hadamard product einsum.html#more-complex-examples-hadamard-product Hadamard product 0 +heading Contracting a whole matrix einsum.html#more-complex-examples-contracting-a-whole-matrix Contracting a whole matrix 0 +heading The nimeinsum macro einsum.html#the-nimeinsum-macro The einsum macro 0 +heading Implementation details einsum.html#the-nimeinsum-macro-implementation-details Implementation details 0 diff --git a/embedding.html b/embedding.html new file mode 100644 index 000000000..0c37f8c35 --- /dev/null +++ b/embedding.html @@ -0,0 +1,595 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/layers/embedding + + + + + + + + + +Arraymancer - src/arraymancer/nn/layers/embedding + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/layers/embedding

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
Embedding[T] = object
+  weight*: Variable[AnyTensor[T]]
+  paddingIdx*: BiggestInt
+
+
+ + +   Source +Edit + +
+
+
+
EmbeddingGate[TT; scaled; Idx] {.final.} = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc embedding[TT; Idx: VocabIdx](input_vocab_id: Tensor[Idx];
+                                  weight: Variable[TT]; padding_idx: Idx = -1;
+                                  scale_grad_by_freq: static[bool] = false): Variable[
+    TT]
+
+ + Input:
  • A tensor of vocabulary indices, either: +

    Vocabulary can be words, characters, series of words. Each item in your vocabulary must be encoded into an unique integer before being passed to the Embedding layer.

    +
  • +
  • A weight matrix that maps those indices to the embedding vector space of shape vocabulary_size, embedding_size.
  • +
  • An optional padding_idx if an index corresponds to the absence of words (padding) This is necessary to support variable-length sentences. By default, the padding_idx is -1.
  • +
  • An optional parameter to scale the gradient by the words inverse document frequency. This divides the gradient of each words by their occurences in the minibatch. This regularise variations in the weight of very frequent words.
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc forward[T; Idx: VocabIdx](self: Embedding[T]; input: Tensor[Idx]): Variable[
+    AnyTensor[T]]
+
+ + Runs input through embedding layer. Each item in your vocabulary/input must be encoded into an unique integer before being passed to the Embedding layer. +   Source +Edit + +
+
+ +
+
+
+
proc init[T](ctx: Context[Tensor[T]]; layerType: typedesc[Embedding[T]];
+             vocabSize, embedSize: int; paddingIdx: VocabIdx = -1): Embedding[T]
+
+ + Creates an embedding layer. Input:
- ``vocabSize`` Size of the vocabulary
+- ``embedSize`` Embedding size
+- ``paddingIdx`` Optional parameter for when an index corresponds to the absence of words
+

Returns the created Embedding.

+ +   Source +Edit + +
+
+ +
+
+
+
func inShape[T](self: Embedding[T]): seq[int]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func outShape[T](self: Embedding[T]): seq[int]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/embedding.idx b/embedding.idx new file mode 100644 index 000000000..d4f42e055 --- /dev/null +++ b/embedding.idx @@ -0,0 +1,8 @@ +nimTitle embedding embedding.html module src/arraymancer/nn/layers/embedding 0 +nim EmbeddingGate embedding.html#EmbeddingGate type EmbeddingGate 21 +nim embedding embedding.html#embedding,Tensor[Idx: VocabIdx],Variable[TT],Idx,static[bool] proc embedding[TT; Idx: VocabIdx](input_vocab_id: Tensor[Idx]; weight: Variable[TT];\n padding_idx: Idx = -1;\n scale_grad_by_freq: static[bool] = false): Variable[\n TT] 69 +nim Embedding embedding.html#Embedding object Embedding 106 +nim init embedding.html#init,Context[Tensor[T]],typedesc[Embedding[T]],int,int,VocabIdx proc init[T](ctx: Context[Tensor[T]]; layerType: typedesc[Embedding[T]];\n vocabSize, embedSize: int; paddingIdx: VocabIdx = -1): Embedding[T] 110 +nim forward embedding.html#forward,Embedding[T],Tensor[Idx: VocabIdx] proc forward[T; Idx: VocabIdx](self: Embedding[T]; input: Tensor[Idx]): Variable[\n AnyTensor[T]] 143 +nim outShape embedding.html#outShape,Embedding[T] proc outShape[T](self: Embedding[T]): seq[int] 153 +nim inShape embedding.html#inShape,Embedding[T] proc inShape[T](self: Embedding[T]): seq[int] 155 diff --git a/exporting.html b/exporting.html new file mode 100644 index 000000000..66391a02b --- /dev/null +++ b/exporting.html @@ -0,0 +1,592 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/exporting + + + + + + + + + +Arraymancer - src/arraymancer/tensor/exporting + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/exporting

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc export_tensor[T](t: Tensor[T]): tuple[shape: seq[int], strides: seq[int],
+    data: seq[T]] {.noSideEffect.}
+
+ + Export the tensor as a tuple containing
  • shape
  • +
  • strides
  • +
  • data
  • +
+

If the tensor was not contiguous (a slice for example), it is reshaped. Data is exported in C order (last index changes the fastest, column in 2D case)

+ +   Source +Edit + +
+
+ +
+
+
+
proc toFlatSeq[T](t: Tensor[T]): seq[T]
+
+ + Export the data of the Tensor flattened as a Seq +   Source +Edit + +
+
+ +
+
+
+
proc toRawSeq[T](t: Tensor[T]): seq[T] {.noSideEffect, ...deprecated: "This proc cannot be reimplemented in a backward compatible way.".}
+
+
+ Deprecated: This proc cannot be reimplemented in a backward compatible way. +
+ +

Convert a tensor to the raw sequence of data. Important: Up to v0.6.0, Arraymancer always took full ownership of the data it operated on. In particular, even after slicing, it kept tracked of the full memory allocated initially.

+

This proc used to return the raw in-memory representation of the data without reshaping due to views/slices and offsets This is not true anymore.

+

It instead returns the canonical row-major serialization of the data.

+

It is recommended that you implement your own serialization using Arraymancer's unsafe_raw_buf + shape + strides + offset or that you raise your use-case in the issue tracker https://github.com/mratsim/Arraymancer/issues so that more suitable primitives can be crafted

+ +   Source +Edit + +
+
+ +
+
+
+
proc toSeq1D[T](t: Tensor[T]): seq[T]
+
+ + Exports a rank-1 tensor to a 1D sequence +   Source +Edit + +
+
+ +
+
+
+
proc toSeq2D[T](t: Tensor[T]): seq[seq[T]]
+
+ + Exports a rank-2 tensor to a 2D sequence. +   Source +Edit + +
+
+ +
+
+
+
proc toSeq3D[T](t: Tensor[T]): seq[seq[seq[T]]]
+
+ + Exports a rank-3 tensor to a 3D sequence. +   Source +Edit + +
+
+ +
+
+
+
proc toSeq4D[T](t: Tensor[T]): seq[seq[seq[seq[T]]]]
+
+ + Exports a rank-4 tensor to a 4D sequence. +   Source +Edit + +
+
+ +
+
+
+
proc toSeq5D[T](t: Tensor[T]): seq[seq[seq[seq[seq[T]]]]]
+
+ + Exports a rank-5 tensor to a 5D sequence. +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/exporting.idx b/exporting.idx new file mode 100644 index 000000000..dc9872636 --- /dev/null +++ b/exporting.idx @@ -0,0 +1,9 @@ +nimTitle exporting exporting.html module src/arraymancer/tensor/exporting 0 +nim toRawSeq exporting.html#toRawSeq,Tensor[T] proc toRawSeq[T](t: Tensor[T]): seq[T] 19 +nim toFlatSeq exporting.html#toFlatSeq,Tensor[T] proc toFlatSeq[T](t: Tensor[T]): seq[T] 48 +nim export_tensor exporting.html#export_tensor,Tensor[T] proc export_tensor[T](t: Tensor[T]): tuple[shape: seq[int], strides: seq[int],\n data: seq[T]] 54 +nim toSeq1D exporting.html#toSeq1D,Tensor[T] proc toSeq1D[T](t: Tensor[T]): seq[T] 69 +nim toSeq2D exporting.html#toSeq2D,Tensor[T] proc toSeq2D[T](t: Tensor[T]): seq[seq[T]] 77 +nim toSeq3D exporting.html#toSeq3D,Tensor[T] proc toSeq3D[T](t: Tensor[T]): seq[seq[seq[T]]] 88 +nim toSeq4D exporting.html#toSeq4D,Tensor[T] proc toSeq4D[T](t: Tensor[T]): seq[seq[seq[seq[T]]]] 102 +nim toSeq5D exporting.html#toSeq5D,Tensor[T] proc toSeq5D[T](t: Tensor[T]): seq[seq[seq[seq[seq[T]]]]] 119 diff --git a/filling_data.html b/filling_data.html new file mode 100644 index 000000000..fe1fb8be8 --- /dev/null +++ b/filling_data.html @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/filling_data + + + + + + + + + +Arraymancer - src/arraymancer/tensor/filling_data + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/filling_data

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc copy_from[T](dst: var Tensor[T]; src: Tensor[T])
+
+ + Copy the data from a source Tensor. Both tensors must have the same number of elements but do not need to have the same shape. Data is copied without re-allocation. Warning โš  The destination tensor data will be overwritten. It however conserves its shape and strides. +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/filling_data.idx b/filling_data.idx new file mode 100644 index 000000000..793b74496 --- /dev/null +++ b/filling_data.idx @@ -0,0 +1,2 @@ +nimTitle filling_data filling_data.html module src/arraymancer/tensor/filling_data 0 +nim copy_from filling_data.html#copy_from,Tensor[T],Tensor[T] proc copy_from[T](dst: var Tensor[T]; src: Tensor[T]) 19 diff --git a/flatten.html b/flatten.html new file mode 100644 index 000000000..f6764fb1f --- /dev/null +++ b/flatten.html @@ -0,0 +1,533 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/layers/flatten + + + + + + + + + +Arraymancer - src/arraymancer/nn/layers/flatten + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/layers/flatten

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor, autograd +
+
+
+

Types

+
+
+
Flatten[T] = object
+  
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc forward[T](self: Flatten[T]; input: Variable[Tensor[T]]): Variable[
+    Tensor[T]]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc init[T](ctx: Context[Tensor[T]]; layerType: typedesc[Flatten[T]];
+             inShape: seq[int]): Flatten[T]
+
+ + Creates a flattening layer, which "flattens" its input by reshaping it into a one-dimensional tensor. inShape describes the expected shape of the input. +   Source +Edit + +
+
+ +
+
+
+
func inShape[T](self: Flatten[T]): seq[int]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func outShape[T](self: Flatten[T]): seq[int]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/flatten.idx b/flatten.idx new file mode 100644 index 000000000..710f9837d --- /dev/null +++ b/flatten.idx @@ -0,0 +1,6 @@ +nimTitle flatten flatten.html module src/arraymancer/nn/layers/flatten 0 +nim Flatten flatten.html#Flatten object Flatten 6 +nim init flatten.html#init,Context[Tensor[T]],typedesc[Flatten[T]],seq[int] proc init[T](ctx: Context[Tensor[T]]; layerType: typedesc[Flatten[T]];\n inShape: seq[int]): Flatten[T] 9 +nim forward flatten.html#forward,Flatten[T],Variable[Tensor[T]] proc forward[T](self: Flatten[T]; input: Variable[Tensor[T]]): Variable[Tensor[T]] 19 +nim outShape flatten.html#outShape,Flatten[T] proc outShape[T](self: Flatten[T]): seq[int] 23 +nim inShape flatten.html#inShape,Flatten[T] proc inShape[T](self: Flatten[T]): seq[int] 28 diff --git a/foreach.html b/foreach.html new file mode 100644 index 000000000..cff35aefc --- /dev/null +++ b/foreach.html @@ -0,0 +1,572 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/strided_iteration/foreach + + + + + + + + + +Arraymancer - src/arraymancer/laser/strided_iteration/foreach + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/strided_iteration/foreach

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Macros

+
+
+
+
macro forEach(args: varargs[untyped]): untyped
+
+ +

Parallel iteration over one or more tensors

+

Format: forEach x in a, y in b, z in c: x += y * z

+

The iteration strategy is selected at runtime depending of the tensors memory layout. If you know at compile-time that the tensors are contiguous or strided, use forEachContiguous or forEachStrided instead. Runtime selection requires duplicating the code body.

+

In the contiguous case: The threshold for parallelization by default is OMP_MEMORY_BOUND_GRAIN_SIZE = 1024 elementwise operations to process per cores.

+

Compiler will also be hinted to unroll loop for SIMD vectorization.

+

Otherwise if tensor is strided: The threshold for parallelization by default is OMP_MEMORY_BOUND_GRAIN_SIZE div OMP_NON_CONTIGUOUS_SCALE_FACTOR = 1024/4 = 256 elementwise operations to process per cores.

+

Use forEachStaged to fine-tune this default.

+ +   Source +Edit + +
+
+ +
+
+
+
macro forEachContiguous(args: varargs[untyped]): untyped
+
+ +

Parallel iteration over one or more contiguous tensors.

+

Format: forEachContiguous x in a, y in b, z in c: x += y * z

+

The threshold for parallelization by default is OMP_MEMORY_BOUND_GRAIN_SIZE = 1024 elementwise operations to process per cores.

+

Compiler will also be hinted to unroll loop for SIMD vectorization.

+

Use forEachStaged to fine-tune those defaults.

+ +   Source +Edit + +
+
+ +
+
+
+
macro forEachContiguousSerial(args: varargs[untyped]): untyped
+
+ +

Serial iteration over one or more contiguous tensors.

+

Format: forEachContiguousSerial x in a, y in b, z in c: x += y * z

+ +   Source +Edit + +
+
+ +
+
+
+
macro forEachSerial(args: varargs[untyped]): untyped
+
+ +

Serial iteration over one or more tensors

+

Format: forEachSerial x in a, y in b, z in c: x += y * z

+

openMP parameters will be ignored

+

The iteration strategy is selected at runtime depending of the tensors memory layout. If you know at compile-time that the tensors are contiguous or strided, use forEachContiguousSerial or forEachStridedSerial instead. Runtime selection requires duplicating the code body.

+ +   Source +Edit + +
+
+ +
+
+
+
macro forEachStrided(args: varargs[untyped]): untyped
+
+ +

Parallel iteration over one or more tensors of unknown strides for example resulting from most slices.

+

Format: forEachStrided x in a, y in b, z in c: x += y * z

+

The threshold for parallelization by default is OMP_MEMORY_BOUND_GRAIN_SIZE div OMP_NON_CONTIGUOUS_SCALE_FACTOR = 1024/4 = 256 elementwise operations to process per cores.

+

Use forEachStaged to fine-tune this default.

+ +   Source +Edit + +
+
+ +
+
+
+
macro forEachStridedSerial(args: varargs[untyped]): untyped
+
+ +

Serial iteration over one or more tensors of unknown strides for example resulting from most slices.

+

Format: forEachStridedSerial x in a, y in b, z in c: x += y * z

+ +   Source +Edit + +
+
+ +
+ +
+
+
+

Exports

+
+ omp_suffix +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/foreach.idx b/foreach.idx new file mode 100644 index 000000000..4f5a4b3f3 --- /dev/null +++ b/foreach.idx @@ -0,0 +1,7 @@ +nimTitle foreach foreach.html module src/arraymancer/laser/strided_iteration/foreach 0 +nim forEachContiguous foreach.html#forEachContiguous.m,varargs[untyped] macro forEachContiguous(args: varargs[untyped]): untyped 191 +nim forEachContiguousSerial foreach.html#forEachContiguousSerial.m,varargs[untyped] macro forEachContiguousSerial(args: varargs[untyped]): untyped 206 +nim forEachStrided foreach.html#forEachStrided.m,varargs[untyped] macro forEachStrided(args: varargs[untyped]): untyped 214 +nim forEachStridedSerial foreach.html#forEachStridedSerial.m,varargs[untyped] macro forEachStridedSerial(args: varargs[untyped]): untyped 229 +nim forEach foreach.html#forEach.m,varargs[untyped] macro forEach(args: varargs[untyped]): untyped 238 +nim forEachSerial foreach.html#forEachSerial.m,varargs[untyped] macro forEachSerial(args: varargs[untyped]): untyped 264 diff --git a/foreach_common.html b/foreach_common.html new file mode 100644 index 000000000..72da23d88 --- /dev/null +++ b/foreach_common.html @@ -0,0 +1,521 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/strided_iteration/foreach_common + + + + + + + + + +Arraymancer - src/arraymancer/laser/strided_iteration/foreach_common + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/strided_iteration/foreach_common

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc initForEach(params: NimNode; values, aliases, raw_ptrs: var NimNode;
+                 aliases_stmt, raw_ptrs_stmt: var NimNode;
+                 test_shapes: var NimNode) {....raises: [], tags: [], forbids: [].}
+
+ + +

Parse the input

+   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template stridedBodyTemplate(): untyped {.dirty.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template stridedChunkOffset(): untyped {.dirty.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template stridedVarsSetup(): untyped {.dirty.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/foreach_common.idx b/foreach_common.idx new file mode 100644 index 000000000..7088974f8 --- /dev/null +++ b/foreach_common.idx @@ -0,0 +1,6 @@ +nimTitle foreach_common foreach_common.html module src/arraymancer/laser/strided_iteration/foreach_common 0 +nim initForEach foreach_common.html#initForEach,NimNode,NimNode,NimNode,NimNode,NimNode,NimNode,NimNode proc initForEach(params: NimNode; values, aliases, raw_ptrs: var NimNode;\n aliases_stmt, raw_ptrs_stmt: var NimNode; test_shapes: var NimNode) 15 +nim stridedVarsSetup foreach_common.html#stridedVarsSetup.t template stridedVarsSetup(): untyped 79 +nim stridedChunkOffset foreach_common.html#stridedChunkOffset.t template stridedChunkOffset(): untyped 92 +nim stridedBodyTemplate foreach_common.html#stridedBodyTemplate.t template stridedBodyTemplate(): untyped 101 +heading Parse the input foreach_common.html#parse-the-input Parse the input 0 diff --git a/foreach_staged.html b/foreach_staged.html new file mode 100644 index 000000000..117014e91 --- /dev/null +++ b/foreach_staged.html @@ -0,0 +1,455 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/strided_iteration/foreach_staged + + + + + + + + + +Arraymancer - src/arraymancer/laser/strided_iteration/foreach_staged + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/strided_iteration/foreach_staged

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Macros

+
+
+
+
macro forEachStaged(args: varargs[untyped]): untyped
+
+ +

Staged optionally parallel iteration over one or more tensors This is useful if you need thread-local initialization or cleanup before the parallel loop Example usage for reduction

+

forEachStaged xi in x, yi in y: openmp_config: use_openmp: true use_simd: false nowait: true omp_grain_size: OMP_MEMORY_BOUND_GRAIN_SIZE iteration_kind: {contiguous, strided} # Default, "contiguous", "strided" are also possible before_loop: var local_sum = 0.T in_loop: local_sum += xi + yi after_loop: omp_critical: result += local_sum

+ +   Source +Edit + +
+
+ +
+ +
+
+
+

Exports

+
+ omp_suffix +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/foreach_staged.idx b/foreach_staged.idx new file mode 100644 index 000000000..914af527e --- /dev/null +++ b/foreach_staged.idx @@ -0,0 +1,2 @@ +nimTitle foreach_staged foreach_staged.html module src/arraymancer/laser/strided_iteration/foreach_staged 0 +nim forEachStaged foreach_staged.html#forEachStaged.m,varargs[untyped] macro forEachStaged(args: varargs[untyped]): untyped 318 diff --git a/functional.html b/functional.html new file mode 100644 index 000000000..832d39fd7 --- /dev/null +++ b/functional.html @@ -0,0 +1,436 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/private/functional + + + + + + + + + +Arraymancer - src/arraymancer/private/functional + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/private/functional

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Procs

+
+
+
+
proc concatMap[T](s: seq[T]; f: proc (ss: T): string): string {.noSideEffect,
+    effectsOf: f.}
+
+ + Map a function to a sequence of T and concatenate the result as string +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/functional.idx b/functional.idx new file mode 100644 index 000000000..1b7d87aa3 --- /dev/null +++ b/functional.idx @@ -0,0 +1,2 @@ +nimTitle functional functional.html module src/arraymancer/private/functional 0 +nim concatMap functional.html#concatMap,seq[T],proc(T) proc concatMap[T](s: seq[T]; f: proc (ss: T): string): string 21 diff --git a/gates_basic.html b/gates_basic.html new file mode 100644 index 000000000..7327d43e5 --- /dev/null +++ b/gates_basic.html @@ -0,0 +1,503 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/autograd/gates_basic + + + + + + + + + +Arraymancer - src/arraymancer/autograd/gates_basic + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/autograd/gates_basic

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
AddGate[TT] {.final.} = ref object of Gate[TT]
+
+ + +   Source +Edit + +
+
+
+
SubGate[TT] {.final.} = ref object of Gate[TT]
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc `+`[TT](a, b: Variable[TT]): Variable[TT]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `-`[TT](a, b: Variable[TT]): Variable[TT]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gates_basic.idx b/gates_basic.idx new file mode 100644 index 000000000..0d6147f76 --- /dev/null +++ b/gates_basic.idx @@ -0,0 +1,5 @@ +nimTitle gates_basic gates_basic.html module src/arraymancer/autograd/gates_basic 0 +nim AddGate gates_basic.html#AddGate type AddGate 21 +nim `+` gates_basic.html#+,Variable[TT],Variable[TT] proc `+`[TT](a, b: Variable[TT]): Variable[TT] 47 +nim SubGate gates_basic.html#SubGate type SubGate 60 +nim `-` gates_basic.html#-,Variable[TT],Variable[TT] proc `-`[TT](a, b: Variable[TT]): Variable[TT] 87 diff --git a/gates_blas.html b/gates_blas.html new file mode 100644 index 000000000..0f59ebc47 --- /dev/null +++ b/gates_blas.html @@ -0,0 +1,472 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/autograd/gates_blas + + + + + + + + + +Arraymancer - src/arraymancer/autograd/gates_blas + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/autograd/gates_blas

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
MatMulGate[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + TODO: generalize to C <- alpha AB + C +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc `*`[TT](a, b: Variable[TT]): Variable[TT]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gates_blas.idx b/gates_blas.idx new file mode 100644 index 000000000..c0c519829 --- /dev/null +++ b/gates_blas.idx @@ -0,0 +1,3 @@ +nimTitle gates_blas gates_blas.html module src/arraymancer/autograd/gates_blas 0 +nim MatMulGate gates_blas.html#MatMulGate type MatMulGate 18 +nim `*` gates_blas.html#*,Variable[TT],Variable[TT] proc `*`[TT](a, b: Variable[TT]): Variable[TT] 50 diff --git a/gates_hadamard.html b/gates_hadamard.html new file mode 100644 index 000000000..3864af5f9 --- /dev/null +++ b/gates_hadamard.html @@ -0,0 +1,472 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/autograd/gates_hadamard + + + + + + + + + +Arraymancer - src/arraymancer/autograd/gates_hadamard + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/autograd/gates_hadamard

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
HadamardGate[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc `*.`[TT](a, b: Variable[TT]): Variable[TT]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gates_hadamard.idx b/gates_hadamard.idx new file mode 100644 index 000000000..225cadf69 --- /dev/null +++ b/gates_hadamard.idx @@ -0,0 +1,3 @@ +nimTitle gates_hadamard gates_hadamard.html module src/arraymancer/autograd/gates_hadamard 0 +nim HadamardGate gates_hadamard.html#HadamardGate type HadamardGate 18 +nim `*.` gates_hadamard.html#*.,Variable[TT],Variable[TT] proc `*.`[TT](a, b: Variable[TT]): Variable[TT] 49 diff --git a/gates_reduce.html b/gates_reduce.html new file mode 100644 index 000000000..9401b3b9d --- /dev/null +++ b/gates_reduce.html @@ -0,0 +1,518 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/autograd/gates_reduce + + + + + + + + + +Arraymancer - src/arraymancer/autograd/gates_reduce + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/autograd/gates_reduce

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
MeanGate[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + TODO: generalize to C <- alpha AB + C +   Source +Edit + +
+
+
+
SumGate[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + TODO: generalize to C <- alpha AB + C +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc mean[TT](a: Variable[TT]): Variable[TT]
+
+ + +   Source +Edit + +
+
+
+
proc mean[TT](a: Variable[TT]; axis: Natural): Variable[TT]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc sum[TT](a: Variable[TT]): Variable[TT]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gates_reduce.idx b/gates_reduce.idx new file mode 100644 index 000000000..5f1d34a50 --- /dev/null +++ b/gates_reduce.idx @@ -0,0 +1,7 @@ +nimTitle gates_reduce gates_reduce.html module src/arraymancer/autograd/gates_reduce 0 +nim MeanGate gates_reduce.html#MeanGate type MeanGate 20 +nim mean gates_reduce.html#mean,Variable[TT] proc mean[TT](a: Variable[TT]): Variable[TT] 86 +nim mean gates_reduce.html#mean,Variable[TT],Natural proc mean[TT](a: Variable[TT]; axis: Natural): Variable[TT] 96 +nim SumGate gates_reduce.html#SumGate type SumGate 106 +nim sum gates_reduce.html#sum,Variable[TT] proc sum[TT](a: Variable[TT]): Variable[TT] 138 +nimgrp mean gates_reduce.html#mean-procs-all proc 86 diff --git a/gates_shapeshifting_concat_split.html b/gates_shapeshifting_concat_split.html new file mode 100644 index 000000000..df04921b0 --- /dev/null +++ b/gates_shapeshifting_concat_split.html @@ -0,0 +1,503 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/autograd/gates_shapeshifting_concat_split + + + + + + + + + +Arraymancer - src/arraymancer/autograd/gates_shapeshifting_concat_split + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/autograd/gates_shapeshifting_concat_split

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
ChunkSplitGate[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc chunk[TT](v: Variable[TT]; nb_chunks: Positive; axis: Natural): seq[
+    Variable[TT]]
+
+ +

Splits a Variable into n chunks along the specified axis.

+

In case a tensor cannot be split evenly, with la == length_axis, n = n_chunks it returns la mod n subtensors of size (la div n) + 1 the rest of size la div n. So split sizes at most differs by 1

+

This is consistent with numpy array_split

+ +   Source +Edit + +
+
+ +
+
+
+
proc stack[TT](variables: varargs[Variable[TT]]; axis = 0): Variable[TT]
+
+ +

Join a sequence of Variables along a new axis into a new Variable. All variables must be of the same shape

+

Input:

+
  • a variable
  • +
  • an axis (dimension)
  • +
+

Returns:

+
  • a new stacked variable along the new axis
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gates_shapeshifting_concat_split.idx b/gates_shapeshifting_concat_split.idx new file mode 100644 index 000000000..b5d71afa4 --- /dev/null +++ b/gates_shapeshifting_concat_split.idx @@ -0,0 +1,4 @@ +nimTitle gates_shapeshifting_concat_split gates_shapeshifting_concat_split.html module src/arraymancer/autograd/gates_shapeshifting_concat_split 0 +nim stack gates_shapeshifting_concat_split.html#stack,varargs[Variable[TT]],int proc stack[TT](variables: varargs[Variable[TT]]; axis = 0): Variable[TT] 51 +nim ChunkSplitGate gates_shapeshifting_concat_split.html#ChunkSplitGate type ChunkSplitGate 83 +nim chunk gates_shapeshifting_concat_split.html#chunk,Variable[TT],Positive,Natural proc chunk[TT](v: Variable[TT]; nb_chunks: Positive; axis: Natural): seq[Variable[TT]] 116 diff --git a/gates_shapeshifting_views.html b/gates_shapeshifting_views.html new file mode 100644 index 000000000..94f0aeb9d --- /dev/null +++ b/gates_shapeshifting_views.html @@ -0,0 +1,588 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/autograd/gates_shapeshifting_views + + + + + + + + + +Arraymancer - src/arraymancer/autograd/gates_shapeshifting_views + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/autograd/gates_shapeshifting_views

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
ReshapeGate[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc flatten[TT](a: Variable[TT]): Variable[TT]
+
+ + Input:
  • A variable
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc reshape[TT](a: Variable[TT]; shape: Metadata): Variable[TT]
+
+ + Input:
  • A variable
  • +
  • A shape
  • +
+ +   Source +Edit + +
+
+
+
proc reshape[TT](a: Variable[TT]; shape: varargs[int]): Variable[TT]
+
+ + Input:
  • A variable
  • +
  • A shape
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc squeeze[TT](v`gensym0: Variable[TT]; axis`gensym0: Natural): Variable[TT]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc unsqueeze[TT](v`gensym1: Variable[TT]; axis`gensym1: Natural): Variable[TT]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template `[]`[TT](v: Variable[TT]; args: varargs[untyped]): Variable[TT]
+
+ + Slice the tensor contained by the dynamic graph Variable Input:
  • a Variable
  • +
+

Output:

+
  • a sliced Variable
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gates_shapeshifting_views.idx b/gates_shapeshifting_views.idx new file mode 100644 index 000000000..7001b62bf --- /dev/null +++ b/gates_shapeshifting_views.idx @@ -0,0 +1,9 @@ +nimTitle gates_shapeshifting_views gates_shapeshifting_views.html module src/arraymancer/autograd/gates_shapeshifting_views 0 +nim `[]` gates_shapeshifting_views.html#[].t,Variable[TT],varargs[untyped] template `[]`[TT](v: Variable[TT]; args: varargs[untyped]): Variable[TT] 19 +nim ReshapeGate gates_shapeshifting_views.html#ReshapeGate type ReshapeGate 56 +nim reshape gates_shapeshifting_views.html#reshape,Variable[TT],varargs[int] proc reshape[TT](a: Variable[TT]; shape: varargs[int]): Variable[TT] 94 +nim reshape gates_shapeshifting_views.html#reshape,Variable[TT],Metadata proc reshape[TT](a: Variable[TT]; shape: Metadata): Variable[TT] 100 +nim flatten gates_shapeshifting_views.html#flatten,Variable[TT] proc flatten[TT](a: Variable[TT]): Variable[TT] 106 +nim squeeze gates_shapeshifting_views.html#squeeze,,Natural proc squeeze[TT](v`gensym0: Variable[TT]; axis`gensym0: Natural): Variable[TT] 154 +nim unsqueeze gates_shapeshifting_views.html#unsqueeze,,Natural proc unsqueeze[TT](v`gensym1: Variable[TT]; axis`gensym1: Natural): Variable[TT] 155 +nimgrp reshape gates_shapeshifting_views.html#reshape-procs-all proc 94 diff --git a/gcn.html b/gcn.html new file mode 100644 index 000000000..e70bd4c50 --- /dev/null +++ b/gcn.html @@ -0,0 +1,583 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/layers/gcn + + + + + + + + + +Arraymancer - src/arraymancer/nn/layers/gcn + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/layers/gcn

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
GCNGate[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+
+
GCNLayer[T] = object
+  weight*: Variable[Tensor[T]]
+  bias*: Variable[Tensor[T]]
+
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc forward[T](self: GCNLayer[T]; input, adjacency: Variable[Tensor[T]]): Variable[
+    Tensor[T]]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gcn[TT](input, adjacency, weight: Variable[TT]; bias: Variable[TT] = nil): Variable[
+    TT]
+
+ + Input: +

Return:

+
  • (AX)W+b
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc init[T](ctx: Context[Tensor[T]]; layerType: typedesc[GCNLayer[T]];
+             numInput, numOutput: int): GCNLayer[T]
+
+ + Initializes a graph convolutional layer with num_input input features and num_output output features. Using Kaiming He initialisation for weights to provide decent performance in most cases. Biases are set to zero. +   Source +Edit + +
+
+ +
+
+
+
func inShape[T](self: GCNLayer[T]): seq[int]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func outShape[T](self: GCNLayer[T]): seq[int]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gcn.idx b/gcn.idx new file mode 100644 index 000000000..ac9bc6440 --- /dev/null +++ b/gcn.idx @@ -0,0 +1,8 @@ +nimTitle gcn gcn.html module src/arraymancer/nn/layers/gcn 0 +nim GCNGate gcn.html#GCNGate type GCNGate 20 +nim gcn gcn.html#gcn,Variable[TT],Variable[TT],Variable[TT],Variable[TT] proc gcn[TT](input, adjacency, weight: Variable[TT]; bias: Variable[TT] = nil): Variable[\n TT] 71 +nim GCNLayer gcn.html#GCNLayer object GCNLayer 108 +nim init gcn.html#init,Context[Tensor[T]],typedesc[GCNLayer[T]],int,int proc init[T](ctx: Context[Tensor[T]]; layerType: typedesc[GCNLayer[T]];\n numInput, numOutput: int): GCNLayer[T] 112 +nim forward gcn.html#forward,GCNLayer[T],Variable[Tensor[T]],Variable[Tensor[T]] proc forward[T](self: GCNLayer[T]; input, adjacency: Variable[Tensor[T]]): Variable[\n Tensor[T]] 124 +nim outShape gcn.html#outShape,GCNLayer[T] proc outShape[T](self: GCNLayer[T]): seq[int] 127 +nim inShape gcn.html#inShape,GCNLayer[T] proc inShape[T](self: GCNLayer[T]): seq[int] 129 diff --git a/gemm.html b/gemm.html new file mode 100644 index 000000000..cd7835e5f --- /dev/null +++ b/gemm.html @@ -0,0 +1,493 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/primitives/matrix_multiplication/gemm + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc gebp_mkernel[T; ukernel: static MicroKernel](mc, nc, kc: int; alpha: T;
+    packA, packB: ptr UncheckedArray[T]; beta: T; mcncC: MatrixView[T])
+
+ + Macro kernel, multiply: + +   Source +Edit + +
+
+ +
+
+
+
proc gemm_strided[T: SomeNumber and not (uint32 | uint64 | uint | int)](
+    M, N, K: int; alpha: T; A: ptr T; rowStrideA, colStrideA: int; B: ptr T;
+    rowStrideB, colStrideB: int; beta: T; C: ptr T; rowStrideC, colStrideC: int)
+
+ + +   Source +Edit + +
+
+
+
proc gemm_strided[T: uint32 | uint64 | uint | int](M, N, K: int; alpha: T;
+    A: ptr T; rowStrideA, colStrideA: int; B: ptr T;
+    rowStrideB, colStrideB: int; beta: T; C: ptr T; rowStrideC, colStrideC: int)
+
+ + Overload to avoid bloating the code size with generics monomorphization +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm.idx b/gemm.idx new file mode 100644 index 000000000..d30cd6cfe --- /dev/null +++ b/gemm.idx @@ -0,0 +1,5 @@ +nimTitle gemm gemm.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm 0 +nim gebp_mkernel gemm.html#gebp_mkernel,int,int,int,T,ptr.UncheckedArray[T],ptr.UncheckedArray[T],T,MatrixView[T] proc gebp_mkernel[T; ukernel: static MicroKernel](mc, nc, kc: int; alpha: T;\n packA, packB: ptr UncheckedArray[T]; beta: T; mcncC: MatrixView[T]) 55 +nim gemm_strided gemm.html#gemm_strided,int,int,int,T,ptr.T,int,int,ptr.T,int,int,T,ptr.T,int,int proc gemm_strided[T: SomeNumber and not (uint32 | uint64 | uint | int)](M, N, K: int;\n alpha: T; A: ptr T; rowStrideA, colStrideA: int; B: ptr T;\n rowStrideB, colStrideB: int; beta: T; C: ptr T; rowStrideC, colStrideC: int) 190 +nim gemm_strided gemm.html#gemm_strided,int,int,int,T,ptr.T,int,int,ptr.T,int,int,T,ptr.T,int,int_2 proc gemm_strided[T: uint32 | uint64 | uint | int](M, N, K: int; alpha: T; A: ptr T;\n rowStrideA, colStrideA: int; B: ptr T; rowStrideB, colStrideB: int; beta: T;\n C: ptr T; rowStrideC, colStrideC: int) 273 +nimgrp gemmstrided gemm.html#gemm_strided-procs-all proc 190 diff --git a/gemm_packing.html b/gemm_packing.html new file mode 100644 index 000000000..bd4ff784c --- /dev/null +++ b/gemm_packing.html @@ -0,0 +1,473 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/primitives/matrix_multiplication/gemm_packing + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm_packing + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm_packing

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc pack_A_mc_kc[T; ukernel: static MicroKernel](
+    packedA: ptr UncheckedArray[T]; mc, kc: int; A: MatrixView[T])
+
+ +

Packs panel kc, mc into buffer รƒ (size ~half-L2 cache) Pads if needed Note that A is of shape M, K so it is transposed.

+

Concretely the outer dimension of packed matrices is k so that Ci, j = Ai, k * Bk, j does not require strided access

+ +   Source +Edit + +
+
+ +
+
+
+
proc pack_B_kc_nc[T; ukernel: static MicroKernel](
+    packedB: ptr UncheckedArray[T]; kc, nc: int; B: MatrixView[T])
+
+ +

Packs panel kc, nc for ~B (half-L1 cache) Pads if needed

+

Concretely the outer dimension of packed matrices is k so that Ci, j = Ai, k * Bk, j does not require strided access

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm_packing.idx b/gemm_packing.idx new file mode 100644 index 000000000..0e3691b37 --- /dev/null +++ b/gemm_packing.idx @@ -0,0 +1,3 @@ +nimTitle gemm_packing gemm_packing.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm_packing 0 +nim pack_A_mc_kc gemm_packing.html#pack_A_mc_kc,ptr.UncheckedArray[T],int,int,MatrixView[T] proc pack_A_mc_kc[T; ukernel: static MicroKernel](packedA: ptr UncheckedArray[T];\n mc, kc: int; A: MatrixView[T]) 24 +nim pack_B_kc_nc gemm_packing.html#pack_B_kc_nc,ptr.UncheckedArray[T],int,int,MatrixView[T] proc pack_B_kc_nc[T; ukernel: static MicroKernel](packedB: ptr UncheckedArray[T];\n kc, nc: int; B: MatrixView[T]) 63 diff --git a/gemm_prepacked.html b/gemm_prepacked.html new file mode 100644 index 000000000..b8ede8e59 --- /dev/null +++ b/gemm_prepacked.html @@ -0,0 +1,586 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/primitives/matrix_multiplication/gemm_prepacked + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm_prepacked + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm_prepacked

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc gemm_packed[T: SomeNumber](M, N, K: int; alpha: T;
+                                packedA: ptr (T or UncheckedArray[T]);
+                                packedB: ptr (T or UncheckedArray[T]); beta: T;
+                                C: ptr (T or UncheckedArray[T]);
+                                rowStrideC, colStrideC: int)
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gemm_prepackA[T](dst_packedA: ptr (T or UncheckedArray[T]); M, N, K: int;
+                      src_A: ptr T; rowStrideA, colStrideA: int)
+
+ +

Prepack matrix A of shape MxK and strides rowStrideA and colStrideA for matrix multiplication. A must be 64-bit aligned.

+

For optimal performance packing is machine and architecture dependent i.e. it depends on detected features like AVX and number of cores and may depend on your machine cache sizes in the future. It is unsafe to store or serialize it.

+ +   Source +Edit + +
+
+ +
+
+
+
func gemm_prepackA_mem_required(T: typedesc; M, N, K: int): int
+
+ + Returns the amount of memory that needs to be preallocated to pack matrix B. +   Source +Edit + +
+
+ +
+
+
+
func gemm_prepackA_mem_required_impl(ukernel: static MicroKernel; T: typedesc;
+                                     M, N, K: int): int
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gemm_prepackB[T](dst_packedB: ptr (T or UncheckedArray[T]); M, N, K: int;
+                      src_B: ptr T; rowStrideB, colStrideB: int)
+
+ +

Prepack matrix B of shape KxN and strides rowStrideB and colStrideB for matrix multiplication. B must be 64-bit aligned.

+

For optimal performance packing is machine and architecture dependent i.e. it depends on detected features like AVX and number of cores and may depend on your machine cache sizes in the future. It is unsafe to store or serialize it.

+ +   Source +Edit + +
+
+ +
+
+
+
func gemm_prepackB_mem_required(T: type; M, N, K: int): int
+
+ + Returns the amount of memory that needs to be preallocated to pack matrix B. +   Source +Edit + +
+
+ +
+
+
+
func gemm_prepackB_mem_required_impl(ukernel: static MicroKernel; T: typedesc;
+                                     M, N, K: int): int
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm_prepacked.idx b/gemm_prepacked.idx new file mode 100644 index 000000000..e980477e1 --- /dev/null +++ b/gemm_prepacked.idx @@ -0,0 +1,8 @@ +nimTitle gemm_prepacked gemm_prepacked.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm_prepacked 0 +nim gemm_prepackB_mem_required_impl gemm_prepacked.html#gemm_prepackB_mem_required_impl,staticMicroKernel,typedesc,int,int,int proc gemm_prepackB_mem_required_impl(ukernel: static MicroKernel; T: typedesc;\n M, N, K: int): int 64 +nim gemm_prepackB_mem_required gemm_prepacked.html#gemm_prepackB_mem_required,type,int,int,int proc gemm_prepackB_mem_required(T: type; M, N, K: int): int 77 +nim gemm_prepackB gemm_prepacked.html#gemm_prepackB,ptr.,int,int,int,ptr.T,int,int proc gemm_prepackB[T](dst_packedB: ptr (T or UncheckedArray[T]); M, N, K: int;\n src_B: ptr T; rowStrideB, colStrideB: int) 112 +nim gemm_prepackA_mem_required_impl gemm_prepacked.html#gemm_prepackA_mem_required_impl,staticMicroKernel,typedesc,int,int,int proc gemm_prepackA_mem_required_impl(ukernel: static MicroKernel; T: typedesc;\n M, N, K: int): int 144 +nim gemm_prepackA_mem_required gemm_prepacked.html#gemm_prepackA_mem_required,typedesc,int,int,int proc gemm_prepackA_mem_required(T: typedesc; M, N, K: int): int 158 +nim gemm_prepackA gemm_prepacked.html#gemm_prepackA,ptr.,int,int,int,ptr.T,int,int proc gemm_prepackA[T](dst_packedA: ptr (T or UncheckedArray[T]); M, N, K: int;\n src_A: ptr T; rowStrideA, colStrideA: int) 195 +nim gemm_packed gemm_prepacked.html#gemm_packed,int,int,int,T,ptr.,ptr.,T,ptr.,int,int proc gemm_packed[T: SomeNumber](M, N, K: int; alpha: T;\n packedA: ptr (T or UncheckedArray[T]);\n packedB: ptr (T or UncheckedArray[T]); beta: T;\n C: ptr (T or UncheckedArray[T]);\n rowStrideC, colStrideC: int) 276 diff --git a/gemm_tiling.html b/gemm_tiling.html new file mode 100644 index 000000000..f567457e4 --- /dev/null +++ b/gemm_tiling.html @@ -0,0 +1,734 @@ + + + + + + + + + + + + + + + + + + 
+src/arraymancer/laser/primitives/matrix_multiplication/gemm_tiling + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm_tiling + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm_tiling

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
CPUFeatureX86 = enum
+  x86_Generic, x86_SSE, x86_SSE2, x86_SSE4_1, x86_AVX, x86_AVX_FMA, x86_AVX2,
+  x86_AVX512
+
+ + +   Source +Edit + +
+
+
+
MicroKernel = object
+  mr*, nr*: int
+  cpu_simd*: CPUFeatureX86
+  nb_scalars*: int
+  nb_vecs_nr*: int
+  c_unit_stride*: bool
+  pt*: int
+
+
+ + +   Source +Edit + +
+
+
+
Tiles[T] = ref object
+  a*: ptr UncheckedArray[T]
+  b*: ptr UncheckedArray[T]
+  mc*, nc*, kc*: int
+  ic_num_tasks*: int
+  upanelA_size*: int
+  
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
func get_num_tiles(dim_size, tile_size: int): int {.inline, ...raises: [],
+    tags: [], forbids: [].}
+
+ + Get the number of tiles along a dimension depending on the tile size +   Source +Edit + +
+
+ +
+
+
+
proc newTiles(ukernel: static MicroKernel; T: typedesc; M, N, K: Natural): Tiles[
+    T]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func partitionMNK(ukernel: static MicroKernel; T: typedesc; M, N, K: Natural): tuple[
+    mc, nc, kc: int]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func x86_ukernel(cpu: CPUFeatureX86; T: typedesc; c_unit_stride: bool): MicroKernel
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+
+

Macros

+
+
+
+
macro extract_c_unit_stride(ukernel: static MicroKernel): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
macro extract_cpu_simd(ukernel: static MicroKernel): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
macro extract_mr(ukernel: static MicroKernel): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
macro extract_nb_scalars(ukernel: static MicroKernel): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
macro extract_nb_vecs_nr(ukernel: static MicroKernel): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
macro extract_nr(ukernel: static MicroKernel): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
macro extract_pt(ukernel: static MicroKernel): untyped
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm_tiling.idx b/gemm_tiling.idx new file mode 100644 index 000000000..98317e12b --- /dev/null +++ b/gemm_tiling.idx @@ -0,0 +1,23 @@ +nimTitle gemm_tiling gemm_tiling.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm_tiling 0 +nim MicroKernel gemm_tiling.html#MicroKernel object MicroKernel 62 +nim x86_Generic gemm_tiling.html#x86_Generic CPUFeatureX86.x86_Generic 74 +nim x86_SSE gemm_tiling.html#x86_SSE CPUFeatureX86.x86_SSE 74 +nim x86_SSE2 gemm_tiling.html#x86_SSE2 CPUFeatureX86.x86_SSE2 74 +nim x86_SSE4_1 gemm_tiling.html#x86_SSE4_1 CPUFeatureX86.x86_SSE4_1 74 +nim x86_AVX gemm_tiling.html#x86_AVX CPUFeatureX86.x86_AVX 74 +nim x86_AVX_FMA gemm_tiling.html#x86_AVX_FMA CPUFeatureX86.x86_AVX_FMA 74 +nim x86_AVX2 gemm_tiling.html#x86_AVX2 CPUFeatureX86.x86_AVX2 74 +nim x86_AVX512 gemm_tiling.html#x86_AVX512 CPUFeatureX86.x86_AVX512 74 +nim CPUFeatureX86 gemm_tiling.html#CPUFeatureX86 enum CPUFeatureX86 74 +nim x86_ukernel gemm_tiling.html#x86_ukernel,CPUFeatureX86,typedesc,bool proc x86_ukernel(cpu: CPUFeatureX86; T: typedesc; c_unit_stride: bool): MicroKernel 199 +nim extract_mr gemm_tiling.html#extract_mr.m,staticMicroKernel macro extract_mr(ukernel: static MicroKernel): untyped 226 +nim extract_nr gemm_tiling.html#extract_nr.m,staticMicroKernel macro extract_nr(ukernel: static MicroKernel): untyped 228 +nim extract_cpu_simd gemm_tiling.html#extract_cpu_simd.m,staticMicroKernel macro extract_cpu_simd(ukernel: static MicroKernel): untyped 230 +nim extract_nb_scalars gemm_tiling.html#extract_nb_scalars.m,staticMicroKernel macro extract_nb_scalars(ukernel: static MicroKernel): untyped 233 +nim extract_nb_vecs_nr gemm_tiling.html#extract_nb_vecs_nr.m,staticMicroKernel macro extract_nb_vecs_nr(ukernel: static MicroKernel): untyped 235 +nim extract_c_unit_stride gemm_tiling.html#extract_c_unit_stride.m,staticMicroKernel macro extract_c_unit_stride(ukernel: static MicroKernel): untyped 237 +nim extract_pt 
gemm_tiling.html#extract_pt.m,staticMicroKernel macro extract_pt(ukernel: static MicroKernel): untyped 239 +nim Tiles gemm_tiling.html#Tiles type Tiles 251 +nim get_num_tiles gemm_tiling.html#get_num_tiles,int,int proc get_num_tiles(dim_size, tile_size: int): int 272 +nim partitionMNK gemm_tiling.html#partitionMNK,staticMicroKernel,typedesc,Natural,Natural,Natural proc partitionMNK(ukernel: static MicroKernel; T: typedesc; M, N, K: Natural): tuple[\n mc, nc, kc: int] 276 +nim newTiles gemm_tiling.html#newTiles,staticMicroKernel,typedesc,Natural,Natural,Natural proc newTiles(ukernel: static MicroKernel; T: typedesc; M, N, K: Natural): Tiles[T] 312 diff --git a/gemm_ukernel_avx.html b/gemm_ukernel_avx.html new file mode 100644 index 000000000..f5cf2a7d4 --- /dev/null +++ b/gemm_ukernel_avx.html @@ -0,0 +1,537 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc gebb_ukernel_edge_float32_x86_AVX[ukernel: static MicroKernel](
+    mr`gensym3, nr`gensym3, kc`gensym3: int; alpha`gensym3: float32;
+    packedA`gensym3, packedB`gensym3: ptr UncheckedArray[float32];
+    beta`gensym3: float32; vC`gensym3: MatrixView[float32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_edge_float64_x86_AVX[ukernel: static MicroKernel](
+    mr`gensym7, nr`gensym7, kc`gensym7: int; alpha`gensym7: float64;
+    packedA`gensym7, packedB`gensym7: ptr UncheckedArray[float64];
+    beta`gensym7: float64; vC`gensym7: MatrixView[float64]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_float32_x86_AVX[ukernel: static MicroKernel](kc`gensym2: int;
+    alpha`gensym2: float32;
+    packedA`gensym2, packedB`gensym2: ptr UncheckedArray[float32];
+    beta`gensym2: float32; vC`gensym2: MatrixView[float32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_float64_x86_AVX[ukernel: static MicroKernel](kc`gensym6: int;
+    alpha`gensym6: float64;
+    packedA`gensym6, packedB`gensym6: ptr UncheckedArray[float64];
+    beta`gensym6: float64; vC`gensym6: MatrixView[float64]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm_ukernel_avx.idx b/gemm_ukernel_avx.idx new file mode 100644 index 000000000..76a643f2c --- /dev/null +++ b/gemm_ukernel_avx.idx @@ -0,0 +1,5 @@ +nimTitle gemm_ukernel_avx gemm_ukernel_avx.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx 0 +nim gebb_ukernel_float32_x86_AVX gemm_ukernel_avx.html#gebb_ukernel_float32_x86_AVX,int,float32,ptr.,ptr.,float32, proc gebb_ukernel_float32_x86_AVX[ukernel: static MicroKernel](kc`gensym2: int;\n alpha`gensym2: float32;\n packedA`gensym2, packedB`gensym2: ptr UncheckedArray[float32];\n beta`gensym2: float32; vC`gensym2: MatrixView[float32]) 51 +nim gebb_ukernel_edge_float32_x86_AVX gemm_ukernel_avx.html#gebb_ukernel_edge_float32_x86_AVX,int,int,int,float32,ptr.,ptr.,float32, proc gebb_ukernel_edge_float32_x86_AVX[ukernel: static MicroKernel](\n mr`gensym3, nr`gensym3, kc`gensym3: int; alpha`gensym3: float32;\n packedA`gensym3, packedB`gensym3: ptr UncheckedArray[float32];\n beta`gensym3: float32; vC`gensym3: MatrixView[float32]) 21 +nim gebb_ukernel_float64_x86_AVX gemm_ukernel_avx.html#gebb_ukernel_float64_x86_AVX,int,float64,ptr.,ptr.,float64, proc gebb_ukernel_float64_x86_AVX[ukernel: static MicroKernel](kc`gensym6: int;\n alpha`gensym6: float64;\n packedA`gensym6, packedB`gensym6: ptr UncheckedArray[float64];\n beta`gensym6: float64; vC`gensym6: MatrixView[float64]) 51 +nim gebb_ukernel_edge_float64_x86_AVX gemm_ukernel_avx.html#gebb_ukernel_edge_float64_x86_AVX,int,int,int,float64,ptr.,ptr.,float64, proc gebb_ukernel_edge_float64_x86_AVX[ukernel: static MicroKernel](\n mr`gensym7, nr`gensym7, kc`gensym7: int; alpha`gensym7: float64;\n packedA`gensym7, packedB`gensym7: ptr UncheckedArray[float64];\n beta`gensym7: float64; vC`gensym7: MatrixView[float64]) 36 diff --git a/gemm_ukernel_avx2.html b/gemm_ukernel_avx2.html new file mode 100644 index 000000000..9cd537a4a --- /dev/null +++ b/gemm_ukernel_avx2.html @@ -0,0 +1,481 @@ + + + + + + + + + + + + + + + + + + 
+src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx2 + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx2 + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx2

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc gebb_ukernel_edge_int32_x86_AVX2[ukernel: static MicroKernel](
+    mr`gensym3, nr`gensym3, kc`gensym3: int; alpha`gensym3: int32;
+    packedA`gensym3, packedB`gensym3: ptr UncheckedArray[int32];
+    beta`gensym3: int32; vC`gensym3: MatrixView[int32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_int32_x86_AVX2[ukernel: static MicroKernel](kc`gensym2: int;
+    alpha`gensym2: int32;
+    packedA`gensym2, packedB`gensym2: ptr UncheckedArray[int32];
+    beta`gensym2: int32; vC`gensym2: MatrixView[int32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm_ukernel_avx2.idx b/gemm_ukernel_avx2.idx new file mode 100644 index 000000000..55bfbfb97 --- /dev/null +++ b/gemm_ukernel_avx2.idx @@ -0,0 +1,3 @@ +nimTitle gemm_ukernel_avx2 gemm_ukernel_avx2.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx2 0 +nim gebb_ukernel_int32_x86_AVX2 gemm_ukernel_avx2.html#gebb_ukernel_int32_x86_AVX2,int,int32,ptr.,ptr.,int32, proc gebb_ukernel_int32_x86_AVX2[ukernel: static MicroKernel](kc`gensym2: int;\n alpha`gensym2: int32;\n packedA`gensym2, packedB`gensym2: ptr UncheckedArray[int32];\n beta`gensym2: int32; vC`gensym2: MatrixView[int32]) 51 +nim gebb_ukernel_edge_int32_x86_AVX2 gemm_ukernel_avx2.html#gebb_ukernel_edge_int32_x86_AVX2,int,int,int,int32,ptr.,ptr.,int32, proc gebb_ukernel_edge_int32_x86_AVX2[ukernel: static MicroKernel](\n mr`gensym3, nr`gensym3, kc`gensym3: int; alpha`gensym3: int32;\n packedA`gensym3, packedB`gensym3: ptr UncheckedArray[int32];\n beta`gensym3: int32; vC`gensym3: MatrixView[int32]) 27 diff --git a/gemm_ukernel_avx512.html b/gemm_ukernel_avx512.html new file mode 100644 index 000000000..7a4d9b573 --- /dev/null +++ b/gemm_ukernel_avx512.html @@ -0,0 +1,649 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx512 + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx512 + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx512

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

For the C codegen of AVX512 instructions to be valid, we need the following flag:See: https://stackoverflow.com/a/63711952 for a script to find the required compilation flags for specific SIMD functions.

+ +
+

Procs

+
+
+
+
proc gebb_ukernel_edge_float32_x86_AVX512[ukernel: static MicroKernel](
+    mr`gensym3, nr`gensym3, kc`gensym3: int; alpha`gensym3: float32;
+    packedA`gensym3, packedB`gensym3: ptr UncheckedArray[float32];
+    beta`gensym3: float32; vC`gensym3: MatrixView[float32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_edge_float64_x86_AVX512[ukernel: static MicroKernel](
+    mr`gensym7, nr`gensym7, kc`gensym7: int; alpha`gensym7: float64;
+    packedA`gensym7, packedB`gensym7: ptr UncheckedArray[float64];
+    beta`gensym7: float64; vC`gensym7: MatrixView[float64]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_edge_int32_x86_AVX512[ukernel: static MicroKernel](
+    mr`gensym11, nr`gensym11, kc`gensym11: int; alpha`gensym11: int32;
+    packedA`gensym11, packedB`gensym11: ptr UncheckedArray[int32];
+    beta`gensym11: int32; vC`gensym11: MatrixView[int32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_edge_int64_x86_AVX512[ukernel: static MicroKernel](
+    mr`gensym15, nr`gensym15, kc`gensym15: int; alpha`gensym15: int64;
+    packedA`gensym15, packedB`gensym15: ptr UncheckedArray[int64];
+    beta`gensym15: int64; vC`gensym15: MatrixView[int64]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_float32_x86_AVX512[ukernel: static MicroKernel](
+    kc`gensym2: int; alpha`gensym2: float32;
+    packedA`gensym2, packedB`gensym2: ptr UncheckedArray[float32];
+    beta`gensym2: float32; vC`gensym2: MatrixView[float32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_float64_x86_AVX512[ukernel: static MicroKernel](
+    kc`gensym6: int; alpha`gensym6: float64;
+    packedA`gensym6, packedB`gensym6: ptr UncheckedArray[float64];
+    beta`gensym6: float64; vC`gensym6: MatrixView[float64]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_int32_x86_AVX512[ukernel: static MicroKernel](
+    kc`gensym10: int; alpha`gensym10: int32;
+    packedA`gensym10, packedB`gensym10: ptr UncheckedArray[int32];
+    beta`gensym10: int32; vC`gensym10: MatrixView[int32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_int64_x86_AVX512[ukernel: static MicroKernel](
+    kc`gensym14: int; alpha`gensym14: int64;
+    packedA`gensym14, packedB`gensym14: ptr UncheckedArray[int64];
+    beta`gensym14: int64; vC`gensym14: MatrixView[int64]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm_ukernel_avx512.idx b/gemm_ukernel_avx512.idx new file mode 100644 index 000000000..890cd8b66 --- /dev/null +++ b/gemm_ukernel_avx512.idx @@ -0,0 +1,9 @@ +nimTitle gemm_ukernel_avx512 gemm_ukernel_avx512.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx512 0 +nim gebb_ukernel_float32_x86_AVX512 gemm_ukernel_avx512.html#gebb_ukernel_float32_x86_AVX512,int,float32,ptr.,ptr.,float32, proc gebb_ukernel_float32_x86_AVX512[ukernel: static MicroKernel](kc`gensym2: int;\n alpha`gensym2: float32;\n packedA`gensym2, packedB`gensym2: ptr UncheckedArray[float32];\n beta`gensym2: float32; vC`gensym2: MatrixView[float32]) 51 +nim gebb_ukernel_edge_float32_x86_AVX512 gemm_ukernel_avx512.html#gebb_ukernel_edge_float32_x86_AVX512,int,int,int,float32,ptr.,ptr.,float32, proc gebb_ukernel_edge_float32_x86_AVX512[ukernel: static MicroKernel](\n mr`gensym3, nr`gensym3, kc`gensym3: int; alpha`gensym3: float32;\n packedA`gensym3, packedB`gensym3: ptr UncheckedArray[float32];\n beta`gensym3: float32; vC`gensym3: MatrixView[float32]) 21 +nim gebb_ukernel_float64_x86_AVX512 gemm_ukernel_avx512.html#gebb_ukernel_float64_x86_AVX512,int,float64,ptr.,ptr.,float64, proc gebb_ukernel_float64_x86_AVX512[ukernel: static MicroKernel](kc`gensym6: int;\n alpha`gensym6: float64;\n packedA`gensym6, packedB`gensym6: ptr UncheckedArray[float64];\n beta`gensym6: float64; vC`gensym6: MatrixView[float64]) 51 +nim gebb_ukernel_edge_float64_x86_AVX512 gemm_ukernel_avx512.html#gebb_ukernel_edge_float64_x86_AVX512,int,int,int,float64,ptr.,ptr.,float64, proc gebb_ukernel_edge_float64_x86_AVX512[ukernel: static MicroKernel](\n mr`gensym7, nr`gensym7, kc`gensym7: int; alpha`gensym7: float64;\n packedA`gensym7, packedB`gensym7: ptr UncheckedArray[float64];\n beta`gensym7: float64; vC`gensym7: MatrixView[float64]) 36 +nim gebb_ukernel_int32_x86_AVX512 gemm_ukernel_avx512.html#gebb_ukernel_int32_x86_AVX512,int,int32,ptr.,ptr.,int32, proc 
gebb_ukernel_int32_x86_AVX512[ukernel: static MicroKernel](kc`gensym10: int;\n alpha`gensym10: int32;\n packedA`gensym10, packedB`gensym10: ptr UncheckedArray[int32];\n beta`gensym10: int32; vC`gensym10: MatrixView[int32]) 51 +nim gebb_ukernel_edge_int32_x86_AVX512 gemm_ukernel_avx512.html#gebb_ukernel_edge_int32_x86_AVX512,int,int,int,int32,ptr.,ptr.,int32, proc gebb_ukernel_edge_int32_x86_AVX512[ukernel: static MicroKernel](\n mr`gensym11, nr`gensym11, kc`gensym11: int; alpha`gensym11: int32;\n packedA`gensym11, packedB`gensym11: ptr UncheckedArray[int32];\n beta`gensym11: int32; vC`gensym11: MatrixView[int32]) 54 +nim gebb_ukernel_int64_x86_AVX512 gemm_ukernel_avx512.html#gebb_ukernel_int64_x86_AVX512,int,int64,ptr.,ptr.,int64, proc gebb_ukernel_int64_x86_AVX512[ukernel: static MicroKernel](kc`gensym14: int;\n alpha`gensym14: int64;\n packedA`gensym14, packedB`gensym14: ptr UncheckedArray[int64];\n beta`gensym14: int64; vC`gensym14: MatrixView[int64]) 51 +nim gebb_ukernel_edge_int64_x86_AVX512 gemm_ukernel_avx512.html#gebb_ukernel_edge_int64_x86_AVX512,int,int,int,int64,ptr.,ptr.,int64, proc gebb_ukernel_edge_int64_x86_AVX512[ukernel: static MicroKernel](\n mr`gensym15, nr`gensym15, kc`gensym15: int; alpha`gensym15: int64;\n packedA`gensym15, packedB`gensym15: ptr UncheckedArray[int64];\n beta`gensym15: int64; vC`gensym15: MatrixView[int64]) 72 diff --git a/gemm_ukernel_avx_fma.html b/gemm_ukernel_avx_fma.html new file mode 100644 index 000000000..77ad2ca93 --- /dev/null +++ b/gemm_ukernel_avx_fma.html @@ -0,0 +1,537 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx_fma + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx_fma + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx_fma

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc gebb_ukernel_edge_float32_x86_AVX_FMA[ukernel: static MicroKernel](
+    mr`gensym3, nr`gensym3, kc`gensym3: int; alpha`gensym3: float32;
+    packedA`gensym3, packedB`gensym3: ptr UncheckedArray[float32];
+    beta`gensym3: float32; vC`gensym3: MatrixView[float32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_edge_float64_x86_AVX_FMA[ukernel: static MicroKernel](
+    mr`gensym7, nr`gensym7, kc`gensym7: int; alpha`gensym7: float64;
+    packedA`gensym7, packedB`gensym7: ptr UncheckedArray[float64];
+    beta`gensym7: float64; vC`gensym7: MatrixView[float64]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_float32_x86_AVX_FMA[ukernel: static MicroKernel](
+    kc`gensym2: int; alpha`gensym2: float32;
+    packedA`gensym2, packedB`gensym2: ptr UncheckedArray[float32];
+    beta`gensym2: float32; vC`gensym2: MatrixView[float32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_float64_x86_AVX_FMA[ukernel: static MicroKernel](
+    kc`gensym6: int; alpha`gensym6: float64;
+    packedA`gensym6, packedB`gensym6: ptr UncheckedArray[float64];
+    beta`gensym6: float64; vC`gensym6: MatrixView[float64]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm_ukernel_avx_fma.idx b/gemm_ukernel_avx_fma.idx new file mode 100644 index 000000000..8abe574dc --- /dev/null +++ b/gemm_ukernel_avx_fma.idx @@ -0,0 +1,5 @@ +nimTitle gemm_ukernel_avx_fma gemm_ukernel_avx_fma.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_avx_fma 0 +nim gebb_ukernel_float32_x86_AVX_FMA gemm_ukernel_avx_fma.html#gebb_ukernel_float32_x86_AVX_FMA,int,float32,ptr.,ptr.,float32, proc gebb_ukernel_float32_x86_AVX_FMA[ukernel: static MicroKernel](kc`gensym2: int;\n alpha`gensym2: float32;\n packedA`gensym2, packedB`gensym2: ptr UncheckedArray[float32];\n beta`gensym2: float32; vC`gensym2: MatrixView[float32]) 51 +nim gebb_ukernel_edge_float32_x86_AVX_FMA gemm_ukernel_avx_fma.html#gebb_ukernel_edge_float32_x86_AVX_FMA,int,int,int,float32,ptr.,ptr.,float32, proc gebb_ukernel_edge_float32_x86_AVX_FMA[ukernel: static MicroKernel](\n mr`gensym3, nr`gensym3, kc`gensym3: int; alpha`gensym3: float32;\n packedA`gensym3, packedB`gensym3: ptr UncheckedArray[float32];\n beta`gensym3: float32; vC`gensym3: MatrixView[float32]) 15 +nim gebb_ukernel_float64_x86_AVX_FMA gemm_ukernel_avx_fma.html#gebb_ukernel_float64_x86_AVX_FMA,int,float64,ptr.,ptr.,float64, proc gebb_ukernel_float64_x86_AVX_FMA[ukernel: static MicroKernel](kc`gensym6: int;\n alpha`gensym6: float64;\n packedA`gensym6, packedB`gensym6: ptr UncheckedArray[float64];\n beta`gensym6: float64; vC`gensym6: MatrixView[float64]) 51 +nim gebb_ukernel_edge_float64_x86_AVX_FMA gemm_ukernel_avx_fma.html#gebb_ukernel_edge_float64_x86_AVX_FMA,int,int,int,float64,ptr.,ptr.,float64, proc gebb_ukernel_edge_float64_x86_AVX_FMA[ukernel: static MicroKernel](\n mr`gensym7, nr`gensym7, kc`gensym7: int; alpha`gensym7: float64;\n packedA`gensym7, packedB`gensym7: ptr UncheckedArray[float64];\n beta`gensym7: float64; vC`gensym7: MatrixView[float64]) 30 diff --git a/gemm_ukernel_dispatch.html b/gemm_ukernel_dispatch.html new file mode 100644 index 000000000..a17fa482a --- 
/dev/null +++ b/gemm_ukernel_dispatch.html @@ -0,0 +1,471 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_dispatch + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_dispatch + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_dispatch

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc gebb_ukernel[T; ukernel: static MicroKernel](kc: int; alpha: T;
+    packedA, packedB: ptr UncheckedArray[T]; beta: T; vC: MatrixView[T]) {.
+    inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_edge[T; ukernel: static MicroKernel](mr, nr, kc: int;
+    alpha: T; packedA, packedB: ptr UncheckedArray[T]; beta: T;
+    vC: MatrixView[T]) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm_ukernel_dispatch.idx b/gemm_ukernel_dispatch.idx new file mode 100644 index 000000000..ece2a632c --- /dev/null +++ b/gemm_ukernel_dispatch.idx @@ -0,0 +1,3 @@ +nimTitle gemm_ukernel_dispatch gemm_ukernel_dispatch.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_dispatch 0 +nim gebb_ukernel gemm_ukernel_dispatch.html#gebb_ukernel,int,T,ptr.UncheckedArray[T],ptr.UncheckedArray[T],T,MatrixView[T] proc gebb_ukernel[T; ukernel: static MicroKernel](kc: int; alpha: T;\n packedA, packedB: ptr UncheckedArray[T]; beta: T; vC: MatrixView[T]) 77 +nim gebb_ukernel_edge gemm_ukernel_dispatch.html#gebb_ukernel_edge,int,int,int,T,ptr.UncheckedArray[T],ptr.UncheckedArray[T],T,MatrixView[T] proc gebb_ukernel_edge[T; ukernel: static MicroKernel](mr, nr, kc: int; alpha: T;\n packedA, packedB: ptr UncheckedArray[T]; beta: T; vC: MatrixView[T]) 121 diff --git a/gemm_ukernel_generator.html b/gemm_ukernel_generator.html new file mode 100644 index 000000000..ba5798d7a --- /dev/null +++ b/gemm_ukernel_generator.html @@ -0,0 +1,516 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_generator + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_generator + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_generator

+
+ +
+   Source +Edit + +
+ +

+ +
+

Macros

+
+
+
+
macro ukernel_generator(simd: static CPUFeatureX86; typ: untyped;
+                        vectype: untyped; nb_scalars: static int;
+                        simd_setZero: untyped; simd_broadcast_value: untyped;
+                        simd_load_aligned: untyped;
+                        simd_load_unaligned: untyped;
+                        simd_store_unaligned: untyped; simd_mul: untyped;
+                        simd_add: untyped; simd_fma: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
macro ukernel_simd_impl(ukernel: static MicroKernel; V: untyped; A, B: untyped;
+                        kc: int; simd_setZero, simd_load_aligned,
+                                 simd_broadcast_value, simd_fma: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template x86only(): untyped
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm_ukernel_generator.idx b/gemm_ukernel_generator.idx new file mode 100644 index 000000000..c225a806c --- /dev/null +++ b/gemm_ukernel_generator.idx @@ -0,0 +1,4 @@ +nimTitle gemm_ukernel_generator gemm_ukernel_generator.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_generator 0 +nim x86only gemm_ukernel_generator.html#x86only.t template x86only(): untyped 24 +nim ukernel_generator gemm_ukernel_generator.html#ukernel_generator.m,staticCPUFeatureX86,untyped,untyped,staticint,untyped,untyped,untyped,untyped,untyped,untyped,untyped,untyped macro ukernel_generator(simd: static CPUFeatureX86; typ: untyped; vectype: untyped;\n nb_scalars: static int; simd_setZero: untyped;\n simd_broadcast_value: untyped; simd_load_aligned: untyped;\n simd_load_unaligned: untyped; simd_store_unaligned: untyped;\n simd_mul: untyped; simd_add: untyped; simd_fma: untyped): untyped 106 +nim ukernel_simd_impl gemm_ukernel_generator.html#ukernel_simd_impl.m,staticMicroKernel,untyped,untyped,untyped,int,untyped,untyped,untyped,untyped macro ukernel_simd_impl(ukernel: static MicroKernel; V: untyped; A, B: untyped;\n kc: int; simd_setZero, simd_load_aligned,\n simd_broadcast_value, simd_fma: untyped): untyped 143 diff --git a/gemm_ukernel_generic.html b/gemm_ukernel_generic.html new file mode 100644 index 000000000..4deb72ccf --- /dev/null +++ b/gemm_ukernel_generic.html @@ -0,0 +1,550 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_generic + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_generic + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_generic

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
func gebb_ukernel_edge_epilogue[MR, NR: static int; T](alpha: T;
+    AB: ptr array[MR, array[NR, T]]; beta: T; vC: MatrixView[T]; mr, nr: int) {.
+    inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_edge_fallback[T; ukernel: static MicroKernel](mr, nr, kc: int;
+    alpha: T; packedA, packedB: ptr UncheckedArray[T]; beta: T;
+    vC: MatrixView[T])
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_epilogue_fallback[MR, NR: static int; T](alpha: T;
+    AB: ptr array[MR, array[NR, T]]; beta: T; vC: MatrixView[T]) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_fallback[T; ukernel: static MicroKernel](kc: int; alpha: T;
+    packedA, packedB: ptr UncheckedArray[T]; beta: T; vC: MatrixView[T])
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template ukernel_generic_impl() {.dirty.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm_ukernel_generic.idx b/gemm_ukernel_generic.idx new file mode 100644 index 000000000..f347223a6 --- /dev/null +++ b/gemm_ukernel_generic.idx @@ -0,0 +1,6 @@ +nimTitle gemm_ukernel_generic gemm_ukernel_generic.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_generic 0 +nim ukernel_generic_impl gemm_ukernel_generic.html#ukernel_generic_impl.t template ukernel_generic_impl() 22 +nim gebb_ukernel_epilogue_fallback gemm_ukernel_generic.html#gebb_ukernel_epilogue_fallback,T,ptr.array[MR,array[NR,T]],T,MatrixView[T] proc gebb_ukernel_epilogue_fallback[MR, NR: static int; T](alpha: T;\n AB: ptr array[MR, array[NR, T]]; beta: T; vC: MatrixView[T]) 53 +nim gebb_ukernel_fallback gemm_ukernel_generic.html#gebb_ukernel_fallback,int,T,ptr.UncheckedArray[T],ptr.UncheckedArray[T],T,MatrixView[T] proc gebb_ukernel_fallback[T; ukernel: static MicroKernel](kc: int; alpha: T;\n packedA, packedB: ptr UncheckedArray[T]; beta: T; vC: MatrixView[T]) 81 +nim gebb_ukernel_edge_epilogue gemm_ukernel_generic.html#gebb_ukernel_edge_epilogue,T,ptr.array[MR,array[NR,T]],T,MatrixView[T],int,int proc gebb_ukernel_edge_epilogue[MR, NR: static int; T](alpha: T;\n AB: ptr array[MR, array[NR, T]]; beta: T; vC: MatrixView[T]; mr, nr: int) 96 +nim gebb_ukernel_edge_fallback gemm_ukernel_generic.html#gebb_ukernel_edge_fallback,int,int,int,T,ptr.UncheckedArray[T],ptr.UncheckedArray[T],T,MatrixView[T] proc gebb_ukernel_edge_fallback[T; ukernel: static MicroKernel](mr, nr, kc: int;\n alpha: T; packedA, packedB: ptr UncheckedArray[T]; beta: T;\n vC: MatrixView[T]) 130 diff --git a/gemm_ukernel_sse.html b/gemm_ukernel_sse.html new file mode 100644 index 000000000..e8ee652f6 --- /dev/null +++ b/gemm_ukernel_sse.html @@ -0,0 +1,481 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_sse + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_sse + + + + + + + 
+Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_sse

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc gebb_ukernel_edge_float32_x86_SSE[ukernel: static MicroKernel](
+    mr`gensym3, nr`gensym3, kc`gensym3: int; alpha`gensym3: float32;
+    packedA`gensym3, packedB`gensym3: ptr UncheckedArray[float32];
+    beta`gensym3: float32; vC`gensym3: MatrixView[float32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_float32_x86_SSE[ukernel: static MicroKernel](kc`gensym2: int;
+    alpha`gensym2: float32;
+    packedA`gensym2, packedB`gensym2: ptr UncheckedArray[float32];
+    beta`gensym2: float32; vC`gensym2: MatrixView[float32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm_ukernel_sse.idx b/gemm_ukernel_sse.idx new file mode 100644 index 000000000..f66477504 --- /dev/null +++ b/gemm_ukernel_sse.idx @@ -0,0 +1,3 @@ +nimTitle gemm_ukernel_sse gemm_ukernel_sse.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_sse 0 +nim gebb_ukernel_float32_x86_SSE gemm_ukernel_sse.html#gebb_ukernel_float32_x86_SSE,int,float32,ptr.,ptr.,float32, proc gebb_ukernel_float32_x86_SSE[ukernel: static MicroKernel](kc`gensym2: int;\n alpha`gensym2: float32;\n packedA`gensym2, packedB`gensym2: ptr UncheckedArray[float32];\n beta`gensym2: float32; vC`gensym2: MatrixView[float32]) 51 +nim gebb_ukernel_edge_float32_x86_SSE gemm_ukernel_sse.html#gebb_ukernel_edge_float32_x86_SSE,int,int,int,float32,ptr.,ptr.,float32, proc gebb_ukernel_edge_float32_x86_SSE[ukernel: static MicroKernel](\n mr`gensym3, nr`gensym3, kc`gensym3: int; alpha`gensym3: float32;\n packedA`gensym3, packedB`gensym3: ptr UncheckedArray[float32];\n beta`gensym3: float32; vC`gensym3: MatrixView[float32]) 18 diff --git a/gemm_ukernel_sse2.html b/gemm_ukernel_sse2.html new file mode 100644 index 000000000..66d7d7a80 --- /dev/null +++ b/gemm_ukernel_sse2.html @@ -0,0 +1,593 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_sse2 + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_sse2 + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_sse2

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc gebb_ukernel_edge_float64_x86_SSE2[ukernel: static MicroKernel](
+    mr`gensym3, nr`gensym3, kc`gensym3: int; alpha`gensym3: float64;
+    packedA`gensym3, packedB`gensym3: ptr UncheckedArray[float64];
+    beta`gensym3: float64; vC`gensym3: MatrixView[float64]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_edge_int32_x86_SSE2[ukernel: static MicroKernel](
+    mr`gensym7, nr`gensym7, kc`gensym7: int; alpha`gensym7: int32;
+    packedA`gensym7, packedB`gensym7: ptr UncheckedArray[int32];
+    beta`gensym7: int32; vC`gensym7: MatrixView[int32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_edge_int64_x86_SSE2[ukernel: static MicroKernel](
+    mr`gensym11, nr`gensym11, kc`gensym11: int; alpha`gensym11: int64;
+    packedA`gensym11, packedB`gensym11: ptr UncheckedArray[int64];
+    beta`gensym11: int64; vC`gensym11: MatrixView[int64]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_float64_x86_SSE2[ukernel: static MicroKernel](kc`gensym2: int;
+    alpha`gensym2: float64;
+    packedA`gensym2, packedB`gensym2: ptr UncheckedArray[float64];
+    beta`gensym2: float64; vC`gensym2: MatrixView[float64]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_int32_x86_SSE2[ukernel: static MicroKernel](kc`gensym6: int;
+    alpha`gensym6: int32;
+    packedA`gensym6, packedB`gensym6: ptr UncheckedArray[int32];
+    beta`gensym6: int32; vC`gensym6: MatrixView[int32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_int64_x86_SSE2[ukernel: static MicroKernel](kc`gensym10: int;
+    alpha`gensym10: int64;
+    packedA`gensym10, packedB`gensym10: ptr UncheckedArray[int64];
+    beta`gensym10: int64; vC`gensym10: MatrixView[int64]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm_ukernel_sse2.idx b/gemm_ukernel_sse2.idx new file mode 100644 index 000000000..c1c07d3c0 --- /dev/null +++ b/gemm_ukernel_sse2.idx @@ -0,0 +1,7 @@ +nimTitle gemm_ukernel_sse2 gemm_ukernel_sse2.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_sse2 0 +nim gebb_ukernel_float64_x86_SSE2 gemm_ukernel_sse2.html#gebb_ukernel_float64_x86_SSE2,int,float64,ptr.,ptr.,float64, proc gebb_ukernel_float64_x86_SSE2[ukernel: static MicroKernel](kc`gensym2: int;\n alpha`gensym2: float64;\n packedA`gensym2, packedB`gensym2: ptr UncheckedArray[float64];\n beta`gensym2: float64; vC`gensym2: MatrixView[float64]) 51 +nim gebb_ukernel_edge_float64_x86_SSE2 gemm_ukernel_sse2.html#gebb_ukernel_edge_float64_x86_SSE2,int,int,int,float64,ptr.,ptr.,float64, proc gebb_ukernel_edge_float64_x86_SSE2[ukernel: static MicroKernel](\n mr`gensym3, nr`gensym3, kc`gensym3: int; alpha`gensym3: float64;\n packedA`gensym3, packedB`gensym3: ptr UncheckedArray[float64];\n beta`gensym3: float64; vC`gensym3: MatrixView[float64]) 18 +nim gebb_ukernel_int32_x86_SSE2 gemm_ukernel_sse2.html#gebb_ukernel_int32_x86_SSE2,int,int32,ptr.,ptr.,int32, proc gebb_ukernel_int32_x86_SSE2[ukernel: static MicroKernel](kc`gensym6: int;\n alpha`gensym6: int32;\n packedA`gensym6, packedB`gensym6: ptr UncheckedArray[int32];\n beta`gensym6: int32; vC`gensym6: MatrixView[int32]) 51 +nim gebb_ukernel_edge_int32_x86_SSE2 gemm_ukernel_sse2.html#gebb_ukernel_edge_int32_x86_SSE2,int,int,int,int32,ptr.,ptr.,int32, proc gebb_ukernel_edge_int32_x86_SSE2[ukernel: static MicroKernel](\n mr`gensym7, nr`gensym7, kc`gensym7: int; alpha`gensym7: int32;\n packedA`gensym7, packedB`gensym7: ptr UncheckedArray[int32];\n beta`gensym7: int32; vC`gensym7: MatrixView[int32]) 72 +nim gebb_ukernel_int64_x86_SSE2 gemm_ukernel_sse2.html#gebb_ukernel_int64_x86_SSE2,int,int64,ptr.,ptr.,int64, proc gebb_ukernel_int64_x86_SSE2[ukernel: static MicroKernel](kc`gensym10: int;\n alpha`gensym10: int64;\n 
packedA`gensym10, packedB`gensym10: ptr UncheckedArray[int64];\n beta`gensym10: int64; vC`gensym10: MatrixView[int64]) 51 +nim gebb_ukernel_edge_int64_x86_SSE2 gemm_ukernel_sse2.html#gebb_ukernel_edge_int64_x86_SSE2,int,int,int,int64,ptr.,ptr.,int64, proc gebb_ukernel_edge_int64_x86_SSE2[ukernel: static MicroKernel](\n mr`gensym11, nr`gensym11, kc`gensym11: int; alpha`gensym11: int64;\n packedA`gensym11, packedB`gensym11: ptr UncheckedArray[int64];\n beta`gensym11: int64; vC`gensym11: MatrixView[int64]) 121 diff --git a/gemm_ukernel_sse4_1.html b/gemm_ukernel_sse4_1.html new file mode 100644 index 000000000..70b2fdb85 --- /dev/null +++ b/gemm_ukernel_sse4_1.html @@ -0,0 +1,481 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_sse4_1 + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_sse4_1 + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_sse4_1

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc gebb_ukernel_edge_int32_x86_SSE4_1[ukernel: static MicroKernel](
+    mr`gensym3, nr`gensym3, kc`gensym3: int; alpha`gensym3: int32;
+    packedA`gensym3, packedB`gensym3: ptr UncheckedArray[int32];
+    beta`gensym3: int32; vC`gensym3: MatrixView[int32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gebb_ukernel_int32_x86_SSE4_1[ukernel: static MicroKernel](kc`gensym2: int;
+    alpha`gensym2: int32;
+    packedA`gensym2, packedB`gensym2: ptr UncheckedArray[int32];
+    beta`gensym2: int32; vC`gensym2: MatrixView[int32]) {.used.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm_ukernel_sse4_1.idx b/gemm_ukernel_sse4_1.idx new file mode 100644 index 000000000..248e5fe7f --- /dev/null +++ b/gemm_ukernel_sse4_1.idx @@ -0,0 +1,3 @@ +nimTitle gemm_ukernel_sse4_1 gemm_ukernel_sse4_1.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm_ukernel_sse4_1 0 +nim gebb_ukernel_int32_x86_SSE4_1 gemm_ukernel_sse4_1.html#gebb_ukernel_int32_x86_SSE4_1,int,int32,ptr.,ptr.,int32, proc gebb_ukernel_int32_x86_SSE4_1[ukernel: static MicroKernel](kc`gensym2: int;\n alpha`gensym2: int32;\n packedA`gensym2, packedB`gensym2: ptr UncheckedArray[int32];\n beta`gensym2: int32; vC`gensym2: MatrixView[int32]) 51 +nim gebb_ukernel_edge_int32_x86_SSE4_1 gemm_ukernel_sse4_1.html#gebb_ukernel_edge_int32_x86_SSE4_1,int,int,int,int32,ptr.,ptr.,int32, proc gebb_ukernel_edge_int32_x86_SSE4_1[ukernel: static MicroKernel](\n mr`gensym3, nr`gensym3, kc`gensym3: int; alpha`gensym3: int32;\n packedA`gensym3, packedB`gensym3: ptr UncheckedArray[int32];\n beta`gensym3: int32; vC`gensym3: MatrixView[int32]) 28 diff --git a/gemm_utils.html b/gemm_utils.html new file mode 100644 index 000000000..54ab65f4c --- /dev/null +++ b/gemm_utils.html @@ -0,0 +1,586 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/primitives/matrix_multiplication/gemm_utils + + + + + + + + + +Arraymancer - src/arraymancer/laser/primitives/matrix_multiplication/gemm_utils + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/primitives/matrix_multiplication/gemm_utils

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
MatrixView[T] = object
+  buffer*: ptr UncheckedArray[T]
+  rowStride*, colStride*: int
+
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
func `+`(p: ptr; offset: int): type(p) {.inline.}
+
+ + Pointer increment +   Source +Edit + +
+
+ +
+
+
+
func stride[T](view: MatrixView[T]; row, col: Natural): MatrixView[T] {.inline.}
+
+ + Returns a new view offset by the row and column stride +   Source +Edit + +
+
+ +
+
+
+
func toMatrixView[T](data: ptr T; rowStride, colStride: int): MatrixView[T] {.
+    inline.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template `[]`[T](view: MatrixView[T]; row, col: Natural): T
+
+ + Access like a 2D matrix +   Source +Edit + +
+
+ +
+
+
+
template `[]=`[T](view: MatrixView[T]; row, col: Natural; value: T)
+
+ + Access like a 2D matrix +   Source +Edit + +
+
+ +
+
+
+
template to_ptr(AB: typed; MR, NR: static int; T: typedesc): untyped
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gemm_utils.idx b/gemm_utils.idx new file mode 100644 index 000000000..de5a89613 --- /dev/null +++ b/gemm_utils.idx @@ -0,0 +1,8 @@ +nimTitle gemm_utils gemm_utils.html module src/arraymancer/laser/primitives/matrix_multiplication/gemm_utils 0 +nim `+` gemm_utils.html#+,ptr,int proc `+`(p: ptr; offset: int): type(p) 16 +nim to_ptr gemm_utils.html#to_ptr.t,typed,staticint,staticint,typedesc template to_ptr(AB: typed; MR, NR: static int; T: typedesc): untyped 27 +nim MatrixView gemm_utils.html#MatrixView object MatrixView 37 +nim toMatrixView gemm_utils.html#toMatrixView,ptr.T,int,int proc toMatrixView[T](data: ptr T; rowStride, colStride: int): MatrixView[T] 41 +nim `[]` gemm_utils.html#[].t,MatrixView[T],Natural,Natural template `[]`[T](view: MatrixView[T]; row, col: Natural): T 46 +nim `[]=` gemm_utils.html#[]=.t,MatrixView[T],Natural,Natural,T template `[]=`[T](view: MatrixView[T]; row, col: Natural; value: T) 50 +nim stride gemm_utils.html#stride,MatrixView[T],Natural,Natural proc stride[T](view: MatrixView[T]; row, col: Natural): MatrixView[T] 54 diff --git a/global_config.html b/global_config.html new file mode 100644 index 000000000..d9504fd08 --- /dev/null +++ b/global_config.html @@ -0,0 +1,481 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/backend/global_config + + + + + + + + + +Arraymancer - src/arraymancer/tensor/backend/global_config + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/backend/global_config

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Consts

+
+
+
CUDA_HOF_BPG: cint = 256
+
+ + +   Source +Edit + +
+
+
+
CUDA_HOF_TPB: cint = 1024
+
+ + +   Source +Edit + +
+
+
+
MAXRANK = 7
+
+ + +   Source +Edit + +
+
+
+
OMP_FOR_THRESHOLD = 1000
+
+ + +   Source +Edit + +
+
+
+
OMP_MAX_REDUCE_BLOCKS = 8
+
+ + +   Source +Edit + +
+
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/global_config.idx b/global_config.idx new file mode 100644 index 000000000..27dce897b --- /dev/null +++ b/global_config.idx @@ -0,0 +1,6 @@ +nimTitle global_config global_config.html module src/arraymancer/tensor/backend/global_config 0 +nim MAXRANK global_config.html#MAXRANK const MAXRANK 22 +nim CUDA_HOF_TPB global_config.html#CUDA_HOF_TPB const CUDA_HOF_TPB 25 +nim CUDA_HOF_BPG global_config.html#CUDA_HOF_BPG const CUDA_HOF_BPG 27 +nim OMP_FOR_THRESHOLD global_config.html#OMP_FOR_THRESHOLD const OMP_FOR_THRESHOLD 32 +nim OMP_MAX_REDUCE_BLOCKS global_config.html#OMP_MAX_REDUCE_BLOCKS const OMP_MAX_REDUCE_BLOCKS 33 diff --git a/gru.html b/gru.html new file mode 100644 index 000000000..90eba6c3d --- /dev/null +++ b/gru.html @@ -0,0 +1,566 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/layers/gru + + + + + + + + + +Arraymancer - src/arraymancer/nn/layers/gru + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/layers/gru

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
GRUGate[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + For now the GRU layer only supports fixed size GRU stack and Timesteps +   Source +Edit + +
+
+
+
GRULayer[T] = object
+  w3s0*, w3sN*: Variable[Tensor[T]]
+  u3s*: Variable[Tensor[T]]
+  bW3s*, bU3s*: Variable[Tensor[T]]
+
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc forward[T](self: GRULayer[T]; input, hidden0: Variable): tuple[
+    output, hiddenN: Variable]
+
+ + Inputs: +

Outputs:

+ + +   Source +Edit + +
+
+ +
+
+
+
proc gru[TT](input, hidden0: Variable[TT]; W3s0, W3sN, U3s: Variable[TT];
+             bW3s, bU3s: Variable[TT]): tuple[output, hiddenN: Variable[TT]]
+
+ +

โš ๏ธ API subject to change to match CuDNNs

+

Bidirectional support is not implemented

+

Inputs:

+ +

Outputs:

+ + +   Source +Edit + +
+
+ +
+
+
+
proc init[T](ctx: Context[Tensor[T]]; layerType: typedesc[GRULayer[T]];
+             numInputFeatures, hiddenSize, layers: int): GRULayer[T]
+
+ + Creates an gated recurrent layer. Input:
- ``numInputFeatures`` Number of features of the input.
+- ``hiddenSize`` size of the hidden layer(s)
+- ``layers`` Number of stacked layers
+

Returns the created GRULayer.

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/gru.idx b/gru.idx new file mode 100644 index 000000000..80a559aec --- /dev/null +++ b/gru.idx @@ -0,0 +1,6 @@ +nimTitle gru gru.html module src/arraymancer/nn/layers/gru 0 +nim GRUGate gru.html#GRUGate type GRUGate 21 +nim gru gru.html#gru,Variable[TT],Variable[TT],Variable[TT],Variable[TT],Variable[TT],Variable[TT],Variable[TT] proc gru[TT](input, hidden0: Variable[TT]; W3s0, W3sN, U3s: Variable[TT];\n bW3s, bU3s: Variable[TT]): tuple[output, hiddenN: Variable[TT]] 119 +nim GRULayer gru.html#GRULayer object GRULayer 164 +nim init gru.html#init,Context[Tensor[T]],typedesc[GRULayer[T]],int,int,int proc init[T](ctx: Context[Tensor[T]]; layerType: typedesc[GRULayer[T]];\n numInputFeatures, hiddenSize, layers: int): GRULayer[T] 169 +nim forward gru.html#forward,GRULayer[T],Variable,Variable proc forward[T](self: GRULayer[T]; input, hidden0: Variable): tuple[\n output, hiddenN: Variable] 198 diff --git a/higher_order_applymap.html b/higher_order_applymap.html new file mode 100644 index 000000000..47a203ffd --- /dev/null +++ b/higher_order_applymap.html @@ -0,0 +1,778 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/higher_order_applymap + + + + + + + + + +Arraymancer - src/arraymancer/tensor/higher_order_applymap + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/higher_order_applymap

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc apply[T: KnownSupportsCopyMem](t: var Tensor[T]; f: proc (x: var T)) {.
+    effectsOf: f.}
+
+ +

Apply a unary function in an element-wise manner on TensorT, in-place.

+

Input:

+
  • a var Tensor
  • +
  • An in-place function that returns no value
  • +
+

Result:

+
  • Nothing, the var Tensor is modified in-place
  • +
+

Usage with Nim's sugar module:

+
  • The sugar module has a functional programming paradigm, anonymous functions cannot mutate the arguments
  • +
+

Usage with named functions: .. code:: nim proc pluseqone[T](x: var T) = x += 1 a.apply(pluseqone) # Apply the in-place function pluseqone apply is especially useful to do multiple element-wise operations on a tensor in a single loop over the data.

+ +   Source +Edit + +
+
+
+
proc apply[T](t: var Tensor[T]; f: T -> T) {.effectsOf: f.}
+
+ +

Apply a unary function in an element-wise manner on TensorT, in-place.

+

Input:

+
  • a var Tensor
  • +
  • A function or anonymous function that returns a value
  • +
+

Result:

+
  • Nothing, the var Tensor is modified in-place
  • +
+

Usage with Nim's sugar module: .. code:: nim var a = newTensor(5,5, int) # a must be var a.apply(x => x+1) # Map the anonymous function x => x+1 Usage with named functions: .. code:: nim proc plusone[T](x: T): T = x + 1 a.apply(plusone) # Apply the function plusone in-place

+ +   Source +Edit + +
+
+ +
+
+
+
proc apply2[T: KnownSupportsCopyMem; U](a: var Tensor[T];
+                                        f: proc (x: var T; y: T); b: Tensor[U]) {.
+    effectsOf: f.}
+
+ +

Apply a binary in-place function in an element-wise manner on two TensorT, returning a new Tensor. Overload for types that are not mem-copyable.

+

The function is applied on the elements with the same coordinates.

+

Input:

+
  • A var tensor
  • +
  • A function
  • +
  • A tensor
  • +
+

Result:

+
  • Nothing, the varTensor is modified in-place
  • +
+

Usage with named functions: .. code:: nim proc **=T = # We create a new in-place power **= function that works on 2 scalars x = pow(x, y) a.apply2(**=, b) # Or apply2(a, **=, b)

+

apply2 is especially useful to do multiple element-wise operations on a two tensors in a single loop over the data. for example A += alpha * sin(A) + B

+ +   Source +Edit + +
+
+
+
proc apply2[T: not KnownSupportsCopyMem; U](a: var Tensor[T];
+    f: proc (x: var T; y: T); b: Tensor[U]) {.effectsOf: f.}
+
+ +

Apply a binary in-place function in an element-wise manner on two TensorT, returning a new Tensor. Overload for types that are not mem-copyable.

+

The function is applied on the elements with the same coordinates.

+

Input:

+
  • A var tensor
  • +
  • A function
  • +
  • A tensor
  • +
+

Result:

+
  • Nothing, the varTensor is modified in-place
  • +
+

Usage with named functions: .. code:: nim proc **=T = # We create a new in-place power **= function that works on 2 scalars x = pow(x, y) a.apply2(**=, b) # Or apply2(a, **=, b) apply2 is especially useful to do multiple element-wise operations on a two tensors in a single loop over the data. for example A += alpha * sin(A) + B

+ +   Source +Edit + +
+
+ +
+
+
+
proc map[T; U](t: Tensor[T]; f: T -> U): Tensor[U] {.noinit, effectsOf: f.}
+
+ +

Apply a unary function in an element-wise manner on TensorT, returning a new Tensor. Usage with Nim's sugar module: .. code:: nim a.map(x => x+1) # Map the anonymous function x => x+1 Usage with named functions: .. code:: nim proc plusone[T](x: T): T = x + 1 a.map(plusone) # Map the function plusone Note: for basic operation, you can use implicit broadcasting instead with operators prefixed by a dot : .. code:: nim a +. 1 map is especially useful to do multiple element-wise operations on a tensor in a single loop over the data.

+

For types that are not mem-copyable types (ref, string, etc.) a non OpenMP accelerated version of apply2_inline is used internally!

+ +   Source +Edit + +
+
+ +
+
+
+
proc map2[T, U; V: KnownSupportsCopyMem](t1: Tensor[T]; f: (T, U) -> V;
+    t2: Tensor[U]): Tensor[V] {.noinit, effectsOf: f.}
+
+ +

Apply a binary function in an element-wise manner on two TensorT, returning a new Tensor.

+

The function is applied on the elements with the same coordinates.

+

Input:

+
  • A tensor
  • +
  • A function
  • +
  • A tensor
  • +
+

Result:

+
  • A new tensor
  • +
+

Usage with named functions: .. code:: nim proc **T: T = # We create a new power ** function that works on 2 scalars pow(x, y) a.map2(**, b) # Or map2(a, **, b) map2 is especially useful to do multiple element-wise operations on a two tensors in a single loop over the data. for example alpha * sin(A) + B

+

For OpenMP compatibility, this map2 doesn't allow ref types as result like seq or string

+ +   Source +Edit + +
+
+
+
proc map2[T, U; V: not KnownSupportsCopyMem](t1: Tensor[T]; f: (T, U) -> V;
+    t2: Tensor[U]): Tensor[V] {.noinit, noSideEffect, effectsOf: f.}
+
+ +

Apply a binary function in an element-wise manner on two TensorT, returning a new Tensor.

+

This is a fallback for ref types as OpenMP will not work with if the results allocate memory managed by GC.

+ +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template apply2_inline[T: KnownSupportsCopyMem; U](dest: var Tensor[T];
+    src: Tensor[U]; op: untyped): untyped
+
+ + +   Source +Edit + +
+
+
+
template apply2_inline[T: not KnownSupportsCopyMem; U](dest: var Tensor[T];
+    src: Tensor[U]; op: untyped): untyped
+
+ + NOTE: this is an overload of apply2_inline, which also works with +   Source +Edit + +
+
+ +
+
+
+
template apply3_inline[T: KnownSupportsCopyMem; U, V](dest: var Tensor[T];
+    src1: Tensor[U]; src2: Tensor[V]; op: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template apply_inline[T: KnownSupportsCopyMem](t: var Tensor[T]; op: untyped): untyped
+
+ + +   Source +Edit + +
+
+
+
template apply_inline[T: not KnownSupportsCopyMem](t: var Tensor[T]; op: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template map2_inline[T, U](t1: Tensor[T]; t2: Tensor[U]; op: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template map3_inline[T, U, V](t1: Tensor[T]; t2: Tensor[U]; t3: Tensor[V];
+                              op: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template map_inline[T](t: Tensor[T]; op: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/higher_order_applymap.idx b/higher_order_applymap.idx new file mode 100644 index 000000000..c807f967d --- /dev/null +++ b/higher_order_applymap.idx @@ -0,0 +1,21 @@ +nimTitle higher_order_applymap higher_order_applymap.html module src/arraymancer/tensor/higher_order_applymap 0 +nim apply_inline higher_order_applymap.html#apply_inline.t,Tensor[T: KnownSupportsCopyMem],untyped template apply_inline[T: KnownSupportsCopyMem](t: var Tensor[T]; op: untyped): untyped 27 +nim apply_inline higher_order_applymap.html#apply_inline.t,Tensor[T: not KnownSupportsCopyMem],untyped template apply_inline[T: not KnownSupportsCopyMem](t: var Tensor[T]; op: untyped): untyped 34 +nim apply2_inline higher_order_applymap.html#apply2_inline.t,Tensor[T: KnownSupportsCopyMem],Tensor[U],untyped template apply2_inline[T: KnownSupportsCopyMem; U](dest: var Tensor[T]; src: Tensor[U];\n op: untyped): untyped 40 +nim apply2_inline higher_order_applymap.html#apply2_inline.t,Tensor[T: not KnownSupportsCopyMem],Tensor[U],untyped template apply2_inline[T: not KnownSupportsCopyMem; U](dest: var Tensor[T];\n src: Tensor[U]; op: untyped): untyped 49 +nim apply3_inline higher_order_applymap.html#apply3_inline.t,Tensor[T: KnownSupportsCopyMem],Tensor[U],Tensor[V],untyped template apply3_inline[T: KnownSupportsCopyMem; U, V](dest: var Tensor[T];\n src1: Tensor[U]; src2: Tensor[V]; op: untyped): untyped 59 +nim map_inline higher_order_applymap.html#map_inline.t,Tensor[T],untyped template map_inline[T](t: Tensor[T]; op: untyped): untyped 69 +nim map2_inline higher_order_applymap.html#map2_inline.t,Tensor[T],Tensor[U],untyped template map2_inline[T, U](t1: Tensor[T]; t2: Tensor[U]; op: untyped): untyped 92 +nim map3_inline higher_order_applymap.html#map3_inline.t,Tensor[T],Tensor[U],Tensor[V],untyped template map3_inline[T, U, V](t1: Tensor[T]; t2: Tensor[U]; t3: Tensor[V]; op: untyped): untyped 122 +nim map higher_order_applymap.html#map,Tensor[T], proc map[T; U](t: Tensor[T]; f: T -> U): 
Tensor[U] 152 +nim apply higher_order_applymap.html#apply,Tensor[T], proc apply[T](t: var Tensor[T]; f: T -> T) 174 +nim apply higher_order_applymap.html#apply,Tensor[T: KnownSupportsCopyMem],proc(T) proc apply[T: KnownSupportsCopyMem](t: var Tensor[T]; f: proc (x: var T)) 194 +nim map2 higher_order_applymap.html#map2,Tensor[T],,Tensor[U] proc map2[T, U; V: KnownSupportsCopyMem](t1: Tensor[T]; f: (T, U) -> V; t2: Tensor[U]): Tensor[\n V] 218 +nim map2 higher_order_applymap.html#map2,Tensor[T],,Tensor[U]_2 proc map2[T, U; V: not KnownSupportsCopyMem](t1: Tensor[T]; f: (T, U) -> V;\n t2: Tensor[U]): Tensor[V] 248 +nim apply2 higher_order_applymap.html#apply2,Tensor[T: KnownSupportsCopyMem],proc(T,T),Tensor[U] proc apply2[T: KnownSupportsCopyMem; U](a: var Tensor[T]; f: proc (x: var T; y: T);\n b: Tensor[U]) 263 +nim apply2 higher_order_applymap.html#apply2,Tensor[T: not KnownSupportsCopyMem],proc(T,T),Tensor[U] proc apply2[T: not KnownSupportsCopyMem; U](a: var Tensor[T];\n f: proc (x: var T; y: T); b: Tensor[U]) 294 +nimgrp apply higher_order_applymap.html#apply-procs-all proc 174 +nimgrp apply2 higher_order_applymap.html#apply2-procs-all proc 263 +nimgrp map2 higher_order_applymap.html#map2-procs-all proc 218 +nimgrp apply2inline higher_order_applymap.html#apply2_inline-templates-all template 40 +nimgrp applyinline higher_order_applymap.html#apply_inline-templates-all template 27 diff --git a/higher_order_foldreduce.html b/higher_order_foldreduce.html new file mode 100644 index 000000000..859490da8 --- /dev/null +++ b/higher_order_foldreduce.html @@ -0,0 +1,616 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/higher_order_foldreduce + + + + + + + + + +Arraymancer - src/arraymancer/tensor/higher_order_foldreduce + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/higher_order_foldreduce

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc fold[U, T](arg: Tensor[U]; start_val: T; f: (T, U) -> T): T {.effectsOf: f.}
+
+ + Chain result = f(result, element) over all elements of the Tensor Input:
- A tensor to aggregate on
+- The starting value
+- The aggregation function. It is applied this way: new_aggregate = f(old_aggregate, current_value)
+

Result:

+
- An aggregate of the function called on the starting value and all elements of the tensor
+

Usage: .. code:: nim a.fold(100,max) ## This compare 100 with the first tensor value and returns 100 ## In the end, we will get the highest value in the Tensor or 100 ## whichever is bigger.

+ +   Source +Edit + +
+
+
+
proc fold[U, T](arg: Tensor[U]; start_val: Tensor[T];
+                f: (Tensor[T], Tensor[U]) -> Tensor[T]; axis: int): Tensor[T] {.
+    effectsOf: f.}
+
+ + Chain result = f(result, element) over all elements of the Tensor Input:
- A tensor to aggregate on
+- The starting value
+- The aggregation function. It is applied this way: new_aggregate = f(old_aggregate, current_value)
+- The axis to aggregate on
+

Result:

+
- An Tensor with the aggregate of the function called on the starting value and all slices along the selected axis
+ +   Source +Edit + +
+
+ +
+
+
+
proc reduce[T](arg: Tensor[T]; f: (T, T) -> T): T {.effectsOf: f.}
+
+ +

Chain result = f(result, element) over all elements of the Tensor.

+

The starting value is the first element of the Tensor. Input:

+
- A tensor to aggregate on
+- The aggregation function. It is applied this way: new_aggregate = f(old_aggregate, current_value)
+

Result:

+
- An aggregate of the function called all elements of the tensor
+

Usage: .. code:: nim a.reduce(max) ## This returns the maximum value in the Tensor.

+ +   Source +Edit + +
+
+
+
proc reduce[T](arg: Tensor[T]; f: (Tensor[T], Tensor[T]) -> Tensor[T]; axis: int): Tensor[
+    T] {.noinit, effectsOf: f.}
+
+ +

Chain result = f(result, element) over all elements of the Tensor.

+

The starting value is the first element of the Tensor. Input:

+
- A tensor to aggregate on
+- The aggregation function. It is applied this way: new_aggregate = f(old_aggregate, current_value)
+- An axis to aggregate on
+

Result:

+
- A tensor aggregate of the function called all elements of the tensor
+ +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template fold_axis_inline[T](arg: Tensor[T]; accumType: typedesc;
+                             fold_axis: int;
+                             op_initial, op_middle, op_final: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template fold_inline[T](arg: Tensor[T]; op_initial, op_middle, op_final: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template reduce_axis_inline[T](arg: Tensor[T]; reduction_axis: int; op: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template reduce_inline[T](arg: Tensor[T]; op: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/higher_order_foldreduce.idx b/higher_order_foldreduce.idx new file mode 100644 index 000000000..c80c4e777 --- /dev/null +++ b/higher_order_foldreduce.idx @@ -0,0 +1,11 @@ +nimTitle higher_order_foldreduce higher_order_foldreduce.html module src/arraymancer/tensor/higher_order_foldreduce 0 +nim reduce_inline higher_order_foldreduce.html#reduce_inline.t,Tensor[T],untyped template reduce_inline[T](arg: Tensor[T]; op: untyped): untyped 23 +nim fold_inline higher_order_foldreduce.html#fold_inline.t,Tensor[T],untyped,untyped,untyped template fold_inline[T](arg: Tensor[T]; op_initial, op_middle, op_final: untyped): untyped 33 +nim reduce_axis_inline higher_order_foldreduce.html#reduce_axis_inline.t,Tensor[T],int,untyped template reduce_axis_inline[T](arg: Tensor[T]; reduction_axis: int; op: untyped): untyped 44 +nim fold_axis_inline higher_order_foldreduce.html#fold_axis_inline.t,Tensor[T],typedesc,int,untyped,untyped,untyped template fold_axis_inline[T](arg: Tensor[T]; accumType: typedesc; fold_axis: int;\n op_initial, op_middle, op_final: untyped): untyped 60 +nim fold higher_order_foldreduce.html#fold,Tensor[U],T, proc fold[U, T](arg: Tensor[U]; start_val: T; f: (T, U) -> T): T 82 +nim fold higher_order_foldreduce.html#fold,Tensor[U],Tensor[T],,int proc fold[U, T](arg: Tensor[U]; start_val: Tensor[T];\n f: (Tensor[T], Tensor[U]) -> Tensor[T]; axis: int): Tensor[T] 103 +nim reduce higher_order_foldreduce.html#reduce,Tensor[T], proc reduce[T](arg: Tensor[T]; f: (T, T) -> T): T 121 +nim reduce higher_order_foldreduce.html#reduce,Tensor[T],,int proc reduce[T](arg: Tensor[T]; f: (Tensor[T], Tensor[T]) -> Tensor[T]; axis: int): Tensor[\n T] 142 +nimgrp reduce higher_order_foldreduce.html#reduce-procs-all proc 121 +nimgrp fold higher_order_foldreduce.html#fold-procs-all proc 82 diff --git a/howto.perceptron.html b/howto.perceptron.html new file mode 100644 index 000000000..da29e966e --- /dev/null +++ b/howto.perceptron.html @@ -0,0 +1,426 @@ + + + + + + + + + 
+ + + + + + + + + +Spellbook: How to do a multilayer perceptron + + + + + + + + + +Arraymancer - Spellbook: How to do a multilayer perceptron + + + + + + + +Fork me on GitHub + + +
+
+

Spellbook: How to do a multilayer perceptron

+
import arraymancer
+
+# Learning XOR function with a neural network.
+
+# Autograd context / neuralnet graph
+let ctx = newContext Tensor[float32]
+
+let bsz = 32 # batch size
+
+# We will create a tensor of size 3200 (100 batches of size 32)
+# We create it as int between [0, 2[ and convert to bool
+let x_train_bool = randomTensor([bsz * 100, 2], 2).astype(bool)
+
+# Let's build our truth labels. We need to apply xor between the 2 columns of the tensors
+let y_bool = x_train_bool[_,0] xor x_train_bool[_,1]
+
+# Convert to float
+let x_train = ctx.variable(x_train_bool.astype(float32), requires_grad = true)
+let y = y_bool.astype(float32)
+
+# We will build the following network:
+# Input --> Linear(out_features = 3) --> relu --> Linear(out_features = 1) --> Sigmoid --> Cross-Entropy Loss
+
+# First hidden layer of 3 neurons, shape [3 out_features, 2 in_features]
+# We initialize with random weights between -1 and 1
+let layer_3neurons = ctx.variable(
+  randomTensor(3, 2, 2.0f) -. 1.0f,
+  requires_grad = true
+)
+
+# Classifier layer with 1 neuron per feature. (In our case only one neuron overall)
+# We initialize with random weights between -1 and 1
+let classifier_layer = ctx.variable(
+  randomTensor(1, 3, 2.0f) -. 1.0f,
+  requires_grad = true
+)
+
+# Stochastic Gradient Descent
+let optim = newSGD[float32](
+  layer_3neurons, classifier_layer, 0.01f # 0.01 is the learning rate
+)
+
+# Learning loop
+for epoch in 0..5:
+  for batch_id in 0..<100:
+    
+    # minibatch offset in the Tensor
+    let offset = batch_id * 32
+    let x = x_train[offset ..< offset + 32, _]
+    let target = y[offset ..< offset + 32, _]
+    
+    # Building the network
+    let n1 = relu linear(x, layer_3neurons)
+    let n2 = linear(n1, classifier_layer)
+    let loss = n2.sigmoid_cross_entropy(target)
+    
+    echo "Epoch is:" & $epoch
+    echo "Batch id:" & $batch_id
+    echo "Loss is:" & $loss.value
+    
+    # Compute the gradient (i.e. contribution of each parameter to the loss)
+    loss.backprop()
+    
+    # Correct the weights now that we have the gradient information
+    optim.update()
+ + +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/howto.type_conversion.html b/howto.type_conversion.html new file mode 100644 index 000000000..7c448c1b1 --- /dev/null +++ b/howto.type_conversion.html @@ -0,0 +1,363 @@ + + + + + + + + + + + + + + + + + + +Spellbook: How to convert a tensor underlying type? + + + + + + + + + +Arraymancer - Spellbook: How to convert a tensor underlying type? + + + + + + + +Fork me on GitHub + + +
+
+

Spellbook: How to convert a tensor underlying type?

+

A type conversion fonction asType is provided for convenience

+
let foo_float = foo.asType(float)
+ + +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/howto.ufunc.html b/howto.ufunc.html new file mode 100644 index 000000000..bb95193cb --- /dev/null +++ b/howto.ufunc.html @@ -0,0 +1,384 @@ + + + + + + + + + + + + + + + + + + +Spellbook: How to create an universal function? + + + + + + + + + +Arraymancer - Spellbook: How to create an universal function? + + + + + + + +Fork me on GitHub + + +
+
+

Spellbook: How to create an universal function?

+

Functions that applies to a single element can work on a whole tensor similar to Numpyโ€™s universal functions.

+

3 functions exist: makeUniversal, makeUniversalLocal and map.

+

makeUniversal create a a function that applies to each element of a tensor from any unary function. Most functions from the math module have been generalized to tensors with makeUniversal(sin). Furthermore those universal functions are exported and available for import.

+

makeUniversalLocal does not export the universal functions.

+

map is more generic and map any function to all element of a tensor. map works even if the function changes the type of the tensorโ€™s elements.

+
echo foo.map(x => x.isPowerOfTwo) # map a function (`=>` comes from the future module )
+
+# Tensor of shape 5x5 of type "bool" on backend "Cpu"
+# |true   true    true    true    true|
+# |true   true    true    true    true|
+# |false  false   false   false   false|
+# |true   true    true    true    true|
+# |false  false   false   false   false|
+
+let foo_float = foo.map(x => x.float)
+echo ln foo_float # universal function (convert first to float for ln)
+
+# Tensor of shape 5x5 of type "float" on backend "Cpu"
+# |0.0    0.0     0.0     0.0     0.0|
+# |0.6931471805599453     1.386294361119891       2.079441541679836       2.772588722239781       3.465735902799727|
+# |1.09861228866811       2.19722457733622        3.295836866004329       4.394449154672439       5.493061443340548|
+# |1.386294361119891      2.772588722239781       4.158883083359671       5.545177444479562       6.931471805599453|
+# |1.6094379124341        3.218875824868201       4.828313737302302       6.437751649736401       8.047189562170502|
+ + +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/imdb.html b/imdb.html new file mode 100644 index 000000000..3ae6148e3 --- /dev/null +++ b/imdb.html @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/datasets/imdb + + + + + + + + + +Arraymancer - src/arraymancer/datasets/imdb + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/datasets/imdb

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ util, tensor +
+
+
+

Procs

+
+
+
+
proc load_imdb(cache: static bool = true): Imdb
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/imdb.idx b/imdb.idx new file mode 100644 index 000000000..a1656c893 --- /dev/null +++ b/imdb.idx @@ -0,0 +1,2 @@ +nimTitle imdb imdb.html module src/arraymancer/datasets/imdb 0 +nim load_imdb imdb.html#load_imdb,staticbool proc load_imdb(cache: static bool = true): Imdb 89 diff --git a/incl_accessors_cuda.html b/incl_accessors_cuda.html new file mode 100644 index 000000000..faced76db --- /dev/null +++ b/incl_accessors_cuda.html @@ -0,0 +1,399 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/incl_accessors_cuda + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/incl_accessors_cuda + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/incl_accessors_cuda

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ +
+
+   Source +Edit + +
+ +

+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/incl_accessors_cuda.idx b/incl_accessors_cuda.idx new file mode 100644 index 000000000..2ba397397 --- /dev/null +++ b/incl_accessors_cuda.idx @@ -0,0 +1 @@ +nimTitle incl_accessors_cuda incl_accessors_cuda.html module src/arraymancer/tensor/private/incl_accessors_cuda 0 diff --git a/incl_higher_order_cuda.html b/incl_higher_order_cuda.html new file mode 100644 index 000000000..06eda2abd --- /dev/null +++ b/incl_higher_order_cuda.html @@ -0,0 +1,399 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/incl_higher_order_cuda + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/incl_higher_order_cuda + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/incl_higher_order_cuda

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ +
+
+   Source +Edit + +
+ +

+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/incl_higher_order_cuda.idx b/incl_higher_order_cuda.idx new file mode 100644 index 000000000..7348a4794 --- /dev/null +++ b/incl_higher_order_cuda.idx @@ -0,0 +1 @@ +nimTitle incl_higher_order_cuda incl_higher_order_cuda.html module src/arraymancer/tensor/private/incl_higher_order_cuda 0 diff --git a/incl_kernels_cuda.html b/incl_kernels_cuda.html new file mode 100644 index 000000000..c0bc34794 --- /dev/null +++ b/incl_kernels_cuda.html @@ -0,0 +1,399 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/incl_kernels_cuda + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/incl_kernels_cuda + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/incl_kernels_cuda

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ +
+
+   Source +Edit + +
+ +

+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/incl_kernels_cuda.idx b/incl_kernels_cuda.idx new file mode 100644 index 000000000..dbb76f16a --- /dev/null +++ b/incl_kernels_cuda.idx @@ -0,0 +1 @@ +nimTitle incl_kernels_cuda incl_kernels_cuda.html module src/arraymancer/tensor/private/incl_kernels_cuda 0 diff --git a/index.html b/index.html new file mode 100644 index 000000000..187978832 --- /dev/null +++ b/index.html @@ -0,0 +1,6415 @@ + + + + + + + + + + + + + + + + + + +Index + + + + + + + + + +Arraymancer - Index + + + + + + + +Fork me on GitHub + + +
+
+

Index

+ Modules: accessors, accessors_macros_read, accessors_macros_syntax, accessors_macros_write, accuracy_score, aggregate, algebra, algorithms, align_unroller, ast_utils, autograd, autograd_common, auxiliary_blas, auxiliary_lapack, blas_l3_gemm, blis, common_error_functions, compiler_optim_hints, complex, conv, conv2D, cpuinfo_x86, cross_entropy_losses, cublas, cuda, cuda_global_state, cudnn, cudnn_conv_interface, data_structure, datatypes, dbscan, decomposition, decomposition_lapack, decomposition_rand, deprecate, display, display_cuda, distances, distributions, dynamic_stack_arrays, einsum, embedding, exporting, filling_data, flatten, foreach, foreach_common, foreach_staged, functional, gates_basic, gates_blas, gates_hadamard, gates_reduce, gates_shapeshifting_concat_split, gates_shapeshifting_views, gcn, gemm, gemm_packing, gemm_prepacked, gemm_tiling, gemm_ukernel_avx, gemm_ukernel_avx2, gemm_ukernel_avx512, gemm_ukernel_avx_fma, gemm_ukernel_dispatch, gemm_ukernel_generator, gemm_ukernel_generic, gemm_ukernel_sse, gemm_ukernel_sse2, gemm_ukernel_sse4_1, gemm_utils, global_config, gru, higher_order_applymap, higher_order_foldreduce, imdb, incl_accessors_cuda, incl_higher_order_cuda, incl_kernels_cuda, init, init_colmajor, init_copy_cpu, init_copy_cuda, init_cpu, init_cuda, init_opencl, initialization, io, io_csv, io_hdf5, io_image, io_npy, io_stream_readers, kde, kdtree, kmeans, lapack, least_squares, least_squares_lapack, linear, linear_algebra, linear_systems, math_functions, math_ops_fusion, maxpool2D, mean_square_error_loss, memory, memory_optimization_hints, ml, mnist, naive_l2_gemv, neighbors, nested_containers, nlp, nn, nn_dsl, nn_primitives, nnp_activation, nnp_conv2d_cudnn, nnp_convolution, nnp_embedding, nnp_gru, nnp_linear, nnp_maxpooling, nnp_numerical_gradient, nnp_sigmoid_cross_entropy, nnp_softmax, nnp_softmax_cross_entropy, nnpack, nnpack_interface, opencl_backend, opencl_global_state, openmp, operators_blas_l1, operators_blas_l1_cuda, 
operators_blas_l1_opencl, operators_blas_l2l3, operators_blas_l2l3_cuda, operators_blas_l2l3_opencl, operators_broadcasted, operators_broadcasted_cuda, operators_broadcasted_opencl, operators_comparison, operators_logical, optim_ops_fusion, optimizers, overload, p_accessors, p_accessors_macros_desugar, p_accessors_macros_read, p_accessors_macros_write, p_activation, p_checks, p_complex, p_display, p_empty_tensors, p_init_cuda, p_init_opencl, p_kernels_interface_cuda, p_kernels_interface_opencl, p_logsumexp, p_nnp_checks, p_nnp_types, p_operator_blas_l2l3, p_shapeshifting, pca, relu, selectors, sequninit, shapeshifting, shapeshifting_cuda, shapeshifting_opencl, sigmoid, simd, softmax, solve_lapack, special_matrices, stats, std_version_types, syntactic_sugar, tanh, tensor, tensor_compare_helper, tensor_cuda, tensor_opencl, tokenizers, triangular, ufunc, util.

API symbols

+
`!=.`:
+
`$`:
+
`&`:
+
`*.=`:
+
`*.`:
+
`*=`:
+
`*`:
+
`+.=`:
+
`+.`:
+
`+=`:
+
`+`:
+
`-.=`:
+
`-.`:
+
`-=`:
+
`-`:
+
`.!=`:
+
`.*`:
+
`.+`:
+
`.-`:
+
`...`:
+
`..<`:
+
`..^`:
+
`..`:
+
`./`:
+
`.<=`:
+
`.<`:
+
`.=*`:
+
`.=+`:
+
`.=-`:
+
`.=/`:
+
`.==`:
+
`.>=`:
+
`.>`:
+
`.^=`:
+
`.^`:
+
`/.=`:
+
`/.`:
+
`/=`:
+
`/`:
+
`<.`:
+
`<=.`:
+
`<`:
+
`==.`:
+
`==`:
+
`>.`:
+
`>=.`:
+
`@`:
+
`[]=`:
+
`[]`:
+
`^.=`:
+
`^.`:
+
`^`:
+
`_`:
+
`and`:
+
`div`:
+
`mod`:
+
`not`:
+
`or`:
+
`xor`:
+
`|+`:
+
`|-`:
+
`|`:
+
AAt:
+
abs:
+
absolute_error:
+
accuracy_score:
+
Adam:
+
add:
+
AddGate:
+
address:
+
advanceStridedIteration:
+
align_raw_data:
+
all:
+
allocCpuStorage:
+
any:
+
AnyMetric:
+
AnyTensor:
+
append:
+
apply:
+
apply2:
+
apply2_inline:
+
apply3_inline:
+
apply_inline:
+
arange:
+
arccos:
+
arccosh:
+
arcsin:
+
arcsinh:
+
arctan:
+
arctanh:
+
argmax:
+
argmax_max:
+
argmin:
+
argmin_min:
+
argsort:
+
ArrayOfSlices:
+
asContiguous:
+
asCudnnType:
+
assume_aligned:
+
asType:
+
at:
+
AtA:
+
atAxisIndex:
+
atContiguousIndex:
+
atIndex:
+
atIndexMut:
+
at_mut:
+
axis:
+
backprop:
+
Backward:
+
bc:
+
blasMM_C_eq_aAB_p_bC:
+
blasMV_y_eq_aAx_p_by:
+
box:
+
boxKernel:
+
broadcast:
+
broadcast2:
+
broadcast2Impl:
+
broadcastImpl:
+
cbrt:
+
ceil:
+
check_axis_index:
+
check_concat:
+
check_contiguous_index:
+
check_ctx:
+
check_dot_prod:
+
check_elementwise:
+
check_index:
+
check_input_target:
+
check_matmat:
+
check_matvec:
+
check_nested_elements:
+
check_reshape:
+
check_shape:
+
check_size:
+
check_squeezeAxis:
+
check_start_end:
+
check_steps:
+
check_unsqueezeAxis:
+
chunk:
+
ChunkSplitGate:
+
clamp:
+
classify:
+
clContext0:
+
clDevice0:
+
clMalloc:
+
clone:
+
clQueue0:
+
ClStorage:
+
ClTensor:
+
col2im:
+
complex:
+
Complex32:
+
Complex64:
+
concat:
+
concatMap:
+
conjugate:
+
Context:
+
contiguousImpl:
+
Conv2D:
+
conv2d:
+
Conv2DAlgorithm:
+
conv2d_backward:
+
Conv2DGate:
+
ConvAlgoSpace:
+
conv_bwd_data_algo_workspace:
+
conv_bwd_kernel_algo_workspace:
+
ConvConfig:
+
convolve:
+
ConvolveMode:
+
convOutDims:
+
copyFrom:
+
copy_from:
+
copyFrom:
+
copyFromRaw:
+
copySign:
+
correlate:
+
CorrelateMode:
+
cos:
+
cosh:
+
covariance_matrix:
+
cpu:
+
CPUFeatureX86:
+
CpuStorage:
+
cpuStorageFromBuffer:
+
create_cache_dirs_if_necessary:
+
cublas_axpy:
+
cublas_copy:
+
cublas_dot:
+
cublas_geam:
+
cublas_gemm:
+
cublas_gemmStridedBatched:
+
cublas_gemv:
+
cublasHandle0:
+
cublas_scal:
+
cuda:
+
cuda_assign_call:
+
cuda_assign_glue:
+
cuda_assignscal_call:
+
cuda_assignscal_glue:
+
cuda_binary_call:
+
cuda_binary_glue:
+
CUDA_HOF_BPG:
+
CUDA_HOF_TPB:
+
cuda_lscal_call:
+
cuda_lscal_glue:
+
cudaMalloc:
+
cuda_rscal_call:
+
cuda_rscal_glue:
+
CudaStorage:
+
cudaStream0:
+
CudaTensor:
+
cudnnHandle0:
+
cumprod:
+
cumsum:
+
CustomMetric:
+
cvtmask64_u64:
+
data=:
+
dataArray:
+
dbscan:
+
deallocCl:
+
deallocCuda:
+
deepCopy:
+
degToRad:
+
delete:
+
desugar:
+
diag:
+
diagonal:
+
diff_discrete:
+
disp2d:
+
distance:
+
distanceMatrix:
+
dot:
+
dualStridedIteration:
+
dualStridedIterationYield:
+
DynamicStackArray:
+
einsum:
+
Ellipsis:
+
elwise_div:
+
elwise_mul:
+
Embedding:
+
embedding:
+
embedding_backward:
+
EmbeddingGate:
+
enumerate:
+
enumerateAxis:
+
enumerateZip:
+
epanechnikov:
+
epanechnikovKernel:
+
erf:
+
erfc:
+
Euclidean:
+
exch_dim:
+
exp:
+
expm1:
+
export_tensor:
+
extract_cpu_simd:
+
extract_c_unit_stride:
+
extract_mr:
+
extract_nb_scalars:
+
extract_nb_vecs_nr:
+
extract_nr:
+
extract_pt:
+
eye:
+
fac:
+
fallbackMM_C_eq_aAB_p_bC:
+
FancyIndex:
+
FancyMaskAxis:
+
FancyMaskFull:
+
FancyNone:
+
FancySelectorKind:
+
FancyUnknownAxis:
+
FancyUnknownFull:
+
flatIter:
+
Flatten:
+
flatten:
+
floor:
+
floorMod:
+
fold:
+
fold_axis_inline:
+
fold_inline:
+
forEach:
+
forEachContiguous:
+
forEachContiguousSerial:
+
forEachSerial:
+
forEachStaged:
+
forEachStrided:
+
forEachStridedSerial:
+
forward:
+
frobenius_inner_prod:
+
fromBuffer:
+
full:
+
gamma:
+
Gate:
+
gauss:
+
gaussKernel:
+
gcn:
+
GCNGate:
+
GCNLayer:
+
gebb_ukernel:
+
gebb_ukernel_edge:
+
gebb_ukernel_edge_epilogue:
+
gebb_ukernel_edge_fallback:
+
gebb_ukernel_edge_float32_x86_AVX:
+
gebb_ukernel_edge_float32_x86_AVX512:
+
gebb_ukernel_edge_float32_x86_AVX_FMA:
+
gebb_ukernel_edge_float32_x86_SSE:
+
gebb_ukernel_edge_float64_x86_AVX:
+
gebb_ukernel_edge_float64_x86_AVX512:
+
gebb_ukernel_edge_float64_x86_AVX_FMA:
+
gebb_ukernel_edge_float64_x86_SSE2:
+
gebb_ukernel_edge_int32_x86_AVX2:
+
gebb_ukernel_edge_int32_x86_AVX512:
+
gebb_ukernel_edge_int32_x86_SSE2:
+
gebb_ukernel_edge_int32_x86_SSE4_1:
+
gebb_ukernel_edge_int64_x86_AVX512:
+
gebb_ukernel_edge_int64_x86_SSE2:
+
gebb_ukernel_epilogue_fallback:
+
gebb_ukernel_fallback:
+
gebb_ukernel_float32_x86_AVX:
+
gebb_ukernel_float32_x86_AVX512:
+
gebb_ukernel_float32_x86_AVX_FMA:
+
gebb_ukernel_float32_x86_SSE:
+
gebb_ukernel_float64_x86_AVX:
+
gebb_ukernel_float64_x86_AVX512:
+
gebb_ukernel_float64_x86_AVX_FMA:
+
gebb_ukernel_float64_x86_SSE2:
+
gebb_ukernel_int32_x86_AVX2:
+
gebb_ukernel_int32_x86_AVX512:
+
gebb_ukernel_int32_x86_SSE2:
+
gebb_ukernel_int32_x86_SSE4_1:
+
gebb_ukernel_int64_x86_AVX512:
+
gebb_ukernel_int64_x86_SSE2:
+
gebp_mkernel:
+
gelsd:
+
gemm:
+
gemm_nn_fallback:
+
gemm_packed:
+
gemm_prepackA:
+
gemm_prepackA_mem_required:
+
gemm_prepackA_mem_required_impl:
+
gemm_prepackB:
+
gemm_prepackB_mem_required:
+
gemm_prepackB_mem_required_impl:
+
gemm_strided:
+
gemv:
+
gen_cl_apply2:
+
gen_cl_apply3:
+
genClInfixOp:
+
genClInPlaceOp:
+
geomspace:
+
geqrf:
+
gesdd:
+
gesv:
+
get_cache_dir:
+
getContiguousIndex:
+
get_data_ptr:
+
getFancySelector:
+
getIndex:
+
get_num_tiles:
+
get_offset_ptr:
+
getrf:
+
getShape:
+
getSubType:
+
gru:
+
gru_backward:
+
gru_cell_backward:
+
gru_cell_forward:
+
gru_cell_inference:
+
gru_forward:
+
GRUGate:
+
gru_inference:
+
GRULayer:
+
HadamardGate:
+
has3DNow:
+
has3DNowEnhanced:
+
hasAbm:
+
hasAdx:
+
hasAes:
+
hasAmdv:
+
hasAvx:
+
hasAvx2:
+
hasAvx512bfloat16:
+
hasAvx512bitalg:
+
hasAvx512bw:
+
hasAvx512cd:
+
hasAvx512dq:
+
hasAvx512er:
+
hasAvx512f:
+
hasAvx512fmaps4:
+
hasAvx512ifma:
+
hasAvx512pf:
+
hasAvx512vbmi:
+
hasAvx512vbmi2:
+
hasAvx512vl:
+
hasAvx512vnni:
+
hasAvx512vnniw4:
+
hasAvx512vp2intersect:
+
hasAvx512vpopcntdq:
+
hasBmi1:
+
hasBmi2:
+
hasCas16B:
+
hasCas8B:
+
hasClflush:
+
hasClflushOpt:
+
hasClwb:
+
hasFloat16c:
+
hasFma3:
+
hasFma4:
+
hasGfni:
+
hasIntelVtx:
+
hasMmx:
+
hasMmxExt:
+
hasMovBigEndian:
+
hasMpx:
+
hasNxBit:
+
hasPclmulqdq:
+
hasPopcnt:
+
hasPrefetch:
+
hasPrefetchWT1:
+
hasRdrand:
+
hasRdseed:
+
hasSgx:
+
hasSha:
+
hasSimultaneousMultithreading:
+
hasSse:
+
hasSse2:
+
hasSse3:
+
hasSse41:
+
hasSse42:
+
hasSse4a:
+
hasSsse3:
+
hasTsxHle:
+
hasTsxRtm:
+
hasType:
+
hasVaes:
+
hasVpclmulqdq:
+
hasX87fpu:
+
hasXop:
+
high:
+
hilbert:
+
identity:
+
ijgrid:
+
im2col:
+
Im2ColGEMM:
+
im2colgemm_conv2d:
+
im2colgemm_conv2d_gradient:
+
imag:
+
imag=:
+
implDeprecatedBy:
+
index_fill:
+
index_select:
+
init:
+
initForEach:
+
initMetadataArray:
+
initSpanSlices:
+
initStridedIteration:
+
initTensorMetadata:
+
insert:
+
inShape:
+
iqr:
+
isAllInt:
+
isBool:
+
is_C_contiguous:
+
isContiguous:
+
is_F_contiguous:
+
is_grad_needed:
+
isHypervisorPresent:
+
isInt:
+
isNaN:
+
isNotNaN:
+
isOpenArray:
+
item:
+
items:
+
IterKind:
+
Iter_Values:
+
Jaccard:
+
kaiming_normal:
+
kaiming_uniform:
+
kde:
+
KDTree:
+
kdTree:
+
KernelFunc:
+
KernelKind:
+
kmeans:
+
knBox:
+
knCustom:
+
knEpanechnikov:
+
knGauss:
+
KnownSupportsCopyMem:
+
knTriangular:
+
knTrig:
+
LASER_MAXRANK:
+
LASER_MEM_ALIGN:
+
laswp:
+
layoutOnDevice:
+
least_squares_solver:
+
letsGoDeeper:
+
lgamma:
+
Linear:
+
linear:
+
linear_backward:
+
LinearGate:
+
linspace:
+
ln:
+
ln1p:
+
load_imdb:
+
load_mnist:
+
log10:
+
log2:
+
logspace:
+
logsumexp:
+
low:
+
lu_permuted:
+
m128:
+
m128d:
+
m128i:
+
m256:
+
m256d:
+
m256i:
+
m512:
+
m512d:
+
m512i:
+
mabs:
+
makeKernel:
+
makeUniversal:
+
makeUniversalLocal:
+
Manhattan:
+
map:
+
map2:
+
map2_inline:
+
map3_inline:
+
map_inline:
+
masked_axis_fill:
+
masked_axis_select:
+
masked_fill:
+
masked_fill_along_axis:
+
masked_select:
+
MatMulGate:
+
MatrixKind:
+
MatrixView:
+
max:
+
MaxPool2D:
+
maxpool2d:
+
maxpool2d_backward:
+
MaxPool2DGate:
+
MAXRANK:
+
mclamp:
+
mcopySign:
+
mean:
+
mean_absolute_error:
+
MeanGate:
+
mean_relative_error:
+
mean_squared_error:
+
median:
+
melwise_div:
+
melwise_mul:
+
menumerate:
+
menumerateZip:
+
meshgrid:
+
MeshGridIndexing:
+
Metadata:
+
MetadataArray:
+
MicroKernel:
+
min:
+
Minkowski:
+
mitems:
+
mkGenBand:
+
mkGeneral:
+
mkGenTriDiag:
+
mkPosDef:
+
mkPosDefBand:
+
mkPosDefTriDiag:
+
mkSymmetric:
+
mm256_add_epi16:
+
mm256_add_epi32:
+
mm256_add_epi64:
+
mm256_add_epi8:
+
mm256_add_pd:
+
mm256_add_ps:
+
mm256_and_ps:
+
mm256_and_si256:
+
mm256_castps256_ps128:
+
mm256_castps_si256:
+
mm256_castsi256_ps:
+
mm256_cmpgt_epi32:
+
mm256_cvtepi32_ps:
+
mm256_cvtps_epi32:
+
mm256_extractf128_ps:
+
mm256_fmadd_pd:
+
mm256_fmadd_ps:
+
mm256_i32gather_epi32:
+
mm256_load_pd:
+
mm256_load_ps:
+
mm256_load_si256:
+
mm256_loadu_pd:
+
mm256_loadu_ps:
+
mm256_loadu_si256:
+
mm256_max_ps:
+
mm256_min_ps:
+
mm256_movemask_epi8:
+
mm256_mul_epu32:
+
mm256_mullo_epi16:
+
mm256_mullo_epi32:
+
mm256_mul_pd:
+
mm256_mul_ps:
+
mm256_or_ps:
+
mm256_set1_epi16:
+
mm256_set1_epi32:
+
mm256_set1_epi64x:
+
mm256_set1_epi8:
+
mm256_set1_pd:
+
mm256_set1_ps:
+
mm256_setzero_pd:
+
mm256_setzero_ps:
+
mm256_setzero_si256:
+
mm256_shuffle_epi32:
+
mm256_slli_epi32:
+
mm256_srli_epi32:
+
mm256_srli_epi64:
+
mm256_store_pd:
+
mm256_store_ps:
+
mm256_storeu_pd:
+
mm256_storeu_ps:
+
mm256_storeu_si256:
+
mm256_sub_ps:
+
mm512_add_epi16:
+
mm512_add_epi32:
+
mm512_add_epi64:
+
mm512_add_epi8:
+
mm512_add_pd:
+
mm512_add_ps:
+
mm512_and_si512:
+
mm512_castps_si512:
+
mm512_castsi512_ps:
+
mm512_cmpgt_epi32_mask:
+
mm512_cvtepi32_ps:
+
mm512_cvtps_epi32:
+
mm512_fmadd_pd:
+
mm512_fmadd_ps:
+
mm512_i32gather_epi32:
+
mm512_load_pd:
+
mm512_load_ps:
+
mm512_load_si512:
+
mm512_loadu_pd:
+
mm512_loadu_ps:
+
mm512_loadu_si512:
+
mm512_maskz_set1_epi32:
+
mm512_max_ps:
+
mm512_min_ps:
+
mm512_movepi8_mask:
+
mm512_movm_epi32:
+
mm512_mullo_epi32:
+
mm512_mullo_epi64:
+
mm512_mul_pd:
+
mm512_mul_ps:
+
mm512_or_ps:
+
mm512_set1_epi16:
+
mm512_set1_epi32:
+
mm512_set1_epi64:
+
mm512_set1_epi8:
+
mm512_set1_pd:
+
mm512_set1_ps:
+
mm512_setzero_pd:
+
mm512_setzero_ps:
+
mm512_setzero_si512:
+
mm512_slli_epi32:
+
mm512_srli_epi32:
+
mm512_store_pd:
+
mm512_store_ps:
+
mm512_storeu_pd:
+
mm512_storeu_ps:
+
mm512_storeu_si512:
+
mm512_sub_ps:
+
mm_add_epi16:
+
mm_add_epi32:
+
mm_add_epi64:
+
mm_add_epi8:
+
mm_add_pd:
+
mm_add_ps:
+
mm_add_ss:
+
mm_and_si128:
+
mmask16:
+
mmask64:
+
mmax:
+
mm_castps_si128:
+
mm_castsi128_ps:
+
mm_cmpgt_epi32:
+
mm_cvtepi32_ps:
+
mm_cvtps_epi32:
+
mm_cvtsi128_si32:
+
mm_cvtss_f32:
+
mm_extract_epi16:
+
mm_i32gather_epi32:
+
mmin:
+
mm_load_pd:
+
mm_load_ps:
+
mm_load_si128:
+
mm_load_ss:
+
mm_loadu_pd:
+
mm_loadu_ps:
+
mm_loadu_si128:
+
mm_max_ps:
+
mm_max_ss:
+
mm_min_ps:
+
mm_min_ss:
+
mm_movehdup_ps:
+
mm_movehl_ps:
+
mm_moveldup_ps:
+
mm_movelh_ps:
+
mm_movemask_epi8:
+
mm_mul_epu32:
+
mm_mullo_epi16:
+
mm_mullo_epi32:
+
mm_mul_pd:
+
mm_mul_ps:
+
mm_or_ps:
+
mm_or_si128:
+
mm_set1_epi16:
+
mm_set1_epi32:
+
mm_set1_epi64x:
+
mm_set1_epi8:
+
mm_set1_pd:
+
mm_set1_ps:
+
mm_set_epi32:
+
mm_setzero_pd:
+
mm_setzero_ps:
+
mm_setzero_si128:
+
mm_shuffle_epi32:
+
mm_slli_epi32:
+
mm_slli_epi64:
+
mm_srli_epi32:
+
mm_srli_epi64:
+
mm_store_pd:
+
mm_store_ps:
+
mm_storeu_pd:
+
mm_storeu_ps:
+
mm_storeu_si128:
+
mm_sub_pd:
+
mm_sub_ps:
+
mnegate:
+
moveaxis:
+
mpairs:
+
mreciprocal:
+
mrelu:
+
MSELoss:
+
mse_loss:
+
msigmoid:
+
mtanh:
+
mzip:
+
naive_gemv_fallback:
+
nchw_channels:
+
nchw_height:
+
nchw_width:
+
nearestNeighbors:
+
negate:
+
network:
+
newClStorage:
+
newClTensor:
+
newContext:
+
newConv2dDesc:
+
newConvAlgoSpace:
+
newCudaStorage:
+
newCudaTensor:
+
newCudnn4DTensorDesc:
+
newCudnnConvKernelDesc:
+
newDiffs:
+
newMatrixUninitColMajor:
+
newParents:
+
newSeqUninit:
+
newSGD:
+
newTensor:
+
newTensorUninit:
+
newTensorWith:
+
newTiles:
+
NNPackAuto:
+
nnpack_conv2d:
+
nnpack_conv2d_gradient:
+
nnp_activation:
+
nnp_convolution_algorithm:
+
nnp_convolution_inference:
+
nnp_convolution_input_gradient:
+
nnp_convolution_kernel_gradient:
+
nnp_convolution_output:
+
nnp_convolution_transform_strategy:
+
nnp_convolution_transform_strategy_block_based:
+
nnp_convolution_transform_strategy_tuple_based:
+
nnp_deinitialize:
+
nnp_fully_connected_inference:
+
nnp_fully_connected_inference_f16f32:
+
nnp_fully_connected_output:
+
nnp_initialize:
+
nnp_max_pooling_output:
+
nnp_padding:
+
nnp_profile:
+
nnp_relu_input_gradient:
+
nnp_relu_output:
+
nnp_size:
+
nnp_softmax_output:
+
nnp_status:
+
nnp_status_invalid_activation:
+
nnp_status_invalid_activation_parameters:
+
nnp_status_invalid_output_subsampling:
+
Node:
+
no_grad_mode:
+
nonzero:
+
numberOne:
+
numerical_gradient:
+
Offset_Values:
+
OMP_FOR_THRESHOLD:
+
omp_get_max_threads:
+
omp_get_num_threads:
+
omp_get_thread_num:
+
OMP_MAX_REDUCE_BLOCKS:
+
omp_parallel_blocks:
+
omp_parallel_countup:
+
omp_parallel_forup:
+
omp_parallel_reduce_blocks:
+
omp_set_num_threads:
+
ones:
+
ones_like:
+
opencl:
+
Optimizer:
+
optimizer:
+
optimizerAdam:
+
optimizerSGD:
+
optimizerSGDMomentum:
+
orgqr:
+
ormqr:
+
outShape:
+
overload:
+
pack_A_mc_kc:
+
pack_B_kc_nc:
+
pairs:
+
pairwiseDistances:
+
partitionMNK:
+
Payload:
+
PayloadKind:
+
pca:
+
PCA_Detailed:
+
pca_detailed:
+
percentile:
+
permute:
+
permuteImpl:
+
phase:
+
pinv:
+
pkSeq:
+
pkVar:
+
pop:
+
prefetch:
+
PrefetchLocality:
+
PrefetchRW:
+
pretty:
+
prettyImpl:
+
product:
+
pthreadpool_t:
+
qr:
+
query:
+
query_ball_point:
+
radToDeg:
+
randomNormalTensor:
+
randomTensor:
+
rank:
+
raw_data_unaligned:
+
RawImmutableView:
+
RawMutableView:
+
read_csv:
+
readFloat32BE:
+
readFloat32LE:
+
readFloat64BE:
+
readFloat64LE:
+
read_hdf5:
+
read_image:
+
readInt32BE:
+
readInt32LE:
+
readInt64BE:
+
readInt64LE:
+
read_mnist_images:
+
read_mnist_labels:
+
read_npy:
+
readUInt16LE:
+
readUInt32BE:
+
readUInt32LE:
+
readUInt64BE:
+
readUInt64LE:
+
real:
+
real=:
+
reciprocal:
+
reduce:
+
reduce_axis_inline:
+
reduce_inline:
+
register_node:
+
relative_error:
+
relu:
+
ReluActivation:
+
relu_backward:
+
replaceNodes:
+
replaceSymsByIdents:
+
reshape:
+
ReshapeGate:
+
reshapeImpl:
+
reshape_no_copy:
+
reshape_with_copy:
+
returnEmptyIfEmpty:
+
reversed:
+
rewriteTensor_AddMultiply:
+
rewriteTensor_MultiplyAdd:
+
rewriteTensor_MultiplyAdd_inplace:
+
rewriteToTensorReshape:
+
roll:
+
round:
+
round_step_down:
+
round_step_up:
+
same:
+
set_diagonal:
+
setLen:
+
setZero:
+
SGD:
+
SGDMomentum:
+
sgn:
+
shape_to_strides:
+
sigmoid:
+
SigmoidActivation:
+
sigmoid_backward:
+
sigmoid_cross_entropy:
+
sigmoid_cross_entropy_backward:
+
SigmoidCrossEntropyLoss:
+
sin:
+
sinc:
+
sinh:
+
size:
+
Size2D:
+
SizeHW:
+
skipIfEmpty:
+
sliceDispatchImpl:
+
slicer:
+
slicerImpl:
+
slicerMut:
+
slice_typed_dispatch:
+
slice_typed_dispatch_mut:
+
slice_typed_dispatch_var:
+
SmallDiffs:
+
softmax:
+
SoftmaxActivation:
+
softmax_cross_entropy:
+
softmax_cross_entropy_backward:
+
SoftmaxCrossEntropyLoss:
+
solve:
+
sort:
+
sorted:
+
sparse_softmax_cross_entropy:
+
sparse_softmax_cross_entropy_backward:
+
SparseSoftmaxCrossEntropyLoss:
+
split:
+
sqrt:
+
square:
+
squared_error:
+
squeeze:
+
squeezeImpl:
+
stable_softmax:
+
stack:
+
std:
+
Step:
+
SteppedSlice:
+
streaming_max_sumexp:
+
stride:
+
stridedBodyTemplate:
+
stridedChunkOffset:
+
stridedCoordsIteration:
+
stridedIteration:
+
stridedIterationYield:
+
stridedVarsSetup:
+
SubGate:
+
sum:
+
SumGate:
+
SupportedDecomposition:
+
svd:
+
svd_randomized:
+
syevr:
+
symeig:
+
syrk:
+
SyrkKind:
+
tan:
+
tanh:
+
TanhActivation:
+
tanh_backward:
+
Tensor:
+
Tiles:
+
tnInner:
+
tnLeaf:
+
toArrayOfSlices:
+
toClpointer:
+
to_csv:
+
toFlatSeq:
+
toMatrixView:
+
toMetadata:
+
toMetadataArray:
+
to_ptr:
+
toRawSeq:
+
toSeq1D:
+
toSeq2D:
+
toSeq3D:
+
toSeq4D:
+
toSeq5D:
+
toTensor:
+
toUnsafeView:
+
transpose:
+
TreeNodeKind:
+
tri:
+
triangular:
+
triangularKernel:
+
trigonometric:
+
trigonometricKernel:
+
tril:
+
tril_unit_diag:
+
tril_unit_diag_mut:
+
tripleStridedIteration:
+
tripleStridedIterationYield:
+
triu:
+
trunc:
+
ukernel_generator:
+
ukernel_generic_impl:
+
ukernel_simd_impl:
+
unsafe_raw_buf:
+
unsafe_raw_offset:
+
unsqueeze:
+
unsqueezeImpl:
+
unwrap_period:
+
update:
+
valid:
+
Values:
+
vander:
+
vandermonde:
+
Variable:
+
variable:
+
variance:
+
whitespaceTokenizer:
+
withCompilerOptimHints:
+
with_diagonal:
+
withMemoryOptimHints:
+
write_bmp:
+
write_hdf5:
+
write_jpg:
+
write_npy:
+
write_png:
+
write_tga:
+
x86_AVX:
+
x86_AVX2:
+
x86_AVX512:
+
x86_AVX_FMA:
+
x86_Generic:
+
x86only:
+
x86_SSE:
+
x86_SSE2:
+
x86_SSE4_1:
+
x86_ukernel:
+
xavier_normal:
+
xavier_uniform:
+
xygrid:
+
yann_normal:
+
yann_uniform:
+
zeroGrads:
+
zeros:
+
zeros_like:
+
zip:
+
zipAxis:
+
+
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/init.html b/init.html new file mode 100644 index 000000000..4f7bfd3d9 --- /dev/null +++ b/init.html @@ -0,0 +1,575 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/init + + + + + + + + + +Arraymancer - src/arraymancer/nn/init + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/init

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Procs

+
+
+
+
proc kaiming_normal(shape: varargs[int]; T: type): Tensor[T]
+
+ +

Kaiming He initialisation for trainable layers preceding a ReLU activation. Kaiming initialization is recommended for relu activated layers.

+

Weight is sampled from a normal distribution of mean 0 and standard deviation โˆš(2/fan_in) with fan_in the number of input unit in the forward pass.

+

This preserves the magnitude of the variance of the weight during the forward pass

+

Paper:

+ + +   Source +Edit + +
+
+ +
+
+
+
proc kaiming_uniform(shape: varargs[int]; T: type): Tensor[T]
+
+ +

Kaiming He initialisation for trainable layers preceding a ReLU activation. Kaiming initialization is recommended for relu activated layers.

+

Weight is sampled from an uniform distribution of range -โˆš3 * โˆš(2/fan_in), โˆš3 * โˆš(2/fan_in) with fan_in the number of input unit in the forward pass.

+

This preserves the magnitude of the variance of the weight during the forward pass

+

Paper:

+ + +   Source +Edit + +
+
+ +
+
+
+
proc xavier_normal(shape: varargs[int]; T: type): Tensor[T]
+
+ +

Xavier Glorot initialisation for trainable layers preceding a linear activation (sigmoid, tanh). Xavier initialization is recommended for sigmoid, tanh and softsign activated layers.

+

Weight is sampled from a normal distribution of mean 0 and standard deviation โˆš(2/(fan_in+fan_out)) with fan_in the number of input units in the forward pass. and fan_out the number of input units during the backward pass (and not output units during the forward pass).

+

This provides a balance between preserving the magnitudes of the variance of the weight during the forward pass, and the backward pass.

+

Paper:

+ + +   Source +Edit + +
+
+ +
+
+
+
proc xavier_uniform(shape: varargs[int]; T: type): Tensor[T]
+
+ +

Xavier Glorot initialisation for trainable layers preceding a linear activation (sigmoid, tanh). Xavier initialization is recommended for sigmoid, tanh and softsign activated layers.

+

Weight is sampled from an uniform distribution of range -โˆš3 * โˆš(2/(fan_in+fan_out)), โˆš3 * โˆš(2/(fan_in+fan_out)) with fan_in the number of input units in the forward pass. and fan_out the number of input units during the backward pass (and not output units during the forward pass).

+

This provides a balance between preserving the magnitudes of the variance of the weight during the forward pass, and the backward pass.

+

Paper:

+ + +   Source +Edit + +
+
+ +
+
+
+
proc yann_normal(shape: varargs[int]; T: type): Tensor[T]
+
+ +

Yann Lecun initialisation for trainable layers

+

Weight is sampled from a normal distribution of mean 0 and standard deviation โˆš(1/fan_in) with fan_in the number of input unit in the forward pass.

+

This preserves the magnitude of the variance of the weight during the forward pass

+

Paper:

+ + +   Source +Edit + +
+
+ +
+
+
+
proc yann_uniform(shape: varargs[int]; T: type): Tensor[T]
+
+ +

Yann Lecun initialisation for trainable layers

+

Weight is sampled from an uniform distribution of range โˆš(3/fan_in), โˆš(3/fan_in) with fan_in the number of input unit in the forward pass.

+

This preserves the magnitude of the variance of the weight during the forward pass

+

Paper:

+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/init.idx b/init.idx new file mode 100644 index 000000000..8ef2bf52b --- /dev/null +++ b/init.idx @@ -0,0 +1,7 @@ +nimTitle init init.html module src/arraymancer/nn/init 0 +nim kaiming_uniform init.html#kaiming_uniform,varargs[int],type proc kaiming_uniform(shape: varargs[int]; T: type): Tensor[T] 100 +nim kaiming_normal init.html#kaiming_normal,varargs[int],type proc kaiming_normal(shape: varargs[int]; T: type): Tensor[T] 117 +nim xavier_uniform init.html#xavier_uniform,varargs[int],type proc xavier_uniform(shape: varargs[int]; T: type): Tensor[T] 144 +nim xavier_normal init.html#xavier_normal,varargs[int],type proc xavier_normal(shape: varargs[int]; T: type): Tensor[T] 166 +nim yann_uniform init.html#yann_uniform,varargs[int],type proc yann_uniform(shape: varargs[int]; T: type): Tensor[T] 201 +nim yann_normal init.html#yann_normal,varargs[int],type proc yann_normal(shape: varargs[int]; T: type): Tensor[T] 215 diff --git a/init_colmajor.html b/init_colmajor.html new file mode 100644 index 000000000..1fb2311f2 --- /dev/null +++ b/init_colmajor.html @@ -0,0 +1,445 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra/helpers/init_colmajor + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra/helpers/init_colmajor + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/linear_algebra/helpers/init_colmajor

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc newMatrixUninitColMajor[T](M: var Tensor[T]; rows, cols: int) {.noinit,
+    inline.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/init_colmajor.idx b/init_colmajor.idx new file mode 100644 index 000000000..4a0b61bc1 --- /dev/null +++ b/init_colmajor.idx @@ -0,0 +1,2 @@ +nimTitle init_colmajor init_colmajor.html module src/arraymancer/linear_algebra/helpers/init_colmajor 0 +nim newMatrixUninitColMajor init_colmajor.html#newMatrixUninitColMajor,Tensor[T],int,int proc newMatrixUninitColMajor[T](M: var Tensor[T]; rows, cols: int) 9 diff --git a/init_copy_cpu.html b/init_copy_cpu.html new file mode 100644 index 000000000..2249d5685 --- /dev/null +++ b/init_copy_cpu.html @@ -0,0 +1,448 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/init_copy_cpu + + + + + + + + + +Arraymancer - src/arraymancer/tensor/init_copy_cpu + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/init_copy_cpu

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc clone[T](t: Tensor[T]; layout: OrderType = rowMajor): Tensor[T] {.noinit.}
+
+ + Input:
- A tensor
+

Returns:

+
- A full copy. The copy is recreated as a contiguous tensor.
+  If the input was a slice, unused values are discarded.
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/init_copy_cpu.idx b/init_copy_cpu.idx new file mode 100644 index 000000000..0b48a0b98 --- /dev/null +++ b/init_copy_cpu.idx @@ -0,0 +1,2 @@ +nimTitle init_copy_cpu init_copy_cpu.html module src/arraymancer/tensor/init_copy_cpu 0 +nim clone init_copy_cpu.html#clone,Tensor[T],OrderType proc clone[T](t: Tensor[T]; layout: OrderType = rowMajor): Tensor[T] 21 diff --git a/init_copy_cuda.html b/init_copy_cuda.html new file mode 100644 index 000000000..bc631f48a --- /dev/null +++ b/init_copy_cuda.html @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/init_copy_cuda + + + + + + + + + +Arraymancer - src/arraymancer/tensor/init_copy_cuda + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/init_copy_cuda

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc clone[T](t: CudaTensor[T]): CudaTensor[T] {.noinit, noSideEffect.}
+
+ + Clone (deep copy) a CudaTensor. Copy will not share its data with the original. +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/init_copy_cuda.idx b/init_copy_cuda.idx new file mode 100644 index 000000000..0f8de0e02 --- /dev/null +++ b/init_copy_cuda.idx @@ -0,0 +1,2 @@ +nimTitle init_copy_cuda init_copy_cuda.html module src/arraymancer/tensor/init_copy_cuda 0 +nim clone init_copy_cuda.html#clone,CudaTensor[T] proc clone[T](t: CudaTensor[T]): CudaTensor[T] 26 diff --git a/init_cpu.html b/init_cpu.html new file mode 100644 index 000000000..abf920ab8 --- /dev/null +++ b/init_cpu.html @@ -0,0 +1,936 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/init_cpu + + + + + + + + + +Arraymancer - src/arraymancer/tensor/init_cpu + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/init_cpu

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc arange[T: SomeNumber](start, stop, step: T): Tensor[T] {.noinit.}
+
+ +

Creates a new 1d-tensor with values evenly spaced by step in the half-open interval [start, stop)

+

Resulting size is ceil((stop - start) / step)

+

โš ๏ธ Warnings: To limit floating point rounding issues, size is computed by converting to float64.

+
  • It is recommended to add a small epsilon for non-integer steps
  • +
  • float64 cannot represent exactly integers over 2^32 (~4.3 billions)
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc geomspace[T: SomeFloat](start, stop: T; num: int; endpoint = true): Tensor[
+    float] {.noinit, inline.}
+
+ +

Creates a new 1d-tensor with num values linearly spaced in a log scale (i.e. a geometric progression). This is similar to logspace, but with the interval endpoints specified directly as start, stop (endpoint == true) or in the half open interval [start, stop) (endpoint == false).

+

Resulting size is num.

+ +   Source +Edit + +
+
+ +
+
+
+
proc linspace[T: SomeNumber](start, stop: T; num: int; endpoint = true): Tensor[
+    float] {.noinit.}
+
+ +

Creates a new 1d-tensor with num values linearly spaced between the closed interval start, stop (endpoint == true) or in the half open interval [start, stop) (endpoint == false).

+

Resulting size is num.

+ +   Source +Edit + +
+
+ +
+
+
+
proc logspace[T: SomeNumber](start, stop: T; num: int; base = 10.0;
+                             endpoint = true): Tensor[float] {.noinit.}
+
+ +

Creates a new 1d-tensor with num values linearly spaced in log space of base base either in the closed interval start, stop (endpoint == true) or in the half open interval [start, stop) (endpoint == false).

+

Note that the given start, stop arguments refer to the exponents of base!

+

Resulting size is num.

+ +   Source +Edit + +
+
+ +
+
+
+
proc newTensorUninit[T](shape: Metadata): Tensor[T] {.noinit, inline.}
+
+ + Creates a new Tensor on Cpu backend Input:
- Shape of the Tensor
+- Type of its elements
+

Result:

+
- A Tensor of the proper shape with NO initialization
+

Warning โš  Tensor data is uninitialized and contains garbage.

+ +   Source +Edit + +
+
+
+
proc newTensorUninit[T](shape: varargs[int]): Tensor[T] {.noinit, inline.}
+
+ + Creates a new Tensor on Cpu backend Input:
- Shape of the Tensor
+- Type of its elements
+

Result:

+
- A Tensor of the proper shape with NO initialization
+

Warning โš  Tensor data is uninitialized and contains garbage.

+ +   Source +Edit + +
+
+
+
proc newTensorUninit[T](size: int): Tensor[T] {.noinit, inline.}
+
+ + Overload above taking varargs[int] to avoid "expression * cannot be called" errors if called in a template. Warning: This will create a 1D tensor! +   Source +Edit + +
+
+ +
+
+
+
proc newTensorWith[T](shape: Metadata; value: T): Tensor[T] {.noinit.}
+
+ + Creates a new Tensor filled with the given value Input:
- Shape of the Tensor
+- Type of its elements
+- Value to initialize its elements
+

Result:

+
- A Tensor of the proper shape initialized with
+  the given value
+ +   Source +Edit + +
+
+
+
proc newTensorWith[T](shape: varargs[int]; value: T): Tensor[T] {.noinit.}
+
+ + Creates a new Tensor filled with the given value Input:
- Shape of the Tensor
+- Type of its elements
+- Value to initialize its elements
+

Result:

+
- A Tensor of the proper shape initialized with
+  the given value
+ +   Source +Edit + +
+
+ +
+
+
+
proc ones[T: SomeNumber | Complex[float32] | Complex[float64]](shape: Metadata): Tensor[
+    T] {.noinit, inline.}
+
+ + Creates a new Tensor filled with 1 Input:
- Shape of the Tensor
+- Type of its elements
+

Result:

+
- A one-ed Tensor of the same shape
+ +   Source +Edit + +
+
+
+
proc ones[T: SomeNumber | Complex[float32] | Complex[float64]](
+    shape: varargs[int]): Tensor[T] {.noinit, inline.}
+
+ + Creates a new Tensor filled with 1 Input:
- Shape of the Tensor
+- Type of its elements
+

Result:

+
- A one-ed Tensor of the same shape
+ +   Source +Edit + +
+
+ +
+
+
+
proc ones_like[T: SomeNumber | Complex[float32] | Complex[float64]](t: Tensor[T]): Tensor[
+    T] {.noinit, inline.}
+
+ + Creates a new Tensor filled with 1 with the same shape as the input and filled with 1 Input:
- Tensor
+

Result:

+
- A one-ed Tensor of the same shape
+ +   Source +Edit + +
+
+ +
+
+
+
proc randomNormalTensor[T: SomeFloat](shape: varargs[int]; mean: T = 0;
+                                      std: T = 1): Tensor[T] {.noinit.}
+
+ +

Creates a new Tensor filled with values in the normal distribution

+

Random seed can be set by importing random and randomize(seed) Input:

+
- a shape
+- the mean (default 0)
+- the standard deviation (default 1)
+

Result:

+
- A tensor of the input shape filled with random values in the normal distribution
+ +   Source +Edit + +
+
+ +
+
+
+
proc randomTensor(shape: varargs[int]; max: int): Tensor[int] {.noinit,
+    ...raises: [], tags: [], forbids: [].}
+
+ +

Creates a new int Tensor filled with values between 0 and max (inclusive).

+

Random seed can be set by importing random and randomize(seed) Input:

+
- a shape
+- the max value possible (integer, inclusive)
+- a tensor backend
+

Result:

+
- A tensor of the input shape filled with random value between 0 and max input value (excluded)
+ +   Source +Edit + +
+
+
+
proc randomTensor[T: SomeFloat](shape: varargs[int]; max: T): Tensor[T] {.noinit.}
+
+ +

Creates a new float Tensor filled with values between 0 and max.

+

Random seed can be set by importing random and randomize(seed) Input:

+
- a shape
+- the max value possible (float)
+- a tensor backend
+

Result:

+
- A tensor of the input shape filled with random value between 0 and max input value
+ +   Source +Edit + +
+
+
+
proc randomTensor[T](shape: varargs[int]; sample_source: openArray[T]): Tensor[T] {.
+    noinit.}
+
+ +

Creates a new Tensor filled with values uniformly sampled from sample_source

+

Random seed can be set by importing random and randomize(seed) Input:

+
- a shape
+- a sample_source
+

Result:

+
- A tensor of the input shape filled with random values from ``sample_source``
+ +   Source +Edit + +
+
+
+
proc randomTensor[T](shape: varargs[int]; slice: Slice[T]): Tensor[T] {.noinit.}
+
+ +

Creates a new int Tensor filled with values in the Slice range (inclusive).

+

Random seed can be set by importing random and randomize(seed) Input:

+
- a shape
+- a range/slice
+- a tensor backend
+

Result:

+
- A tensor of the input shape filled with random value in the slice range
+ +   Source +Edit + +
+
+ +
+
+
+
proc zeros[T: SomeNumber | Complex[float32] | Complex[float64]](shape: Metadata): Tensor[
+    T] {.noinit, inline.}
+
+ +

Creates a new Tensor filled with 0

+

Input:

+
- Shape of the Tensor
+- Type of its elements
+

Result:

+
- A zero-ed Tensor of the input shape on backend Cpu
+ +   Source +Edit + +
+
+
+
proc zeros[T: SomeNumber | Complex[float32] | Complex[float64]](
+    shape: varargs[int]): Tensor[T] {.noinit, inline.}
+
+ +

Creates a new Tensor filled with 0

+

Input:

+
- Shape of the Tensor
+- Type of its elements
+

Result:

+
- A zero-ed Tensor of the input shape on backend Cpu
+ +   Source +Edit + +
+
+ +
+
+
+
proc zeros_like[T: SomeNumber | Complex[float32] | Complex[float64]](
+    t: Tensor[T]): Tensor[T] {.noinit, inline.}
+
+ + Creates a new Tensor filled with 0 with the same shape as the input Input:
- Shape of the Tensor
+- Type of its elements
+

Result:

+
- A zero-ed Tensor of the same shape
+ +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template arange[T: SomeNumber](start, stop: T): Tensor[T]
+
+ + +   Source +Edit + +
+
+
+
template arange[T: SomeNumber](stop: T): Tensor[T]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ + +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/init_cpu.idx b/init_cpu.idx new file mode 100644 index 000000000..f6c973c59 --- /dev/null +++ b/init_cpu.idx @@ -0,0 +1,29 @@ +nimTitle init_cpu init_cpu.html module src/arraymancer/tensor/init_cpu 0 +nim newTensorUninit init_cpu.html#newTensorUninit,varargs[int] proc newTensorUninit[T](shape: varargs[int]): Tensor[T] 26 +nim newTensorUninit init_cpu.html#newTensorUninit,int proc newTensorUninit[T](size: int): Tensor[T] 39 +nim newTensorUninit init_cpu.html#newTensorUninit,Metadata proc newTensorUninit[T](shape: Metadata): Tensor[T] 44 +nim newTensorWith init_cpu.html#newTensorWith,varargs[int],T proc newTensorWith[T](shape: varargs[int]; value: T): Tensor[T] 59 +nim newTensorWith init_cpu.html#newTensorWith,Metadata,T proc newTensorWith[T](shape: Metadata; value: T): Tensor[T] 80 +nim zeros init_cpu.html#zeros,varargs[int] proc zeros[T: SomeNumber | Complex[float32] | Complex[float64]](shape: varargs[int]): Tensor[\n T] 103 +nim zeros init_cpu.html#zeros,Metadata proc zeros[T: SomeNumber | Complex[float32] | Complex[float64]](shape: Metadata): Tensor[\n T] 113 +nim zeros_like init_cpu.html#zeros_like,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc zeros_like[T: SomeNumber | Complex[float32] | Complex[float64]](t: Tensor[T]): Tensor[\n T] 123 +nim ones init_cpu.html#ones,varargs[int] proc ones[T: SomeNumber | Complex[float32] | Complex[float64]](shape: varargs[int]): Tensor[\n T] 132 +nim ones init_cpu.html#ones,Metadata proc ones[T: SomeNumber | Complex[float32] | Complex[float64]](shape: Metadata): Tensor[\n T] 145 +nim ones_like init_cpu.html#ones_like,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc ones_like[T: SomeNumber | Complex[float32] | Complex[float64]](t: 
Tensor[T]): Tensor[\n T] 158 +nim arange init_cpu.html#arange,T,T,T proc arange[T: SomeNumber](start, stop, step: T): Tensor[T] 167 +nim arange init_cpu.html#arange.t,T template arange[T: SomeNumber](stop: T): Tensor[T] 197 +nim arange init_cpu.html#arange.t,T,T template arange[T: SomeNumber](start, stop: T): Tensor[T] 207 +nim randomTensor init_cpu.html#randomTensor,varargs[int],T proc randomTensor[T: SomeFloat](shape: varargs[int]; max: T): Tensor[T] 222 +nim randomTensor init_cpu.html#randomTensor,varargs[int],int proc randomTensor(shape: varargs[int]; max: int): Tensor[int] 234 +nim randomTensor init_cpu.html#randomTensor,varargs[int],Slice[T] proc randomTensor[T](shape: varargs[int]; slice: Slice[T]): Tensor[T] 246 +nim randomTensor init_cpu.html#randomTensor,varargs[int],openArray[T] proc randomTensor[T](shape: varargs[int]; sample_source: openArray[T]): Tensor[T] 258 +nim randomNormalTensor init_cpu.html#randomNormalTensor,varargs[int],T,T proc randomNormalTensor[T: SomeFloat](shape: varargs[int]; mean: T = 0; std: T = 1): Tensor[\n T] 289 +nim linspace init_cpu.html#linspace,T,T,int proc linspace[T: SomeNumber](start, stop: T; num: int; endpoint = true): Tensor[float] 306 +nim logspace init_cpu.html#logspace,T,T,int,float proc logspace[T: SomeNumber](start, stop: T; num: int; base = 10.0; endpoint = true): Tensor[\n float] 330 +nim geomspace init_cpu.html#geomspace,T,T,int proc geomspace[T: SomeFloat](start, stop: T; num: int; endpoint = true): Tensor[float] 346 +nimgrp ones init_cpu.html#ones-procs-all proc 132 +nimgrp newtensoruninit init_cpu.html#newTensorUninit-procs-all proc 26 +nimgrp zeros init_cpu.html#zeros-procs-all proc 103 +nimgrp randomtensor init_cpu.html#randomTensor-procs-all proc 222 +nimgrp newtensorwith init_cpu.html#newTensorWith-procs-all proc 59 +nimgrp arange init_cpu.html#arange-templates-all template 197 diff --git a/init_cuda.html b/init_cuda.html new file mode 100644 index 000000000..92c8a4b57 --- /dev/null +++ b/init_cuda.html @@ 
-0,0 +1,508 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/init_cuda + + + + + + + + + +Arraymancer - src/arraymancer/tensor/init_cuda + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/init_cuda

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc cpu[T: SomeFloat](t: CudaTensor[T]): Tensor[T] {.noSideEffect, noinit.}
+
+ + Convert a tensor on a Cuda device to a tensor on Cpu. +   Source +Edit + +
+
+ +
+
+
+
proc cuda[T: SomeFloat](t: Tensor[T]): CudaTensor[T] {.noinit.}
+
+ + Convert a tensor on Cpu to a tensor on a Cuda device. +   Source +Edit + +
+
+ +
+
+
+
proc ones_like[T: SomeFloat](t: CudaTensor[T]): CudaTensor[T] {.noinit, inline.}
+
+ + Creates a new CudaTensor filled with 1 with the same shape as the input and filled with 1 Input:
- A CudaTensor
+

Result:

+
- A one-ed CudaTensor of the same shape
+ +   Source +Edit + +
+
+ +
+
+
+
proc zeros_like[T: SomeFloat](t: CudaTensor[T]): CudaTensor[T] {.noinit, inline.}
+
+ + Creates a new CudaTensor filled with 0 with the same shape as the input Input:
- Shape of the CudaTensor
+- Type of its elements
+

Result:

+
- A zero-ed CudaTensor of the same shape
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/init_cuda.idx b/init_cuda.idx new file mode 100644 index 000000000..9708a12c4 --- /dev/null +++ b/init_cuda.idx @@ -0,0 +1,5 @@ +nimTitle init_cuda init_cuda.html module src/arraymancer/tensor/init_cuda 0 +nim cuda init_cuda.html#cuda,Tensor[T: SomeFloat] proc cuda[T: SomeFloat](t: Tensor[T]): CudaTensor[T] 23 +nim cpu init_cuda.html#cpu,CudaTensor[T: SomeFloat] proc cpu[T: SomeFloat](t: CudaTensor[T]): Tensor[T] 43 +nim zeros_like init_cuda.html#zeros_like,CudaTensor[T: SomeFloat] proc zeros_like[T: SomeFloat](t: CudaTensor[T]): CudaTensor[T] 62 +nim ones_like init_cuda.html#ones_like,CudaTensor[T: SomeFloat] proc ones_like[T: SomeFloat](t: CudaTensor[T]): CudaTensor[T] 73 diff --git a/init_opencl.html b/init_opencl.html new file mode 100644 index 000000000..1b74ca67b --- /dev/null +++ b/init_opencl.html @@ -0,0 +1,508 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/init_opencl + + + + + + + + + +Arraymancer - src/arraymancer/tensor/init_opencl + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/init_opencl

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc cpu[T: SomeFloat](t: ClTensor[T]): Tensor[T] {.noinit.}
+
+ + Convert a tensor on an OpenCL device to a tensor on Cpu. +   Source +Edit + +
+
+ +
+
+
+
proc ones_like[T: SomeFloat](t: ClTensor[T]): ClTensor[T] {.noinit, inline.}
+
+ + Creates a new ClTensor filled with 1 with the same shape as the input and filled with 1 Input:
- A CudaTensor
+

Result:

+
- A one-ed ClTensor of the same shape
+ +   Source +Edit + +
+
+ +
+
+
+
proc opencl[T: SomeFloat](t: Tensor[T]): ClTensor[T] {.noinit.}
+
+ + Convert a tensor on Cpu to a tensor on an OpenCL device. +   Source +Edit + +
+
+ +
+
+
+
proc zeros_like[T: SomeFloat](t: ClTensor[T]): ClTensor[T] {.noinit, inline.}
+
+ + Creates a new ClTensor filled with 0 with the same shape as the input Input:
- Shape of the CudaTensor
+- Type of its elements
+

Result:

+
- A zero-ed ClTensor of the same shape
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/init_opencl.idx b/init_opencl.idx new file mode 100644 index 000000000..b79610b2a --- /dev/null +++ b/init_opencl.idx @@ -0,0 +1,5 @@ +nimTitle init_opencl init_opencl.html module src/arraymancer/tensor/init_opencl 0 +nim opencl init_opencl.html#opencl,Tensor[T: SomeFloat] proc opencl[T: SomeFloat](t: Tensor[T]): ClTensor[T] 22 +nim cpu init_opencl.html#cpu,ClTensor[T: SomeFloat] proc cpu[T: SomeFloat](t: ClTensor[T]): Tensor[T] 43 +nim zeros_like init_opencl.html#zeros_like,ClTensor[T: SomeFloat] proc zeros_like[T: SomeFloat](t: ClTensor[T]): ClTensor[T] 68 +nim ones_like init_opencl.html#ones_like,ClTensor[T: SomeFloat] proc ones_like[T: SomeFloat](t: ClTensor[T]): ClTensor[T] 79 diff --git a/initialization.html b/initialization.html new file mode 100644 index 000000000..c71cce010 --- /dev/null +++ b/initialization.html @@ -0,0 +1,780 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/tensor/initialization + + + + + + + + + +Arraymancer - src/arraymancer/laser/tensor/initialization + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/tensor/initialization

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

Initialization and copy routines

+ +
+

Procs

+
+
+
+
proc copyFrom[T](dst: var Tensor[T]; src: Tensor[T])
+
+ +

Copy the source tensor into the destination tensor. Both should have the same shape. If destination tensor is a view only the data exposed by the view is modified.

+

This is useful to update subslices of an existing tensor.

+

โš ๏ธ Warning: The data exposed by the destination tensor will be overwritten. If destination tensor is a view, all views of that data will be changed. They however conserve their shape and strides.

+

Note: The copy is not recursive.

+ +   Source +Edit + +
+
+ +
+
+
+
proc copyFromRaw[T](dst: var Tensor[T]; buffer: ptr T; len: Natural)
+
+ + Copy data from the buffer into the destination tensor. Destination tensor size and buffer length should be the same +   Source +Edit + +
+
+ +
+
+
+
proc deepCopy[T](dst: var Tensor[T]; src: Tensor[T])
+
+ +

Performs a deep copy of y and copies it into x. Deepcopy is recursive including for ref types and custom types that implement deepCopy.

+

Note that if x was already initialized with a storage, the storage will be detached from x. This does not write into existing storage.

+ +   Source +Edit + +
+
+ +
+
+
+
proc fromBuffer[T](rawBuffer: pointer; shape: varargs[int]): Tensor[T]
+
+ + Call fromBuffer with layout = rowMajor +   Source +Edit + +
+
+
+
proc fromBuffer[T](rawBuffer: pointer; shape: varargs[int];
+                   layout: static OrderType): Tensor[T]
+
+ +

Creates a Tensor[T] from a raw pointer. Make sure that the explicit type given to this proc actually matches the data stored behind the pointer! The size derived from the given shape must match the size of the buffer!

+

Its counterpart toUnsafeView can be used to obtain ptr UncheckedArray from a Tensor.

+ +   Source +Edit + +
+
+
+
proc fromBuffer[T](rawBuffer: ptr UncheckedArray[T]; shape: varargs[int]): Tensor[
+    T]
+
+ + Call fromBuffer with layout = rowMajor +   Source +Edit + +
+
+
+
proc fromBuffer[T](rawBuffer: ptr UncheckedArray[T]; shape: varargs[int];
+                   layout: static OrderType): Tensor[T]
+
+ +

Creates a Tensor[T] from a raw buffer, cast as ptr UncheckedArray[T]. The size derived from the given shape must match the size of the buffer!

+

If you type cast a raw pointer to ptr UncheckedArray[T] before handing it to this proc, make sure to cast to the correct type as we cannot check the validity of the type!

+

Its counterpart toUnsafeView can be used to obtain ptr UncheckedArray from a Tensor.

+ +   Source +Edit + +
+
+ +
+
+
+
func initTensorMetadata(result: var Tensor; size: var int; shape: Metadata;
+                        layout: static OrderType = rowMajor)
+
+ + result metadata and size will be initialized in-place +   Source +Edit + +
+
+
+
func initTensorMetadata(result: var Tensor; size: var int;
+                        shape: openArray[int];
+                        layout: static OrderType = rowMajor)
+
+ + result metadata and size will be initialized in-place +   Source +Edit + +
+
+ +
+
+
+
func item[T](t: Tensor[T]): T {.inline.}
+
+ + Returns the value of the input Tensor as a scalar (without changing its type). This only works for Tensors (of any rank) that contain one single element. If the tensor has more than one element IndexDefect is raised. +   Source +Edit + +
+
+
+
func item[T_IN, T_OUT](t: Tensor[T_IN]; _: typedesc[T_OUT]): T_OUT
+
+ + Returns the value of the input Tensor as a scalar of the selected type. This only works for Tensors (of any rank) that contain one single element. If the tensor has more than one element IndexDefect is raised. +   Source +Edit + +
+
+ +
+
+
+
proc newTensor[T](shape: Metadata): Tensor[T]
+
+ + +   Source +Edit + +
+
+
+
proc newTensor[T](shape: varargs[int]): Tensor[T]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc setZero[T](t: var Tensor[T]; check_contiguous: static bool = true)
+
+ +

Reset/initialize the tensor data to binary zero. The tensor metadata is not touched. Input tensor must be contiguous. For seq based Tensors the underlying sequence will be reset and set back to the tensors size.

+

โš ๏ธ Warning: The data of the input tensor will be overwritten. If destination tensor is a view, all views of that data will be changed. They however conserve their shape and strides.

+ +   Source +Edit + +
+
+ +
+
+
+
func toMetadata(s: varargs[int]): Metadata {....raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc toTensor[T](a: openArray[T]): auto
+
+ + Convert an openArray to a Tensor Input:
- An array or a seq (can be nested)
+

Result:

+
- A Tensor of the same shape
+ +   Source +Edit + +
+
+ +
+
+
+
func toUnsafeView[T: KnownSupportsCopyMem](t: Tensor[T];
+    aligned: static bool = true): ptr UncheckedArray[T] {.inline.}
+
+ +

Returns an unsafe view of the valid data as a ptr UncheckedArray. Its counterpart fromBuffer can be used to create a Tensor fromptr UncheckedArray.

+

Unsafe: the pointer can outlive the input tensor.

+ +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template toMetadata(m: Metadata): Metadata
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/initialization.idx b/initialization.idx new file mode 100644 index 000000000..bae8a97b9 --- /dev/null +++ b/initialization.idx @@ -0,0 +1,23 @@ +nimTitle initialization initialization.html module src/arraymancer/laser/tensor/initialization 0 +nim toMetadata initialization.html#toMetadata,varargs[int] proc toMetadata(s: varargs[int]): Metadata 25 +nim toMetadata initialization.html#toMetadata.t,Metadata template toMetadata(m: Metadata): Metadata 30 +nim initTensorMetadata initialization.html#initTensorMetadata,Tensor,int,openArray[int],staticOrderType proc initTensorMetadata(result: var Tensor; size: var int; shape: openArray[int];\n layout: static OrderType = rowMajor) 53 +nim initTensorMetadata initialization.html#initTensorMetadata,Tensor,int,Metadata,staticOrderType proc initTensorMetadata(result: var Tensor; size: var int; shape: Metadata;\n layout: static OrderType = rowMajor) 60 +nim deepCopy initialization.html#deepCopy,Tensor[T],Tensor[T] proc deepCopy[T](dst: var Tensor[T]; src: Tensor[T]) 67 +nim copyFrom initialization.html#copyFrom,Tensor[T],Tensor[T] proc copyFrom[T](dst: var Tensor[T]; src: Tensor[T]) 101 +nim copyFromRaw initialization.html#copyFromRaw,Tensor[T],ptr.T,Natural proc copyFromRaw[T](dst: var Tensor[T]; buffer: ptr T; len: Natural) 136 +nim setZero initialization.html#setZero,Tensor[T],staticbool proc setZero[T](t: var Tensor[T]; check_contiguous: static bool = true) 158 +nim newTensor initialization.html#newTensor,varargs[int] proc newTensor[T](shape: varargs[int]): Tensor[T] 187 +nim newTensor initialization.html#newTensor,Metadata proc newTensor[T](shape: Metadata): Tensor[T] 195 +nim toTensor initialization.html#toTensor,openArray[T] proc toTensor[T](a: openArray[T]): auto 229 +nim fromBuffer initialization.html#fromBuffer,ptr.UncheckedArray[T],varargs[int],staticOrderType proc fromBuffer[T](rawBuffer: ptr UncheckedArray[T]; shape: varargs[int];\n layout: static OrderType): Tensor[T] 245 +nim fromBuffer 
initialization.html#fromBuffer,ptr.UncheckedArray[T],varargs[int] proc fromBuffer[T](rawBuffer: ptr UncheckedArray[T]; shape: varargs[int]): Tensor[T] 258 +nim fromBuffer initialization.html#fromBuffer,pointer,varargs[int],staticOrderType proc fromBuffer[T](rawBuffer: pointer; shape: varargs[int]; layout: static OrderType): Tensor[\n T] 262 +nim fromBuffer initialization.html#fromBuffer,pointer,varargs[int] proc fromBuffer[T](rawBuffer: pointer; shape: varargs[int]): Tensor[T] 272 +nim toUnsafeView initialization.html#toUnsafeView,Tensor[T: KnownSupportsCopyMem],staticbool proc toUnsafeView[T: KnownSupportsCopyMem](t: Tensor[T]; aligned: static bool = true): ptr UncheckedArray[\n T] 276 +nim item initialization.html#item,Tensor[T_IN],typedesc[T_OUT] proc item[T_IN, T_OUT](t: Tensor[T_IN]; _: typedesc[T_OUT]): T_OUT 283 +nim item initialization.html#item,Tensor[T] proc item[T](t: Tensor[T]): T 311 +nimgrp newtensor initialization.html#newTensor-procs-all proc 187 +nimgrp inittensormetadata initialization.html#initTensorMetadata-procs-all proc 53 +nimgrp frombuffer initialization.html#fromBuffer-procs-all proc 245 +nimgrp item initialization.html#item-procs-all proc 283 diff --git a/io.html b/io.html new file mode 100644 index 000000000..d44552f2e --- /dev/null +++ b/io.html @@ -0,0 +1,420 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/io + + + + + + + + + +Arraymancer - src/arraymancer/io + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/io

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+ +
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/io.idx b/io.idx new file mode 100644 index 000000000..611c297e9 --- /dev/null +++ b/io.idx @@ -0,0 +1 @@ +nimTitle io io.html module src/arraymancer/io 0 diff --git a/io_csv.html b/io_csv.html new file mode 100644 index 000000000..ae7247ea9 --- /dev/null +++ b/io_csv.html @@ -0,0 +1,485 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/io/io_csv + + + + + + + + + +Arraymancer - src/arraymancer/io/io_csv + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/io/io_csv

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Procs

+
+
+
+
proc read_csv[T: SomeNumber | bool | string](csvPath: string;
+    skipHeader = false; separator = ','; quote = '\"'): Tensor[T] {.noinit.}
+
+ +

Load a csv into a Tensor. All values must be of the same type.

+

If there is a header row, it can be skipped.

+

The reading of CSV files currently does not handle parsing a tensor created with toCsv. This is because the dimensional information becomes part of the CSV output and the parser has no option to reconstruct the correct tensor shape. Instead of a NxMx...xZ tensor we always construct a NxM tensor, where N-1 is the rank of the original tensor and M is the total size (total number of elements) of the original tensor!

+

Input:

+
  • csvPath: a path to the csvfile
  • +
  • skipHeader: should read_csv skip the first row
  • +
  • separator: a char, default ','
  • +
  • quote: a char, default '"' (single and double quotes must be escaped). Separators inside quoted strings are ignored, for example: "foo", "bar, baz" corresponds to 2 columns not 3.
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc to_csv[T](tensor: Tensor[T]; csvPath: string; separator = ',')
+
+ +

Stores a tensor in a csv file. Can handle tensors of arbitrary dimension by using a schema (= csv columns) of

+

dimension_1, dimension_2, ..., dimension_(tensor.rank), value

+

where the 'dimension_i' columns contain indices, and the actual tensor values are stored in the 'value' column.

+

For example the tensor @[@[1, 2, 3], @[4, 5, 6]].toTensor() is stored as:

+

dimension_1,dimension_2,value 0,0,1 0,1,2 0,2,3 1,0,4 1,1,5 1,2,6

+

Input:

+
  • tensor: the tensor to store
  • +
  • csvPath: output path of the csvfile
  • +
  • separator: a char, default ','
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/io_csv.idx b/io_csv.idx new file mode 100644 index 000000000..7f2a77271 --- /dev/null +++ b/io_csv.idx @@ -0,0 +1,3 @@ +nimTitle io_csv io_csv.html module src/arraymancer/io/io_csv 0 +nim read_csv io_csv.html#read_csv,string,char,char proc read_csv[T: SomeNumber | bool | string](csvPath: string; skipHeader = false;\n separator = ','; quote = '\"'): Tensor[T] 52 +nim to_csv io_csv.html#to_csv,Tensor[T],string,char proc to_csv[T](tensor: Tensor[T]; csvPath: string; separator = ',') 118 diff --git a/io_hdf5.html b/io_hdf5.html new file mode 100644 index 000000000..a5f73f60c --- /dev/null +++ b/io_hdf5.html @@ -0,0 +1,546 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/io/io_hdf5 + + + + + + + + + +Arraymancer - src/arraymancer/io/io_hdf5 + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/io/io_hdf5

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc read_hdf5[T: SomeNumber](h5f: var H5FileObj; name, group = ""; number = -1): Tensor[
+    T] {.noinit, inline.}
+
+ + wrapper around the real read_hdf5 to provide a nicer interface without having to worry about some and none +   Source +Edit + +
+
+
+
proc read_hdf5[T: SomeNumber](h5f: var H5FileObj; name, group: Option[string];
+                              number: Option[int]): Tensor[T] {.noinit.}
+
+ +

Reads a .h5 file (written by arraymancer) and returns a tensor of the specified type. If the tensor is stored in a different type in the file, it will be converted.

+

Input:

+
  • The H5 file object we read from
  • +
  • A non-generic name of the tensor to read
  • +
  • A group different from the root group in which tensor is stored
  • +
  • if generic names are used the number-th tensor to read
  • +
+

Output:

+
  • A tensor
  • +
+ +   Source +Edit + +
+
+
+
proc read_hdf5[T: SomeNumber](hdf5Path: string; name, group = ""; number = -1): Tensor[
+    T] {.noinit, inline.}
+
+ + convenience wrapper around read_hdf5 with var H5DataSet argument. opens the given H5 file for reading and then calls the read proc +   Source +Edit + +
+
+ +
+
+
+
proc write_hdf5[T: SomeNumber](h5f: var H5FileObj; t: Tensor[T];
+                               name, group: Option[string])
+
+ +

Exports a tensor to a hdf5 file To keep this a simple convenience proc, the tensor is stored in the root group of the hdf5 file under a generic name. If the name argument is given, the tensor is stored under this name instead. If the group argument is given the tensor is stored in that group instead of the root group. Note: if no name is given, we need to visit the whole file to check for existing tensors. This will introduce a small overhead!

+

Input:

+
  • The tensor to write
  • +
  • The H5 file object we write to
  • +
  • An optional name for the dataset of the tensor in the file Useful to store multiple tensors in a single HDF5 file
  • +
  • An optional name for a group to store the tensor in
  • +
+ +   Source +Edit + +
+
+
+
proc write_hdf5[T: SomeNumber](h5f: var H5FileObj; t: Tensor[T];
+                               name, group = "") {.inline.}
+
+ + wrapper around the real write_hdf5 to provide a nicer interface without having to worry about some and none +   Source +Edit + +
+
+
+
proc write_hdf5[T: SomeNumber](t: Tensor[T]; hdf5Path: string; name, group = "") {.
+    inline.}
+
+ + convenience wrapper around write_hdf5 with var H5DataSet argument. opens the given H5 file for writing and then calls the write proc +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/io_hdf5.idx b/io_hdf5.idx new file mode 100644 index 000000000..804dbf11c --- /dev/null +++ b/io_hdf5.idx @@ -0,0 +1,9 @@ +nimTitle io_hdf5 io_hdf5.html module src/arraymancer/io/io_hdf5 0 +nim read_hdf5 io_hdf5.html#read_hdf5,H5File,Option[string],Option[string],Option[int] proc read_hdf5[T: SomeNumber](h5f: var H5FileObj; name, group: Option[string];\n number: Option[int]): Tensor[T] 61 +nim read_hdf5 io_hdf5.html#read_hdf5,H5File,string,string,int proc read_hdf5[T: SomeNumber](h5f: var H5FileObj; name, group = ""; number = -1): Tensor[\n T] 114 +nim read_hdf5 io_hdf5.html#read_hdf5,string,string,string,int proc read_hdf5[T: SomeNumber](hdf5Path: string; name, group = ""; number = -1): Tensor[\n T] 125 +nim write_hdf5 io_hdf5.html#write_hdf5,H5File,Tensor[T: SomeNumber],Option[string],Option[string] proc write_hdf5[T: SomeNumber](h5f: var H5FileObj; t: Tensor[T];\n name, group: Option[string]) 135 +nim write_hdf5 io_hdf5.html#write_hdf5,H5File,Tensor[T: SomeNumber],string,string proc write_hdf5[T: SomeNumber](h5f: var H5FileObj; t: Tensor[T]; name, group = "") 176 +nim write_hdf5 io_hdf5.html#write_hdf5,Tensor[T: SomeNumber],string,string,string proc write_hdf5[T: SomeNumber](t: Tensor[T]; hdf5Path: string; name, group = "") 186 +nimgrp writehdf5 io_hdf5.html#write_hdf5-procs-all proc 135 +nimgrp readhdf5 io_hdf5.html#read_hdf5-procs-all proc 61 diff --git a/io_image.html b/io_image.html new file mode 100644 index 000000000..27950c032 --- /dev/null +++ b/io_image.html @@ -0,0 +1,544 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/io/io_image + + + + + + + + + +Arraymancer - src/arraymancer/io/io_image + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/io/io_image

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Procs

+
+
+
+
proc read_image(buffer: seq[byte]): Tensor[uint8] {.
+    ...raises: [STBIException, ValueError], tags: [], forbids: [].}
+
+ +

Reads an image from a buffer and loads it into a Tensor[uint8] of shape Channel x Height x Width. Channel is 1 for greyscale, 3 for RGB.

+

Supports JPEG, PNG, TGA, BMP, PSD, GIF, HDR, PIC, PNM See stb_image https://github.com/nothings/stb/blob/master/stb_image.h

+ +   Source +Edit + +
+
+
+
proc read_image(filepath: string): Tensor[uint8] {.
+    ...raises: [STBIException, ValueError], tags: [], forbids: [].}
+
+ +

Reads an image file and loads it into a Tensor[uint8] of shape Channel x Height x Width. Channel is 1 for greyscale, 3 for RGB.

+

Supports JPEG, PNG, TGA, BMP, PSD, GIF, HDR, PIC, PNM See stb_image https://github.com/nothings/stb/blob/master/stb_image.h

+

Usage example with conversion to 0..1 float: .. code:: nim let raw_img = read_image("path/to/image.png") let img = raw_img.map_inline: x.float32 / 255.0

+ +   Source +Edit + +
+
+ +
+
+
+
proc write_bmp(img: Tensor[uint8]; filepath: string) {....raises: [ValueError],
+    tags: [], forbids: [].}
+
+ + Create an image file from a tensor +   Source +Edit + +
+
+ +
+
+
+
proc write_jpg(img: Tensor[uint8]; filepath: string; quality = 100) {.
+    ...raises: [ValueError], tags: [], forbids: [].}
+
+ + Create a jpeg image file from a tensor +   Source +Edit + +
+
+ +
+
+
+
proc write_png(img: Tensor[uint8]; filepath: string) {....raises: [ValueError],
+    tags: [], forbids: [].}
+
+ + Create an image file from a tensor +   Source +Edit + +
+
+ +
+
+
+
proc write_tga(img: Tensor[uint8]; filepath: string) {....raises: [ValueError],
+    tags: [], forbids: [].}
+
+ + Create an image file from a tensor +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/io_image.idx b/io_image.idx new file mode 100644 index 000000000..98a566c09 --- /dev/null +++ b/io_image.idx @@ -0,0 +1,8 @@ +nimTitle io_image io_image.html module src/arraymancer/io/io_image 0 +nim read_image io_image.html#read_image,string proc read_image(filepath: string): Tensor[uint8] 20 +nim read_image io_image.html#read_image,seq[byte] proc read_image(buffer: seq[byte]): Tensor[uint8] 39 +nim write_png io_image.html#write_png,Tensor[uint8],string proc write_png(img: Tensor[uint8]; filepath: string) 72 +nim write_bmp io_image.html#write_bmp,Tensor[uint8],string proc write_bmp(img: Tensor[uint8]; filepath: string) 73 +nim write_tga io_image.html#write_tga,Tensor[uint8],string proc write_tga(img: Tensor[uint8]; filepath: string) 74 +nim write_jpg io_image.html#write_jpg,Tensor[uint8],string,int proc write_jpg(img: Tensor[uint8]; filepath: string; quality = 100) 76 +nimgrp readimage io_image.html#read_image-procs-all proc 20 diff --git a/io_npy.html b/io_npy.html new file mode 100644 index 000000000..4b7bce883 --- /dev/null +++ b/io_npy.html @@ -0,0 +1,477 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/io/io_npy + + + + + + + + + +Arraymancer - src/arraymancer/io/io_npy + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/io/io_npy

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc read_npy[T: SomeNumber](npyPath: string): Tensor[T] {.noinit.}
+
+ +

Reads a .npy file and returns a Tensor of the specified type. If the ndarray is stored in a different type inside the file, it will be converted.

+

Input:

+
  • The path to a numpy file as string
  • +
+

Output:

+
  • A tensor
  • +
+

Only integer, unsigned integer and float ndarrays are supported at the moment.

+ +   Source +Edit + +
+
+ +
+
+
+
proc write_npy[T: SomeNumber](t: Tensor[T]; npyPath: string)
+
+ +

Export a Tensor to the Numpy format

+

Input:

+
  • The tensor
  • +
  • The path to a numpy file as string
  • +
+

Only integer, unsigned integer and float ndarrays are supported at the moment.

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/io_npy.idx b/io_npy.idx new file mode 100644 index 000000000..ad8266aae --- /dev/null +++ b/io_npy.idx @@ -0,0 +1,3 @@ +nimTitle io_npy io_npy.html module src/arraymancer/io/io_npy 0 +nim read_npy io_npy.html#read_npy,string proc read_npy[T: SomeNumber](npyPath: string): Tensor[T] 64 +nim write_npy io_npy.html#write_npy,Tensor[T: SomeNumber],string proc write_npy[T: SomeNumber](t: Tensor[T]; npyPath: string) 117 diff --git a/io_stream_readers.html b/io_stream_readers.html new file mode 100644 index 000000000..c214a5f3b --- /dev/null +++ b/io_stream_readers.html @@ -0,0 +1,676 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/io/io_stream_readers + + + + + + + + + +Arraymancer - src/arraymancer/io/io_stream_readers + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/io/io_stream_readers

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Procs

+
+
+
+
proc readFloat32BE(stream: Stream): float32 {.inline,
+    ...raises: [IOError, OSError], tags: [ReadIOEffect], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc readFloat32LE(stream: Stream): float32 {.inline,
+    ...raises: [IOError, OSError], tags: [ReadIOEffect], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc readFloat64BE(stream: Stream): float64 {.inline,
+    ...raises: [IOError, OSError], tags: [ReadIOEffect], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc readFloat64LE(stream: Stream): float64 {.inline,
+    ...raises: [IOError, OSError], tags: [ReadIOEffect], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc readInt32BE(stream: Stream): int32 {.inline, ...raises: [IOError, OSError],
+    tags: [ReadIOEffect], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc readInt32LE(stream: Stream): int32 {.inline, ...raises: [IOError, OSError],
+    tags: [ReadIOEffect], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc readInt64BE(stream: Stream): int64 {.inline, ...raises: [IOError, OSError],
+    tags: [ReadIOEffect], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc readInt64LE(stream: Stream): int64 {.inline, ...raises: [IOError, OSError],
+    tags: [ReadIOEffect], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc readUInt16LE(stream: Stream): uint16 {.inline, ...raises: [IOError, OSError],
+    tags: [ReadIOEffect], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc readUInt32BE(stream: Stream): uint32 {.inline, ...raises: [IOError, OSError],
+    tags: [ReadIOEffect], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc readUInt32LE(stream: Stream): uint32 {.inline, ...raises: [IOError, OSError],
+    tags: [ReadIOEffect], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc readUInt64BE(stream: Stream): uint64 {.inline, ...raises: [IOError, OSError],
+    tags: [ReadIOEffect], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc readUInt64LE(stream: Stream): uint64 {.inline, ...raises: [IOError, OSError],
+    tags: [ReadIOEffect], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/io_stream_readers.idx b/io_stream_readers.idx new file mode 100644 index 000000000..098d695e2 --- /dev/null +++ b/io_stream_readers.idx @@ -0,0 +1,14 @@ +nimTitle io_stream_readers io_stream_readers.html module src/arraymancer/io/io_stream_readers 0 +nim readInt32BE io_stream_readers.html#readInt32BE,Stream proc readInt32BE(stream: Stream): int32 7 +nim readInt64BE io_stream_readers.html#readInt64BE,Stream proc readInt64BE(stream: Stream): int64 11 +nim readUInt32BE io_stream_readers.html#readUInt32BE,Stream proc readUInt32BE(stream: Stream): uint32 15 +nim readUInt64BE io_stream_readers.html#readUInt64BE,Stream proc readUInt64BE(stream: Stream): uint64 19 +nim readFloat32BE io_stream_readers.html#readFloat32BE,Stream proc readFloat32BE(stream: Stream): float32 23 +nim readFloat64BE io_stream_readers.html#readFloat64BE,Stream proc readFloat64BE(stream: Stream): float64 27 +nim readInt32LE io_stream_readers.html#readInt32LE,Stream proc readInt32LE(stream: Stream): int32 31 +nim readInt64LE io_stream_readers.html#readInt64LE,Stream proc readInt64LE(stream: Stream): int64 35 +nim readUInt16LE io_stream_readers.html#readUInt16LE,Stream proc readUInt16LE(stream: Stream): uint16 39 +nim readUInt32LE io_stream_readers.html#readUInt32LE,Stream proc readUInt32LE(stream: Stream): uint32 43 +nim readUInt64LE io_stream_readers.html#readUInt64LE,Stream proc readUInt64LE(stream: Stream): uint64 47 +nim readFloat32LE io_stream_readers.html#readFloat32LE,Stream proc readFloat32LE(stream: Stream): float32 51 +nim readFloat64LE io_stream_readers.html#readFloat64LE,Stream proc readFloat64LE(stream: Stream): float64 55 diff --git a/kde.html b/kde.html new file mode 100644 index 000000000..15db4cb2e --- /dev/null +++ b/kde.html @@ -0,0 +1,665 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/stats/kde + + + + + + + + + +Arraymancer - src/arraymancer/stats/kde + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/stats/kde

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
KernelFunc = proc (x, x_i, bw: float): float {.inline.}
+
+ + +   Source +Edit + +
+
+
+
KernelKind = enum
+  knCustom = "custom", knBox = "box", knTriangular = "triangular",
+  knTrig = "trigonometric", knEpanechnikov = "epanechnikov", knGauss = "gauss"
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc boxKernel(x`gensym1, x_i`gensym1, bw`gensym1: float): float {.inline,
+    ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc epanechnikovKernel(x`gensym4, x_i`gensym4, bw`gensym4: float): float {.
+    inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gaussKernel(x, x_i, bw: float): float {.inline, ...raises: [], tags: [],
+    forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc kde[T: SomeNumber; U: int | Tensor[SomeNumber] | openArray[SomeNumber]](
+    t: Tensor[T]; kernel: static KernelFunc; kernelKind = knCustom;
+    adjust: float = 1.0; samples: U = 1000; bw: float = NaN; normalize = false;
+    cutoff: float = NaN; weights: Tensor[T] = newTensor[T](0)): Tensor[float]
+
+ +

Returns the kernel density estimation for the 1D tensor t. The returned Tensor[float] contains samples elements. The input will be converted to float.

+

The bandwidth is estimated using Silverman's rule of thumb.

+

adjust can be used to scale the automatic bandwidth calculation. Note that this assumes the data is roughly normal distributed. To override the automatic bandwidth calculation, hand the bw manually. If normalize is true the result will be normalized such that the integral over it is equal to 1.

+

By default the evaluation points will be samples linearly spaced points between [min(t), max(t)]. If desired the evaluation points can be given explicitly by handing a Tensor[float] | openArray[float] as samples.

+

The kernel is the kernel function that will be used. Unless you want to use a custom kernel function, call the convenience wrapper below, which only takes a KernelKind (either as string or directly as an enum value) below, which defaults to a gaussian kernel.

+

Custom kernel functions are supported by handing a function of signature

+

KernelFunc = proc(x, x_i, bw: float): float

+

to this procedure and setting the kernelKind to knCustom. This requires to also hand a cutoff, which is the window of s[j] - t[i] <= cutoff, where s[j] is the j-th sample and t[i] the i-th input value. Only this window is considered for the kernel summation for efficiency. Set it such that the contribution of the custom kernel is very small (or 0) outside that range.

+ +   Source +Edit + +
+
+
+
proc kde[T: SomeNumber; U: KernelKind | string;
+         V: int | Tensor[SomeNumber] | openArray[SomeNumber]](t: Tensor[T];
+    kernel: U = "gauss"; adjust: float = 1.0; samples: V = 1000;
+    bw: float = NaN; normalize = false; weights: Tensor[T] = newTensor[T](0)): Tensor[
+    float]
+
+ +

This is a convenience wrapper around the above defined kde proc, which takes a kernel as a string corresponding to the string value of the KernelKind enum or a KernelKind value directly, which does not require to manually hand a kernel procedure.

+

By default a gaussian kernel is used.

+ +   Source +Edit + +
+
+ +
+
+
+
proc triangularKernel(x`gensym2, x_i`gensym2, bw`gensym2: float): float {.
+    inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc trigonometricKernel(x`gensym3, x_i`gensym3, bw`gensym3: float): float {.
+    inline, ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template makeKernel(fn: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/kde.idx b/kde.idx new file mode 100644 index 000000000..a74332f1e --- /dev/null +++ b/kde.idx @@ -0,0 +1,18 @@ +nimTitle kde kde.html module src/arraymancer/stats/kde 0 +nim knCustom kde.html#knCustom KernelKind.knCustom 23 +nim knBox kde.html#knBox KernelKind.knBox 23 +nim knTriangular kde.html#knTriangular KernelKind.knTriangular 23 +nim knTrig kde.html#knTrig KernelKind.knTrig 23 +nim knEpanechnikov kde.html#knEpanechnikov KernelKind.knEpanechnikov 23 +nim knGauss kde.html#knGauss KernelKind.knGauss 23 +nim KernelKind kde.html#KernelKind enum KernelKind 23 +nim KernelFunc kde.html#KernelFunc type KernelFunc 31 +nim makeKernel kde.html#makeKernel.t,untyped template makeKernel(fn: untyped): untyped 33 +nim boxKernel kde.html#boxKernel,float,float,float proc boxKernel(x`gensym1, x_i`gensym1, bw`gensym1: float): float 38 +nim triangularKernel kde.html#triangularKernel,float,float,float proc triangularKernel(x`gensym2, x_i`gensym2, bw`gensym2: float): float 39 +nim trigonometricKernel kde.html#trigonometricKernel,float,float,float proc trigonometricKernel(x`gensym3, x_i`gensym3, bw`gensym3: float): float 40 +nim epanechnikovKernel kde.html#epanechnikovKernel,float,float,float proc epanechnikovKernel(x`gensym4, x_i`gensym4, bw`gensym4: float): float 41 +nim gaussKernel kde.html#gaussKernel,float,float,float proc gaussKernel(x, x_i, bw: float): float 45 +nim kde kde.html#kde,Tensor[T: SomeNumber],staticKernelFunc,float,U,float,float,Tensor[T: SomeNumber] proc kde[T: SomeNumber; U: int | Tensor[SomeNumber] | openArray[SomeNumber]](\n t: Tensor[T]; kernel: static KernelFunc; kernelKind = knCustom;\n adjust: float = 1.0; samples: U = 1000; bw: float = NaN; normalize = false;\n cutoff: float = NaN; weights: Tensor[T] = newTensor[T](0)): Tensor[float] 81 +nim kde kde.html#kde,Tensor[T: SomeNumber],U,float,V,float,Tensor[T: SomeNumber] proc kde[T: SomeNumber; U: KernelKind | string;\n V: int | Tensor[SomeNumber] | openArray[SomeNumber]](t: Tensor[T];\n 
kernel: U = "gauss"; adjust: float = 1.0; samples: V = 1000;\n bw: float = NaN; normalize = false; weights: Tensor[T] = newTensor[T](0)): Tensor[\n float] 174 +nimgrp kde kde.html#kde-procs-all proc 81 diff --git a/kdtree.html b/kdtree.html new file mode 100644 index 000000000..c5a4af642 --- /dev/null +++ b/kdtree.html @@ -0,0 +1,646 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/spatial/kdtree + + + + + + + + + +Arraymancer - src/arraymancer/spatial/kdtree + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/spatial/kdtree

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
KDTree[T] = ref object
+  data*: Tensor[T]           ## k-d data stored in this tree
+  leafSize*: int             ## maximum size of elements in a leaf node, default: 16
+  k*: int                    ## dimension of a single data point
+  n*: int                    ## number of data points
+  maxes*: Tensor[T]          ## maximum values along each dimension of `n` data points
+  mins*: Tensor[T]           ## minimum values along each dimension of `n` data points
+  tree*: Node[T]             ## the root node of the tree
+  size*: int                 ## number of nodes in the tree
+  
+
+ + +   Source +Edit + +
+
+
+
Node[T] = ref object
+  level*: int                ## the level this node is at in the tree
+  id*: int                   ## a unique id for this node
+  case kind*: TreeNodeKind   ## is it a leaf node or an inner node?
+  of tnInner:
+      lesser*: Node[T]       ## nodes on the "lesser" side of `split`
+      greater*: Node[T]      ## nodes on the "greater" side of `split`
+      split_dim*: int        ## the dimension this node splits the space at
+      split*: float          ## the value at which the space is split in `split_dim`
+    
+  of tnLeaf:
+      children*: int         ## number of indices stored in this node
+      idx*: Tensor[int]      ## the indices of the input data stored in this node
+    
+  
+
+ + +   Source +Edit + +
+
+
+
TreeNodeKind = enum
+  tnLeaf, tnInner
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc clone[T](kd: KDTree[T]): KDTree[T]
+
+ + +   Source +Edit + +
+
+
+
proc clone[T](n: Node[T]): Node[T]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc kdTree[T](data: Tensor[T]; leafSize = 16; copyData = true;
+               balancedTree: static bool = true): KDTree[T]
+
+ +

Builds a k-d tree based on the input data.

+

data must be of shape (n, k) where n is the number of input data points and k the dimension of each point.

+

leafSize is the maximum number of elements to be stored in a single leaf node.

+

If balancedTree is true, we split along the most separated axis at the median point. Otherwise we split in the middle. The former leads to slightly longer build times, as we have to compute the median (which implies sorting the data along the axis), but results in a more balanced tree.

+ +   Source +Edit + +
+
+ +
+
+
+
proc query[T](tree: KDTree[T]; x: Tensor[T]; k = 1; eps = 0.0;
+              metric: typedesc[AnyMetric] = Euclidean; p = 2.0;
+              distanceUpperBound = Inf): tuple[dist: Tensor[T], idx: Tensor[int]]
+
+ +

Queries the k-d tree at point x for the k closest neighbors.

+

distanceUpperBound can be set to stop the search even if less than k neighbors have been found within that hyperradius.

+

eps is the relative epsilon for distance comparison by which distanceUpperBound is scaled.

+

metric is the distance metric to be used to compute the distances between points. By default we use the Euclidean metric.

+

If the Minkowski metric is used, p is the used power. This affects the way distances between points are computed. For certain values other metrics are recovered:

+
  • p = 1: Manhattan distance
  • +
  • p = 2: Euclidean distance
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc query_ball_point[T](tree: KDTree[T]; x: Tensor[T]; radius: float;
+                         eps = 0.0; metric: typedesc[AnyMetric] = Euclidean;
+                         p = 2.0): tuple[dist: Tensor[T], idx: Tensor[int]]
+
+ +

Queries the k-d tree around point x for all points within the hyperradius radius.

+

eps is the relative epsilon for distance comparison by which distanceUpperBound is scaled.

+

metric is the distance metric to be used to compute the distances between points. By default we use the euclidean metric.

+

If the Minkowski metric is used, p is the used power. This affects the way distances between points are computed. For certain values other metrics are recovered:

+
  • p = 1: Manhattan distance
  • +
  • p = 2: Euclidean distance
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/kdtree.idx b/kdtree.idx new file mode 100644 index 000000000..43a317dd5 --- /dev/null +++ b/kdtree.idx @@ -0,0 +1,12 @@ +nimTitle kdtree kdtree.html module src/arraymancer/spatial/kdtree 0 +nim tnLeaf kdtree.html#tnLeaf TreeNodeKind.tnLeaf 21 +nim tnInner kdtree.html#tnInner TreeNodeKind.tnInner 21 +nim TreeNodeKind kdtree.html#TreeNodeKind enum TreeNodeKind 21 +nim Node kdtree.html#Node type Node 24 +nim KDTree kdtree.html#KDTree type KDTree 38 +nim clone kdtree.html#clone,Node[T] proc clone[T](n: Node[T]): Node[T] 50 +nim clone kdtree.html#clone,KDTree[T] proc clone[T](kd: KDTree[T]): KDTree[T] 64 +nim kdTree kdtree.html#kdTree,Tensor[T],int,staticbool proc kdTree[T](data: Tensor[T]; leafSize = 16; copyData = true;\n balancedTree: static bool = true): KDTree[T] 188 +nim query kdtree.html#query,KDTree[T],Tensor[T],int,float,typedesc[AnyMetric],float proc query[T](tree: KDTree[T]; x: Tensor[T]; k = 1; eps = 0.0;\n metric: typedesc[AnyMetric] = Euclidean; p = 2.0;\n distanceUpperBound = Inf): tuple[dist: Tensor[T], idx: Tensor[int]] 371 +nim query_ball_point kdtree.html#query_ball_point,KDTree[T],Tensor[T],float,float,typedesc[AnyMetric],float proc query_ball_point[T](tree: KDTree[T]; x: Tensor[T]; radius: float; eps = 0.0;\n metric: typedesc[AnyMetric] = Euclidean; p = 2.0): tuple[\n dist: Tensor[T], idx: Tensor[int]] 399 +nimgrp clone kdtree.html#clone-procs-all proc 50 diff --git a/kmeans.html b/kmeans.html new file mode 100644 index 000000000..abffa6b1a --- /dev/null +++ b/kmeans.html @@ -0,0 +1,484 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/ml/clustering/kmeans + + + + + + + + + +Arraymancer - src/arraymancer/ml/clustering/kmeans + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/ml/clustering/kmeans

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc kmeans[T: SomeFloat](x: Tensor[T]; centroids: Tensor[T]): Tensor[int] {.
+    noinit.}
+
+ + K-Means Clustering Inputs: +

Returns:

+ + +   Source +Edit + +
+
+
+
proc kmeans[T: SomeFloat](x: Tensor[T]; n_clusters = 10; tol: float = 0.0001;
+                          n_init = 10; max_iters = 300; seed = 1000;
+                          random = false): tuple[labels: Tensor[int],
+    centroids: Tensor[T], inertia: T] {.noinit.}
+
+ + K-Means Clustering Inputs:
  • x: A matrix of shape Nb of observations, Nb of features
  • +
  • n_clusters: The number of cluster centroids to compute
  • +
  • tol: early stopping criterion if centroids move less than this amount on an iteration
  • +
  • max_iters: maximum total passes over x before stopping
  • +
  • seed: random seed for reproducability
  • +
+

Returns:

+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/kmeans.idx b/kmeans.idx new file mode 100644 index 000000000..8a376e42d --- /dev/null +++ b/kmeans.idx @@ -0,0 +1,4 @@ +nimTitle kmeans kmeans.html module src/arraymancer/ml/clustering/kmeans 0 +nim kmeans kmeans.html#kmeans,Tensor[T: SomeFloat],int,float,int,int,int proc kmeans[T: SomeFloat](x: Tensor[T]; n_clusters = 10; tol: float = 0.0001;\n n_init = 10; max_iters = 300; seed = 1000; random = false): tuple[\n labels: Tensor[int], centroids: Tensor[T], inertia: T] 172 +nim kmeans kmeans.html#kmeans,Tensor[T: SomeFloat],Tensor[T: SomeFloat] proc kmeans[T: SomeFloat](x: Tensor[T]; centroids: Tensor[T]): Tensor[int] 205 +nimgrp kmeans kmeans.html#kmeans-procs-all proc 172 diff --git a/lapack.html b/lapack.html new file mode 100644 index 000000000..7a687c871 --- /dev/null +++ b/lapack.html @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/lapack + + + + + + + + + +Arraymancer - src/arraymancer/tensor/lapack + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/lapack

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc frobenius_inner_prod[T](a, b: Tensor[T]): T
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/lapack.idx b/lapack.idx new file mode 100644 index 000000000..7e997a71b --- /dev/null +++ b/lapack.idx @@ -0,0 +1,2 @@ +nimTitle lapack lapack.html module src/arraymancer/tensor/lapack 0 +nim frobenius_inner_prod lapack.html#frobenius_inner_prod,Tensor[T],Tensor[T] proc frobenius_inner_prod[T](a, b: Tensor[T]): T 19 diff --git a/least_squares.html b/least_squares.html new file mode 100644 index 000000000..037ecfdeb --- /dev/null +++ b/least_squares.html @@ -0,0 +1,456 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra/least_squares + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra/least_squares + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/linear_algebra/least_squares

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc least_squares_solver[T: SomeFloat](a, b: Tensor[T]; rcond = -1.T): tuple[
+    solution: Tensor[T], residuals: Tensor[T], matrix_rank: int,
+    singular_values: Tensor[T]] {.noinit.}
+
+ +

Solves the given linear least squares problem:

+

minimize | Ax - y |

+

where the matrix A is our input tensor a and the resulting vector y is given by our input tensor b.

+

a needs to be of shape N x M. b may either be of shape N or N x K, where K represents the number of solutions to search for. One solution for each k_i is returned.

+

rcond is the condition for singular values to be considered zero, s(i) <= rcond * s(i) are treated as zero.

+

If rcond = -1 is used, it determines the size automatically (to the machine precision).

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/least_squares.idx b/least_squares.idx new file mode 100644 index 000000000..1526d1438 --- /dev/null +++ b/least_squares.idx @@ -0,0 +1,2 @@ +nimTitle least_squares least_squares.html module src/arraymancer/linear_algebra/least_squares 0 +nim least_squares_solver least_squares.html#least_squares_solver,Tensor[T: SomeFloat],Tensor[T: SomeFloat] proc least_squares_solver[T: SomeFloat](a, b: Tensor[T]; rcond = -1.T): tuple[\n solution: Tensor[T], residuals: Tensor[T], matrix_rank: int,\n singular_values: Tensor[T]] 8 diff --git a/least_squares_lapack.html b/least_squares_lapack.html new file mode 100644 index 000000000..d2f8991c1 --- /dev/null +++ b/least_squares_lapack.html @@ -0,0 +1,453 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra/helpers/least_squares_lapack + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra/helpers/least_squares_lapack + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/linear_algebra/helpers/least_squares_lapack

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc gelsd[T: SomeFloat](a, b: Tensor[T]; solution, residuals: var Tensor[T];
+                         singular_values: var Tensor[T]; matrix_rank: var int;
+                         rcond = -1.T)
+
+ +

Wrapper around LAPACK's gelsd taking tensors and preparing the required work space.

+

rcond is the condition for singular values to be considered zero, s(i) <= rcond * s(i) are treated as zero.

+

If rcond = -1 is used, it determines the size automatically (to the machine precision).

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/least_squares_lapack.idx b/least_squares_lapack.idx new file mode 100644 index 000000000..55a9b3fc4 --- /dev/null +++ b/least_squares_lapack.idx @@ -0,0 +1,2 @@ +nimTitle least_squares_lapack least_squares_lapack.html module src/arraymancer/linear_algebra/helpers/least_squares_lapack 0 +nim gelsd least_squares_lapack.html#gelsd,Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],int proc gelsd[T: SomeFloat](a, b: Tensor[T]; solution, residuals: var Tensor[T];\n singular_values: var Tensor[T]; matrix_rank: var int;\n rcond = -1.T) 18 diff --git a/linear.html b/linear.html new file mode 100644 index 000000000..68695992a --- /dev/null +++ b/linear.html @@ -0,0 +1,581 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/layers/linear + + + + + + + + + +Arraymancer - src/arraymancer/nn/layers/linear + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/layers/linear

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
Linear[T] = object
+  weight*: Variable[Tensor[T]]
+  bias*: Variable[Tensor[T]]
+
+
+ + +   Source +Edit + +
+
+
+
LinearGate[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + TODO: use fused AddMatMul gate: C <- alpha AB + beta C +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc forward[T](self: Linear[T]; input: Variable[Tensor[T]]): Variable[Tensor[T]]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc init[T](ctx: Context[Tensor[T]]; layerType: typedesc[Linear[T]];
+             numInput, numOutput: int): Linear[T]
+
+ + Initializes a linear layer with numInput input features and numOutput output features. Using Kaiming He initialisation for weights to provide decent performance in most cases. Biases are usually set to zero. +   Source +Edit + +
+
+ +
+
+
+
func inShape[T](self: Linear[T]): seq[int]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc linear[TT](input, weight: Variable[TT]; bias: Variable[TT] = nil): Variable[
+    TT]
+
+ + Input: +

Return:

+
  • Weight * x + bias
  • +
+

Future TODO: In the future the linear layer will allow different input layout so that x can also be of shape batch_size, in_features

+

Warning โš :

+
  • Experimental, there is no tests yet for this layer
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
func outShape[T](self: Linear[T]): seq[int]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/linear.idx b/linear.idx new file mode 100644 index 000000000..01ff86350 --- /dev/null +++ b/linear.idx @@ -0,0 +1,8 @@ +nimTitle linear linear.html module src/arraymancer/nn/layers/linear 0 +nim LinearGate linear.html#LinearGate type LinearGate 20 +nim linear linear.html#linear,Variable[TT],Variable[TT],Variable[TT] proc linear[TT](input, weight: Variable[TT]; bias: Variable[TT] = nil): Variable[TT] 75 +nim Linear linear.html#Linear object Linear 117 +nim init linear.html#init,Context[Tensor[T]],typedesc[Linear[T]],int,int proc init[T](ctx: Context[Tensor[T]]; layerType: typedesc[Linear[T]];\n numInput, numOutput: int): Linear[T] 121 +nim forward linear.html#forward,Linear[T],Variable[Tensor[T]] proc forward[T](self: Linear[T]; input: Variable[Tensor[T]]): Variable[Tensor[T]] 133 +nim outShape linear.html#outShape,Linear[T] proc outShape[T](self: Linear[T]): seq[int] 136 +nim inShape linear.html#inShape,Linear[T] proc inShape[T](self: Linear[T]): seq[int] 138 diff --git a/linear_algebra.html b/linear_algebra.html new file mode 100644 index 000000000..d0ace37d2 --- /dev/null +++ b/linear_algebra.html @@ -0,0 +1,420 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/linear_algebra

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+ +
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/linear_algebra.idx b/linear_algebra.idx new file mode 100644 index 000000000..4a9c40de5 --- /dev/null +++ b/linear_algebra.idx @@ -0,0 +1 @@ +nimTitle linear_algebra linear_algebra.html module src/arraymancer/linear_algebra 0 diff --git a/linear_systems.html b/linear_systems.html new file mode 100644 index 000000000..cbd072f30 --- /dev/null +++ b/linear_systems.html @@ -0,0 +1,485 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra/linear_systems + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra/linear_systems + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/linear_algebra/linear_systems

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
MatrixKind = enum
+  mkGeneral, mkGenBand, mkGenTriDiag, mkSymmetric, mkPosDef, mkPosDefBand,
+  mkPosDefTriDiag
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc solve[T: SomeFloat](a, b: Tensor[T]; kind: MatrixKind = mkGeneral): Tensor[
+    T]
+
+ +

Compute the solution X to the system of linear equations AX = B.

+

Multiple right-hand sides can be solved simultaneously.

+

Input:

+
  • a, a MxM matrix
  • +
  • b, a vector of length M, or a MxN matrix. In the latter case, each column is interpreted as a separate RHS to solve for.
  • +
+

Output:

+
  • Tensor with same shape as b
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/linear_systems.idx b/linear_systems.idx new file mode 100644 index 000000000..09fb2ab62 --- /dev/null +++ b/linear_systems.idx @@ -0,0 +1,10 @@ +nimTitle linear_systems linear_systems.html module src/arraymancer/linear_algebra/linear_systems 0 +nim mkGeneral linear_systems.html#mkGeneral MatrixKind.mkGeneral 11 +nim mkGenBand linear_systems.html#mkGenBand MatrixKind.mkGenBand 11 +nim mkGenTriDiag linear_systems.html#mkGenTriDiag MatrixKind.mkGenTriDiag 11 +nim mkSymmetric linear_systems.html#mkSymmetric MatrixKind.mkSymmetric 11 +nim mkPosDef linear_systems.html#mkPosDef MatrixKind.mkPosDef 11 +nim mkPosDefBand linear_systems.html#mkPosDefBand MatrixKind.mkPosDefBand 11 +nim mkPosDefTriDiag linear_systems.html#mkPosDefTriDiag MatrixKind.mkPosDefTriDiag 11 +nim MatrixKind linear_systems.html#MatrixKind enum MatrixKind 11 +nim solve linear_systems.html#solve,Tensor[T: SomeFloat],Tensor[T: SomeFloat],MatrixKind proc solve[T: SomeFloat](a, b: Tensor[T]; kind: MatrixKind = mkGeneral): Tensor[T] 20 diff --git a/math_functions.html b/math_functions.html new file mode 100644 index 000000000..f03b80016 --- /dev/null +++ b/math_functions.html @@ -0,0 +1,1342 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/math_functions + + + + + + + + + +Arraymancer - src/arraymancer/tensor/math_functions + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/math_functions

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
ConvolveMode = enum
+  full, same, valid
+
+ + +   Source +Edit + +
+
+
+
CorrelateMode = ConvolveMode
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc `-`[T: SomeNumber](t: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Negate all values of a Tensor +   Source +Edit + +
+
+ +
+
+
+
proc abs(t: Tensor[Complex[float32]]): Tensor[float32] {.noinit, ...raises: [],
+    tags: [], forbids: [].}
+
+ + Return a Tensor with absolute values of all elements +   Source +Edit + +
+
+
+
proc abs(t: Tensor[Complex[float64]]): Tensor[float64] {.noinit, ...raises: [],
+    tags: [], forbids: [].}
+
+ + Return a Tensor with absolute values of all elements +   Source +Edit + +
+
+
+
proc abs[T: SomeNumber](t: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Return a Tensor with absolute values of all elements +   Source +Edit + +
+
+ +
+
+
+
proc clamp[T](t: Tensor[T]; min, max: T): Tensor[T] {.noinit.}
+
+ + Return a Tensor with all elements clamped to the interval min, max. +   Source +Edit + +
+
+ +
+
+
+
proc classify[T: SomeFloat](t: Tensor[T]): Tensor[FloatClass] {.noinit.}
+
+ +

Element-wise classify function (returns a tensor with the float class of each element).

+

Returns: A FloatClass tensor where each value is one of the following:

+
  • fcNormal: value is an ordinary nonzero floating point value
  • +
  • fcSubnormal: value is a subnormal (a very small) floating point value
  • +
  • fcZero: value is zero
  • +
  • fcNegZero: value is the negative zero
  • +
  • fcNan: value is Not a Number (NaN)
  • +
  • fcInf: value is positive infinity
  • +
  • fcNegInf: value is negative infinity
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc convolve[T: SomeNumber | Complex32 | Complex64](t1, t2: Tensor[T];
+    mode = ConvolveMode.full): Tensor[T] {.noinit.}
+
+ +

Returns the discrete, linear convolution of two one-dimensional tensors.

+

The convolution operator is often seen in signal processing, where it models the effect of a linear time-invariant system on a signal (Wikipedia, โ€œConvolutionโ€, https://en.wikipedia.org/wiki/Convolution).

+

The convolution is defined as the integral of the product of the two tensors after one is reflected about the y-axis and shifted n positions, for all values of n in which the tensors overlap (since the integral will be zero outside of that window).

+

Inputs:

+
  • t1, t2: Input tensors of size N and M respectively.
  • +
  • mode: Convolution mode (full, same, valid):
    • full: This is the default mode. It returns the convolution at each point of overlap, with an output shape of (N+M-1,). At the end-points of the convolution, the signals do not overlap completely, and boundary effects may be seen.
    • +
    +
    • same: Returns an output of length max(M, N). Boundary effects are still visible.
    • +
    • valid: Returns output of length max(M, N) - min(M, N) + 1. The convolution is only given for points where the signals overlap completely. Values outside the signal boundary have no effect.
    • +
    +
  • +
+

Output:

+
  • Convolution tensor of same type as the inputs and size according to the mode.
  • +
+

Notes:

+
  • The API of this function is the same as the one of numpy.convolve.
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc copySign[T: SomeFloat](t1, t2: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Element-wise copySign function (combines 2 tensors, taking the magnitudes from t1 and the signs from t2)

+

This uses nim's copySign under the hood, and thus has the same properties. That is, it works for values which are NaN, infinity or zero (all of which can carry a sign) but does not work for integers.

+ +   Source +Edit + +
+
+ +
+
+
+
proc correlate[T: Complex32 | Complex64](t1, t2: Tensor[T];
+    mode = CorrelateMode.valid): Tensor[T] {.noinit.}
+
+ +

Returns the cross-correlation of two one-dimensional complex tensors.

+

The correlation is defined as the integral of the product of the two tensors after the second one is conjugated and shifted n positions, for all values of n in which the tensors overlap (since the integral will be zero outside of that window).

+

Inputs:

+
  • t1, t2: Input tensors of size N and M respectively.
  • +
  • mode: Correlation mode (full, same, valid):
    • full: It returns the correlation at each point of overlap, with an output shape of (N+M-1,). At the end-points of the correlation, the signals do not overlap completely, and boundary effects may be seen.
    • +
    +
    • same: Returns an output of length max(M, N). Boundary effects are still visible.
    • +
    • valid: This is the default mode. Returns output of length max(M, N) - min(M, N) + 1. The correlation is only given for points where the signals overlap completely. Values outside the signal boundary have no effect.
    • +
    +
  • +
+

Output:

+
  • Correlation tensor of same type as the inputs and size according to the mode.
  • +
+

Notes:

+
  • The API of this function is the same as the one of numpy.correlate.
  • +
  • Note that (as with np.correlate) the default correlation mode is valid, which is different than the default convolution mode (full).
  • +
+ +   Source +Edit + +
+
+
+
proc correlate[T: SomeNumber](t1, t2: Tensor[T]; mode = CorrelateMode.valid): Tensor[
+    T] {.noinit.}
+
+ +

Returns the cross-correlation of two one-dimensional real tensors.

+

The correlation is defined as the integral of the product of the two tensors after the second one is shifted n positions, for all values of n in which the tensors overlap (since the integral will be zero outside of that window).

+

Inputs:

+
  • t1, t2: Input tensors of size N and M respectively.
  • +
  • mode: Correlation mode (full, same, valid):
    • full: It returns the correlation at each point of overlap, with an output shape of (N+M-1,). At the end-points of the correlation, the signals do not overlap completely, and boundary effects may be seen.
    • +
    +
    • same: Returns an output of length max(M, N). Boundary effects are still visible.
    • +
    • valid: This is the default mode. Returns output of length max(M, N) - min(M, N) + 1. The correlation is only given for points where the signals overlap completely. Values outside the signal boundary have no effect.
    • +
    +
  • +
+

Output:

+
  • Correlation tensor of same type as the inputs and size according to the mode.
  • +
+

Notes:

+
  • The API of this function is the same as the one of numpy.correlate.
  • +
  • Note that (as with np.correlate) the default correlation mode is valid, which is different than the default convolution mode (full).
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc elwise_div[T: SomeFloat](a, b: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Element-wise division +   Source +Edit + +
+
+
+
proc elwise_div[T: SomeInteger](a, b: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Element-wise division +   Source +Edit + +
+
+ +
+
+
+
proc elwise_mul[T](a, b: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Element-wise multiply +   Source +Edit + +
+
+ +
+
+
+
proc floorMod[T: SomeNumber](t1, t2: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Broadcasted floorMod operation: floorMod(tensor, tensor). +   Source +Edit + +
+
+
+
proc floorMod[T: SomeNumber](t: Tensor[T]; val: T): Tensor[T] {.noinit.}
+
+ + Broadcasted floorMod operation: floorMod(tensor, scalar). +   Source +Edit + +
+
+
+
proc floorMod[T: SomeNumber](val: T; t: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Broadcasted floorMod operation: floorMod(scalar, tensor). +   Source +Edit + +
+
+ +
+
+
+
proc mabs[T](t: var Tensor[T])
+
+ + Return a Tensor with absolute values of all elements +   Source +Edit + +
+
+ +
+
+
+
proc max[T: SomeNumber](args: varargs[Tensor[T]]): Tensor[T] {.noinit.}
+
+ +

Compare any number of arrays and return a new array containing the element-wise maxima.

+

As in nim's built-in max procedure if one of the elements being compared is a NaN, then the non NaN element is returned.

+ +   Source +Edit + +
+
+
+
proc max[T: SomeNumber](t1, t2: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Compare two arrays and return a new array containing the element-wise maxima.

+

As in nim's built-in max procedure if one of the elements being compared is a NaN, then the non NaN element is returned.

+ +   Source +Edit + +
+
+ +
+
+
+
proc mclamp[T](t: var Tensor[T]; min, max: T)
+
+ + Update the Tensor with all elements clamped to the interval min, max. +   Source +Edit + +
+
+ +
+
+
+
proc mcopySign[T: SomeFloat](t1: var Tensor[T]; t2: Tensor[T])
+
+ +

In-place element-wise copySign function (changes the signs of the elements of t1 to match those of t2)

+

This uses nim's copySign under the hood, and thus has the same properties. That is, it works for values which are NaN, infinity or zero (all of which can carry a sign) but does not work for integers.

+ +   Source +Edit + +
+
+ +
+
+
+
proc melwise_div[T: SomeFloat](a: var Tensor[T]; b: Tensor[T])
+
+ + Element-wise division (in-place) +   Source +Edit + +
+
+
+
proc melwise_div[T: SomeInteger](a: var Tensor[T]; b: Tensor[T])
+
+ + Element-wise division (in-place) +   Source +Edit + +
+
+ +
+
+
+
proc melwise_mul[T](a: var Tensor[T]; b: Tensor[T])
+
+ + Element-wise multiply +   Source +Edit + +
+
+ +
+
+
+
proc min[T: SomeNumber](args: varargs[Tensor[T]]): Tensor[T] {.noinit.}
+
+ +

Compare any number of arrays and return a new array containing the element-wise minima.

+

As in nim's built-in min procedure if one of the elements being compared is a NaN, then the non NaN element is returned.

+ +   Source +Edit + +
+
+
+
proc min[T: SomeNumber](t1, t2: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Compare two arrays and return a new array containing the element-wise minima.

+

As in nim's built-in min procedure if one of the elements being compared is a NaN, then the non NaN element is returned.

+ +   Source +Edit + +
+
+ +
+
+
+
proc mmax[T: SomeNumber](t1: var Tensor[T]; args: varargs[Tensor[T]])
+
+ +

In-place element-wise maxima of N tensors.

+

As in nim's built-in max procedure if one of the elements being compared is a NaN, then the non NaN element is returned.

+ +   Source +Edit + +
+
+
+
proc mmax[T: SomeNumber](t1: var Tensor[T]; t2: Tensor[T])
+
+ +

In-place element-wise maxima of two tensors.

+

As in nim's built-in max procedure if one of the elements being compared is a NaN, then the non NaN element is returned.

+ +   Source +Edit + +
+
+ +
+
+
+
proc mmin[T: SomeNumber](t1: var Tensor[T]; args: varargs[Tensor[T]])
+
+ +

In-place element-wise minima of N tensors.

+

As in nim's built-in min procedure if one of the elements being compared is a NaN, then the non NaN element is returned.

+ +   Source +Edit + +
+
+
+
proc mmin[T: SomeNumber](t1: var Tensor[T]; t2: Tensor[T])
+
+ +

In-place element-wise minima of two tensors.

+

As in nim's built-in min procedure if one of the elements being compared is a NaN, then the non NaN element is returned.

+ +   Source +Edit + +
+
+ +
+
+
+
proc mnegate[T: SomeSignedInt | SomeFloat](t: var Tensor[T])
+
+ + Negate in-place all elements of the tensor (10 -> -10) +   Source +Edit + +
+
+ +
+
+
+
proc mreciprocal[T: Complex[float32] or Complex[float64]](t: var Tensor[T])
+
+ + Apply the reciprocal 1/x in-place to all elements of the Tensor +   Source +Edit + +
+
+
+
proc mreciprocal[T: SomeFloat](t: var Tensor[T])
+
+ + Apply the reciprocal 1/x in-place to all elements of the Tensor +   Source +Edit + +
+
+ +
+
+
+
proc negate[T: SomeSignedInt | SomeFloat](t: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Return a tensor with all elements negated (10 -> -10) +   Source +Edit + +
+
+ +
+
+
+
proc phase(t: Tensor[Complex[float32]]): Tensor[float32] {.noinit, ...raises: [],
+    tags: [], forbids: [].}
+
+ + Return a Tensor with phase values of all elements +   Source +Edit + +
+
+
+
proc phase(t: Tensor[Complex[float64]]): Tensor[float64] {.noinit, ...raises: [],
+    tags: [], forbids: [].}
+
+ + Return a Tensor with phase values of all elements +   Source +Edit + +
+
+ +
+
+
+
proc reciprocal[T: Complex[float32] or Complex[float64]](t: Tensor[T]): Tensor[T] {.
+    noinit.}
+
+ + Return a tensor with the reciprocal 1/x of all elements +   Source +Edit + +
+
+
+
proc reciprocal[T: SomeFloat](t: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Return a tensor with the reciprocal 1/x of all elements +   Source +Edit + +
+
+ +
+
+
+
proc sgn[T: SomeNumber](t: Tensor[T]): Tensor[int] {.noinit.}
+
+ +

Element-wise sgn function (returns a tensor with the sign of each element)

+

Returns:

+
  • -1 for negative numbers and NegInf,
    +
  • +
  • 1 for positive numbers and Inf,
  • +
  • 0 for positive zero, negative zero and NaN
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc sinc[T: SomeFloat](t: Tensor[T]; normalized: static bool = true): Tensor[T] {.
+    noinit.}
+
+ +

Return the normalized or non-normalized sinc function of a Tensor

+

For values other than 0, the normalized sinc function is equal to sin(PI * x) / (PI * x), while the non-normalized sync function is equal to sin(x) / x. sinc(0) takes the limit value 1 in both cases, making sinc not only everywhere continuous but also infinitely differentiable.

+

Inputs:

+
  • t: Input real tensor.
  • +
  • normalized: Select whether to return the normalized or non-normalized sync. This argument is static so it must be set at compile time. The default is true (i.e. to return the normalized sync).
  • +
+

Result:

+
  • New tensor with the sinc values of all the input tensor elements.
  • +
+ +   Source +Edit + +
+
+
+
proc sinc[T: SomeFloat](x: T; normalized: static bool = true): T {.inline.}
+
+ +

Return the normalized or non-normalized sinc function.

+

For values other than 0, the normalized sinc function is equal to sin(PI * x) / (PI * x), while the non-normalized sync function is equal to sin(x) / x. sinc(0) takes the limit value 1 in both cases, making sinc not only everywhere continuous but also infinitely differentiable.

+

Inputs:

+
  • t: Real input value.
  • +
  • normalized: Select whether to return the normalized or non-normalized sync. This argument is static so it must be set at compile time. The default is true (i.e. to return the normalized sync).
  • +
+

Result:

+
  • Calculated sinc value
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc square[T](t`gensym101: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc square[T](t`gensym101: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc square[T](x: T): T {.inline.}
+
+ + Return x*x +   Source +Edit + +
+
+
+
proc square[T](x: T): T {.inline.}
+
+ + Return x*x +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/math_functions.idx b/math_functions.idx new file mode 100644 index 000000000..312948ea5 --- /dev/null +++ b/math_functions.idx @@ -0,0 +1,65 @@ +nimTitle math_functions math_functions.html module src/arraymancer/tensor/math_functions 0 +nim elwise_mul math_functions.html#elwise_mul,Tensor[T],Tensor[T] proc elwise_mul[T](a, b: Tensor[T]): Tensor[T] 25 +nim melwise_mul math_functions.html#melwise_mul,Tensor[T],Tensor[T] proc melwise_mul[T](a: var Tensor[T]; b: Tensor[T]) 29 +nim elwise_div math_functions.html#elwise_div,Tensor[T: SomeInteger],Tensor[T: SomeInteger] proc elwise_div[T: SomeInteger](a, b: Tensor[T]): Tensor[T] 33 +nim elwise_div math_functions.html#elwise_div,Tensor[T: SomeFloat],Tensor[T: SomeFloat] proc elwise_div[T: SomeFloat](a, b: Tensor[T]): Tensor[T] 37 +nim melwise_div math_functions.html#melwise_div,Tensor[T: SomeInteger],Tensor[T: SomeInteger] proc melwise_div[T: SomeInteger](a: var Tensor[T]; b: Tensor[T]) 41 +nim melwise_div math_functions.html#melwise_div,Tensor[T: SomeFloat],Tensor[T: SomeFloat] proc melwise_div[T: SomeFloat](a: var Tensor[T]; b: Tensor[T]) 45 +nim reciprocal math_functions.html#reciprocal,Tensor[T: SomeFloat] proc reciprocal[T: SomeFloat](t: Tensor[T]): Tensor[T] 49 +nim mreciprocal math_functions.html#mreciprocal,Tensor[T: SomeFloat] proc mreciprocal[T: SomeFloat](t: var Tensor[T]) 53 +nim reciprocal math_functions.html#reciprocal,Tensor[T: Complex[system.float32] or Complex[system.float64]] proc reciprocal[T: Complex[float32] or Complex[float64]](t: Tensor[T]): Tensor[T] 57 +nim mreciprocal math_functions.html#mreciprocal,Tensor[T: Complex[system.float32] or Complex[system.float64]] proc mreciprocal[T: Complex[float32] or Complex[float64]](t: var Tensor[T]) 62 +nim negate math_functions.html#negate,Tensor[T: int or int8 or int16 or int32 or int64 or float or float32 or float64] proc negate[T: SomeSignedInt | SomeFloat](t: Tensor[T]): Tensor[T] 67 +nim mnegate math_functions.html#mnegate,Tensor[T: int or 
int8 or int16 or int32 or int64 or float or float32 or float64] proc mnegate[T: SomeSignedInt | SomeFloat](t: var Tensor[T]) 71 +nim `-` math_functions.html#-,Tensor[T: SomeNumber] proc `-`[T: SomeNumber](t: Tensor[T]): Tensor[T] 75 +nim abs math_functions.html#abs,Tensor[T: SomeNumber] proc abs[T: SomeNumber](t: Tensor[T]): Tensor[T] 80 +nim abs math_functions.html#abs,Tensor[Complex[float64]] proc abs(t: Tensor[Complex[float64]]): Tensor[float64] 85 +nim abs math_functions.html#abs,Tensor[Complex[float32]] proc abs(t: Tensor[Complex[float32]]): Tensor[float32] 89 +nim mabs math_functions.html#mabs,Tensor[T] proc mabs[T](t: var Tensor[T]) 93 +nim phase math_functions.html#phase,Tensor[Complex[float64]] proc phase(t: Tensor[Complex[float64]]): Tensor[float64] 99 +nim phase math_functions.html#phase,Tensor[Complex[float32]] proc phase(t: Tensor[Complex[float32]]): Tensor[float32] 103 +nim sgn math_functions.html#sgn,Tensor[T: SomeNumber] proc sgn[T: SomeNumber](t: Tensor[T]): Tensor[int] 107 +nim copySign math_functions.html#copySign,Tensor[T: SomeFloat],Tensor[T: SomeFloat] proc copySign[T: SomeFloat](t1, t2: Tensor[T]): Tensor[T] 117 +nim mcopySign math_functions.html#mcopySign,Tensor[T: SomeFloat],Tensor[T: SomeFloat] proc mcopySign[T: SomeFloat](t1: var Tensor[T]; t2: Tensor[T]) 124 +nim floorMod math_functions.html#floorMod,Tensor[T: SomeNumber],Tensor[T: SomeNumber] proc floorMod[T: SomeNumber](t1, t2: Tensor[T]): Tensor[T] 131 +nim floorMod math_functions.html#floorMod,Tensor[T: SomeNumber],T proc floorMod[T: SomeNumber](t: Tensor[T]; val: T): Tensor[T] 135 +nim floorMod math_functions.html#floorMod,T,Tensor[T: SomeNumber] proc floorMod[T: SomeNumber](val: T; t: Tensor[T]): Tensor[T] 139 +nim clamp math_functions.html#clamp,Tensor[T],T,T proc clamp[T](t: Tensor[T]; min, max: T): Tensor[T] 143 +nim mclamp math_functions.html#mclamp,Tensor[T],T,T proc mclamp[T](t: var Tensor[T]; min, max: T) 147 +nim max math_functions.html#max,Tensor[T: SomeNumber],Tensor[T: 
SomeNumber] proc max[T: SomeNumber](t1, t2: Tensor[T]): Tensor[T] 151 +nim max math_functions.html#max,varargs[Tensor[T: SomeNumber]] proc max[T: SomeNumber](args: varargs[Tensor[T]]): Tensor[T] 158 +nim mmax math_functions.html#mmax,Tensor[T: SomeNumber],Tensor[T: SomeNumber] proc mmax[T: SomeNumber](t1: var Tensor[T]; t2: Tensor[T]) 167 +nim mmax math_functions.html#mmax,Tensor[T: SomeNumber],varargs[Tensor[T: SomeNumber]] proc mmax[T: SomeNumber](t1: var Tensor[T]; args: varargs[Tensor[T]]) 174 +nim min math_functions.html#min,Tensor[T: SomeNumber],Tensor[T: SomeNumber] proc min[T: SomeNumber](t1, t2: Tensor[T]): Tensor[T] 183 +nim min math_functions.html#min,varargs[Tensor[T: SomeNumber]] proc min[T: SomeNumber](args: varargs[Tensor[T]]): Tensor[T] 190 +nim mmin math_functions.html#mmin,Tensor[T: SomeNumber],Tensor[T: SomeNumber] proc mmin[T: SomeNumber](t1: var Tensor[T]; t2: Tensor[T]) 199 +nim mmin math_functions.html#mmin,Tensor[T: SomeNumber],varargs[Tensor[T: SomeNumber]] proc mmin[T: SomeNumber](t1: var Tensor[T]; args: varargs[Tensor[T]]) 206 +nim square math_functions.html#square,T proc square[T](x: T): T 215 +nim square math_functions.html#square proc square[T](t`gensym101: Tensor[T]): Tensor[T] 55 +nim square math_functions.html#square,T_2 proc square[T](x: T): T 215 +nim square math_functions.html#square_2 proc square[T](t`gensym101: Tensor[T]): Tensor[T] 55 +nim sinc math_functions.html#sinc,T,staticbool proc sinc[T: SomeFloat](x: T; normalized: static bool = true): T 221 +nim sinc math_functions.html#sinc,Tensor[T: SomeFloat],staticbool proc sinc[T: SomeFloat](t: Tensor[T]; normalized: static bool = true): Tensor[T] 243 +nim classify math_functions.html#classify,Tensor[T: SomeFloat] proc classify[T: SomeFloat](t: Tensor[T]): Tensor[FloatClass] 261 +nim full math_functions.html#full ConvolveMode.full 275 +nim same math_functions.html#same ConvolveMode.same 275 +nim valid math_functions.html#valid ConvolveMode.valid 275 +nim ConvolveMode 
math_functions.html#ConvolveMode enum ConvolveMode 275 +nim convolve math_functions.html#convolve,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex32 or Complex64],Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex32 or Complex64] proc convolve[T: SomeNumber | Complex32 | Complex64](t1, t2: Tensor[T];\n mode = ConvolveMode.full): Tensor[T] 302 +nim CorrelateMode math_functions.html#CorrelateMode type CorrelateMode 347 +nim correlate math_functions.html#correlate,Tensor[T: SomeNumber],Tensor[T: SomeNumber] proc correlate[T: SomeNumber](t1, t2: Tensor[T]; mode = CorrelateMode.valid): Tensor[\n T] 349 +nim correlate math_functions.html#correlate,Tensor[T: Complex32 or Complex64],Tensor[T: Complex32 or Complex64] proc correlate[T: Complex32 | Complex64](t1, t2: Tensor[T];\n mode = CorrelateMode.valid): Tensor[T] 392 +nimgrp correlate math_functions.html#correlate-procs-all proc 349 +nimgrp reciprocal math_functions.html#reciprocal-procs-all proc 49 +nimgrp square math_functions.html#square-procs-all proc 215 +nimgrp phase math_functions.html#phase-procs-all proc 99 +nimgrp melwisediv math_functions.html#melwise_div-procs-all proc 41 +nimgrp max math_functions.html#max-procs-all proc 151 +nimgrp floormod math_functions.html#floorMod-procs-all proc 131 +nimgrp mreciprocal math_functions.html#mreciprocal-procs-all proc 53 +nimgrp mmin math_functions.html#mmin-procs-all proc 199 +nimgrp sinc math_functions.html#sinc-procs-all proc 221 +nimgrp abs math_functions.html#abs-procs-all proc 80 +nimgrp min math_functions.html#min-procs-all proc 183 +nimgrp mmax math_functions.html#mmax-procs-all proc 167 +nimgrp elwisediv math_functions.html#elwise_div-procs-all proc 33 diff --git a/math_ops_fusion.html b/math_ops_fusion.html new file mode 100644 index 000000000..2e8fdfdb7 --- /dev/null +++ 
b/math_ops_fusion.html @@ -0,0 +1,484 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/math_ops_fusion/math_ops_fusion + + + + + + + + + +Arraymancer - src/arraymancer/math_ops_fusion/math_ops_fusion + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/math_ops_fusion/math_ops_fusion

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

The auto-rewrite do not seem to work :/

+
+

Procs

+
+
+
+
proc expm1(x: float32): float32 {.importc: "expm1f", header: "<math.h>",
+                                  ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+
+
proc expm1(x: float64): float64 {.importc: "expm1", header: "<math.h>",
+                                  ...raises: [], tags: [], forbids: [].}
+
+ + Compute exp(x) - 1 and avoids catastrophic cancellation if x ~= 0 i.e. if x ~= 0 exp(x) - 1 ~= x but normal float rounding would do exp(0) - 1 = 0 instead. +   Source +Edit + +
+
+ +
+
+
+
proc ln1p(x: float32): float32 {.importc: "log1pf", header: "<math.h>",
+                                 ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+
+
proc ln1p(x: float64): float64 {.importc: "log1p", header: "<math.h>",
+                                 ...raises: [], tags: [], forbids: [].}
+
+ + Compute ln( 1+x ) and avoids catastrophic cancellation if x << 1 i.e. if x << 1 ln(1+x) ~= x but normal float rounding would do ln(1) = 0 instead. +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/math_ops_fusion.idx b/math_ops_fusion.idx new file mode 100644 index 000000000..75e5bf9b8 --- /dev/null +++ b/math_ops_fusion.idx @@ -0,0 +1,7 @@ +nimTitle math_ops_fusion math_ops_fusion.html module src/arraymancer/math_ops_fusion/math_ops_fusion 0 +nim ln1p math_ops_fusion.html#ln1p,float32 proc ln1p(x: float32): float32 19 +nim ln1p math_ops_fusion.html#ln1p,float64 proc ln1p(x: float64): float64 20 +nim expm1 math_ops_fusion.html#expm1,float32 proc expm1(x: float32): float32 24 +nim expm1 math_ops_fusion.html#expm1,float64 proc expm1(x: float64): float64 25 +nimgrp ln1p math_ops_fusion.html#ln1p-procs-all proc 19 +nimgrp expm1 math_ops_fusion.html#expm1-procs-all proc 24 diff --git a/maxpool2D.html b/maxpool2D.html new file mode 100644 index 000000000..37e746a9d --- /dev/null +++ b/maxpool2D.html @@ -0,0 +1,587 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/layers/maxpool2D + + + + + + + + + +Arraymancer - src/arraymancer/nn/layers/maxpool2D + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/layers/maxpool2D

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
MaxPool2D[T] = object
+  
+
+ + +   Source +Edit + +
+
+
+
MaxPool2DGate[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc forward[T](self: MaxPool2D[T]; input: Variable[Tensor[T]]): Variable[
+    Tensor[T]]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc init[T](ctx: Context[Tensor[T]]; layerType: typedesc[MaxPool2D[T]];
+             inShape: seq[int]; kernelSize, padding, stride: Size2D): MaxPool2D[
+    T]
+
+ + Creates an 2d maxpool layer. Input:
- ``inShape`` Expected shape if input in the form of ``[C, H_in, W_in]``
+- ``kernelSize`` Height and width of the pooling kernel.
+- ``padding`` Size2D tuple with height and width of the padding
+- ``stride`` Size2D tuple with height and width of the stride
+

Returns the created MaxPool2D.

+ +   Source +Edit + +
+
+ +
+
+
+
func inShape[T](self: MaxPool2D[T]): seq[int]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc maxpool2d[TT](input: Variable[TT]; kernel: Size2D;
+                   padding: Size2D = (0, 0); stride: Size2D = (1, 1)): Variable[
+    TT]
+
+ + Input:
- ``input`` Variable wrapping a 4D Tensor shape [N,C,H_in,W_in]
+- ``kernel`` Height (kH) and width (kW) of the pooling kernel.
+- ``padding`` Size2D tuple with height and width of the padding
+- ``stride`` Size2D tuple with height and width of the stride
+

Returns:

+
- A variable with a pooled 4D Tensor of shape [N,C,H_out,W_out], where
+   H_out = (H_in + (2*padding.height) - kH) / stride.height + 1
+   W_out = (W_in + (2*padding.width) - kW) / stride.width + 1
+

Warning โš :

+
  • Experimental, there is no tests yet for this layer
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
func outShape[T](self: MaxPool2D[T]): seq[int]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/maxpool2D.idx b/maxpool2D.idx new file mode 100644 index 000000000..d10cd4a00 --- /dev/null +++ b/maxpool2D.idx @@ -0,0 +1,8 @@ +nimTitle maxpool2D maxpool2D.html module src/arraymancer/nn/layers/maxpool2D 0 +nim MaxPool2DGate maxpool2D.html#MaxPool2DGate type MaxPool2DGate 20 +nim maxpool2d maxpool2D.html#maxpool2d,Variable[TT],Size2D,Size2D,Size2D proc maxpool2d[TT](input: Variable[TT]; kernel: Size2D; padding: Size2D = (0, 0);\n stride: Size2D = (1, 1)): Variable[TT] 81 +nim MaxPool2D maxpool2D.html#MaxPool2D object MaxPool2D 115 +nim init maxpool2D.html#init,Context[Tensor[T]],typedesc[MaxPool2D[T]],seq[int],Size2D,Size2D,Size2D proc init[T](ctx: Context[Tensor[T]]; layerType: typedesc[MaxPool2D[T]];\n inShape: seq[int]; kernelSize, padding, stride: Size2D): MaxPool2D[T] 121 +nim forward maxpool2D.html#forward,MaxPool2D[T],Variable[Tensor[T]] proc forward[T](self: MaxPool2D[T]; input: Variable[Tensor[T]]): Variable[Tensor[T]] 147 +nim outShape maxpool2D.html#outShape,MaxPool2D[T] proc outShape[T](self: MaxPool2D[T]): seq[int] 154 +nim inShape maxpool2D.html#inShape,MaxPool2D[T] proc inShape[T](self: MaxPool2D[T]): seq[int] 172 diff --git a/mean_square_error_loss.html b/mean_square_error_loss.html new file mode 100644 index 000000000..3ef5e2052 --- /dev/null +++ b/mean_square_error_loss.html @@ -0,0 +1,475 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/loss/mean_square_error_loss + + + + + + + + + +Arraymancer - src/arraymancer/nn/loss/mean_square_error_loss + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/loss/mean_square_error_loss

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor, ml, autograd +
+
+
+

Types

+
+
+
MSELoss[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc mse_loss[TT](input: Variable[TT]; target: TT): Variable[TT]
+
+ + Mean square error loss function. Input:
  • An input variable of predicted values of shape batch_size, features
  • +
  • The ground truth of the same shape
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/mean_square_error_loss.idx b/mean_square_error_loss.idx new file mode 100644 index 000000000..5787fda69 --- /dev/null +++ b/mean_square_error_loss.idx @@ -0,0 +1,3 @@ +nimTitle mean_square_error_loss mean_square_error_loss.html module src/arraymancer/nn/loss/mean_square_error_loss 0 +nim MSELoss mean_square_error_loss.html#MSELoss type MSELoss 19 +nim mse_loss mean_square_error_loss.html#mse_loss,Variable[TT],TT proc mse_loss[TT](input: Variable[TT]; target: TT): Variable[TT] 59 diff --git a/memory.html b/memory.html new file mode 100644 index 000000000..e0945a18b --- /dev/null +++ b/memory.html @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/private/memory + + + + + + + + + +Arraymancer - src/arraymancer/laser/private/memory + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/private/memory

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
func align_raw_data(T: typedesc; p: pointer): ptr UncheckedArray[T:type]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/memory.idx b/memory.idx new file mode 100644 index 000000000..f76e9b7eb --- /dev/null +++ b/memory.idx @@ -0,0 +1,2 @@ +nimTitle memory memory.html module src/arraymancer/laser/private/memory 0 +nim align_raw_data memory.html#align_raw_data,typedesc,pointer proc align_raw_data(T: typedesc; p: pointer): ptr UncheckedArray[T:type] 8 diff --git a/memory_optimization_hints.html b/memory_optimization_hints.html new file mode 100644 index 000000000..183f323e9 --- /dev/null +++ b/memory_optimization_hints.html @@ -0,0 +1,454 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/backend/memory_optimization_hints + + + + + + + + + +Arraymancer - src/arraymancer/tensor/backend/memory_optimization_hints + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/backend/memory_optimization_hints

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Templates

+
+
+
+
template assume_aligned[T](data: ptr T; n: csize_t): ptr T
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template withMemoryOptimHints()
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/memory_optimization_hints.idx b/memory_optimization_hints.idx new file mode 100644 index 000000000..eec7274c3 --- /dev/null +++ b/memory_optimization_hints.idx @@ -0,0 +1,3 @@ +nimTitle memory_optimization_hints memory_optimization_hints.html module src/arraymancer/tensor/backend/memory_optimization_hints 0 +nim withMemoryOptimHints memory_optimization_hints.html#withMemoryOptimHints.t template withMemoryOptimHints() 24 +nim assume_aligned memory_optimization_hints.html#assume_aligned.t,ptr.T,csize_t template assume_aligned[T](data: ptr T; n: csize_t): ptr T 41 diff --git a/ml.html b/ml.html new file mode 100644 index 000000000..ba86b1d97 --- /dev/null +++ b/ml.html @@ -0,0 +1,420 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/ml + + + + + + + + + +Arraymancer - src/arraymancer/ml + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/ml

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+ +
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/ml.idx b/ml.idx new file mode 100644 index 000000000..ea08734e3 --- /dev/null +++ b/ml.idx @@ -0,0 +1 @@ +nimTitle ml ml.html module src/arraymancer/ml 0 diff --git a/mnist.html b/mnist.html new file mode 100644 index 000000000..27ccdeead --- /dev/null +++ b/mnist.html @@ -0,0 +1,541 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/datasets/mnist + + + + + + + + + +Arraymancer - src/arraymancer/datasets/mnist + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/datasets/mnist

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc load_mnist(cache: static bool = true; fashion_mnist: static bool = false): Mnist
+
+ + Loads the MNIST dataset into a tuple with fields:
  • train_images
  • +
  • train_labels
  • +
  • test_images
  • +
  • test_labels
  • +
+

If fashion_mnist = true is provided, the Fashion MNIST dataset will be loaded instead.

+

Use the cache argument (bool) as false to cleanup the files each time.

+

The cache by default will be in ~/.cache/arraymancer on Unix and %USERNAME%/.cache/arraymancer on Windows, this can be changed with the XDG_CACHE_HOME environment variable.

+

This proc will:

+
  • download the files if necessary
  • +
  • unzip them
  • +
  • load into a tuple
  • +
  • delete the downloaded files if cache is false
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc read_mnist_images(imgsPath: string): Tensor[uint8] {.noinit,
+    ...raises: [IOError, OSError],
+    tags: [ReadDirEffect, WriteIOEffect, ReadIOEffect], forbids: [].}
+
+ + Load MNIST images into a Tensor[uint8] Input:
  • A path to a MNIST images file
  • +
+

Returns:

+
  • A tensor of images with shape (N, H, W)
    • N, number of images
    • +
    • H, height
    • +
    • W, width
    • +
    +
  • +
+

MNIST data can be downloaded here: http://yann.lecun.com/exdb/mnist/

+

Fashion MNIST data can be downloaded here: http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/

+ +   Source +Edit + +
+
+ +
+
+
+
proc read_mnist_labels(labelsPath: string): Tensor[uint8] {.noinit,
+    ...raises: [IOError, OSError],
+    tags: [ReadDirEffect, WriteIOEffect, ReadIOEffect], forbids: [].}
+
+ + Load MNIST labels into a Tensor[uint8] from a file Input:
  • A path to a MNIST labels file
  • +
+

Returns:

+
  • A tensor of labels with shape (N)
    • N, number of images
    • +
    +
  • +
+

MNIST data can be downloaded here: http://yann.lecun.com/exdb/mnist/

+

Fashion MNIST data can be downloaded here: http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/

+ +   Source +Edit + +
+
+
+
proc read_mnist_labels(stream: Stream): Tensor[uint8] {.noinit,
+    ...raises: [IOError, OSError], tags: [WriteIOEffect, ReadIOEffect], forbids: [].}
+
+ + Load MNIST labels into a Tensor[uint8] from a file Input:
  • A stream of MNIST labels data
  • +
+

Returns:

+
  • A tensor of labels with shape (N)
    • N, number of images
    • +
    +
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/mnist.idx b/mnist.idx new file mode 100644 index 000000000..1ca99cddb --- /dev/null +++ b/mnist.idx @@ -0,0 +1,6 @@ +nimTitle mnist mnist.html module src/arraymancer/datasets/mnist 0 +nim read_mnist_images mnist.html#read_mnist_images,string proc read_mnist_images(imgsPath: string): Tensor[uint8] 100 +nim read_mnist_labels mnist.html#read_mnist_labels,Stream proc read_mnist_labels(stream: Stream): Tensor[uint8] 120 +nim read_mnist_labels mnist.html#read_mnist_labels,string proc read_mnist_labels(labelsPath: string): Tensor[uint8] 139 +nim load_mnist mnist.html#load_mnist,staticbool,staticbool proc load_mnist(cache: static bool = true; fashion_mnist: static bool = false): Mnist 184 +nimgrp readmnistlabels mnist.html#read_mnist_labels-procs-all proc 120 diff --git a/naive_l2_gemv.html b/naive_l2_gemv.html new file mode 100644 index 000000000..60c2f7213 --- /dev/null +++ b/naive_l2_gemv.html @@ -0,0 +1,447 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/fallback/naive_l2_gemv + + + + + + + + + +Arraymancer - src/arraymancer/tensor/fallback/naive_l2_gemv + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/fallback/naive_l2_gemv

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc naive_gemv_fallback[T: SomeInteger](alpha: T; A: Tensor[T]; x: Tensor[T];
+    beta: T; y: var Tensor[T])
+
+ + y <- alpha * A * x + beta * y +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/naive_l2_gemv.idx b/naive_l2_gemv.idx new file mode 100644 index 000000000..7e1d80338 --- /dev/null +++ b/naive_l2_gemv.idx @@ -0,0 +1,2 @@ +nimTitle naive_l2_gemv naive_l2_gemv.html module src/arraymancer/tensor/fallback/naive_l2_gemv 0 +nim naive_gemv_fallback naive_l2_gemv.html#naive_gemv_fallback,T,Tensor[T: SomeInteger],Tensor[T: SomeInteger],T,Tensor[T: SomeInteger] proc naive_gemv_fallback[T: SomeInteger](alpha: T; A: Tensor[T]; x: Tensor[T];\n beta: T; y: var Tensor[T]) 24 diff --git a/nav.css b/nav.css new file mode 100644 index 000000000..0a35153b0 --- /dev/null +++ b/nav.css @@ -0,0 +1,138 @@ +/* +Original credits: Flyx and NimYAML: https://github.com/flyx/NimYAML/blob/e9658f22ecc25444fac361af3a0eca4f3b152578/doc/style.css +*/ + +header { + position: fixed; + top: 0; + left: 0; + right: 0; + height: 50px; + background: #111; + margin: 0; + padding: 0; + z-index: 1; + white-space: nowrap; + text-overflow: ellipsis; +} + +header a { + display: inline-block; + line-height: 50px; + font-size: large; + padding-left: 5px; + padding-right: 5px; +} + +header a.active { + background: #877 !important; + color: black !important; +} + +header span { + display: inline-block; + line-height: 30px; + font-size: large; + color: white; + padding-left: 15px; + padding-right: 5px; +} + +header span a { + display: block; +} + +header span ul { + opacity: 0; + visibility: hidden; + transition:visibility 0s linear 0.5s, opacity 0.5s linear; + position: absolute; + top: 100%; + list-style: none; + background: #111; + margin: 0; +} + +header span ul:after { + content: ""; + clear: both; + opacity: 1; + visibility: visible; + transition-delay:0s; +} + +header span:hover > ul { + opacity: 1; + visibility: visible; + transition-delay:0s; +} + +header span ul a { + padding: 0 10px; + line-height: 25px; +} + +header span ul.monospace a { + font-size: smaller; + font-family: "Source Code Pro", Menlo, "Courier New", Courier, monospace; +} + +header span ul span ul { + 
max-height: 800px;/* you can change as you need it */ + overflow:auto;/* to get scroll */ +} + + +header a:link, +header a:visited { + background: inherit; + color: #aaa; +} + +header a:hover { + background: inherit; + color: white; + text-decoration: inherit; +} + +header a:active { + background: #222; + color: white; + text-decoration: inherit; +} + +a.pagetitle:link, +a.pagetitle:hover, +a.pagetitle:active, +a.pagetitle:visited { + background: inherit; + color: white; + text-decoration: inherit; +} + +body { + margin-left: 0; + margin-right: 0; + margin-top: 55px; + margin-bottom: 5px; + padding: 0; +} + +html { + background-color: rgba(252, 248, 244, 0.75); +} + +/* necessary for links to scroll to the right position */ +dt a:before { + margin-top: -50px; + height: 50px; + content: ' '; + display: block; + visibility: hidden; +} + +object { + margin-left: auto; + margin-right: auto; + display: block; +} diff --git a/neighbors.html b/neighbors.html new file mode 100644 index 000000000..3850ed476 --- /dev/null +++ b/neighbors.html @@ -0,0 +1,456 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/spatial/neighbors + + + + + + + + + +Arraymancer - src/arraymancer/spatial/neighbors + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/spatial/neighbors

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc nearestNeighbors[T](X: Tensor[T]; eps: float; metric: typedesc[AnyMetric];
+                         p = 2.0; useNaiveNearestNeighbor: static bool = false): seq[
+    Tensor[int]]
+
+ +

Computes nearest neighbors of all points in X that are within a distance of eps under the given metric.

+

The input tensor X must be of rank 2 and contain data as:

+
  • [n_observations, n_dimensions]
  • +
+

If the Minkowski metric is used p corresponds to the power used for the metric.

+

If useNaiveNearestNeighbor is set to true a naive nearest neighbor computation is performed. This is not advised, as it is significantly slower than the default approach using a k-d tree.

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/neighbors.idx b/neighbors.idx new file mode 100644 index 000000000..21c870746 --- /dev/null +++ b/neighbors.idx @@ -0,0 +1,2 @@ +nimTitle neighbors neighbors.html module src/arraymancer/spatial/neighbors 0 +nim nearestNeighbors neighbors.html#nearestNeighbors,Tensor[T],float,typedesc[AnyMetric],float,staticbool proc nearestNeighbors[T](X: Tensor[T]; eps: float; metric: typedesc[AnyMetric];\n p = 2.0; useNaiveNearestNeighbor: static bool = false): seq[\n Tensor[int]] 14 diff --git a/nested_containers.html b/nested_containers.html new file mode 100644 index 000000000..fbaaede9f --- /dev/null +++ b/nested_containers.html @@ -0,0 +1,505 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/private/nested_containers + + + + + + + + + +Arraymancer - src/arraymancer/laser/private/nested_containers + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/private/nested_containers

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
func getShape(s: string; parent_shape = Metadata()): Metadata {....raises: [],
+    tags: [], forbids: [].}
+
+ + Handle strings / avoid interpretation as openArraychar +   Source +Edit + +
+
+
+
func getShape[T](s: openArray[T]; parent_shape = Metadata()): Metadata
+
+ + Get the shape of nested seqs/arrays Important โš : at each nesting level, only the length of the first element is used for the shape. Ensure before or after that seqs have the expected length or that the total number of elements matches the product of the dimensions. +   Source +Edit + +
+
+ +
+ +
+
+
+

Iterators

+
+
+
+
iterator flatIter(s: string): string {.noSideEffect, ...raises: [], tags: [],
+                                       forbids: [].}
+
+ + +   Source +Edit + +
+
+
+
iterator flatIter[T](s: openArray[T]): auto {.noSideEffect.}
+
+ + Inline iterator on any-depth seq or array Returns values in order +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nested_containers.idx b/nested_containers.idx new file mode 100644 index 000000000..24b84de3e --- /dev/null +++ b/nested_containers.idx @@ -0,0 +1,7 @@ +nimTitle nested_containers nested_containers.html module src/arraymancer/laser/private/nested_containers 0 +nim flatIter nested_containers.html#flatIter.i,string iterator flatIter(s: string): string 12 +nim flatIter nested_containers.html#flatIter.i,openArray[T] iterator flatIter[T](s: openArray[T]): auto 15 +nim getShape nested_containers.html#getShape,string proc getShape(s: string; parent_shape = Metadata()): Metadata 25 +nim getShape nested_containers.html#getShape,openArray[T] proc getShape[T](s: openArray[T]; parent_shape = Metadata()): Metadata 34 +nimgrp getshape nested_containers.html#getShape-procs-all proc 25 +nimgrp flatiter nested_containers.html#flatIter-iterators-all iterator 12 diff --git a/nimdoc.out.css b/nimdoc.out.css new file mode 100644 index 000000000..1417d9eff --- /dev/null +++ b/nimdoc.out.css @@ -0,0 +1,1026 @@ +/* +Stylesheet for use with Docutils/rst2html. + +See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to +customize this style sheet. 
+ +Modified from Chad Skeeters' rst2html-style +https://bitbucket.org/cskeeters/rst2html-style/ + +Modified by Boyd Greenfield and narimiran +*/ + +:root { + --primary-background: #fff; + --secondary-background: ghostwhite; + --third-background: #e8e8e8; + --info-background: #50c050; + --warning-background: #c0a000; + --error-background: #e04040; + --border: #dde; + --text: #222; + --anchor: #07b; + --anchor-focus: #607c9f; + --input-focus: #1fa0eb; + --strong: #3c3c3c; + --hint: #9A9A9A; + --nim-sprite-base64: url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAN4AAAA9CAYAAADCt9ebAAAACXBIWXMAAAsTAAALEwEAmpwYAAAFFmlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgNS42LWMxNDggNzkuMTY0MDM2LCAyMDE5LzA4LzEzLTAxOjA2OjU3ICAgICAgICAiPiA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPiA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1sbnM6cGhvdG9zaG9wPSJodHRwOi8vbnMuYWRvYmUuY29tL3Bob3Rvc2hvcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgMjEuMCAoV2luZG93cykiIHhtcDpDcmVhdGVEYXRlPSIyMDE5LTEyLTAzVDAxOjAzOjQ4KzAxOjAwIiB4bXA6TW9kaWZ5RGF0ZT0iMjAxOS0xMi0wM1QwMjoyODo0MSswMTowMCIgeG1wOk1ldGFkYXRhRGF0ZT0iMjAxOS0xMi0wM1QwMjoyODo0MSswMTowMCIgZGM6Zm9ybWF0PSJpbWFnZS9wbmciIHBob3Rvc2hvcDpDb2xvck1vZGU9IjMiIHBob3Rvc2hvcDpJQ0NQcm9maWxlPSJzUkdCIElFQzYxOTY2LTIuMSIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDozMzM0ZjAxYS0yMDExLWE1NGQtOTVjNy1iOTgxMDFlMDFhMmEiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6MzMzNGYwMWEtMjAxMS1hNTRkLTk1YzctYjk4MTAxZTAxYTJhIiB4bXBNTTpPcmlnaW5hbERvY3VtZW50SUQ9InhtcC5kaWQ6MzMzNGYwMWEtMjAxMS1hNTRkLTk1YzctYjk4MTAxZTAxYTJhIj4gPHhtcE1NOkhpc3Rvcnk+IDxyZGY6U2VxPiA8cmRmOmxp
IHN0RXZ0OmFjdGlvbj0iY3JlYXRlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDozMzM0ZjAxYS0yMDExLWE1NGQtOTVjNy1iOTgxMDFlMDFhMmEiIHN0RXZ0OndoZW49IjIwMTktMTItMDNUMDE6MDM6NDgrMDE6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCAyMS4wIChXaW5kb3dzKSIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz4PsixkAAAJ5klEQVR4nO2dfbBUZR3HP3vvxVD0zo0ACXxBuQMoQjJ1DfMl0NIhNcuSZqQhfGt6UWtK06xJexkrmywVRTQlHCIdtclC0zBJvYIvvEUgZpc3XyC7RVbKlQu1/fHdbc+uu2fPOfs85+y55/nMnBl2z+5zfnc5v/M8z+8119XVRYroAG4HfgvMT1YUR4MMAa4HLkhakCRoSVqAELwLeBY4C7gF+D6QS1QiR1ROAJ4Dzk9akKQwoXhtwL4GxvHjU8AKoNPz3leAu4HBFq+bAyZZHD9rDAK+BywDDklYlkQxoXhfAtYAEw2MVckQYBHwU6or99nA08BBFq49GngUeBIYaWH8rNEJdAOXA60Jy5I4jSreSOBKYDzwBPCJhiUqcSjwe2BWnc9NLnxuvMFrnwqsAqYBBwBfNzh2FpmNfs9jkhakWcg1aFxZiH5UL3cDnwf+Xue7BwFjgFHAOwuv24tyob3cO0LIshP4EbCn8Pq/wKvA9sLxMvCvOmPsA1yDZnHv/nEv2mM+F0IeR4m8z7lM7tMbUbzj0CxX7YfbAXwaWFJ4PRrNIu9FS9KJyEIZN68CG4DnkRJtLBw7gHHAYuDdNb77EDAjBhkHIk7xKoiqeK3IwjilzuceQJvoZjdQ/AMZaeoZiWYgBXSEwyleBW0Rv3cR9ZUO4LSI48fN2wN+bi5wJNBvUZaBSCaVy48oxpVhwDdMC5ISxpJRh6/DLGEUrxXt29YBQ+2IkwquR76ofZIWxJFegireNLSnm48skFmmDfmiVgJHJyuKI620ADOpbWEcDPwYOZKD7OmyxCTkXL+wzueOiEEWR8poQb60V4A7kLm/yFjgKeALuM1xLfYDbkX+zEGe98cAX0Oui6viF8vR7OS6urragW2UZr21wK+Aiwlu7XPoN3sYOAd4H6WH1SnA0qSEcjQnRT/e1bgnsw16kGPez4/lyCBF48oNwL+TFGSAsgCndI4qFBVvJ0owdZhjL3CnxfHzBo8+YBMyol0CHBijrKbHS/LoA7Yio9sPgJNr/QHekLGR6MffL+KP4SjnHmQxtoXNmbQP+CHyV75hYDzTIWNpWkU8iR5mq71vVsZqXgtcFqNQ/wG2IOtfD8oi6AX+Ujj+isKz8sBrnu+1okyGdmD/wnEgcDClTIdRyJRvI1cvCMciq7At4rj5eoCPAusbHCfLigda/VyKgi+AtyreMGAzykGzQQ/wO+BxSlkCuy1dq8hw5OieUjimYT+x9bHCdWwS1823Ez1EXmhgjKwrXpHzkduuanbCtzGX+NkPPAj8GincNkPjNkIO5dadUjiOB95m+BonopQpm8R58/0JJbHWy2eshVM8sRvdbyurKV4Hmoka2WA/iwwLP6d+QmzSdKC92GzK/W9R+Q3woQbHCELcN991wJcjftcpXolngKm18vFmoVonYcgDv0Qz5pqGREuOTuA8lPYUZbndh0LJNpkUqgZx33xvomim7RG+6xSvnOm1gqQXoyiMoKxFs8VZpFfpQHvQK4HDUPnAsBa9bxGP0tUjF+IYCkxFew+/G3owdq20pgjzt3uPRscs/o43IaOhH2f4ZaAPRyZQP6vgbuCbyGext87F0sgIZFI/N8BnlwBnolovcWAjq/uzwM0+
55cBJ0UYN84ZL+rfbnLMM4FfUDv7Z1XlCe8FetETbleNL7+CZrnvMjCVDuTOOA84Hf+96ga0PC8qXY50FQsuMg+41+d8p885R4n7gdt8zo+qvDkmUF4fZQXwEbS+99KDMhlWkw0eALqQglXyDDCdcovf+4lv5jPNXJ9zWc/FDMMdPudGVCreRlTWwVtWbynwYVQQCFSp61Q042WJLUjB1nneuw8tvXo97x1Lugvg+j1Mo9boySLVHtJFWqsthx5GlbSGeN5bigrHdqPl52Zj4qWLXvTQWY4KOX2ccgPMBLRcuy9+0YzhguXN4GuYq2Zc2R/NZg+hfYt3/9ZCepdQthmB4vIWIYOTbWyWzGt2Y0izG1fqjlltxnsdpbPMRMmd3lqTTumqMw7FZY5G5mSHw5dalreiRWYGWjbZ7gYUlFa0xOtIWA4vk1E6zWEoI+FvyYrjSAO1FG8DCmQGKd+DJFsGogWVVFiP/GWbga9Svg9NgtPQvnd04fUNCcriSBF+vqZ5nn9PQ+Xs4q401oI6EP0R+BkyXoAeAtcgBfwidnvkVaMVFTO6n1JoWTfqiONw1MVP8e6l3GVwOPJZXW5VItGGiuduAu5CZdOrMQJ1CHqpIFccS+LxaD/3Hcr7vF0Xw7UdAwQ/xduLGkJ6aUMhVAuwU006B3wM+ZLmozJ5QRhWkGs9yjKw1fhwDsq8eE/F+y+i1CeHIxD1wppupXrA5xyUOjQHMzU3cyjTeS2aaaN2Fzoc1bhch3xspuqBTkDulQVUz1q4mYEbNuewQD3FexGFS1VjOLoRHwOOinj9HAooXY2CSidHHKeSI5GFcRWNdSxqR7VH1iHHeTV24R+X53C8hSCBvPPqnD8B+AOygn6OYAm0ORSGthLl8B0d4DtRmIKsoMsJF1U/Hi1dt6DusIN8PrsIlUdwOAITpDFlC6q3MTbgmHm011qGepOvQSXPipyOCujW6rxqk0dRWYsVFe8PRSn5JxWOoEvdfOGzfnF5tnCRK+bGi33MoB1hL0U5d1H5J5oVD6A5mp8sQS6KSWh5e0jEcR4BPmhKqJA4xTM3XuxjBlW8DuRacDU3y0myNbNTPHPjxT5m0GTN15A/zVFiI+HKYzgc/ydMlrRfgmQWuYn0F91xJEQYxVuDnMcOrQAWJi2EI72ErQviwqLEQpQ+5XBEIqzi3YWLwF+BMiMcjshEqYR1Gdk1KmxBsaR9SQviSDdRFK8fxVU+YliWZmcbcq7vSFoQR/qJWvuxD0WgLDYoSzPzAqowtjVhORwDhEaKru4GPoliGgcyy4Hj0DLT4TBCo9WO88jQ8Bns97lLghvRTOfqqDiMYqrM+HyUYdBtaLykeRmlK12C9rQOh1FM1vd/HqUIzaT5e+LVoh/VxByHShs6HFaw0VjjHhTxP5d0LT+fRnu5q3HuAodlbHW02Q5cDByM+sw1642cRylCx6PeZiuTFScUFxK+f19QovaRS+t4tsasxhvABbZbSfUCV6CM7qtQl6Fm4E1U22UqcAYqvZ42fgJMxH6vdYc5nkBlSW6Pq4fbS6hb6jg0u9yGug7FyS5U1+UcVBbwbFSuMM1sQ1bXK4A9CcviqM0e9H80HdUxCpwIa4McygA/GfgAcCJqmGKKXUixupEv7nHsLc2agWNQ0d9OzC+PHNHIo1XeLCoe8kkqXiUtwKFoWXoEKqk3BpWLaC8cXsV8HT1J+tFTZKvn+DMqFZi1knvtyKg1O2lBHADcCVxEedNSAP4HJcsr0NNWHVUAAAAASUVORK5CYII="); + + --keyword: #5e8f60; + --identifier: #222; + --comment: #484a86; + --operator: #155da4; + --punctuation: black; + --other: black; + --escapeSequence: #c4891b; + --number: #252dbe; + --literal: #a4255b; + 
--program: #6060c0; + --option: #508000; + --raw-data: #a4255b; + + --clipboard-image-normal: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' style='color: black' fill='none' viewBox='0 0 24 24' stroke='currentColor'%3E %3Cpath stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M9 5H7a2 2 0 00-2 2v12a2 2 0 002 2h10a2 2 0 002-2V7a2 2 0 00-2-2h-2M9 5a2 2 0 002 2h2a2 2 0 002-2M9 5a2 2 0 012-2h2a2 2 0 012 2' /%3E %3C/svg%3E"); + --clipboard-image-selected: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' style='color: black' viewBox='0 0 20 20' fill='currentColor'%3E %3Cpath d='M8 3a1 1 0 011-1h2a1 1 0 110 2H9a1 1 0 01-1-1z' /%3E %3Cpath d='M6 3a2 2 0 00-2 2v11a2 2 0 002 2h8a2 2 0 002-2V5a2 2 0 00-2-2 3 3 0 01-3 3H9a3 3 0 01-3-3z' /%3E %3C/svg%3E"); + --clipboard-image: var(--clipboard-image-normal) +} + +[data-theme="dark"] { + --primary-background: #171921; + --secondary-background: #1e202a; + --third-background: #2b2e3b; + --info-background: #008000; + --warning-background: #807000; + --error-background: #c03000; + --border: #0e1014; + --text: #fff; + --anchor: #8be9fd; + --anchor-focus: #8be9fd; + --input-focus: #8be9fd; + --strong: #bd93f9; + --hint: #7A7C85; + --nim-sprite-base64: 
url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAARMAAABMCAYAAABOBlMuAAAACXBIWXMAAAsTAAALEwEAmpwYAAAFFmlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgNS42LWMxNDggNzkuMTY0MDM2LCAyMDE5LzA4LzEzLTAxOjA2OjU3ICAgICAgICAiPiA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPiA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1sbnM6cGhvdG9zaG9wPSJodHRwOi8vbnMuYWRvYmUuY29tL3Bob3Rvc2hvcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgMjEuMCAoV2luZG93cykiIHhtcDpDcmVhdGVEYXRlPSIyMDE5LTEyLTAzVDAxOjE4OjIyKzAxOjAwIiB4bXA6TW9kaWZ5RGF0ZT0iMjAxOS0xMi0wM1QwMToyMDoxMCswMTowMCIgeG1wOk1ldGFkYXRhRGF0ZT0iMjAxOS0xMi0wM1QwMToyMDoxMCswMTowMCIgZGM6Zm9ybWF0PSJpbWFnZS9wbmciIHBob3Rvc2hvcDpDb2xvck1vZGU9IjMiIHBob3Rvc2hvcDpJQ0NQcm9maWxlPSJzUkdCIElFQzYxOTY2LTIuMSIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDplZGViMzU3MC1iNmZjLWQyNDQtYTExZi0yMjc5YmY4NDNhYTAiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6ZWRlYjM1NzAtYjZmYy1kMjQ0LWExMWYtMjI3OWJmODQzYWEwIiB4bXBNTTpPcmlnaW5hbERvY3VtZW50SUQ9InhtcC5kaWQ6ZWRlYjM1NzAtYjZmYy1kMjQ0LWExMWYtMjI3OWJmODQzYWEwIj4gPHhtcE1NOkhpc3Rvcnk+IDxyZGY6U2VxPiA8cmRmOmxpIHN0RXZ0OmFjdGlvbj0iY3JlYXRlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDplZGViMzU3MC1iNmZjLWQyNDQtYTExZi0yMjc5YmY4NDNhYTAiIHN0RXZ0OndoZW49IjIwMTktMTItMDNUMDE6MTg6MjIrMDE6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCAyMS4wIChXaW5kb3dzKSIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz4JZNR8AAAfG0lEQVR4nO2deViTZ7r/7yxkJaxJ2MK+GCBAMCwS1kgUFQSKK4XWWqsz1jpjp3b0tDP1V+eqU391fqfT/mpPPd20drTFDS0KFEVWJSGAEgLIZpAICBJACIRs549Rj1WILAkBfD/XlevySp68z
/0S3+/7vPdzLyidTgcLkU2bd+z39/f/q1gshsrKSoJELFCa2iaEuU9K6kb+8uXxv54/fzE8L/eswNT2zCfQpjbAGKS8lPFKSEjIXiaTCSEhIeDj4xNnapsQ5j6rktZGp6UlfxIdzQVzCplmanvmG1hTG2BIAtlc26CgoDfT0tL2e3l5AQCAjY0NkMnk/a9s2k6rrKw8UV8n1JjYTIQ5RlAw14KzmL3xze1vfJyUuMJaq9UCFovFm9qu+YbBxcSPFUYkk8l2Q0NDsvo6ocrQx5+I8Ih4bz6f/0l8fHyKlZXV4/dRKBQwmcwwMpn8A4FAoPgHhH9bV1sxa488wZxoaycnJ/a9e/duCa5fkc3WvAiTI4Ib77p+XdqHG9anbfLy8gAAgLGxMdBpF+bjvzExqJj4scKI0dHRnwQHB++orq7+AgDeMuTxJ2Jl4rqU9PT0EwEBAUQCgTDuGAaDAampqYepVKpHUHDk325Ulw0a266YuFW+Gzdu/MDPz29jfn7+XgA4aOw5ESZP6kvpCXv3vnM8NiaSamVl+fj9BepGNDoGFRN7e/slcXFxO1xcXMDJyWnH7j//H/fi4uJdgutXmgw5z5O8smn7X9euXbvf29sbMBjMhONQKBRYWVlBbGzsbjMzM3JoOG+/sKKwy1h2rd/4elpGRsYuLy+vaDweD2w2Oy1h5ZrCvEunEaeeiVnMiabyl/F2/+X9P+8JDPQHHA5napMWBAYTk6DgSNuEhIS9DAYDAP7tq1i6dOkqOp3OWbNu0wens44emeoxA9lcWwKBYEMkEm2JRKIdHo+3QKFQWJ1Op8ZgMER3d/dVq1evTnFycpr0MSkUCsTExGzH4/Gk1LTME/39/TI0Go1FoVCg1WrVY2NjipGRkcGRkRH5dPwrEZHLXMPCwjJSUlIy3dzcfB+97+rqGhYSEpIOAIiYmBguN3zL77dt3uPh4W5qUxYUBhMTb2/vjeHh4cvR6P/dILK0tITIyEg7BweHr363/Z3Ampqaf1Zcu/zMKiVsyVJvMplsRyKR7IhEor2FhYUbhUJhJCYm2pFIJB6JRAIymQx4PB7QaDRoNBowMzMDJycnwOOn7icjEokQGxu7icFgbLp///7jFY1WqwWlUgkjIyOgUCgO7Ni5Rz48PCwfHh7uGRkZeaBQKOSjo6ODCoVCXlNVKn/6uCsT13FXrVr1emho6BYKhfLMnP7+/omrU9LPX8g+UThloxEMxqJFXjxESAyPQcSEExrLWLNmzW57e/txP/fw8ABHR8cdDAaDt3xF2ru9vb03sVgs0cbGxs/FxWVZUlISj0aj+dna2oKtrS1M5PcwJCgUCry8vODRrs84vPfoH6OjoyCXy6Gvr+/R6+CWrX9s7evrk/b19bWr1Wqli4sLZ8OGDe95eXmxUSjUuAd0cHDwjoqK2sYKXFIhvnldYYTTQpgU4/8+jyASCYDGoCd+ZkYYF8OICYezl8PhuOkbQyAQIDo62s/NzS2np6cHbGxsgEajAYFAAAwGA1gsFia6CE0NgUAABwcHsLe3B61WC2q1eo9WqwWNRgNKpRLUajUQiUSgUCh6zwGHwwGTydzo5+eXBQBnZu8MEJ5keHhYPqyYWMtHR0ZBpVIhYj9FUDONgOUvT12+du3avMDAQJjssdRqNWCxCyrEZdLodDoQi8Ulx44de628NL/V1Pa8iERE8l2dHB2CJvpcq9Nqbt1qKURWj1Njxld0ZGTkAW9v70kLCQC8sEIC8O/HKx8fn2gmk8kHgCk7pRFmzrWyAikASE1tx0Jj2uH0EZHL/N7YtuvT4OBgzmz4OBYSeDweIiMjt2S++vtMP1YYEmmJsCCY8mNOIJtr6+zsHBcZGXmIw+G4mZubG8m0hU9HRwcUFxe/KxQKTyDRsQjznSmJCS9+dVRERMTfQ0NDo2xtbfUGiSFMjtHRUaitrc3Jzc09kHvxVLmp7UFAmC6oZQkvrZLL5RJhReHtiQb5scKIXC7371FRUX90d
nYGIpE4JR8Jgn40Gg20t7fXFxYWfnr9+vWjz8sdYi+Osh4vzgUBwZSgtu94V+fs7Hx7YGCgra6u7khLS0u2RCwYeTQgKmYFh8fj/f/g4OAldnZ2prR1wdPd3Q1CofBQSUnJkdLi3N8E93FCY6k+Pj48FxcXjlar1ZSWlh65VvYr4kREmDNg79+/D3FxcW5OTk5uXl5evNbW1tL0jK3ZXV1d1ykUintycvInoaGhdkj+gvGxs7MDPp+/m0AgWMQvS/lyeHhYTqPRPJycnIJSU1NZ3t7eW2g0Gly/fv2oWq1Gij0hzClQ/gHhpLS0tEM8Hm/7I8Ho7++HlpYWsLa2Bg8PDxOb+OKhUCigqakJ7t+/D25ubuDu7g4oFAp0Oh08ePAAvv7666TTWUdzTG0nAsKTYMU3ryuSU18+4+bmFrZo0SIOAICVlRUsXrx4zkakLnRIJBI8CgJ8MtdJp9NBZ2enqL29XWRC8xAQxgUNAHD+3L8KGhoaCp78ABES04JCoX4jJAAAAwMDUFtbe96YpRMQEKbL41DU5ubmko6Ojj2PSgggzD36+/vrb9y4cX425zzw93/8EBjon2is44+NjSkePBjqGRwc7G5v7xBV19w8U5B/3qgrr9+/uWtXUuKKD/TZ9MXh/066/OuFmunO8dGBQ98HBbGSp/t9U6LRaDXK0dHBoeFhuVzeL22/0yFqamopufjLqRJ933ssJi0tLSXV1dWHGAzGbuObOzs8ubqa71vZKpUKOjo6blwpOF8zm/Mu5cVkLlkSaswprAHAaVihgK7O7oSGxltvfXLon3nXK4RHT2cdN4pfKDCAlZyUuMJan02nTmczAaBmunPw4qI3cbnh0/36XICq0+lgcPABp7OrK629vUP5z8++LLh2XXD05L++yxrvC4/F5EZ12WBS8saLS5Ys2U2lUufUY45SqQSlUgkqlQrUavXj19jYGGg0GtBoNKDT6UCn05VotVq1TqfToFAojFar1eh0Og0Wi8XhcDgeGo1+/PhgZmYGOBwOsFgsmJmZ/eY1F+nt7YXa2trs2Z73wdCQBgCMHp1IJpHA09MdPD3dLRIS+OtKisvWvbP7vf2lZdePVFwzbHTwyMiI3hidkZFRUKvUYzOZ48HQkBIA5nWqBAqFAktLC7C0tADmIh88Pz4uMSyUk7hn776DV4tKPn/6d/lNxp1MJqsRCASf8vn8XdMpOjRTVCoVjI2NgUqlAq1WCyMjI9DX1wf379+Hvr6+/Q8ePOgdGRmRKxSKx0WLFAqFXKlUKnQ6nUar1arHq47mxwrD4/F4Eg6HI2GxWDwej7cgkUjWFAqFam5uTjU3N6eRyeQPLSwswNraGqysrIBAIDwWFywW+zja11Qi29LSclIikeSZZPJZBovBAI8XA8HBQR9kZZ3lR8cmvFZSlGe00p8IkwONRkNERBj4+i7a4+XpHv307/IbMakWlciXJbx0nMPh7Jqo0JGh0el0MDo6Cl1dXSCVSkEmk7177969W319fe1DQ0M9KpVKoVarlWq1WjndNhUPG3ApAWDcOxLTLwSDwWAOotFoDBaLxRMIBAsrKysne3t7Xzqd7k2n0/c4OzsDlUoFHA4364IyMDAATU1NxdWikhcq6tXKyhJezljPJZKI2eERS5cZeoWCMD2srCwhPX0tVzk2djiCG//GtfLLUoBxShB0dHTU3Lx580sLC4vtJBLJKMZoNBqQSqUglUqPdnR01PT09DT19/fLHjx40DM0NNQ72933GiSVGgB4JFQK+LfoSAGgnL04yppEIh2xtLS0t7GxcaFSqR7Ozs4fMRgMcHR0nJX8pJs3b54Ui8UXjT7RHIRMIkFK8irfwcEHPwQELUmqvYHUGJkLmJubw8YNa/i9vfffY/px3myQiDTPiEl9nVDDX576jaenZ7SnpyfLUJNrNBqQyWRw+/bt4x0dHTdkMlltV1dXw/XygjkdEv4wB0YOAK0AUM70C8HQ6fSzdDrdm0qlejg6O
rLc3Ny2MBiMadWjfR4PHjyAmzdvZs/1v5MxoVAokJK8iicWS95k+nH+s0EiQhqpzQGoVFtYk5a87ba0XQAA34xbpagg/5zoT7s/OGNnZ8eaaYkBuVwOnZ2d5VKpVNTS0lLS2NhYWFVZ3Dujg5qQh6uY+ocvCAiKIPn4+Jz19PSMdnV15VCpVL6Dg4NBViw6nQ5EItHRpqamqzM+2DzHzo4O69amftLQeKsAZrDLgmBY/PyYsCIhfs+SiKUFE5Y8EwqFx11cXDihoaFTjjFAoVAwPDwMHR0dourq6jNCofDHhZqUVnvjmgIAcgAgJyg40mLRokX8kJCQjT4+PussLS1n1JPl7t27UFxcfHguB6mNjY2B7G4naNRTWyygUCjAYDGAx+PB0sICSCSi3vFYLBbCwjjA8vddBQtATKb7d3saBwc7IJPJBpsHjUGDGRYLJBIJLK0sAfucmyIGg4FFi3y8AwNZtycUk5KiS02vvf7WWQaDkejg4DApQwAeh3xDaWnpPoFAcPxFqnP6sEvgGf+A8Bx3d/cvIyIiNi1evHjT8wpNj8fAwACUlZW9P9dD5+/ckcFbf9gd2dcnn9LNAovF4inmZHtXNxdOdBR3+/JlS33pdP29wolEInA4weuiYxOy5vvuTkeHDHb+8c8xvb33Z3R9/N+Df+uIjYk02DwkEsna2trS1d/fNyGeF7uTyw1/7g3R3t4O2OxA/TVghULhcQqFQk1JSfmYSNR/5wD4d6EfgUBwvLS09IhUKhW9qAV5H9YjKQwJi6uvrKw8ERoamhkSEpKp7w7yJEqlEiQSyZmysrJv53qjdaVSCZdyTk+3qFMrAJRHRPLPN95qeifj5fU7mYt8JhyMRqMhMJDFdnF25gDAvBYTpXIMWlpay2fq/8m5mDcIABYGnEcGAGI/VlhBZWX1yZdSkz55OX0dV5+7w9bGGvz8mPrFpK62QskJjf2GTqd7x8bGbpnID4BCoUAmk0lLSkqOiESik2UleS/MakQflYKrXQDQxY1a3tTe3i6KiIjY5OXlxX7e9+rr6wsuXbr0t4ffn9OgMWjghMZQRcLp+8GulRVI/QPC37Wxtnal0ajJtjY2E451ZjiBra31vE9lR2PQQKFQaAAwo98Yi8Xq9fpPd56HO6rlvKWJv/PwcK+JilyCmajWMw6HAzs7+rMFpQOCIn6zHywSFvXm5eUdFAqFZ9Rq9bgHa2trq79w4cK+zz49cAARkmcpL81v/a/Dhz49d+7c3qqqqjyVSjXuOJ1OBxKJpDw3N/fA5V+zax6978cKw/sHhM/raMrnUVdboSy4fPWQSFSjd5yFBQWIRNKEd2IEw1J4JUd88WL+R51d3XrHWVDMnxUTa2tr1zXrNiUGsrmPf7DS4tymCxcu7Kuurs55+kKQSqVN586d23vs+8NHDXUCC5Wzp3/Iy8rKeruysvLM2Nhvo7VVKhXU1tYWnj17du/T7UOdnZ2D7OzsfGGB09raVi4S1RzXl0eFw+EAj8chYjKLVFffyOrq1C8mJBLpWTFRKBRyDofzC4vFWvXk+1ev/CLOzs7eKxAIslQqFeh0Oujp6enKzs7em/XTd7OayTqfKb56sT4rK+sPAoHg5KO/o0KhAKFQmHXy5MkdF3/5+TeZmctXpIXZ29v7zqVcKWNRX1epuXu3U/y8pEw0GmndOZt0dnXVDw0P6/W5oNHoZ30mQ0NDPb29vfvj4+Pf3rR5B/7od188XnEUXr4gDgmL+0NfX5/U19d3d3l5+YGfTnyDtLmcIhXXLsu4UcvfR6PRGGtra9eysrIjYrE45+kt4Fheou/69es/unnz5vm7d+/Wmsre2WRkZGTQ1DYg/JYGiUiTm1ugBAC9IfHPiEmDpFITE7fqJI/H27lmzZpDq5LWtz55t6wUXO3ihMYerK+vz2tpaUFaM0yT8tL81ujYle+TSCTrvEunBU9/voTLd92wYcPHVCqV39XVdXCu7+oYCp1O90Kc50Jk3I5+x
Vcv1jc3N5d4enpSMzIyvkpK3sh78nORsKg3++yPBS/q1q+hKCm61DSekERGJ3ikp6d/ERsbm1xVVXWwtbX1hRFtFAqFPMLMUyZsDyoQCI7LZDKIiIjwzczM/GpV0vro2TTsRSUqZoX3+vXrP1u9enXi0NAQiESirIdRtggIc5oJ40zq6uryGhoa8ry8vBJCQ0O9USjU94mrN7yWc+EnvaXb5gJMvxCMp6cnl0Kh2Le1tZVXXLs8L1LXefGrWRkZGZ/x+XyeUqkEkUh0vqenZ14HZyG8OEwoJjdrygd37NxTEBkZmWBtbQ3BwcEeKBTq+/UbX3/355Pfzlmn66qk9dGbN29+k8PhbCSRSNDZ2Snb9ae/HCkpKTksEhbN2QTD5NSX+Vu3bj0cHBzsjcFg4O7du1BWVvbNwxB9BIQ5j94I2Fu3bhXW19cDl8sFLBYLHA7Hg0wmf/e77e84ffXlPz6fLSMnQ2paZkJ4eHjmtm3b+B4eHvZkMhlQKBTY29s72dvbfxgUFJT8x7ffP1NRUfHjXErnZ/qFYKKjo7dt3rz5g8DAQPtH/XHa2tpqGhsbC55/BASEuYFeMblz505NTU3NgfDw8PcwGAygUCjw9fW1IJPJn/1130Hv0tLSI4WXL4hny9inYS+Osvbz80tgMpn8jIwMPovFch2vpoiDgwM4ODhwfH19OYsWLeJv3/Hu+cbGxquzXZz5aZYlvMRJT0/fFhkZue3JZmfd3d0gEolOIr4ShPmEXjFpkFRqXlrzSnFnZ+d7Tk5OjzNfXVxcICMjY6ezszNnVdL6vU8HWhmbgKAIkrOzMyc1NTXz0YU4maAuOp0OK1as4EVFRfGEQqHg1dfePHzr1q2rs71S8WOF4f38/BLS09M/iIyM5DxdxLq5uVlcVVU1bgVwBIS5il4xAQCQyWRigUBwJikpKe3JVGQcDgdLly7l2tranti0ecf7IpEoy9hbxX6sMDydTvdevXr1ltjY2F3u7u6AxT73FJ7B3Nwc4uLiwthsdphQKCzZkL7l0/r6+oKbNeVG90+EhMXZL1++fFtycvKHrq6uz4igUqmE5ubmEiTHCWG+8dwrUXD9imz9xtd/jIuLS7N5KpsTjUZDUFCQE4PB+F4oFGYmJW888Mv5k4UTHGpGxC9LYaenp78VEhKyxdHRESgUyoyOh0KhwNraGuLi4qIDAgKi6+rqyjekb/mHMSN6N6RvSdu+ffseNpsdZm09ftuW+vp6EIvFSB9hhHnHpG7rUqm0orW1tdXS0tLj6TIEaDQaaDQaxMfH811dXTl/3Xfw+JUrVz411J01cfWG6IiIiC07d+5McHNzs7ewMGyOFw6HAwcHB6BSqVx3d/fwz7/4rkAgEBwXCoUnHpZonDGrU9J5MTEx27du3Zrm4uKC0beaqq6u/ry+vj7XEPMiIMwmkxKTimuXZe/u+fCkp6fnexPdUfF4PPj7+1szGIydLi4unF1/+kvenTt3RG1tbRXTqfma8lIG39/fP/HVV19NZrFYHpMpzjQTzMzMwNPTE+Pp6Zng6emZ4Ofnl5CesfV8bW1tznQe3/wDwvFeXl7Rvr6+Ca+88kpaUFCQh74GXzqdDrq7u6GpqankRQmdR1hYTNrhUFVVlcXj8d6ysrKy0OfstLS0hPj4eC6Xy+U2NzeDRCI5/sa2XeX37t1rGhwc7BoYGJBN1P+FFbiE5OzszGaxWImvvvrqpoCAAKfp+ERmCpPJBCaTmcnhcDJLS0u/TE59+YxUKhXoi/lg+oVgrKysGJaWlna2trYeaWlpXDabvTMgIGDSfp2KiorzbW1tL0zoPMLCYtJX6uVfs2u++PKowMPDgz+ZIslEIhECAgKAxWJlajSazJ6eHmhra4PW1tZvtmz9o6Czs7O+r6+vfWxsbFir1WosLCzsV6xYkcnj8d7z9vaelmPV0Hh5eYGnp+f2mJiY7UVFRZ/HL0v5tru7+5ZGo1FisVg8Docj4fF4CxsbG1c+nx/m7
e39sYeHB7i4uIC5ufmU6r4ODQ1BZWXlifkSrYuA8DRTumIrKytPent78728vCb9HRQKBVgsFhwcHIBOpwObzd4yNja2RaVSwdDQEHR1dcHo6CjQaDRwdXWdsWPV0KBQKPDw8AA7O7udERERO2tra2FgYACoVCo4OTkBjUYDMpkMeDz+8WuqaLVaaGxsbL19+/YzSX8ICPOFqYrJidDQ0AwvLy/e80c/CwaDARKJBI86BdJoNHB3dwe1Wj0nViL6IJPJwGQywdnZGZRKJRAIBDBUx8OBgQEoLS39BtkORpjPTJg1PB61N64pmpqarvb39xvUiLkuJE9CJpPBxsbGYEICANDZ2SlHgtQQ5jtTEhMAgLq6ulyJRFJvDGNeREZGRkAikRSUFuci2cEI85opi0l+7hmBWCzOeV6dToTJcfv27cHr168jxbgR5j1TFhMAgObm5hKZDNl0MAQtLS3Xzpw6hkS8Isx7piUmUqlUIBAIJuyjgzA5Ojs7QSKRINGuCAuCaYmJsKKw68qVK59KJJIu5HFneiiVSigqKjouEolOmtoWBARDMC0xAQC4+MvPJadOnXq3ra1N8yL0dDEkOp0OSktLy/Pz8w8+3d4CAWG+Mm0xAQA4fuy/jl+8ePGju3fvGsqeBY9Wq4XKysrWU6dOvX31yi8mKyyFgGBoZiQmAAD/79D+fadPn96PCMrz0el0UFVV1frtt9+mj9fiAgFhPjNjMQEAyMvLO3Ds2LE/tLS0INmuerh27Vr9999//xoiJAgLEYOEntbVVigB4PNNm3cMpqSkfMRms50McdyFgkqlgqKiovJTp069nZ97BhEShAWJQePYj373xdF1GzbLFQrFx6Ghob766ne8KNy7dw+KiopO5ubmfmTK4tsICMbG4EkxWT99d35l4rre/v7+D0NCQvh0Ot3QU8wL1Go1SKVSTX5+/sH8/PyDSP8bhIWOUTLsLuVklQcFR65pbGzcvnLlyvfc3NwsCASCMaaac+h0OhgaGoLq6uqaCxcu/OV01tGcTw7uM7VZCAhGx2jpug/vxAd58atzoqKitq1cuXKnvb29saabE+h0Oqiurpbm5eUdrK6uPlspuDrvY0hmO4YIhUIBGq1/X2CmNqFQKL3/79HomZ/z82xEowyy9zFr80zGDqPn/hdeviBmL47ad+fOnRsRERGbQkNDo62srIw97azT2dkJxcXFx0tKSo7Mdh8hY4LD4TDPH2U4MFjMc6tLmZmZzaj+Aw6H0/t9PB4PGCxmRudNJBL0ngeZTAI0Gj3jv+1szfM88Hic8cUEAKCmqlQOAN/ELU2qkEgkySwWK3HRokVcBoMxG9MbDZ1OB83NzdDU1FRQW1t7XiAQHJ+ovu18pbr6Rg6L5ZtoM0EhcUPT0tJW8tWRb0vQqIkvgKqqmhnVfrl2TfANXo+gjKlUio4OWc1M5sjOzjnQUH8rbqLPu3t6moaGhmfc+3q25tGHUqmECoEIUKbIrVkcEkONiIh4jcvlvu7s7OxLo9GmVe7QVCgUCujq6oKGhoaCioqKo9XV1WeM3YDMVPDik1gpyas+XrVyeaKXl8czjyANjbcgI/MNmkg49Q4ECPOH3NyC4RUr+M8IcHt7B1y9WlKRl3/5kElKnD1sfXEoJCzueEBAQGJYWFgGk8nk2djYAIFAgLm4pTw6Ogqjo6Mgl8vhxo0b50tLS4/U19fnLvS2FIWXfxEDQNLmLW9ueW1TxtchHDaQyWRTm4VgYkZHR6G+vhF+/NfP+y5e+vVjiVgwZpKVydOwF0dZW1lZOTGZTD6bzU4LCAiIptPp8HTDL1MwOjoKLS0tUFdXd1IsFudIpdKKgYGB7tloJTrX4MUnsVJTEj9etzY10dHRAQAAGm81wcsZW5CVyQInL69gNCGBjwcAGBx8ANnncypOnTr3H9nn/reD55wovvrQpyIHAHFUzIocGo3mQaPRfBwdHVlubm7bXF1dgcFgABqNNvruglwuh7t374JMJ
oOOjo7P79y5I+ru7m7q7e1tXQi7MzOh8PIv4pCw2DdaWtte37Au7aPIyCWAxWABjUbPif9HCMbjURtKiaQBfvr5zH9evlJ0uLQ4r/nJMXNiZTIRrMAlJAcHB18HBweWo6Mjy8rKajeJRAJLS0uwtLQECwsLoFAogMfjAYvFgpmZ2XNXMyqVCoaHh2FoaAiGh4cfvwYGBqCvrw+6u7vfvnfvXlNvb29rT09Pq0QsUM7S6c4rNqS/lrZ5U+YPRBKR9M7u9xwqBUUvtNAudH766XSLE8PR49ixE78/8tVnX403Zk7fUR46NUUAIPIPCMdTKJTdNjY2QKPRgE6nA51OB1tbWyCRSIDD4YBAIAAejwcCgfDYUajVakGlUoFarQadTvfY79HX1wf9/f0gl8tBLpfDvXv3HvXw+dxQPYYXMj+d+P7Mmzv+5OHr6/OJWq1GBHeB09TcUiKuq/coKS3/eqIx/wPkiIXC3w6YjAAAAABJRU5ErkJggg=="); + + --keyword: #ff79c6; + --identifier: #f8f8f2; + --comment: #6272a4; + --operator: #ff79c6; + --punctuation: #f8f8f2; + --other: #f8f8f2; + --escapeSequence: #bd93f9; + --number: #bd93f9; + --literal: #f1fa8c; + --program: #9090c0; + --option: #90b010; + --raw-data: #8be9fd; + + --clipboard-image-normal: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' style='color: lightgray' fill='none' viewBox='0 0 24 24' stroke='currentColor'%3E %3Cpath stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M9 5H7a2 2 0 00-2 2v12a2 2 0 002 2h10a2 2 0 002-2V7a2 2 0 00-2-2h-2M9 5a2 2 0 002 2h2a2 2 0 002-2M9 5a2 2 0 012-2h2a2 2 0 012 2' /%3E %3C/svg%3E"); + --clipboard-image-selected: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' style='color: lightgray' viewBox='0 0 20 20' fill='currentColor'%3E %3Cpath d='M8 3a1 1 0 011-1h2a1 1 0 110 2H9a1 1 0 01-1-1z' /%3E %3Cpath d='M6 3a2 2 0 00-2 2v11a2 2 0 002 2h8a2 2 0 002-2V5a2 2 0 00-2-2 3 3 0 01-3 3H9a3 3 0 01-3-3z' /%3E %3C/svg%3E"); + --clipboard-image: var(--clipboard-image-normal); +} + +@media (prefers-color-scheme: dark) { + [data-theme="auto"] { + --primary-background: #171921; + --secondary-background: #1e202a; + --third-background: #2b2e3b; + --info-background: #008000; + --warning-background: #807000; + --error-background: #c03000; + --border: #0e1014; + --text: #fff; + --anchor: #8be9fd; + --anchor-focus: #8be9fd; + --input-focus: #8be9fd; + --strong: #bd93f9; + --hint: #7A7C85; 
+ --nim-sprite-base64: url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAARMAAABMCAYAAABOBlMuAAAACXBIWXMAAAsTAAALEwEAmpwYAAAFFmlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgNS42LWMxNDggNzkuMTY0MDM2LCAyMDE5LzA4LzEzLTAxOjA2OjU3ICAgICAgICAiPiA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPiA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1sbnM6cGhvdG9zaG9wPSJodHRwOi8vbnMuYWRvYmUuY29tL3Bob3Rvc2hvcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgMjEuMCAoV2luZG93cykiIHhtcDpDcmVhdGVEYXRlPSIyMDE5LTEyLTAzVDAxOjE4OjIyKzAxOjAwIiB4bXA6TW9kaWZ5RGF0ZT0iMjAxOS0xMi0wM1QwMToyMDoxMCswMTowMCIgeG1wOk1ldGFkYXRhRGF0ZT0iMjAxOS0xMi0wM1QwMToyMDoxMCswMTowMCIgZGM6Zm9ybWF0PSJpbWFnZS9wbmciIHBob3Rvc2hvcDpDb2xvck1vZGU9IjMiIHBob3Rvc2hvcDpJQ0NQcm9maWxlPSJzUkdCIElFQzYxOTY2LTIuMSIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDplZGViMzU3MC1iNmZjLWQyNDQtYTExZi0yMjc5YmY4NDNhYTAiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6ZWRlYjM1NzAtYjZmYy1kMjQ0LWExMWYtMjI3OWJmODQzYWEwIiB4bXBNTTpPcmlnaW5hbERvY3VtZW50SUQ9InhtcC5kaWQ6ZWRlYjM1NzAtYjZmYy1kMjQ0LWExMWYtMjI3OWJmODQzYWEwIj4gPHhtcE1NOkhpc3Rvcnk+IDxyZGY6U2VxPiA8cmRmOmxpIHN0RXZ0OmFjdGlvbj0iY3JlYXRlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDplZGViMzU3MC1iNmZjLWQyNDQtYTExZi0yMjc5YmY4NDNhYTAiIHN0RXZ0OndoZW49IjIwMTktMTItMDNUMDE6MTg6MjIrMDE6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCAyMS4wIChXaW5kb3dzKSIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz4JZNR8AAAfG0lEQVR4nO2deViTZ7r/7yxkJaxJ2MK+GCBAMCwS1kgUFQSKK4XWWqsz1jpjp3b0tDP1V+eqU391fqfT/mpPPd20drTFDS0KFEVWJSGAEgLIZpAICBJACIRs54
9Rj1WILAkBfD/XlevySp68z/0S3+/7vPdzLyidTgcLkU2bd+z39/f/q1gshsrKSoJELFCa2iaEuU9K6kb+8uXxv54/fzE8L/eswNT2zCfQpjbAGKS8lPFKSEjIXiaTCSEhIeDj4xNnapsQ5j6rktZGp6UlfxIdzQVzCplmanvmG1hTG2BIAtlc26CgoDfT0tL2e3l5AQCAjY0NkMnk/a9s2k6rrKw8UV8n1JjYTIQ5RlAw14KzmL3xze1vfJyUuMJaq9UCFovFm9qu+YbBxcSPFUYkk8l2Q0NDsvo6ocrQx5+I8Ih4bz6f/0l8fHyKlZXV4/dRKBQwmcwwMpn8A4FAoPgHhH9bV1sxa488wZxoaycnJ/a9e/duCa5fkc3WvAiTI4Ib77p+XdqHG9anbfLy8gAAgLGxMdBpF+bjvzExqJj4scKI0dHRnwQHB++orq7+AgDeMuTxJ2Jl4rqU9PT0EwEBAUQCgTDuGAaDAampqYepVKpHUHDk325Ulw0a266YuFW+Gzdu/MDPz29jfn7+XgA4aOw5ESZP6kvpCXv3vnM8NiaSamVl+fj9BepGNDoGFRN7e/slcXFxO1xcXMDJyWnH7j//H/fi4uJdgutXmgw5z5O8smn7X9euXbvf29sbMBjMhONQKBRYWVlBbGzsbjMzM3JoOG+/sKKwy1h2rd/4elpGRsYuLy+vaDweD2w2Oy1h5ZrCvEunEaeeiVnMiabyl/F2/+X9P+8JDPQHHA5napMWBAYTk6DgSNuEhIS9DAYDAP7tq1i6dOkqOp3OWbNu0wens44emeoxA9lcWwKBYEMkEm2JRKIdHo+3QKFQWJ1Op8ZgMER3d/dVq1evTnFycpr0MSkUCsTExGzH4/Gk1LTME/39/TI0Go1FoVCg1WrVY2NjipGRkcGRkRH5dPwrEZHLXMPCwjJSUlIy3dzcfB+97+rqGhYSEpIOAIiYmBguN3zL77dt3uPh4W5qUxYUBhMTb2/vjeHh4cvR6P/dILK0tITIyEg7BweHr363/Z3Ampqaf1Zcu/zMKiVsyVJvMplsRyKR7IhEor2FhYUbhUJhJCYm2pFIJB6JRAIymQx4PB7QaDRoNBowMzMDJycnwOOn7icjEokQGxu7icFgbLp///7jFY1WqwWlUgkjIyOgUCgO7Ni5Rz48PCwfHh7uGRkZeaBQKOSjo6ODCoVCXlNVKn/6uCsT13FXrVr1emho6BYKhfLMnP7+/omrU9LPX8g+UThloxEMxqJFXjxESAyPQcSEExrLWLNmzW57e/txP/fw8ABHR8cdDAaDt3xF2ru9vb03sVgs0cbGxs/FxWVZUlISj0aj+dna2oKtrS1M5PcwJCgUCry8vODRrs84vPfoH6OjoyCXy6Gvr+/R6+CWrX9s7evrk/b19bWr1Wqli4sLZ8OGDe95eXmxUSjUuAd0cHDwjoqK2sYKXFIhvnldYYTTQpgU4/8+jyASCYDGoCd+ZkYYF8OICYezl8PhuOkbQyAQIDo62s/NzS2np6cHbGxsgEajAYFAAAwGA1gsFia6CE0NgUAABwcHsLe3B61WC2q1eo9WqwWNRgNKpRLUajUQiUSgUCh6zwGHwwGTydzo5+eXBQBnZu8MEJ5keHhYPqyYWMtHR0ZBpVIhYj9FUDONgOUvT12+du3avMDAQJjssdRqNWCxCyrEZdLodDoQi8Ulx44de628NL/V1Pa8iERE8l2dHB2CJvpcq9Nqbt1qKURWj1Njxld0ZGTkAW9v70kLCQC8sEIC8O/HKx8fn2gmk8kHgCk7pRFmzrWyAikASE1tx0Jj2uH0EZHL/N7YtuvT4OBgzmz4OBYSeDweIiMjt2S++vtMP1YYEmmJsCCY8mNOIJtr6+zsHBcZGXmIw+G4mZubG8m0hU9HRwcUFxe/KxQKTyDRsQjznSmJCS9+dVRERMTfQ0NDo2xtbfUGiSFMjtHRUaitrc3Jzc09kHvxVLmp7UFAmC6oZQkvrZLL5RJhRe
HtiQb5scKIXC7371FRUX90dnYGIpE4JR8Jgn40Gg20t7fXFxYWfnr9+vWjz8sdYi+Osh4vzgUBwZSgtu94V+fs7Hx7YGCgra6u7khLS0u2RCwYeTQgKmYFh8fj/f/g4OAldnZ2prR1wdPd3Q1CofBQSUnJkdLi3N8E93FCY6k+Pj48FxcXjlar1ZSWlh65VvYr4kREmDNg79+/D3FxcW5OTk5uXl5evNbW1tL0jK3ZXV1d1ykUintycvInoaGhdkj+gvGxs7MDPp+/m0AgWMQvS/lyeHhYTqPRPJycnIJSU1NZ3t7eW2g0Gly/fv2oWq1Gij0hzClQ/gHhpLS0tEM8Hm/7I8Ho7++HlpYWsLa2Bg8PDxOb+OKhUCigqakJ7t+/D25ubuDu7g4oFAp0Oh08ePAAvv7666TTWUdzTG0nAsKTYMU3ryuSU18+4+bmFrZo0SIOAICVlRUsXrx4zkakLnRIJBI8CgJ8MtdJp9NBZ2enqL29XWRC8xAQxgUNAHD+3L8KGhoaCp78ABES04JCoX4jJAAAAwMDUFtbe96YpRMQEKbL41DU5ubmko6Ojj2PSgggzD36+/vrb9y4cX425zzw93/8EBjon2is44+NjSkePBjqGRwc7G5v7xBV19w8U5B/3qgrr9+/uWtXUuKKD/TZ9MXh/066/OuFmunO8dGBQ98HBbGSp/t9U6LRaDXK0dHBoeFhuVzeL22/0yFqamopufjLqRJ933ssJi0tLSXV1dWHGAzGbuObOzs8ubqa71vZKpUKOjo6blwpOF8zm/Mu5cVkLlkSaswprAHAaVihgK7O7oSGxltvfXLon3nXK4RHT2cdN4pfKDCAlZyUuMJan02nTmczAaBmunPw4qI3cbnh0/36XICq0+lgcPABp7OrK629vUP5z8++LLh2XXD05L++yxrvC4/F5EZ12WBS8saLS5Ys2U2lUufUY45SqQSlUgkqlQrUavXj19jYGGg0GtBoNKDT6UCn05VotVq1TqfToFAojFar1eh0Og0Wi8XhcDgeGo1+/PhgZmYGOBwOsFgsmJmZ/eY1F+nt7YXa2trs2Z73wdCQBgCMHp1IJpHA09MdPD3dLRIS+OtKisvWvbP7vf2lZdePVFwzbHTwyMiI3hidkZFRUKvUYzOZ48HQkBIA5nWqBAqFAktLC7C0tADmIh88Pz4uMSyUk7hn776DV4tKPn/6d/lNxp1MJqsRCASf8vn8XdMpOjRTVCoVjI2NgUqlAq1WCyMjI9DX1wf379+Hvr6+/Q8ePOgdGRmRKxSKx0WLFAqFXKlUKnQ6nUar1arHq47mxwrD4/F4Eg6HI2GxWDwej7cgkUjWFAqFam5uTjU3N6eRyeQPLSwswNraGqysrIBAIDwWFywW+zja11Qi29LSclIikeSZZPJZBovBAI8XA8HBQR9kZZ3lR8cmvFZSlGe00p8IkwONRkNERBj4+i7a4+XpHv307/IbMakWlciXJbx0nMPh7Jqo0JGh0el0MDo6Cl1dXSCVSkEmk7177969W319fe1DQ0M9KpVKoVarlWq1WjndNhUPG3ApAWDcOxLTLwSDwWAOotFoDBaLxRMIBAsrKysne3t7Xzqd7k2n0/c4OzsDlUoFHA4364IyMDAATU1NxdWikhcq6tXKyhJezljPJZKI2eERS5cZeoWCMD2srCwhPX0tVzk2djiCG//GtfLLUoBxShB0dHTU3Lx580sLC4vtJBLJKMZoNBqQSqUglUqPdnR01PT09DT19/fLHjx40DM0NNQ72933GiSVGgB4JFQK+LfoSAGgnL04yppEIh2xtLS0t7GxcaFSqR7Ozs4fMRgMcHR0nJX8pJs3b54Ui8UXjT7RHIRMIkFK8irfwcEHPwQELUmqvYHUGJkLmJubw8YNa/i9vfffY/px3myQiDTPiEl9nVDDX576jaenZ7SnpyfLUJNrNBqQyWRw+/bt4x0dHTdkMlltV1dXw/XygjkdEv4wB0YOAK0AUM
70C8HQ6fSzdDrdm0qlejg6OrLc3Ny2MBiMadWjfR4PHjyAmzdvZs/1v5MxoVAokJK8iicWS95k+nH+s0EiQhqpzQGoVFtYk5a87ba0XQAA34xbpagg/5zoT7s/OGNnZ8eaaYkBuVwOnZ2d5VKpVNTS0lLS2NhYWFVZ3Dujg5qQh6uY+ocvCAiKIPn4+Jz19PSMdnV15VCpVL6Dg4NBViw6nQ5EItHRpqamqzM+2DzHzo4O69amftLQeKsAZrDLgmBY/PyYsCIhfs+SiKUFE5Y8EwqFx11cXDihoaFTjjFAoVAwPDwMHR0dourq6jNCofDHhZqUVnvjmgIAcgAgJyg40mLRokX8kJCQjT4+PussLS1n1JPl7t27UFxcfHguB6mNjY2B7G4naNRTWyygUCjAYDGAx+PB0sICSCSi3vFYLBbCwjjA8vddBQtATKb7d3saBwc7IJPJBpsHjUGDGRYLJBIJLK0sAfucmyIGg4FFi3y8AwNZtycUk5KiS02vvf7WWQaDkejg4DApQwAeh3xDaWnpPoFAcPxFqnP6sEvgGf+A8Bx3d/cvIyIiNi1evHjT8wpNj8fAwACUlZW9P9dD5+/ckcFbf9gd2dcnn9LNAovF4inmZHtXNxdOdBR3+/JlS33pdP29wolEInA4weuiYxOy5vvuTkeHDHb+8c8xvb33Z3R9/N+Df+uIjYk02DwkEsna2trS1d/fNyGeF7uTyw1/7g3R3t4O2OxA/TVghULhcQqFQk1JSfmYSNR/5wD4d6EfgUBwvLS09IhUKhW9qAV5H9YjKQwJi6uvrKw8ERoamhkSEpKp7w7yJEqlEiQSyZmysrJv53qjdaVSCZdyTk+3qFMrAJRHRPLPN95qeifj5fU7mYt8JhyMRqMhMJDFdnF25gDAvBYTpXIMWlpay2fq/8m5mDcIABYGnEcGAGI/VlhBZWX1yZdSkz55OX0dV5+7w9bGGvz8mPrFpK62QskJjf2GTqd7x8bGbpnID4BCoUAmk0lLSkqOiESik2UleS/MakQflYKrXQDQxY1a3tTe3i6KiIjY5OXlxX7e9+rr6wsuXbr0t4ffn9OgMWjghMZQRcLp+8GulRVI/QPC37Wxtnal0ajJtjY2E451ZjiBra31vE9lR2PQQKFQaAAwo98Yi8Xq9fpPd56HO6rlvKWJv/PwcK+JilyCmajWMw6HAzs7+rMFpQOCIn6zHywSFvXm5eUdFAqFZ9Rq9bgHa2trq79w4cK+zz49cAARkmcpL81v/a/Dhz49d+7c3qqqqjyVSjXuOJ1OBxKJpDw3N/fA5V+zax6978cKw/sHhM/raMrnUVdboSy4fPWQSFSjd5yFBQWIRNKEd2IEw1J4JUd88WL+R51d3XrHWVDMnxUTa2tr1zXrNiUGsrmPf7DS4tymCxcu7Kuurs55+kKQSqVN586d23vs+8NHDXUCC5Wzp3/Iy8rKeruysvLM2Nhvo7VVKhXU1tYWnj17du/T7UOdnZ2D7OzsfGGB09raVi4S1RzXl0eFw+EAj8chYjKLVFffyOrq1C8mJBLpWTFRKBRyDofzC4vFWvXk+1ev/CLOzs7eKxAIslQqFeh0Oujp6enKzs7em/XTd7OayTqfKb56sT4rK+sPAoHg5KO/o0KhAKFQmHXy5MkdF3/5+TeZmctXpIXZ29v7zqVcKWNRX1epuXu3U/y8pEw0GmndOZt0dnXVDw0P6/W5oNHoZ30mQ0NDPb29vfvj4+Pf3rR5B/7od188XnEUXr4gDgmL+0NfX5/U19d3d3l5+YGfTnyDtLmcIhXXLsu4UcvfR6PRGGtra9eysrIjYrE45+kt4Fheou/69es/unnz5vm7d+/Wmsre2WRkZGTQ1DYg/JYGiUiTm1ugBAC9IfHPiEmDpFITE7fqJI/H27lmzZpDq5LWtz55t6wUXO3ihMYerK+vz2tpaUFaM0yT8tL81ujYle+TSCTrvEunBU9/voTLd92wYcPHVCqV39XVdX
Cu7+oYCp1O90Kc50Jk3I5+xVcv1jc3N5d4enpSMzIyvkpK3sh78nORsKg3++yPBS/q1q+hKCm61DSekERGJ3ikp6d/ERsbm1xVVXWwtbX1hRFtFAqFPMLMUyZsDyoQCI7LZDKIiIjwzczM/GpV0vro2TTsRSUqZoX3+vXrP1u9enXi0NAQiESirIdRtggIc5oJ40zq6uryGhoa8ry8vBJCQ0O9USjU94mrN7yWc+EnvaXb5gJMvxCMp6cnl0Kh2Le1tZVXXLs8L1LXefGrWRkZGZ/x+XyeUqkEkUh0vqenZ14HZyG8OEwoJjdrygd37NxTEBkZmWBtbQ3BwcEeKBTq+/UbX3/355Pfzlmn66qk9dGbN29+k8PhbCSRSNDZ2Snb9ae/HCkpKTksEhbN2QTD5NSX+Vu3bj0cHBzsjcFg4O7du1BWVvbNwxB9BIQ5j94I2Fu3bhXW19cDl8sFLBYLHA7Hg0wmf/e77e84ffXlPz6fLSMnQ2paZkJ4eHjmtm3b+B4eHvZkMhlQKBTY29s72dvbfxgUFJT8x7ffP1NRUfHjXErnZ/qFYKKjo7dt3rz5g8DAQPtH/XHa2tpqGhsbC55/BASEuYFeMblz505NTU3NgfDw8PcwGAygUCjw9fW1IJPJn/1130Hv0tLSI4WXL4hny9inYS+Osvbz80tgMpn8jIwMPovFch2vpoiDgwM4ODhwfH19OYsWLeJv3/Hu+cbGxquzXZz5aZYlvMRJT0/fFhkZue3JZmfd3d0gEolOIr4ShPmEXjFpkFRqXlrzSnFnZ+d7Tk5OjzNfXVxcICMjY6ezszNnVdL6vU8HWhmbgKAIkrOzMyc1NTXz0YU4maAuOp0OK1as4EVFRfGEQqHg1dfePHzr1q2rs71S8WOF4f38/BLS09M/iIyM5DxdxLq5uVlcVVU1bgVwBIS5il4xAQCQyWRigUBwJikpKe3JVGQcDgdLly7l2tranti0ecf7IpEoy9hbxX6sMDydTvdevXr1ltjY2F3u7u6AxT73FJ7B3Nwc4uLiwthsdphQKCzZkL7l0/r6+oKbNeVG90+EhMXZL1++fFtycvKHrq6uz4igUqmE5ubmEiTHCWG+8dwrUXD9imz9xtd/jIuLS7N5KpsTjUZDUFCQE4PB+F4oFGYmJW888Mv5k4UTHGpGxC9LYaenp78VEhKyxdHRESgUyoyOh0KhwNraGuLi4qIDAgKi6+rqyjekb/mHMSN6N6RvSdu+ffseNpsdZm09ftuW+vp6EIvFSB9hhHnHpG7rUqm0orW1tdXS0tLj6TIEaDQaaDQaxMfH811dXTl/3Xfw+JUrVz411J01cfWG6IiIiC07d+5McHNzs7ewMGyOFw6HAwcHB6BSqVx3d/fwz7/4rkAgEBwXCoUnHpZonDGrU9J5MTEx27du3Zrm4uKC0beaqq6u/ry+vj7XEPMiIMwmkxKTimuXZe/u+fCkp6fnexPdUfF4PPj7+1szGIydLi4unF1/+kvenTt3RG1tbRXTqfma8lIG39/fP/HVV19NZrFYHpMpzjQTzMzMwNPTE+Pp6Zng6emZ4Ofnl5CesfV8bW1tznQe3/wDwvFeXl7Rvr6+Ca+88kpaUFCQh74GXzqdDrq7u6GpqankRQmdR1hYTNrhUFVVlcXj8d6ysrKy0OfstLS0hPj4eC6Xy+U2NzeDRCI5/sa2XeX37t1rGhwc7BoYGJBN1P+FFbiE5OzszGaxWImvvvrqpoCAAKfp+ERmCpPJBCaTmcnhcDJLS0u/TE59+YxUKhXoi/lg+oVgrKysGJaWlna2trYeaWlpXDabvTMgIGDSfp2KiorzbW1tL0zoPMLCYtJX6uVfs2u++PKowMPDgz+ZIslEIhECAgKAxWJlajSazJ6eHmhra4PW1tZvtmz9o6Czs7O+r6+vfWxsbFir1WosLCzsV6xYkcnj8d7z9vaelmPV0Hh5eYGnp+f2mJiY7UVFRZ/HL0v5tru7+5ZGo1FisV
g8Docj4fF4CxsbG1c+nx/m7e39sYeHB7i4uIC5ufmU6r4ODQ1BZWXlifkSrYuA8DRTumIrKytPent78728vCb9HRQKBVgsFhwcHIBOpwObzd4yNja2RaVSwdDQEHR1dcHo6CjQaDRwdXWdsWPV0KBQKPDw8AA7O7udERERO2tra2FgYACoVCo4OTkBjUYDMpkMeDz+8WuqaLVaaGxsbL19+/YzSX8ICPOFqYrJidDQ0AwvLy/e80c/CwaDARKJBI86BdJoNHB3dwe1Wj0nViL6IJPJwGQywdnZGZRKJRAIBDBUx8OBgQEoLS39BtkORpjPTJg1PB61N64pmpqarvb39xvUiLkuJE9CJpPBxsbGYEICANDZ2SlHgtQQ5jtTEhMAgLq6ulyJRFJvDGNeREZGRkAikRSUFuci2cEI85opi0l+7hmBWCzOeV6dToTJcfv27cHr168jxbgR5j1TFhMAgObm5hKZDNl0MAQtLS3Xzpw6hkS8Isx7piUmUqlUIBAIJuyjgzA5Ojs7QSKRINGuCAuCaYmJsKKw68qVK59KJJIu5HFneiiVSigqKjouEolOmtoWBARDMC0xAQC4+MvPJadOnXq3ra1N8yL0dDEkOp0OSktLy/Pz8w8+3d4CAWG+Mm0xAQA4fuy/jl+8ePGju3fvGsqeBY9Wq4XKysrWU6dOvX31yi8mKyyFgGBoZiQmAAD/79D+fadPn96PCMrz0el0UFVV1frtt9+mj9fiAgFhPjNjMQEAyMvLO3Ds2LE/tLS0INmuerh27Vr9999//xoiJAgLEYOEntbVVigB4PNNm3cMpqSkfMRms50McdyFgkqlgqKiovJTp069nZ97BhEShAWJQePYj373xdF1GzbLFQrFx6Ghob766ne8KNy7dw+KiopO5ubmfmTK4tsICMbG4EkxWT99d35l4rre/v7+D0NCQvh0Ot3QU8wL1Go1SKVSTX5+/sH8/PyDSP8bhIWOUTLsLuVklQcFR65pbGzcvnLlyvfc3NwsCASCMaaac+h0OhgaGoLq6uqaCxcu/OV01tGcTw7uM7VZCAhGx2jpug/vxAd58atzoqKitq1cuXKnvb29saabE+h0Oqiurpbm5eUdrK6uPlspuDrvY0hmO4YIhUIBGq1/X2CmNqFQKL3/79HomZ/z82xEowyy9zFr80zGDqPn/hdeviBmL47ad+fOnRsRERGbQkNDo62srIw97azT2dkJxcXFx0tKSo7Mdh8hY4LD4TDPH2U4MFjMc6tLmZmZzaj+Aw6H0/t9PB4PGCxmRudNJBL0ngeZTAI0Gj3jv+1szfM88Hic8cUEAKCmqlQOAN/ELU2qkEgkySwWK3HRokVcBoMxG9MbDZ1OB83NzdDU1FRQW1t7XiAQHJ+ovu18pbr6Rg6L5ZtoM0EhcUPT0tJW8tWRb0vQqIkvgKqqmhnVfrl2TfANXo+gjKlUio4OWc1M5sjOzjnQUH8rbqLPu3t6moaGhmfc+3q25tGHUqmECoEIUKbIrVkcEkONiIh4jcvlvu7s7OxLo9GmVe7QVCgUCujq6oKGhoaCioqKo9XV1WeM3YDMVPDik1gpyas+XrVyeaKXl8czjyANjbcgI/MNmkg49Q4ECPOH3NyC4RUr+M8IcHt7B1y9WlKRl3/5kElKnD1sfXEoJCzueEBAQGJYWFgGk8nk2djYAIFAgLm4pTw6Ogqjo6Mgl8vhxo0b50tLS4/U19fnLvS2FIWXfxEDQNLmLW9ueW1TxtchHDaQyWRTm4VgYkZHR6G+vhF+/NfP+y5e+vVjiVgwZpKVydOwF0dZW1lZOTGZTD6bzU4LCAiIptPp8HTDL1MwOjoKLS0tUFdXd1IsFudIpdKKgYGB7tloJTrX4MUnsVJTEj9etzY10dHRAQAAGm81wcsZW5CVyQInL69gNCGBjwcAGBx8ANnncypOnTr3H9nn/reD55wovvrQpyIHAHFUzIocGo3mQaPRfBwdHVlubm7bXF1dgc
FgABqNNvruglwuh7t374JMJoOOjo7P79y5I+ru7m7q7e1tXQi7MzOh8PIv4pCw2DdaWtte37Au7aPIyCWAxWABjUbPif9HCMbjURtKiaQBfvr5zH9evlJ0uLQ4r/nJMXNiZTIRrMAlJAcHB18HBweWo6Mjy8rKajeJRAJLS0uwtLQECwsLoFAogMfjAYvFgpmZ2XNXMyqVCoaHh2FoaAiGh4cfvwYGBqCvrw+6u7vfvnfvXlNvb29rT09Pq0QsUM7S6c4rNqS/lrZ5U+YPRBKR9M7u9xwqBUUvtNAudH766XSLE8PR49ixE78/8tVnX403Zk7fUR46NUUAIPIPCMdTKJTdNjY2QKPRgE6nA51OB1tbWyCRSIDD4YBAIAAejwcCgfDYUajVakGlUoFarQadTvfY79HX1wf9/f0gl8tBLpfDvXv3HvXw+dxQPYYXMj+d+P7Mmzv+5OHr6/OJWq1GBHeB09TcUiKuq/coKS3/eqIx/wPkiIXC3w6YjAAAAABJRU5ErkJggg=="); + + --keyword: #ff79c6; + --identifier: #f8f8f2; + --comment: #6272a4; + --operator: #ff79c6; + --punctuation: #f8f8f2; + --other: #f8f8f2; + --escapeSequence: #bd93f9; + --number: #bd93f9; + --literal: #f1fa8c; + --program: #9090c0; + --option: #90b010; + --raw-data: #8be9fd; + + --clipboard-image-normal: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' style='color: lightgray' fill='none' viewBox='0 0 24 24' stroke='currentColor'%3E %3Cpath stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M9 5H7a2 2 0 00-2 2v12a2 2 0 002 2h10a2 2 0 002-2V7a2 2 0 00-2-2h-2M9 5a2 2 0 002 2h2a2 2 0 002-2M9 5a2 2 0 012-2h2a2 2 0 012 2' /%3E %3C/svg%3E"); + --clipboard-image-selected: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' style='color: lightgray' viewBox='0 0 20 20' fill='currentColor'%3E %3Cpath d='M8 3a1 1 0 011-1h2a1 1 0 110 2H9a1 1 0 01-1-1z' /%3E %3Cpath d='M6 3a2 2 0 00-2 2v11a2 2 0 002 2h8a2 2 0 002-2V5a2 2 0 00-2-2 3 3 0 01-3 3H9a3 3 0 01-3-3z' /%3E %3C/svg%3E"); + --clipboard-image: var(--clipboard-image-normal); + } +} + +.theme-select-wrapper { + display: flex; + align-items: center; +} + +html { + font-size: 100%; + -webkit-text-size-adjust: 100%; + -ms-text-size-adjust: 100%; } + +body { + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif; + font-weight: 400; + font-size: 1.125em; + line-height: 1.5; + color: var(--text); + background-color: 
var(--primary-background); } + +/* Skeleton grid */ +.container { + position: relative; + width: 100%; + max-width: 1050px; + margin: 0 auto; + padding: 0; + box-sizing: border-box; } + +.column, .columns { + width: 100%; + float: left; + box-sizing: border-box; + margin-left: 1%; } + +@media print { + #global-links, .link-seesrc, .theme-switch-wrapper, #searchInputDiv, .search-groupby { + display:none; + } + .columns { + width:100% !important; + } +} + +.column:first-child, .columns:first-child { + margin-left: 0; } + +.container .row { + display: flex; } + +.three.columns { + width: 25.0%; + height: 100vh; + position: sticky; + top: 0px; + overflow-y: auto; + padding: 2px; +} + +.nine.columns { + width: 75.0%; + padding-left: 1.5em; } + +.twelve.columns { + width: 100%; + margin-left: 0; } + +@media screen and (max-width: 860px) { + .three.columns { + display: none; + } + .nine.columns { + width: 98.0%; + } + body { + font-size: 1em; + line-height: 1.35; + } +} + +cite { + font-style: italic !important; } + + +/* Nim search input */ +div#searchInputDiv { + margin-bottom: 1em; +} +input#searchInput { + width: 80%; +} + +/* + * Some custom formatting for input forms. + * This also fixes input form colors on Firefox with a dark system theme on Linux. 
+ */ +input { + -moz-appearance: none; + background-color: var(--secondary-background); + color: var(--text); + border: 1px solid var(--border); + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif; + font-size: 0.9em; + padding: 6px; +} + +input:focus { + border: 1px solid var(--input-focus); + box-shadow: 0 0 3px var(--input-focus); +} + +select { + -moz-appearance: none; + background-color: var(--secondary-background); + color: var(--text); + border: 1px solid var(--border); + font-family: "Lato", "Helvetica Neue", "HelveticaNeue", Helvetica, Arial, sans-serif; + font-size: 0.9em; + padding: 6px; +} + +select:focus { + border: 1px solid var(--input-focus); + box-shadow: 0 0 3px var(--input-focus); +} + +/* Docgen styles */ + +:target { + border: 2px solid #B5651D; + border-style: dotted; +} + +/* Links */ +a { + color: var(--anchor); + text-decoration: none; +} + +a span.Identifier { + text-decoration: underline; + text-decoration-color: #aab; +} + +a.reference-toplevel { + font-weight: bold; +} + +a.nimdoc { + word-spacing: 0.3em; +} + +a.toc-backref { + text-decoration: none; + color: var(--text); +} + +a.link-seesrc { + color: #607c9f; + font-size: 0.9em; + font-style: italic; +} + +a:hover, a:focus { + color: var(--anchor-focus); + text-decoration: underline; +} + +a:hover span.Identifier { + color: var(--anchor); +} + + +sub, sup { + position: relative; + font-size: 75%; + line-height: 0; + vertical-align: baseline; } + +sup { + top: -0.5em; } + +sub { + bottom: -0.25em; } + +img { + width: auto; + height: auto; + max-width: 100%; + vertical-align: middle; + border: 0; + -ms-interpolation-mode: bicubic; } + +@media print { + * { + color: black !important; + text-shadow: none !important; + background: transparent !important; + box-shadow: none !important; } + + a, a:visited { + text-decoration: underline; } + + a[href]:after { + content: " (" attr(href) ")"; } + + abbr[title]:after { + content: " (" attr(title) ")"; } + + 
.ir a:after, + a[href^="javascript:"]:after, + a[href^="#"]:after { + content: ""; } + + pre, blockquote { + border: 1px solid #999; + page-break-inside: avoid; } + + thead { + display: table-header-group; } + + tr, img { + page-break-inside: avoid; } + + img { + max-width: 100% !important; } + + @page { + margin: 0.5cm; } + + h1 { + page-break-before: always; } + + h1.title { + page-break-before: avoid; } + + p, h2, h3 { + orphans: 3; + widows: 3; } + + h2, h3 { + page-break-after: avoid; } +} + + +p { + margin-top: 0.5em; + margin-bottom: 0.5em; } + +small { + font-size: 85%; } + +strong { + font-weight: 600; + font-size: 0.95em; + color: var(--strong); } + +em { + font-style: italic; } + +h1 { + font-size: 1.8em; + font-weight: 400; + padding-bottom: .25em; + border-bottom: 6px solid var(--third-background); + margin-top: 2.5em; + margin-bottom: 1em; + line-height: 1.2em; } + +h1.title { + padding-bottom: 1em; + border-bottom: 0px; + font-size: 2.5em; + text-align: center; + font-weight: 900; + margin-top: 0.75em; + margin-bottom: 0em; } + +h2 { + font-size: 1.3em; + margin-top: 2em; } + +h2.subtitle { + margin-top: 0em; + text-align: center; } + +h3 { + font-size: 1.125em; + font-style: italic; + margin-top: 1.5em; } + +h4 { + font-size: 1.125em; + margin-top: 1em; } + +h5 { + font-size: 1.125em; + margin-top: 0.75em; } + +h6 { + font-size: 1.1em; } + + +ul, ol { + padding: 0; + margin-top: 0.5em; + margin-left: 0.75em; } + +ul ul, ul ol, ol ol, ol ul { + margin-bottom: 0; + margin-left: 1.25em; } + +ul.simple > li { + list-style-type: circle; } + +ul.simple-boot li { + list-style-type: none; + margin-left: 0em; + margin-bottom: 0.5em; } + +ol.simple > li, ul.simple > li { + margin-bottom: 0.2em; + margin-left: 0.4em } + +ul.simple.simple-toc > li { + margin-top: 1em; } + +ul.simple-toc { + list-style: none; + font-size: 0.9em; + margin-left: -0.3em; + margin-top: 1em; } + +ul.simple-toc > li { + list-style-type: none; } + +ul.simple-toc-section { + 
list-style-type: circle; + margin-left: 0.8em; + color: #6c9aae; } + +ul.nested-toc-section { + list-style-type: circle; + margin-left: -0.75em; + color: var(--text); } + +ul.nested-toc-section > li { + margin-left: 1.25em; } + + +ol.arabic { + list-style: decimal; } + +ol.loweralpha { + list-style: lower-alpha; } + +ol.upperalpha { + list-style: upper-alpha; } + +ol.lowerroman { + list-style: lower-roman; } + +ol.upperroman { + list-style: upper-roman; } + +ul.auto-toc { + list-style-type: none; } + + +dl { + margin-bottom: 1.5em; } + +dt { + margin-bottom: -0.5em; + margin-left: 0.0em; } + +dd { + margin-left: 2.0em; + margin-bottom: 3.0em; + margin-top: 0.5em; } + + +hr { + margin: 2em 0; + border: 0; + border-top: 1px solid #aaa; } + +hr.footnote { + width: 25%; + border-top: 0.15em solid #999; + margin-bottom: 0.15em; + margin-top: 0.15em; +} +div.footnote-group { + margin-left: 1em; +} +div.footnote-label { + display: inline-block; + min-width: 1.7em; +} + +div.option-list { + border: 0.1em solid var(--border); +} +div.option-list-item { + padding-left: 12em; + padding-right: 0; + padding-bottom: 0.3em; + padding-top: 0.3em; +} +div.odd { + background-color: var(--secondary-background); +} +div.option-list-label { + margin-left: -11.5em; + margin-right: 0em; + min-width: 11.5em; + display: inline-block; + vertical-align: top; +} +div.option-list-description { + width: calc(100% - 1em); + padding-left: 1em; + padding-right: 0; + display: inline-block; +} + +blockquote { + font-size: 0.9em; + font-style: italic; + padding-left: 0.5em; + margin-left: 0; + border-left: 5px solid #bbc; +} + +blockquote.markdown-quote { + font-size: 0.9rem; /* use rem to avoid recursion */ + font-style: normal; +} + +.pre, span.tok { + font-family: "Source Code Pro", Monaco, Menlo, Consolas, "Courier New", monospace; + font-weight: 500; + font-size: 0.85em; + color: var(--text); + background-color: var(--third-background); + padding-left: 3px; + padding-right: 3px; + border-radius: 
4px; +} + +span.tok { + border: 1px solid #808080; + padding-bottom: 0.1em; + margin-right: 0.2em; +} + +.copyToClipBoard { + position: relative; +} + +pre { + font-family: "Source Code Pro", Monaco, Menlo, Consolas, "Courier New", monospace; + color: var(--text); + font-weight: 500; + display: inline-block; + box-sizing: border-box; + min-width: 100%; + padding: 0.5em; + margin-top: 0.5em; + margin-bottom: 0.5em; + font-size: 0.85em; + white-space: pre !important; + overflow-y: hidden; + overflow-x: visible; + background-color: var(--secondary-background); + border: 1px solid var(--border); + -webkit-border-radius: 6px; + -moz-border-radius: 6px; + border-radius: 6px; +} + +.copyToClipBoardBtn { + visibility: hidden; + position: absolute; + width: 24px; + border-radius: 4px; + background-image: var(--clipboard-image); + right: 5px; + top: 13px; + background-color: var(--secondary-background); + padding: 11px; + border: 0; +} + +.copyToClipBoard:hover .copyToClipBoardBtn { + visibility: visible; +} + +.pre-scrollable { + max-height: 340px; + overflow-y: scroll; } + + +/* Nim line-numbered tables */ +.line-nums-table { + width: 100%; + table-layout: fixed; } + +table.line-nums-table { + border-radius: 4px; + border: 1px solid #cccccc; + background-color: ghostwhite; + border-collapse: separate; + margin-top: 15px; + margin-bottom: 25px; } + +.line-nums-table tbody { + border: none; } + +.line-nums-table td pre { + border: none; + background-color: transparent; } + +.line-nums-table td.blob-line-nums { + width: 28px; } + +.line-nums-table td.blob-line-nums pre { + color: #b0b0b0; + -webkit-filter: opacity(75%); + filter: opacity(75%); + text-align: right; + border-color: transparent; + background-color: transparent; + padding-left: 0px; + margin-left: 0px; + padding-right: 0px; + margin-right: 0px; } + + +table { + max-width: 100%; + background-color: transparent; + margin-top: 0.5em; + margin-bottom: 1.5em; + border-collapse: collapse; + border-color: 
var(--third-background); + border-spacing: 0; + font-size: 0.9em; +} + +table th, table td { + padding: 0px 0.5em 0px; + border-color: var(--third-background); +} + +table th { + background-color: var(--third-background); + border-color: var(--third-background); + font-weight: bold; } + +table th.docinfo-name { + background-color: transparent; + text-align: right; +} + +table tr:hover { + background-color: var(--third-background); } + + +/* rst2html default used to remove borders from tables and images */ +.borderless, table.borderless td, table.borderless th { + border: 0; } + +table.borderless td, table.borderless th { + /* Override padding for "table.docutils td" with "! important". + The right padding separates the table cells. */ + padding: 0 0.5em 0 0 !important; } + +.admonition { + padding: 0.3em; + background-color: var(--secondary-background); + border-left: 0.4em solid #7f7f84; + margin-bottom: 0.5em; + -webkit-box-shadow: 0 5px 8px -6px rgba(0,0,0,.2); + -moz-box-shadow: 0 5px 8px -6px rgba(0,0,0,.2); + box-shadow: 0 5px 8px -6px rgba(0,0,0,.2); +} +.admonition-info { + border-color: var(--info-background); +} +.admonition-info-text { + color: var(--info-background); +} +.admonition-warning { + border-color: var(--warning-background); +} +.admonition-warning-text { + color: var(--warning-background); +} +.admonition-error { + border-color: var(--error-background); +} +.admonition-error-text { + color: var(--error-background); +} + +.first { + /* Override more specific margin styles with "! important". 
*/ + margin-top: 0 !important; } + +.last, .with-subtitle { + margin-bottom: 0 !important; } + +.hidden { + display: none; } + +blockquote.epigraph { + margin: 2em 5em; } + +dl.docutils dd { + margin-bottom: 0.5em; } + +object[type="image/svg+xml"], object[type="application/x-shockwave-flash"] { + overflow: hidden; } + + +div.figure { + margin-left: 2em; + margin-right: 2em; } + +div.footer, div.header { + clear: both; + text-align: center; + color: #666; + font-size: smaller; } + +div.footer { + padding-top: 5em; } + +div.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; } + +div.line-block div.line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; } + +div.topic { + margin: 2em; } + +div.search_results { + background-color: var(--third-background); + margin: 3em; + padding: 1em; + border: 1px solid #4d4d4d; } + +div#global-links ul { + margin-left: 0; + list-style-type: none; } + +div#global-links > simple-boot { + margin-left: 3em; } + +hr.docutils { + width: 75%; } + +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; } + +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; } + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; } + +.align-left { + text-align: left; } + +.align-center { + clear: both; + text-align: center; } + +.align-right { + text-align: right; } + +/* reset inner alignment in figures */ +div.align-right { + text-align: inherit; } + +p.attribution { + text-align: right; + margin-left: 50%; } + +p.caption { + font-style: italic; } + +p.credits { + font-style: italic; + font-size: smaller; } + +p.label { + white-space: nowrap; } + +p.rubric { + font-weight: bold; + font-size: larger; + color: maroon; + text-align: center; } + +p.topic-title { + font-weight: bold; } + +pre.address { + margin-bottom: 0; + margin-top: 0; + 
font: inherit; } + +pre.literal-block, pre.doctest-block, pre.math, pre.code { + margin-left: 2em; + margin-right: 2em; } + +pre.code .ln { + color: grey; } + +/* line numbers */ +pre.code, code { + background-color: #eeeeee; } + +pre.code .comment, code .comment { + color: #5c6576; } + +pre.code .keyword, code .keyword { + color: #3B0D06; + font-weight: bold; } + +pre.code .literal.string, code .literal.string { + color: #0c5404; } + +pre.code .name.builtin, code .name.builtin { + color: #352b84; } + +pre.code .deleted, code .deleted { + background-color: #DEB0A1; } + +pre.code .inserted, code .inserted { + background-color: #A3D289; } + +span.classifier { + font-style: oblique; } + +span.classifier-delimiter { + font-weight: bold; } + +span.problematic { + color: #b30000; } + +span.section-subtitle { + /* font-size relative to parent (h1..h6 element) */ + font-size: 80%; } + +span.DecNumber { + color: var(--number); } + +span.BinNumber { + color: var(--number); } + +span.HexNumber { + color: var(--number); } + +span.OctNumber { + color: var(--number); } + +span.FloatNumber { + color: var(--number); } + +span.Identifier { + color: var(--identifier); } + +span.Keyword { + font-weight: 600; + color: var(--keyword); } + +span.StringLit { + color: var(--literal); } + +span.LongStringLit { + color: var(--literal); } + +span.CharLit { + color: var(--literal); } + +span.EscapeSequence { + color: var(--escapeSequence); } + +span.Operator { + color: var(--operator); } + +span.Punctuation { + color: var(--punctuation); } + +span.Comment, span.LongComment { + font-style: italic; + font-weight: 400; + color: var(--comment); } + +span.RegularExpression { + color: darkviolet; } + +span.TagStart { + color: darkviolet; } + +span.TagEnd { + color: darkviolet; } + +span.Key { + color: #252dbe; } + +span.Value { + color: #252dbe; } + +span.RawData { + color: var(--raw-data); } + +span.Assembler { + color: #252dbe; } + +span.Preprocessor { + color: #252dbe; } + +span.Directive { + 
color: #252dbe; } + +span.option { + font-weight: bold; + font-family: "Source Code Pro", Monaco, Menlo, Consolas, "Courier New", monospace; + color: var(--option); } + +span.Prompt { + font-weight: bold; + color: red; } + +span.ProgramOutput { + font-weight: bold; + color: #808080; } + +span.program { + font-weight: bold; + color: var(--program); + text-decoration: underline; + text-decoration-color: var(--hint); + text-decoration-thickness: 0.05em; + text-underline-offset: 0.15em; } + +span.Command, span.Rule, span.Hyperlink, +span.Label, span.Reference, span.Other { + color: var(--other); } + +/* Pop type, const, proc, and iterator defs in nim def blocks */ +dt pre > span.Identifier, dt pre > span.Operator { + color: var(--identifier); + font-weight: 700; } + +dt pre > span.Keyword ~ span.Identifier, dt pre > span.Identifier ~ span.Identifier, +dt pre > span.Operator ~ span.Identifier, dt pre > span.Other ~ span.Identifier { + color: var(--identifier); + font-weight: inherit; } + +/* Nim sprite for the footer (taken from main page favicon) */ +.nim-sprite { + display: inline-block; + width: 51px; + height: 14px; + background-position: 0 0; + background-size: 51px 14px; + -webkit-filter: opacity(50%); + filter: opacity(50%); + background-repeat: no-repeat; + background-image: var(--nim-sprite-base64); + margin-bottom: 5px; } + +span.pragmadots { + /* Position: relative frees us up to make the dots + look really nice without fucking up the layout and + causing bulging in the parent container */ + position: relative; + /* 1px down looks slightly nicer */ + top: 1px; + padding: 2px; + background-color: var(--third-background); + border-radius: 4px; + margin: 0 2px; + cursor: pointer; + font-size: 0.8em; } + +span.pragmadots:hover { + background-color: var(--hint); } + +span.pragmawrap { + display: none; } + +span.attachedType { + display: none; + visibility: hidden; } diff --git a/nlp.html b/nlp.html new file mode 100644 index 000000000..2c8f6e171 --- /dev/null +++ 
b/nlp.html @@ -0,0 +1,420 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nlp + + + + + + + + + +Arraymancer - src/arraymancer/nlp + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nlp

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tokenizers +
+
+ + +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nlp.idx b/nlp.idx new file mode 100644 index 000000000..0278d6eed --- /dev/null +++ b/nlp.idx @@ -0,0 +1 @@ +nimTitle nlp nlp.html module src/arraymancer/nlp 0 diff --git a/nn.html b/nn.html new file mode 100644 index 000000000..e967d4203 --- /dev/null +++ b/nn.html @@ -0,0 +1,420 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn + + + + + + + + + +Arraymancer - src/arraymancer/nn + + + + + + + +Fork me on GitHub + + +
+ +
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nn.idx b/nn.idx new file mode 100644 index 000000000..7b0330fa8 --- /dev/null +++ b/nn.idx @@ -0,0 +1 @@ +nimTitle nn nn.html module src/arraymancer/nn 0 diff --git a/nn_dsl.html b/nn_dsl.html new file mode 100644 index 000000000..b8360fceb --- /dev/null +++ b/nn_dsl.html @@ -0,0 +1,474 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/nn_dsl + + + + + + + + + +Arraymancer - src/arraymancer/nn/nn_dsl + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/nn_dsl

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Macros

+
+
+
+
macro network(modelName: untyped; config: untyped): untyped
+
+ +

Declare a neural network.

+

Example usage:

+
network DemoNet:
+  layers h, w:
+    cv1:        Conv2D(@[1, h, w], 20, (5, 5))
+    mp1:        Maxpool2D(cv1.outShape, (2,2), (0,0), (2,2))
+    cv2:        Conv2D(mp1.outShape, 50, (5, 5))
+    mp2:        MaxPool2D(cv2.outShape, (2,2), (0,0), (2,2))
+    fl:         Flatten(mp2.outShape)
+    hidden:     Linear(fl.outShape[0], 500)
+    classifier: Linear(500, 10)
+  forward x:
+    x.cv1.relu.mp1.cv2.relu.mp2.fl.hidden.relu.classifier
+
+let
+  ctx = newContext Tensor[float32]
+  model = ctx.init(DemoNet, 28, 28)

Custom layers can be created by providing a type, an init-function, and a forward-function. The type could look like this:

+
type
+  MyLayer*[T] = object
+    someWeights*: Variable[Tensor[T]]
+    importantInfo*: seq[int]

It is important that the type has exactly one generic parameter which corresponds to the underlying type (e.g., float32 or int8). The init-function is required to adhere to the following structure:

+
proc init*[T](
+  ctx: Context[Tensor[T]], # could also be Context[AnyTensor[T]] for example
+  layerType: typedesc[MyLayer[T]],
+  myInitParam: string
+  # ... here you can add all the necessary init parameters, like shapes and number of output features
+): MyLayer[T] =
+  discard # your init stuff

The only requirement for the forward function is that the first parameter must be of your layer type like this:

+
proc forward*[T](self: MyLayer[T], myInput: SpecialInputType, doNothing: bool): Variable[Tensor[T]] =
+  if not doNothing:
+    result = myInput.yourComputations(self.importantInfo, self.someWeights)

Often it is also useful to provide proc outShape(m: MyLayer): seq[int] and possibly proc inShape(m: MyLayer): seq[int] functions.

+

Your custom layer can then be used for example like this:

+
network DemoNet2:
+  layers:
+    myLayer:    MyLayer(myInitParam = "hello!")
+    fl:         Flatten(myLayer.outShape)
+    hidden:     Linear(fl.outShape[0], 500)
+    classifier: Linear(500, 10)
+  forward x:
+    x.myLayer(doNothing = false).fl.hidden.relu.classifier
+   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nn_dsl.idx b/nn_dsl.idx new file mode 100644 index 000000000..2ef66f3de --- /dev/null +++ b/nn_dsl.idx @@ -0,0 +1,2 @@ +nimTitle nn_dsl nn_dsl.html module src/arraymancer/nn/nn_dsl 0 +nim network nn_dsl.html#network.m,untyped,untyped macro network(modelName: untyped; config: untyped): untyped 312 diff --git a/nn_primitives.html b/nn_primitives.html new file mode 100644 index 000000000..798f665cd --- /dev/null +++ b/nn_primitives.html @@ -0,0 +1,420 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives + + + + + + + +Fork me on GitHub + + +
+ +
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nn_primitives.idx b/nn_primitives.idx new file mode 100644 index 000000000..244f01617 --- /dev/null +++ b/nn_primitives.idx @@ -0,0 +1 @@ +nimTitle nn_primitives nn_primitives.html module src/arraymancer/nn_primitives 0 diff --git a/nnp_activation.html b/nnp_activation.html new file mode 100644 index 000000000..7e25fedd6 --- /dev/null +++ b/nnp_activation.html @@ -0,0 +1,599 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/nnp_activation + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/nnp_activation + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/nnp_activation

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc mrelu[T](t: var Tensor[T])
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc msigmoid[T: SomeFloat](t: var Tensor[T])
+
+ + Logistic sigmoid activation function, f(x) = 1 / (1 + \exp(-x)) Note: Canonical sigmoid is not stable for large negative value +   Source +Edit + +
+
+ +
+
+
+
proc mtanh[T: SomeFloat](t: var Tensor[T])
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc relu[T](t: Tensor[T]): Tensor[T] {.noinit.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc relu_backward[T](gradient: Tensor[T]; cached_tensor: Tensor[T]): Tensor[T] {.
+    noinit.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc sigmoid[T: SomeFloat](t: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Logistic sigmoid activation function, f(x) = 1 / (1 + \exp(-x)) Note: Canonical sigmoid is not stable for large negative value Please use sigmoid_cross_entropy for the final layer for better stability and performance +   Source +Edit + +
+
+ +
+
+
+
proc sigmoid_backward[T](gradient: Tensor[T]; cached_tensor: Tensor[T]): Tensor[
+    T] {.noinit.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc tanh[T: SomeFloat](t: Tensor[T]): Tensor[T] {.noinit.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc tanh_backward[T](gradient: Tensor[T]; cached_tensor: Tensor[T]): Tensor[T] {.
+    noinit.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nnp_activation.idx b/nnp_activation.idx new file mode 100644 index 000000000..087a34aeb --- /dev/null +++ b/nnp_activation.idx @@ -0,0 +1,10 @@ +nimTitle nnp_activation nnp_activation.html module src/arraymancer/nn_primitives/nnp_activation 0 +nim sigmoid nnp_activation.html#sigmoid,Tensor[T: SomeFloat] proc sigmoid[T: SomeFloat](t: Tensor[T]): Tensor[T] 25 +nim relu nnp_activation.html#relu,Tensor[T] proc relu[T](t: Tensor[T]): Tensor[T] 35 +nim tanh nnp_activation.html#tanh,Tensor[T: SomeFloat] proc tanh[T: SomeFloat](t: Tensor[T]): Tensor[T] 38 +nim msigmoid nnp_activation.html#msigmoid,Tensor[T: SomeFloat] proc msigmoid[T: SomeFloat](t: var Tensor[T]) 44 +nim mrelu nnp_activation.html#mrelu,Tensor[T] proc mrelu[T](t: var Tensor[T]) 52 +nim mtanh nnp_activation.html#mtanh,Tensor[T: SomeFloat] proc mtanh[T: SomeFloat](t: var Tensor[T]) 55 +nim sigmoid_backward nnp_activation.html#sigmoid_backward,Tensor[T],Tensor[T] proc sigmoid_backward[T](gradient: Tensor[T]; cached_tensor: Tensor[T]): Tensor[T] 61 +nim relu_backward nnp_activation.html#relu_backward,Tensor[T],Tensor[T] proc relu_backward[T](gradient: Tensor[T]; cached_tensor: Tensor[T]): Tensor[T] 65 +nim tanh_backward nnp_activation.html#tanh_backward,Tensor[T],Tensor[T] proc tanh_backward[T](gradient: Tensor[T]; cached_tensor: Tensor[T]): Tensor[T] 72 diff --git a/nnp_conv2d_cudnn.html b/nnp_conv2d_cudnn.html new file mode 100644 index 000000000..f5a86137d --- /dev/null +++ b/nnp_conv2d_cudnn.html @@ -0,0 +1,498 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/nnp_conv2d_cudnn + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/nnp_conv2d_cudnn + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/nnp_conv2d_cudnn

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc conv2d[T: SomeFloat](input, kernel, bias: CudaTensor[T];
+                          padding: SizeHW = [0, 0];
+                          strides, dilation: SizeHW = [1, 1]): CudaTensor[T] {.
+    noinit.}
+
+ + Input:
- ``input`` 4D Tensor batch of images of the size [N,C_in,H_in,W_in]
+- ``kernel`` 4D Tensor convolving kernel filters of the size [C_out,C_in,kH,kW]
+- ``bias`` 3D Tensor bias of the size [C_out,1,1]
+ +   Source +Edit + +
+
+ +
+
+
+
proc conv2d_backward[T: SomeFloat](input, kernel, bias: CudaTensor[T];
+                                   padding: SizeHW = [0, 0];
+                                   strides, dilation: SizeHW = [1, 1];
+                                   grad_output: CudaTensor[T]; grad_input,
+    grad_kernel, grad_bias: var CudaTensor[T])
+
+ +

Computes gradients of a 2D convolution. Intended to be used after conv2d to calculate gradients in backward pass.

+

Input:

+
- ``input`` 4D Tensor batch of images of the size [N,C_in,H_in,W_in]
+- ``kernel`` 4D Tensor convolving kernel weights of the size [C_out,C_in,kH,kW]
+- ``bias`` 3D Tensor bias of the size [C_out,1,1] or an empty tensor for no bias
+- ``padding`` SizeHW tuple with height and width of the padding
+- ``strides`` SizeHW tuple with height and width of the convolution strides
+- ``dilation`` SizeHW tuple with a rescaling factor of the convolution
+- ``grad_output`` 4D tensor gradient of the next layer of the size [N,C_out,H_out,W_out]
+- ``grad_input`` tensor where the gradient w.r.t input will be written
+- ``grad_kernel`` tensor where the gradient w.r.t convolution kernel will be written
+- ``grad_bias`` tensor where the gradient w.r.t bias will be written
+

Note: grad_input, grad_kernel and grad_bias will be overwritten. They must have the same shape as the corresponding input, kernel and bias

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nnp_conv2d_cudnn.idx b/nnp_conv2d_cudnn.idx new file mode 100644 index 000000000..acfc6ffcb --- /dev/null +++ b/nnp_conv2d_cudnn.idx @@ -0,0 +1,3 @@ +nimTitle nnp_conv2d_cudnn nnp_conv2d_cudnn.html module src/arraymancer/nn_primitives/nnp_conv2d_cudnn 0 +nim conv2d nnp_conv2d_cudnn.html#conv2d,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat],SizeHW,SizeHW,SizeHW proc conv2d[T: SomeFloat](input, kernel, bias: CudaTensor[T];\n padding: SizeHW = [0, 0];\n strides, dilation: SizeHW = [1, 1]): CudaTensor[T] 20 +nim conv2d_backward nnp_conv2d_cudnn.html#conv2d_backward,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat],SizeHW,SizeHW,SizeHW,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc conv2d_backward[T: SomeFloat](input, kernel, bias: CudaTensor[T];\n padding: SizeHW = [0, 0];\n strides, dilation: SizeHW = [1, 1];\n grad_output: CudaTensor[T]; grad_input,\n grad_kernel, grad_bias: var CudaTensor[T]) 74 diff --git a/nnp_convolution.html b/nnp_convolution.html new file mode 100644 index 000000000..9f3fe165a --- /dev/null +++ b/nnp_convolution.html @@ -0,0 +1,540 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/nnp_convolution + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/nnp_convolution + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/nnp_convolution

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
Conv2DAlgorithm = enum
+  Im2ColGEMM, NNPackAuto
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc conv2d[T](input, weight, bias: Tensor[T]; padding: Size2D = (0, 0);
+               stride: Size2D = (1, 1); algorithm = Conv2DAlgorithm.Im2ColGEMM): Tensor[
+    T] {.inline.}
+
+ +

Computes a 2D convolution over input images. Intended to be used in 2d convolution forward pass. This applies a 2D cross-correlation, not to be confused with the mathematical convolution.

+

Input:

+
- ``input`` 4D Tensor batch of images of the size [N,C_in,H_in,W_in]
+- ``weight`` 4D Tensor convolving kernel weights of the size [C_out,C_in,kH,kW]
+- ``bias`` 3D Tensor bias of the size [C_out,1,1] or an empty tensor for no bias
+- ``padding`` Size2D tuple with height and width of the padding
+- ``stride`` Size2D tuple with height and width of the stride
+- ``algorithm`` algorithm to be used in the convolution
+

Returns:

+
- A 4D Tensor of sized [N,C_out,H_out,W_out], where
+   H_out = (H_in + (2*padding.height) - kH) / stride.height + 1
+   W_out = (W_in + (2*padding.width) - kW) / stride.width + 1
+

Valid algorithms:

+
  • Im2ColGEMM im2col + GEMM algorithm, this is the default
  • +
  • NNPackAuto Use NNPack and let it auto detect the best algorithm
  • +
+

Future: bias will leverage the upcoming Optional type to be really optional.

+ +   Source +Edit + +
+
+ +
+
+
+
proc conv2d_backward[T](input, weight, bias: Tensor[T]; padding: Size2D;
+                        stride: Size2D; grad_output: Tensor[T];
+                        grad_input, grad_weight, grad_bias: var Tensor[T];
+                        algorithm = Conv2DAlgorithm.Im2ColGEMM)
+
+ +

Computes gradients of a 2D convolution. Intended to be used after conv2d to calculate gradients in backward pass.

+

Input:

+
- ``input`` 4D Tensor batch of images of the size [N,C_in,H_in,W_in]
+- ``weight`` 4D Tensor convolving kernel weights of the size [C_out,C_in,kH,kW]
+- ``bias`` 3D Tensor bias of the size [C_out,1,1] or an empty tensor for no bias
+- ``padding`` Size2D tuple with height and width of the padding
+- ``stride`` Size2D tuple with height and width of the stride
+- ``grad_output`` 4D tensor gradient of the next layer of the size [N,C_out,H_out,W_out]
+- ``grad_input`` tensor where the gradient w.r.t input will be written
+- ``grad_weight`` tensor where the gradient w.r.t weight will be written
+- ``grad_bias`` tensor where the gradient w.r.t bias will be written
+- ``algorithm`` algorithm to be used in the convolution
+

Valid algorithms:

+
  • Im2ColGEMM im2col + GEMM algorithm, this is the default
  • +
  • NNPackAuto Use NNPack and let it auto detect the best algorithm
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nnp_convolution.idx b/nnp_convolution.idx new file mode 100644 index 000000000..15524ac8c --- /dev/null +++ b/nnp_convolution.idx @@ -0,0 +1,6 @@ +nimTitle nnp_convolution nnp_convolution.html module src/arraymancer/nn_primitives/nnp_convolution 0 +nim Im2ColGEMM nnp_convolution.html#Im2ColGEMM Conv2DAlgorithm.Im2ColGEMM 24 +nim NNPackAuto nnp_convolution.html#NNPackAuto Conv2DAlgorithm.NNPackAuto 24 +nim Conv2DAlgorithm nnp_convolution.html#Conv2DAlgorithm enum Conv2DAlgorithm 24 +nim conv2d nnp_convolution.html#conv2d,Tensor[T],Tensor[T],Tensor[T],Size2D,Size2D proc conv2d[T](input, weight, bias: Tensor[T]; padding: Size2D = (0, 0);\n stride: Size2D = (1, 1); algorithm = Conv2DAlgorithm.Im2ColGEMM): Tensor[\n T] 28 +nim conv2d_backward nnp_convolution.html#conv2d_backward,Tensor[T],Tensor[T],Tensor[T],Size2D,Size2D,Tensor[T],Tensor[T],Tensor[T],Tensor[T] proc conv2d_backward[T](input, weight, bias: Tensor[T]; padding: Size2D;\n stride: Size2D; grad_output: Tensor[T];\n grad_input, grad_weight, grad_bias: var Tensor[T];\n algorithm = Conv2DAlgorithm.Im2ColGEMM) 65 diff --git a/nnp_embedding.html b/nnp_embedding.html new file mode 100644 index 000000000..3bd594557 --- /dev/null +++ b/nnp_embedding.html @@ -0,0 +1,494 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/nnp_embedding + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/nnp_embedding + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/nnp_embedding

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Procs

+
+
+
+
proc embedding[T; Idx: byte or char or SomeInteger](vocab_id: Tensor[Idx];
+    weight: Tensor[T]): Tensor[T]
+
+ +

Returns embeddings from a weight embedding matrix and vocab_id to represent the part of the global vocabulary present.

+

The main use-case is for natural language processing. Words (or characters or group of words) need to be encoded into arbitrary integers first that will be used to index the weight embedding matrix.

+

During training, words that are related will get become close in some dimensions of the embedding.

+

For example, if we want to encode a text containing 10000 different words into a 300-dimensional vector, we will require a 10000, 300 embedding matrix.

+

Make sure to add an index to represent <UNKNOWN> words. (Words present during test that didn't exist in the training vocabulary)

+

If working with variable-length sequences a <START>, <STOP> and <PAD> "words" are also useful

+

In summary it's a lookup table that maps words to meanings in a high-dimensional space and that can be trained.

+

Input:

+ +

Result:

+ + +   Source +Edit + +
+
+ +
+
+
+
proc embedding_backward[T; Idx: byte or char or SomeInteger](
+    dWeight: var Tensor[T]; vocab_id: Tensor[Idx]; dOutput: Tensor[T];
+    padding_idx: Idx; scale_grad_by_freq: static[bool] = false)
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nnp_embedding.idx b/nnp_embedding.idx new file mode 100644 index 000000000..1eeef4360 --- /dev/null +++ b/nnp_embedding.idx @@ -0,0 +1,3 @@ +nimTitle nnp_embedding nnp_embedding.html module src/arraymancer/nn_primitives/nnp_embedding 0 +nim embedding nnp_embedding.html#embedding,Tensor[Idx: byte or char or int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64],Tensor[T] proc embedding[T; Idx: byte or char or SomeInteger](vocab_id: Tensor[Idx];\n weight: Tensor[T]): Tensor[T] 11 +nim embedding_backward nnp_embedding.html#embedding_backward,Tensor[T],Tensor[Idx: byte or char or int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64],Tensor[T],Idx,static[bool] proc embedding_backward[T; Idx: byte or char or SomeInteger](dWeight: var Tensor[T];\n vocab_id: Tensor[Idx]; dOutput: Tensor[T]; padding_idx: Idx;\n scale_grad_by_freq: static[bool] = false) 57 diff --git a/nnp_gru.html b/nnp_gru.html new file mode 100644 index 000000000..f0c70ef86 --- /dev/null +++ b/nnp_gru.html @@ -0,0 +1,663 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/nnp_gru + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/nnp_gru + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/nnp_gru

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc gru_backward[T: SomeFloat](dInput, dHidden0, dW3s0, dW3sN, dU3s, dbW3s,
+                                dbU3s: var Tensor[T];
+                                dOutput, dHiddenN: Tensor[T];
+                                cached_inputs: seq[Tensor[T]];
+                                cached_hiddens: seq[seq[Tensor[T]]];
+                                W3s0, W3sN, U3s, rs, zs, ns, Uhs: Tensor[T])
+
+ + โš ๏ธ API subject to change to match CuDNNs +   Source +Edit + +
+
+ +
+
+
+
proc gru_cell_backward[T: SomeFloat](dx, dh, dW3, dU3, dbW3, dbU3: var Tensor[T];
+                                     dnext: Tensor[T]; x, h, W3, U3: Tensor[T];
+                                     r, z, n, Uh: Tensor[T])
+
+ + Input:
  • dx, dh, dW3, dU3: respectively gradients of +
  • +
  • dbW3 and dbU3: gradients of the biases for W3 and U3 weights
  • +
  • dnext: gradient flowing back from the next layer
  • +
  • x, h, W3, U3: inputs saved from the forward pass
  • +
  • r, z, n, Uh: intermediate results saved from the forward pass of shape batch_size, hidden_size
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc gru_cell_forward[T: SomeFloat](input, W3, U3, bW3, bU3: Tensor[T];
+                                    r, z, n, Uh, hidden: var Tensor[T])
+
+ + Input: +

Output:

+
  • r, z, n, Uh: intermediate tensors saved for backpropagation. of shape batch_size, hidden_size
  • +
  • y == h'(t): The next hidden state of the GRU Cell. (GRU output and next hidden state are the same)
  • +
+

โš ๏ธ Input/output updated in place:

+ + +   Source +Edit + +
+
+ +
+
+
+
proc gru_cell_inference[T: SomeFloat](input: Tensor[T];
+                                      W3, U3, bW3, bU3: Tensor[T];
+                                      hidden: var Tensor[T])
+
+ + Input: +

Output (in-place):

+
  • y == h'(t): The next hidden state of the GRU Cell. (GRU output and next hidden state are the same)
  • +
+

โš ๏ธ Input/Output updated in-place:

+ +

This is an optimized function when backpropagation is not needed.

+ +   Source +Edit + +
+
+ +
+
+
+
proc gru_forward[T: SomeFloat](input: Tensor[T]; W3s0, W3sN: Tensor[T];
+                               U3s, bW3s, bU3s: Tensor[T];
+                               rs, zs, ns, Uhs: var Tensor[T];
+                               output, hidden: var Tensor[T];
+                               cached_inputs: var seq[Tensor[T]];
+                               cached_hiddens: var seq[seq[Tensor[T]]])
+
+ +

โš ๏ธ API subject to change to match CuDNNs

+

Bidirectional support is not implemented

+

Inputs:

+ +

Outputs:

+ +

โš ๏ธ Input/Output updated in-place:

+ + +   Source +Edit + +
+
+ +
+
+
+
proc gru_inference[T: SomeFloat](input: Tensor[T]; W3s0, W3sN: Tensor[T];
+                                 U3s, bW3s, bU3s: Tensor[T];
+                                 output, hidden: var Tensor[T])
+
+ +

Bidirectional support is not implemented

+

Inputs:

+ +

Outputs:

+ +

โš ๏ธ Input/Output updated in-place:

+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nnp_gru.idx b/nnp_gru.idx new file mode 100644 index 000000000..a5a48fd5e --- /dev/null +++ b/nnp_gru.idx @@ -0,0 +1,7 @@ +nimTitle nnp_gru nnp_gru.html module src/arraymancer/nn_primitives/nnp_gru 0 +nim gru_cell_inference nnp_gru.html#gru_cell_inference,Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat] proc gru_cell_inference[T: SomeFloat](input: Tensor[T]; W3, U3, bW3, bU3: Tensor[T];\n hidden: var Tensor[T]) 40 +nim gru_cell_forward nnp_gru.html#gru_cell_forward,Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat] proc gru_cell_forward[T: SomeFloat](input, W3, U3, bW3, bU3: Tensor[T];\n r, z, n, Uh, hidden: var Tensor[T]) 90 +nim gru_cell_backward nnp_gru.html#gru_cell_backward,Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat] proc gru_cell_backward[T: SomeFloat](dx, dh, dW3, dU3, dbW3, dbU3: var Tensor[T];\n dnext: Tensor[T]; x, h, W3, U3: Tensor[T];\n r, z, n, Uh: Tensor[T]) 148 +nim gru_inference nnp_gru.html#gru_inference,Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat] proc gru_inference[T: SomeFloat](input: Tensor[T]; W3s0, W3sN: Tensor[T];\n U3s, bW3s, bU3s: Tensor[T];\n output, hidden: var Tensor[T]) 194 +nim gru_forward nnp_gru.html#gru_forward,Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: 
SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],seq[Tensor[T: SomeFloat]],seq[seq[Tensor[T: SomeFloat]]] proc gru_forward[T: SomeFloat](input: Tensor[T]; W3s0, W3sN: Tensor[T];\n U3s, bW3s, bU3s: Tensor[T];\n rs, zs, ns, Uhs: var Tensor[T];\n output, hidden: var Tensor[T];\n cached_inputs: var seq[Tensor[T]];\n cached_hiddens: var seq[seq[Tensor[T]]]) 263 +nim gru_backward nnp_gru.html#gru_backward,Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],seq[Tensor[T: SomeFloat]],seq[seq[Tensor[T: SomeFloat]]],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat],Tensor[T: SomeFloat] proc gru_backward[T: SomeFloat](dInput, dHidden0, dW3s0, dW3sN, dU3s, dbW3s, dbU3s: var Tensor[\n T]; dOutput, dHiddenN: Tensor[T]; cached_inputs: seq[Tensor[T]];\n cached_hiddens: seq[seq[Tensor[T]]];\n W3s0, W3sN, U3s, rs, zs, ns, Uhs: Tensor[T]) 376 diff --git a/nnp_linear.html b/nnp_linear.html new file mode 100644 index 000000000..882028964 --- /dev/null +++ b/nnp_linear.html @@ -0,0 +1,496 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/nnp_linear + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/nnp_linear + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/nnp_linear

+
+ +
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Procs

+
+
+
+
proc linear[T](input, weight: Tensor[T]; bias: Tensor[T]; output: var Tensor[T]) {.
+    inline.}
+
+ + +   Source +Edit + +
+
+
+
proc linear[T](input, weight: Tensor[T]; output: var Tensor[T]) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc linear_backward[T](input, weight, gradOutput: Tensor[T];
+                        gradInput, gradWeight, gradBias: var Tensor[T]) {.inline.}
+
+ + +   Source +Edit + +
+
+
+
proc linear_backward[T](input, weight, gradOutput: Tensor[T];
+                        gradInput, gradWeight: var Tensor[T]) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nnp_linear.idx b/nnp_linear.idx new file mode 100644 index 000000000..ecc591c6c --- /dev/null +++ b/nnp_linear.idx @@ -0,0 +1,7 @@ +nimTitle nnp_linear nnp_linear.html module src/arraymancer/nn_primitives/nnp_linear 0 +nim linear nnp_linear.html#linear,Tensor[T],Tensor[T],Tensor[T],Tensor[T] proc linear[T](input, weight: Tensor[T]; bias: Tensor[T]; output: var Tensor[T]) 20 +nim linear nnp_linear.html#linear,Tensor[T],Tensor[T],Tensor[T] proc linear[T](input, weight: Tensor[T]; output: var Tensor[T]) 31 +nim linear_backward nnp_linear.html#linear_backward,Tensor[T],Tensor[T],Tensor[T],Tensor[T],Tensor[T],Tensor[T] proc linear_backward[T](input, weight, gradOutput: Tensor[T];\n gradInput, gradWeight, gradBias: var Tensor[T]) 39 +nim linear_backward nnp_linear.html#linear_backward,Tensor[T],Tensor[T],Tensor[T],Tensor[T],Tensor[T] proc linear_backward[T](input, weight, gradOutput: Tensor[T];\n gradInput, gradWeight: var Tensor[T]) 57 +nimgrp linear nnp_linear.html#linear-procs-all proc 20 +nimgrp linearbackward nnp_linear.html#linear_backward-procs-all proc 39 diff --git a/nnp_maxpooling.html b/nnp_maxpooling.html new file mode 100644 index 000000000..34776beb5 --- /dev/null +++ b/nnp_maxpooling.html @@ -0,0 +1,475 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/nnp_maxpooling + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/nnp_maxpooling + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/nnp_maxpooling

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc maxpool2d[T](input: Tensor[T]; kernel: Size2D; padding: Size2D = (0, 0);
+                  stride: Size2D = (1, 1)): tuple[max_indices: Tensor[int],
+    maxpooled: Tensor[T]] {.noinit.}
+
+ + MaxPool 2D forward pass +   Source +Edit + +
+
+ +
+
+
+
proc maxpool2d_backward[T](cached_input_shape: openArray[int] | Metadata;
+                           cached_max_indices: Tensor[int];
+                           gradOutput: Tensor[T]): Tensor[T] {.noinit.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nnp_maxpooling.idx b/nnp_maxpooling.idx new file mode 100644 index 000000000..62b83593d --- /dev/null +++ b/nnp_maxpooling.idx @@ -0,0 +1,3 @@ +nimTitle nnp_maxpooling nnp_maxpooling.html module src/arraymancer/nn_primitives/nnp_maxpooling 0 +nim maxpool2d nnp_maxpooling.html#maxpool2d,Tensor[T],Size2D,Size2D,Size2D proc maxpool2d[T](input: Tensor[T]; kernel: Size2D; padding: Size2D = (0, 0);\n stride: Size2D = (1, 1)): tuple[max_indices: Tensor[int],\n maxpooled: Tensor[T]] 19 +nim maxpool2d_backward nnp_maxpooling.html#maxpool2d_backward,,Tensor[int],Tensor[T] proc maxpool2d_backward[T](cached_input_shape: openArray[int] | Metadata;\n cached_max_indices: Tensor[int]; gradOutput: Tensor[T]): Tensor[\n T] 68 diff --git a/nnp_numerical_gradient.html b/nnp_numerical_gradient.html new file mode 100644 index 000000000..bd5034d72 --- /dev/null +++ b/nnp_numerical_gradient.html @@ -0,0 +1,461 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/nnp_numerical_gradient + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/nnp_numerical_gradient + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/nnp_numerical_gradient

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Procs

+
+
+
+
proc numerical_gradient[T: not Tensor](input: T; f: (proc (x: T): T);
+                                       h = T(0.00001)): T {.inline.}
+
+ + Compute numerical gradient for any function w.r.t. to an input value, useful for gradient checking, recommend using float64 types to assure numerical precision. The gradient is calculated as: (f(x + h) - f(x - h)) / (2*h) where h is a small number, typically 1e-5. +   Source +Edit + +
+
+
+
proc numerical_gradient[T](input: Tensor[T]; f: (proc (x: Tensor[T]): T);
+                           h = T(0.00001)): Tensor[T] {.noinit.}
+
+ + Compute numerical gradient for any function w.r.t. to an input Tensor, useful for gradient checking, recommend using float64 types to assure numerical precision. The gradient is calculated as: (f(x + h) - f(x - h)) / (2*h) where h is a small number, typically 1e-5 f(x) will be called for each input elements with +h and -h pertubation. +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nnp_numerical_gradient.idx b/nnp_numerical_gradient.idx new file mode 100644 index 000000000..387362a47 --- /dev/null +++ b/nnp_numerical_gradient.idx @@ -0,0 +1,4 @@ +nimTitle nnp_numerical_gradient nnp_numerical_gradient.html module src/arraymancer/nn_primitives/nnp_numerical_gradient 0 +nim numerical_gradient nnp_numerical_gradient.html#numerical_gradient,T,,typeof(T(0.00001)) proc numerical_gradient[T: not Tensor](input: T; f: (proc (x: T): T); h = T(0.00001)): T 17 +nim numerical_gradient nnp_numerical_gradient.html#numerical_gradient,Tensor[T],,typeof(T(0.00001)) proc numerical_gradient[T](input: Tensor[T]; f: (proc (x: Tensor[T]): T);\n h = T(0.00001)): Tensor[T] 25 +nimgrp numericalgradient nnp_numerical_gradient.html#numerical_gradient-procs-all proc 17 diff --git a/nnp_sigmoid_cross_entropy.html b/nnp_sigmoid_cross_entropy.html new file mode 100644 index 000000000..05ae5435a --- /dev/null +++ b/nnp_sigmoid_cross_entropy.html @@ -0,0 +1,487 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/nnp_sigmoid_cross_entropy + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/nnp_sigmoid_cross_entropy + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/nnp_sigmoid_cross_entropy

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc sigmoid_cross_entropy[T](input, target: Tensor[T]): T
+
+ +

Sigmoid function + Cross-Entropy loss fused in one layer.

+

Input:

+
  • A Tensor
  • +
  • The target values
  • +
+

Returns:

+
  • Apply a sigmoid activation and returns the cross-entropy loss.
  • +
+

Shape:

+
  • Both the cache and target shape should be batch_size, features i.e. number of samples as first dimension
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc sigmoid_cross_entropy_backward[T](gradient: Tensor[T] or T;
+                                       cached_tensor: Tensor[T];
+                                       target: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Derivatives of sigmoid_cross_entropy Input:
  • The input gradient as a scalar or a Tensor
  • +
  • A cache tensor that contains data from before the forward pass
  • +
  • The target values
  • +
+

Shape:

+
  • Both the cache and target shape should be batch_size, features i.e. number of samples as first dimension
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nnp_sigmoid_cross_entropy.idx b/nnp_sigmoid_cross_entropy.idx new file mode 100644 index 000000000..98096137a --- /dev/null +++ b/nnp_sigmoid_cross_entropy.idx @@ -0,0 +1,3 @@ +nimTitle nnp_sigmoid_cross_entropy nnp_sigmoid_cross_entropy.html module src/arraymancer/nn_primitives/nnp_sigmoid_cross_entropy 0 +nim sigmoid_cross_entropy nnp_sigmoid_cross_entropy.html#sigmoid_cross_entropy,Tensor[T],Tensor[T] proc sigmoid_cross_entropy[T](input, target: Tensor[T]): T 23 +nim sigmoid_cross_entropy_backward nnp_sigmoid_cross_entropy.html#sigmoid_cross_entropy_backward,,Tensor[T],Tensor[T] proc sigmoid_cross_entropy_backward[T](gradient: Tensor[T] or T;\n cached_tensor: Tensor[T]; target: Tensor[T]): Tensor[\n T] 56 diff --git a/nnp_softmax.html b/nnp_softmax.html new file mode 100644 index 000000000..bbab2a170 --- /dev/null +++ b/nnp_softmax.html @@ -0,0 +1,451 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/nnp_softmax + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/nnp_softmax + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/nnp_softmax

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc softmax[T](input: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

For each sample in a tensor: do an exponential normalization of each of its class features xi exp(xi) / โˆ‘i exp(xi)

+

Input:

+ +

Output:

+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nnp_softmax.idx b/nnp_softmax.idx new file mode 100644 index 000000000..f4383af93 --- /dev/null +++ b/nnp_softmax.idx @@ -0,0 +1,2 @@ +nimTitle nnp_softmax nnp_softmax.html module src/arraymancer/nn_primitives/nnp_softmax 0 +nim softmax nnp_softmax.html#softmax,Tensor[T] proc softmax[T](input: Tensor[T]): Tensor[T] 18 diff --git a/nnp_softmax_cross_entropy.html b/nnp_softmax_cross_entropy.html new file mode 100644 index 000000000..ae7209a64 --- /dev/null +++ b/nnp_softmax_cross_entropy.html @@ -0,0 +1,556 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/nnp_softmax_cross_entropy + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/nnp_softmax_cross_entropy + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/nnp_softmax_cross_entropy

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc softmax_cross_entropy[T](input, target: Tensor[T]): T
+
+ +

Softmax function + Cross-Entropy loss fused in one layer.

+

Input:

+ +

Returns:

+
  • Apply a softmax activation and returns the cross-entropy loss.
  • +
+

Softmax_cross_entropy measures the cross-entropy error for multiclass classification. Classes are mutually exclusive (only 1 label is true) but the truth labels (target) need not be.

+

Note: Instead of one-hot-encoded labels, it is more efficient to use sparse_softmax_cross_entropy instead of feeding softmax_cross_entropy.

+

For example if your true probablities are (car: 0.10, airplane: 0.60, bike: 0.05, bus: 0.25), you have to use softmax_cross_entropy

+

However if your true probablities are (car: 0, airplane: 1, bike: 0, bus: 0) (a one-hot-encoded vector), you should prefer sparse_softmax_cross_entropy

+ +   Source +Edit + +
+
+ +
+
+
+
proc softmax_cross_entropy_backward[T](gradient: Tensor[T] or T;
+                                       cached_tensor: Tensor[T];
+                                       target: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Derivatives of softmax_cross_entropy Input:
  • The input gradient as a scalar or a Tensor
  • +
  • A cache tensor that contains data from before the forward pass
  • +
  • The target values
  • +
+

Shape:

+
  • Both the cache and target shape should be batchsize, features i.e. number of samples as first dimension
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc sparse_softmax_cross_entropy[T; Idx: SomeNumber or byte or char or enum](
+    input: Tensor[T]; target: Tensor[Idx]): T
+
+ +

Softmax function + Cross-Entropy loss fused in one layer.

+

Input:

+ +

Returns:

+
  • Apply a softmax activation and returns the cross-entropy loss.
  • +
+

sparse_softmax_cross_entropy measures the cross-entropy error for multiclass classification. Classes are mutually exclusive (only 1 label is true).

+

Important: 0, 0, 1 means label 2 is true i.e. labels start at 0

+

Note: Instead of one-hot-encoded labels, it is more efficient to use sparse_softmax_cross_entropy instead of feeding softmax_cross_entropy.

+

For example if your true probablities are (car: 0.10, airplane: 0.60, bike: 0.05, bus: 0.25), you have to use softmax_cross_entropy

+

However if your true probablities are (car: 0, airplane: 1, bike: 0, bus: 0) (a one-hot-encoded vector), you should prefer sparse_softmax_cross_entropy

+ +   Source +Edit + +
+
+ +
+
+
+
proc sparse_softmax_cross_entropy_backward[T;
+    Idx: SomeNumber or byte or char or enum](gradient: Tensor[T] or T;
+    cached_tensor: Tensor[T]; target: Tensor[Idx]): Tensor[T] {.noinit.}
+
+ + Derivatives of sparse_softmax_cross_entropy Input:
  • The input gradient as a scalar or a Tensor
  • +
  • A cache tensor that contains data from before the forward pass
  • +
  • The target values
  • +
+

Shape:

+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nnp_softmax_cross_entropy.idx b/nnp_softmax_cross_entropy.idx new file mode 100644 index 000000000..6105b2015 --- /dev/null +++ b/nnp_softmax_cross_entropy.idx @@ -0,0 +1,5 @@ +nimTitle nnp_softmax_cross_entropy nnp_softmax_cross_entropy.html module src/arraymancer/nn_primitives/nnp_softmax_cross_entropy 0 +nim softmax_cross_entropy nnp_softmax_cross_entropy.html#softmax_cross_entropy,Tensor[T],Tensor[T] proc softmax_cross_entropy[T](input, target: Tensor[T]): T 23 +nim sparse_softmax_cross_entropy nnp_softmax_cross_entropy.html#sparse_softmax_cross_entropy,Tensor[T],Tensor[Idx: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or byte or char or enum] proc sparse_softmax_cross_entropy[T; Idx: SomeNumber or byte or char or enum](\n input: Tensor[T]; target: Tensor[Idx]): T 64 +nim softmax_cross_entropy_backward nnp_softmax_cross_entropy.html#softmax_cross_entropy_backward,,Tensor[T],Tensor[T] proc softmax_cross_entropy_backward[T](gradient: Tensor[T] or T;\n cached_tensor: Tensor[T]; target: Tensor[T]): Tensor[\n T] 125 +nim sparse_softmax_cross_entropy_backward nnp_softmax_cross_entropy.html#sparse_softmax_cross_entropy_backward,,Tensor[T],Tensor[Idx: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or byte or char or enum] proc sparse_softmax_cross_entropy_backward[T; Idx: SomeNumber or byte or char or enum](\n gradient: Tensor[T] or T; cached_tensor: Tensor[T]; target: Tensor[Idx]): Tensor[\n T] 158 diff --git a/nnpack.html b/nnpack.html new file mode 100644 index 000000000..58aa984a1 --- /dev/null +++ b/nnpack.html @@ -0,0 +1,1243 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/backend/nnpack + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/backend/nnpack + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/backend/nnpack

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

@brief Status code for any NNPACK function call. @brief Activation applied applied after a convolutional or fully-connected layer. @brief Algorithm for computing convolutional layers. For backward compatibility@brief Size of images, kernels, and pooling filters in NNPACK. @brief Padding of images in NNPACK. @brief Profiling information about time spent in different phases of a function call. @brief Computes output of a 2D convolutional layer from input and kernel tensors. @details This function targets training of convolutional neural networks and performs forward propagation. It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. For minibatch size 1, use nnp_convolution_inference for optimal performance. @param algorithm The type of algorithm to use for convolution. Possible values are:

  • nnp_convolution_algorithm_auto -- let the function choose the algorithm.
  • +
  • nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. Supports kernels up to 8x8.
  • +
  • nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. Supports kernels up to 16x16.
  • +
  • nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). Supports only 3x3 kernels.
  • +
+

@param batch_size The number of images on the input and output of the convolutional layer. @param input_channels The number of channels (AKA features, dimensions) in the input images. @param output_channels The number of channels (AKA features, dimensions) in the output images. @param input_size Size of input images, excluding implicit zero-padding. @param input_padding Implicit zero-padding of input images. @param kernel_size Kernel size. @paramin input A 4D tensor inputbatch_sizeinput_size.height. @paramin kernel A 4D tensor kerneloutput_channelskernel_size.height. @paramin bias A 1D array biasoutput_channels. @paramout output A 4D tensor outputbatch_sizeoutput_size.height where output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - (kernel_size.height - 1) output_size.width = (input_padding.left + input_size.width + input_padding.right) - (kernel_size.width - 1) @param threadpool A thread pool for parallelization of the computation. If threadpool is NULL, the computation would run on the caller thread without parallelization. @paramout profile An optional pointer to profiling structure. If provided, the structure would record time spent in different phases of the computation.

+@brief Computes gradient of input of a 2D convolutional layer from gradient of output and kernel tensors. @details This function targets training of convolutional neural networks and performs backward propagation. It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. @param algorithm The type of algorithm to use for convolution. Possible values are:
  • nnp_convolution_algorithm_auto -- let the function choose the algorithm.
  • +
  • nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. Supports kernels up to 8x8.
  • +
  • nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. Supports kernels up to 16x16.
  • +
  • nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). Supports only 3x3 kernels.
  • +
+

@param batch_size The number of images (and their gradients) on the input and output of the convolutional layer. @param input_channels The number of channels (AKA features, dimensions) in the input images (and gradients). @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients). @param input_size Size of input images and their gradients, excluding implicit zero-padding. @param input_padding Implicit zero-padding of input images. @param kernel_size Kernel size. @paramin grad_output A 4D tensor grad_outputbatch_sizeoutput_size.height where output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - (kernel_size.height - 1) output_size.width = (input_padding.left + input_size.width + input_padding.right) - (kernel_size.width - 1) @paramin kernel A 4D tensor kerneloutput_channelskernel_size.height. @paramout grad_input A 4D tensor grad_inputbatch_sizeinput_size.height. @param threadpool A thread pool for parallelization of the computation. If threadpool is NULL, the computation would run on the caller thread without parallelization. @paramout profile An optional pointer to profiling structure. If provided, the structure would record time spent in different phases of the computation.

+@brief Computes gradient of kernel of a 2D convolutional layer from gradient of output and input tensors. @details This function targets training of convolutional neural networks and performs backward propagation. It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. @param algorithm The type of algorithm to use for convolution. Possible values are:
  • nnp_convolution_algorithm_auto -- let the function choose the algorithm.
  • +
  • nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. Supports kernels up to 8x8.
  • +
  • nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. Supports kernels up to 16x16.
  • +
+

@param batch_size The number of images (and their gradients) on the input and output of the convolutional layer. @param input_channels The number of channels (AKA features, dimensions) in the input images. @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients). @param input_size Size of input images and their gradients, excluding implicit zero-padding. @param input_padding Implicit zero-padding of input images. @param kernel_size Kernel size. @paramin input A 4D tensor inputbatch_sizeinput_size.height. @paramin grad_output A 4D tensor grad_outputbatch_sizeoutput_size.height where output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - (kernel_size.height - 1) output_size.width = (input_padding.left + input_size.width + input_padding.right) - (kernel_size.width - 1) @paramout grad_kernel A 4D tensor grad_kerneloutput_channelskernel_size.height. @param threadpool A thread pool for parallelization of the computation. If threadpool is NULL, the computation would run on the caller thread without parallelization. @paramout profile An optional pointer to profiling structure. If provided, the structure would record time spent in different phases of the computation.

+@brief Computes output of a 2D convolutional layer for a single input image and a kernel tensor. @details This function targets prediction with convolutional neural networks and performs forward propagation. @param algorithm The type of algorithm to use for convolution. Possible values are:
  • nnp_convolution_algorithm_auto -- let the function choose the algorithm.
  • +
  • nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. Supports kernels up to 8x8.
  • +
  • nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. Supports kernels up to 16x16.
  • +
  • nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). Supports only 3x3 kernels.
  • +
+

@param transform_strategy A strategy that guides computation of kernel transforms coefficients. Possible values are:

+
  • nnp_convolution_transform_strategy_block_based -- do multiplication-accumulations on blocks of transformed coefficients.
  • +
  • nnp_convolution_transform_strategy_tuple_based -- do multiplication-accumulations on tuples of transformed coefficients.
  • +
+

@param input_channels The number of channels (AKA features, dimensions) in the input image. @param output_channels The number of channels (AKA features, dimensions) in the output image. @param input_size Size of input image, excluding implicit zero-padding. @param input_padding Implicit zero-padding of input image. @param kernel_size Kernel size. @param output_subsampling Subsample region for output, also known as convolution stride. @paramin input A 3D tensor inputinput_channelsinput_size.width. @paramin kernel A 4D tensor kerneloutput_channelskernel_size.height. @paramin bias A 1D array biasoutput_channels. @paramout output A 3D tensor outputoutput_channelsoutput_size.width where output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - (kernel_size.height - 1) output_size.width = (input_padding.left + input_size.width + input_padding.right) - (kernel_size.width - 1) @paramin workspace_buffer Buffer for scratch memory used during computation. Buffer must be aligned on 64 bytes. If workspace_buffer is NULL and workspace_size is non-NULL, NNPACK would store the size of required workspace memory at the workspace_size location, and exit without computations. If workspace_buffer is NULL and workspace_size is NULL, NNPACK would allocate memory before and deallocate after this computation, potentially at significant runtime cost. @paramin,out workspace_size Pointer to the size of workspace buffer. If workspace_buffer is NULL, NNPACK will write the size of required scratch memory to the location specified by this pointer. If workspace_buffer is non-NULL, NNPACK expects workspace_size to specify the size of the buffer, in bytes. If workspace_size is NULL, workspace_buffer must be NULL as well. In this case NNPACK would allocate memory before and deallocate after this computation, potentially at significant runtime cost. @param threadpool A thread pool for parallelization of the computation. 
If threadpool is NULL, the computation would run on the caller thread without parallelization. @paramout profile An optional pointer to profiling structure. If provided, the structure would record time spent in different phases of the computation.

+@brief Computes output of a fully connected layer from input and kernel matrices. @details This function targets training of convolutional neural networks and performs forward propagation. It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. For minibatch size 1, use nnp_fully_connected_inference for optimal performance. @param batch_size The number of vectors on the input and output of the fully connected layer. @param input_channels The number of channels (AKA features, dimensions) in the input matrix. @param output_channels The number of channels (AKA features, dimensions) in the output matrix. @paramin input A 2D matrix inputbatch_size. @paramin kernel A 2D matrix kerneloutput_channels. @paramout output A 2D matrix outputbatch_size. @param threadpool A thread pool for parallelization of the computation. If threadpool is NULL, the computation would run on the caller thread without parallelization. @brief Computes output of a fully connected layer for a single input vector and a kernel matrix. @details This function targets prediction with convolutional neural networks and performs forward propagation. @param input_channels The number of channels (AKA features, dimensions) in the input vector. @param output_channels The number of channels (AKA features, dimensions) in the output vector. @paramin input A 1D array inputinput_channels of FP32 elements. @paramin kernel A 2D matrix kerneloutput_channels of FP32 elements. @paramout output A 1D array outputoutput_channels of FP32 elements. @param threadpool A thread pool for parallelization of the computation. If threadpool is NULL, the computation would run on the caller thread without parallelization. @brief Computes output of a fully connected layer for a single input vector and a kernel matrix. @details This function targets prediction with convolutional neural networks and performs forward propagation. 
@param input_channels The number of channels (AKA features, dimensions) in the input vector. @param output_channels The number of channels (AKA features, dimensions) in the output vector. @paramin input A 1D array inputinput_channels of FP32 elements. @paramin kernel A 2D matrix kerneloutput_channels of FP16 (ARM alternative format) elements. @paramout output A 1D array outputoutput_channels of FP32 elements. @param threadpool A thread pool for parallelization of the computation. If threadpool is NULL, the computation would run on the caller thread without parallelization. @brief Computes output of a max-pooling layer for an input tensor. @details This function targets both prediction and training of convolutional neural networks and performs forward propagation. Is is optimized for both large and small minibatch sizes. @param batch_size The number of images on the input and output of the max-pooling layer. @param channels The number of channels (AKA features, dimensions) in both input and output images. @param input_size Size of input images, excluding implicit zero-padding. @param input_padding Implicit padding of input images. The padding pixels are ignored by the pooling filter, but affect the output size. @param pooling_size Size of the pooling filter. Only 2x2 filter are currently supported. @param pooling_stride Stride of the pooling filter. Only 2x2 strides are currently supported. @paramin input A 4D tensor inputbatch_sizeinput_size.height. @paramout output A 4D tensor outputbatch_sizeoutput_size.height where output_size.height = ceil( (input_padding.top + input_size.height + input_padding.bottom - pooling_size.height) / pooling_stride.height) + 1 output_size.width = ceil( (input_padding.left + input_size.width + input_padding.right - pooling_size.width) / pooling_stride.width) + 1 @param threadpool A thread pool for parallelization of the computation. If threadpool is NULL, the computation would run on the caller thread without parallelization. 
@brief Computes output of a softmax layer for an input matrix. @details This function targets both prediction and training of convolutional neural networks and performs forward propagation. Is is optimized for both large and small minibatch sizes. @param batch_size The number of vectors on the input and output of the softmax layer. @param channels The number of channels (AKA features, dimensions) in both input and output vectors. @paramin input A 2D matrix inputbatch_size. @paramout output A 2D matrix outputbatch_size. @param threadpool A thread pool for parallelization of the computation. If threadpool is NULL, the computation would run on the caller thread without parallelization. @brief Computes output of a rectified linear unit (ReLU) layer for an input matrix. @details This function targets both prediction and training of convolutional neural networks and performs forward propagation. Is is optimized for both large and small minibatch sizes. @param batch_size The number of vectors on the input and output of the ReLU layer. @param channels The number of channels (AKA features, dimensions) in both input and output matrices. @paramin input A 2D matrix inputbatch_size. @paramout output A 2D matrix outputbatch_size. @param threadpool A thread pool for parallelization of the computation. If threadpool is NULL, the computation would run on the caller thread without parallelization. @brief Computes gradient of input of a rectified linear unit (ReLU) layer from gradient of output and input matrices. @details This function targets training of convolutional neural networks and performs backward propagation. Is is optimized for both large and small minibatch sizes. @param batch_size The number of vectors on the input and output of the ReLU layer. @param channels The number of channels (AKA features, dimensions) in both input and output matrices. @paramin input A 2D matrix inputbatch_size. @paramout output A 2D matrix outputbatch_size. 
@param threadpool A thread pool for parallelization of the computation. If threadpool is NULL, the computation would run on the caller thread without parallelization.

+ +
+

Types

+
+
+
nnp_activation {.size: 4.} = enum
+  nnp_activation_identity = 0, ## * ReLU activation f(x) := max(0, x)
+  nnp_activation_relu = 1
+
+ +
  • Identity activation f(x) := x, i.e. no transformation
  • +
+ +   Source +Edit + +
+
+
+
nnp_convolution_algorithm {.size: 4.} = enum
+  nnp_convolution_algorithm_auto = 0, ## * Tiled convolution based on 2D Fourier transform with 8x8 blocks. Supports kernels up to 8x8.
+  nnp_convolution_algorithm_ft8x8 = 1, ## * Tiled convolution based on 2D Fourier transform with 16x16 blocks. Supports kernels up to 16x16.
+  nnp_convolution_algorithm_ft16x16 = 2, ## * Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks. Supports only 3x3 kernels.
+  nnp_convolution_algorithm_wt8x8 = 3, ## * Direct convolution via implicit GEMM.
+  nnp_convolution_algorithm_implicit_gemm = 4, ## * Direct convolution implementation.
+  nnp_convolution_algorithm_direct = 5, ## *
+                                         ##  Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks in FP16.
+                                         ##  Supports only 3x3 kernels. Implemented only for new ARM processors (with NEON-HP),
+                                         ##  on non-supported processors falls back to nnp_convolution_algorithm_wt8x8.
+                                         ## 
+  nnp_convolution_algorithm_wt8x8_fp16 = 6
+
+ +
  • Let NNPACK choose the algorithm depending on layer parameters
  • +
+ +   Source +Edit + +
+
+
+
nnp_convolution_transform_strategy {.size: 4.} = enum
+  nnp_convolution_transform_strategy_compute = 1,
+  nnp_convolution_transform_strategy_precompute = 2,
+  nnp_convolution_transform_strategy_reuse = 3
+
+ + +   Source +Edit + +
+
+
+
nnp_padding {.bycopy.} = object
+  top*: csize_t              ## * Padding above the image data
+                             ## * Padding on the right of image data
+  right*: csize_t            ## * Padding below the image data
+  bottom*: csize_t           ## * Padding on the left of image data
+  left*: csize_t
+
+
+ + +   Source +Edit + +
+
+
+
nnp_profile {.bycopy.} = object
+  total*: cdouble            ## * Time spent inside the function call, in seconds.
+                             ## * Time spend on transformation of the input or input gradient tensor, in seconds.
+  input_transform*: cdouble  ## * Time spend on transformation of the kernel or kernel gradient tensor, in seconds.
+  kernel_transform*: cdouble ## * Time spend on transformation of the output or output gradient tensor, in seconds.
+  output_transform*: cdouble ## * Time spend on multiplication-accumulation of transformed coefficients, in seconds.
+  block_multiplication*: cdouble
+
+
+ + +   Source +Edit + +
+
+
+
nnp_size {.bycopy.} = object
+  width*: csize_t ## * Width (horizontal size) of an image, kernel, or pooling filter.
+                  ## * Height (vertical size) of an image, kernel, or pooling filter.
+  height*: csize_t
+
+
+ + +   Source +Edit + +
+
+
+
nnp_status {.size: 4.} = enum
+  nnp_status_success = 0,   ## * NNPACK function was called with batch_size == 0.
+  nnp_status_invalid_batch_size = 2, ## * NNPACK function was called with channels == 0.
+  nnp_status_invalid_channels = 3, ## * NNPACK function was called with input_channels == 0.
+  nnp_status_invalid_input_channels = 4, ## * NNPACK function was called with output_channels == 0.
+  nnp_status_invalid_output_channels = 5, ## * NNPACK function was called with input_size.height == 0 or input_size.width == 0
+  nnp_status_invalid_input_size = 10, ## * NNPACK function was called with input_stride.height == 0 or input_stride.width == 0
+  nnp_status_invalid_input_stride = 11, ## * NNPACK function was called with input_padding not less than respective kernel (or pooling) size, i.e.:
+                                         ## 
+                                         ##   - input_padding.left   >= kernel_size.width  (>= pooling_size.width)
+                                         ##   - input_padding.right  >= kernel_size.width  (>= pooling_size.width)
+                                         ##   - input_padding.top    >= kernel_size.height (>= pooling_size.height)
+                                         ##   - input_padding.bottom >= kernel_size.height (>= pooling_size.height)
+                                         ## 
+  nnp_status_invalid_input_padding = 12, ## * NNPACK function was called with kernel_size.height == 0 or kernel_size.width == 0
+  nnp_status_invalid_kernel_size = 13, ## * NNPACK function was called with pooling_size.height == 0 or pooling_size.width == 0
+  nnp_status_invalid_pooling_size = 14, ## * NNPACK function was called with pooling_stride.height == 0 or pooling_stride.width == 0
+  nnp_status_invalid_pooling_stride = 15, ## * NNPACK function was called with convolution algorithm not in nnp_convolution_algorithm enumeration
+  nnp_status_invalid_algorithm = 16, ## * NNPACK function was called with convolution transform strategy not in nnp_convolution_transform_strategy enum
+  nnp_status_invalid_transform_strategy = 17, ## * NNPACK function was called with output_subsampling.height == 0 or output_subsampling.width == 0
+  nnp_status_unsupported_input_size = 20, ## * NNPACK does not support the particular input stride for the function
+  nnp_status_unsupported_input_stride = 21, ## * NNPACK does not support the particular input padding for the function
+  nnp_status_unsupported_input_padding = 22, ## * NNPACK does not support the particular kernel size for the function
+  nnp_status_unsupported_kernel_size = 23, ## * NNPACK does not support the particular pooling size for the function
+  nnp_status_unsupported_pooling_size = 24, ## * NNPACK does not support the particular pooling stride for the function
+  nnp_status_unsupported_pooling_stride = 25, ## * NNPACK does not support the particular convolution algorithm for the function
+  nnp_status_unsupported_algorithm = 26, ## * NNPACK does not support the particular convolution transform strategy for the algorithm
+  nnp_status_unsupported_transform_strategy = 27, ## * NNPACK does not support the particular activation function for the function
+  nnp_status_unsupported_activation = 28, ## * NNPACK does not support the particular activation function parameters for the function
+  nnp_status_unsupported_activation_parameters = 29, ## * NNPACK function was called before the library was initialized
+  nnp_status_uninitialized = 50, ## * NNPACK does not implement this function for the host CPU
+  nnp_status_unsupported_hardware = 51, ## * NNPACK failed to allocate memory for temporary buffers
+  nnp_status_out_of_memory = 52, ## * Scratch space buffer is too small
+  nnp_status_insufficient_buffer = 53, ## * Scratch space buffer is not properly aligned
+  nnp_status_misaligned_buffer = 54
+
+ +
  • The call succeeded, and all output arguments now contain valid data.
  • +
+ +   Source +Edit + +
+
+
+
pthreadpool_t = pointer
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Consts

+
+
+
nnp_convolution_transform_strategy_block_based = nnp_convolution_transform_strategy_compute
+
+ + +   Source +Edit + +
+
+
+
nnp_convolution_transform_strategy_tuple_based = nnp_convolution_transform_strategy_compute
+
+ + +   Source +Edit + +
+
+
+
nnp_status_invalid_activation = nnp_status_invalid_pooling_size
+
+ + +   Source +Edit + +
+
+
+
nnp_status_invalid_activation_parameters = nnp_status_invalid_pooling_stride
+
+ + +   Source +Edit + +
+
+
+
nnp_status_invalid_output_subsampling = nnp_status_invalid_kernel_size
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc nnp_convolution_inference(algorithm: nnp_convolution_algorithm;
+    transform_strategy: nnp_convolution_transform_strategy;
+                               input_channels: csize_t;
+                               output_channels: csize_t; input_size: nnp_size;
+                               input_padding: nnp_padding;
+                               kernel_size: nnp_size;
+                               output_subsampling: nnp_size; input: ptr cfloat;
+                               kernel: ptr cfloat; bias: ptr cfloat;
+                               output: ptr cfloat; workspace_buffer: pointer;
+                               workspace_size: ptr csize_t;
+                               activation: nnp_activation;
+                               activation_parameters: pointer;
+                               threadpool: pthreadpool_t;
+                               profile: ptr nnp_profile): nnp_status {.cdecl,
+    importc: "nnp_convolution_inference", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc nnp_convolution_input_gradient(algorithm: nnp_convolution_algorithm;
+                                    batch_size: csize_t;
+                                    input_channels: csize_t;
+                                    output_channels: csize_t;
+                                    input_size: nnp_size;
+                                    input_padding: nnp_padding;
+                                    kernel_size: nnp_size;
+                                    grad_output: ptr cfloat; kernel: ptr cfloat;
+                                    grad_input: ptr cfloat;
+                                    workspace_buffer: pointer = nil;
+                                    workspace_size: ptr csize_t = nil;
+    activation: nnp_activation = nnp_activation_identity;
+                                    activation_parameters: pointer = nil;
+                                    threadpool: pthreadpool_t = nil;
+                                    profile: ptr nnp_profile = nil): nnp_status {.
+    cdecl, importc: "nnp_convolution_input_gradient", ...raises: [], tags: [],
+    forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc nnp_convolution_kernel_gradient(algorithm: nnp_convolution_algorithm;
+                                     batch_size: csize_t;
+                                     input_channels: csize_t;
+                                     output_channels: csize_t;
+                                     input_size: nnp_size;
+                                     input_padding: nnp_padding;
+                                     kernel_size: nnp_size; input: ptr cfloat;
+                                     grad_output: ptr cfloat;
+                                     grad_kernel: ptr cfloat;
+                                     workspace_buffer: pointer = nil;
+                                     workspace_size: ptr csize_t = nil;
+    activation: nnp_activation = nnp_activation_identity;
+                                     activation_parameters: pointer = nil;
+                                     threadpool: pthreadpool_t = nil;
+                                     profile: ptr nnp_profile = nil): nnp_status {.
+    cdecl, importc: "nnp_convolution_kernel_gradient", ...raises: [], tags: [],
+    forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc nnp_convolution_output(algorithm: nnp_convolution_algorithm;
+                            batch_size: csize_t; input_channels: csize_t;
+                            output_channels: csize_t; input_size: nnp_size;
+                            input_padding: nnp_padding; kernel_size: nnp_size;
+                            input: ptr cfloat; kernel: ptr cfloat;
+                            bias: ptr cfloat; output: ptr cfloat;
+                            workspace_buffer: pointer = nil;
+                            workspace_size: ptr csize_t = nil; activation: nnp_activation = nnp_activation_identity;
+                            activation_parameters: pointer = nil;
+                            threadpool: pthreadpool_t = nil;
+                            profile: ptr nnp_profile = nil): nnp_status {.cdecl,
+    importc: "nnp_convolution_output", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc nnp_deinitialize(): nnp_status {.cdecl, importc: "nnp_deinitialize",
+                                      ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc nnp_fully_connected_inference(input_channels: csize_t;
+                                   output_channels: csize_t; input: ptr cfloat;
+                                   kernel: ptr cfloat; output: ptr cfloat;
+                                   threadpool: pthreadpool_t): nnp_status {.
+    cdecl, importc: "nnp_fully_connected_inference", ...raises: [], tags: [],
+    forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc nnp_fully_connected_inference_f16f32(input_channels: csize_t;
+    output_channels: csize_t; input: ptr cfloat; kernel: pointer;
+    output: ptr cfloat; threadpool: pthreadpool_t): nnp_status {.cdecl,
+    importc: "nnp_fully_connected_inference_f16f32", ...raises: [], tags: [],
+    forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc nnp_fully_connected_output(batch_size: csize_t; input_channels: csize_t;
+                                output_channels: csize_t; input: ptr cfloat;
+                                kernel: ptr cfloat; output: ptr cfloat;
+                                threadpool: pthreadpool_t;
+                                profile: ptr nnp_profile): nnp_status {.cdecl,
+    importc: "nnp_fully_connected_output", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc nnp_initialize(): nnp_status {.cdecl, importc: "nnp_initialize",
+                                    ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc nnp_max_pooling_output(batch_size: csize_t; channels: csize_t;
+                            input_size: nnp_size; input_padding: nnp_padding;
+                            pooling_size: nnp_size; pooling_stride: nnp_size;
+                            input: ptr cfloat; output: ptr cfloat;
+                            threadpool: pthreadpool_t): nnp_status {.cdecl,
+    importc: "nnp_max_pooling_output", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc nnp_relu_input_gradient(batch_size: csize_t; channels: csize_t;
+                             grad_output: ptr cfloat; input: ptr cfloat;
+                             grad_input: ptr cfloat; negative_slope: cfloat;
+                             threadpool: pthreadpool_t): nnp_status {.cdecl,
+    importc: "nnp_relu_input_gradient", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc nnp_relu_output(batch_size: csize_t; channels: csize_t; input: ptr cfloat;
+                     output: ptr cfloat; negative_slope: cfloat;
+                     threadpool: pthreadpool_t): nnp_status {.cdecl,
+    importc: "nnp_relu_output", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc nnp_softmax_output(batch_size: csize_t; channels: csize_t;
+                        input: ptr cfloat; output: ptr cfloat;
+                        threadpool: pthreadpool_t): nnp_status {.cdecl,
+    importc: "nnp_softmax_output", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nnpack.idx b/nnpack.idx new file mode 100644 index 000000000..82f2d54ce --- /dev/null +++ b/nnpack.idx @@ -0,0 +1,27 @@ +nimTitle nnpack nnpack.html module src/arraymancer/nn_primitives/backend/nnpack 0 +nim nnp_status nnpack.html#nnp_status enum nnp_status 24 +nim pthreadpool_t nnpack.html#pthreadpool_t type pthreadpool_t 60 +nim nnp_status_invalid_output_subsampling nnpack.html#nnp_status_invalid_output_subsampling const nnp_status_invalid_output_subsampling 63 +nim nnp_status_invalid_activation nnpack.html#nnp_status_invalid_activation const nnp_status_invalid_activation 64 +nim nnp_status_invalid_activation_parameters nnpack.html#nnp_status_invalid_activation_parameters const nnp_status_invalid_activation_parameters 65 +nim nnp_activation nnpack.html#nnp_activation enum nnp_activation 72 +nim nnp_convolution_algorithm nnpack.html#nnp_convolution_algorithm enum nnp_convolution_algorithm 82 +nim nnp_convolution_transform_strategy nnpack.html#nnp_convolution_transform_strategy enum nnp_convolution_transform_strategy 97 +nim nnp_convolution_transform_strategy_block_based nnpack.html#nnp_convolution_transform_strategy_block_based const nnp_convolution_transform_strategy_block_based 106 +nim nnp_convolution_transform_strategy_tuple_based nnpack.html#nnp_convolution_transform_strategy_tuple_based const nnp_convolution_transform_strategy_tuple_based 107 +nim nnp_size nnpack.html#nnp_size object nnp_size 114 +nim nnp_padding nnpack.html#nnp_padding object nnp_padding 124 +nim nnp_profile nnpack.html#nnp_profile object nnp_profile 136 +nim nnp_initialize nnpack.html#nnp_initialize proc nnp_initialize(): nnp_status 145 +nim nnp_deinitialize nnpack.html#nnp_deinitialize proc nnp_deinitialize(): nnp_status 146 +nim nnp_convolution_output 
nnpack.html#nnp_convolution_output,nnp_convolution_algorithm,csize_t,csize_t,csize_t,nnp_size,nnp_padding,nnp_size,ptr.cfloat,ptr.cfloat,ptr.cfloat,ptr.cfloat,pointer,ptr.csize_t,nnp_activation,pointer,pthreadpool_t,ptr.nnp_profile proc nnp_convolution_output(algorithm: nnp_convolution_algorithm;\n batch_size: csize_t; input_channels: csize_t;\n output_channels: csize_t; input_size: nnp_size;\n input_padding: nnp_padding; kernel_size: nnp_size;\n input: ptr cfloat; kernel: ptr cfloat; bias: ptr cfloat;\n output: ptr cfloat; workspace_buffer: pointer = nil;\n workspace_size: ptr csize_t = nil;\n activation: nnp_activation = nnp_activation_identity;\n activation_parameters: pointer = nil;\n threadpool: pthreadpool_t = nil;\n profile: ptr nnp_profile = nil): nnp_status 182 +nim nnp_convolution_input_gradient nnpack.html#nnp_convolution_input_gradient,nnp_convolution_algorithm,csize_t,csize_t,csize_t,nnp_size,nnp_padding,nnp_size,ptr.cfloat,ptr.cfloat,ptr.cfloat,pointer,ptr.csize_t,nnp_activation,pointer,pthreadpool_t,ptr.nnp_profile proc nnp_convolution_input_gradient(algorithm: nnp_convolution_algorithm;\n batch_size: csize_t; input_channels: csize_t;\n output_channels: csize_t; input_size: nnp_size;\n input_padding: nnp_padding;\n kernel_size: nnp_size; grad_output: ptr cfloat;\n kernel: ptr cfloat; grad_input: ptr cfloat;\n workspace_buffer: pointer = nil;\n workspace_size: ptr csize_t = nil; activation: nnp_activation = nnp_activation_identity;\n activation_parameters: pointer = nil;\n threadpool: pthreadpool_t = nil;\n profile: ptr nnp_profile = nil): nnp_status 226 +nim nnp_convolution_kernel_gradient nnpack.html#nnp_convolution_kernel_gradient,nnp_convolution_algorithm,csize_t,csize_t,csize_t,nnp_size,nnp_padding,nnp_size,ptr.cfloat,ptr.cfloat,ptr.cfloat,pointer,ptr.csize_t,nnp_activation,pointer,pthreadpool_t,ptr.nnp_profile proc nnp_convolution_kernel_gradient(algorithm: nnp_convolution_algorithm;\n batch_size: csize_t; input_channels: csize_t;\n 
output_channels: csize_t; input_size: nnp_size;\n input_padding: nnp_padding;\n kernel_size: nnp_size; input: ptr cfloat;\n grad_output: ptr cfloat;\n grad_kernel: ptr cfloat;\n workspace_buffer: pointer = nil;\n workspace_size: ptr csize_t = nil; activation: nnp_activation = nnp_activation_identity;\n activation_parameters: pointer = nil;\n threadpool: pthreadpool_t = nil;\n profile: ptr nnp_profile = nil): nnp_status 273 +nim nnp_convolution_inference nnpack.html#nnp_convolution_inference,nnp_convolution_algorithm,nnp_convolution_transform_strategy,csize_t,csize_t,nnp_size,nnp_padding,nnp_size,nnp_size,ptr.cfloat,ptr.cfloat,ptr.cfloat,ptr.cfloat,pointer,ptr.csize_t,nnp_activation,pointer,pthreadpool_t,ptr.nnp_profile proc nnp_convolution_inference(algorithm: nnp_convolution_algorithm;\n transform_strategy: nnp_convolution_transform_strategy;\n input_channels: csize_t; output_channels: csize_t;\n input_size: nnp_size; input_padding: nnp_padding;\n kernel_size: nnp_size; output_subsampling: nnp_size;\n input: ptr cfloat; kernel: ptr cfloat;\n bias: ptr cfloat; output: ptr cfloat;\n workspace_buffer: pointer;\n workspace_size: ptr csize_t;\n activation: nnp_activation;\n activation_parameters: pointer;\n threadpool: pthreadpool_t; profile: ptr nnp_profile): nnp_status 342 +nim nnp_fully_connected_output nnpack.html#nnp_fully_connected_output,csize_t,csize_t,csize_t,ptr.cfloat,ptr.cfloat,ptr.cfloat,pthreadpool_t,ptr.nnp_profile proc nnp_fully_connected_output(batch_size: csize_t; input_channels: csize_t;\n output_channels: csize_t; input: ptr cfloat;\n kernel: ptr cfloat; output: ptr cfloat;\n threadpool: pthreadpool_t; profile: ptr nnp_profile): nnp_status 369 +nim nnp_fully_connected_inference nnpack.html#nnp_fully_connected_inference,csize_t,csize_t,ptr.cfloat,ptr.cfloat,ptr.cfloat,pthreadpool_t proc nnp_fully_connected_inference(input_channels: csize_t; output_channels: csize_t;\n input: ptr cfloat; kernel: ptr cfloat;\n output: ptr cfloat; threadpool: 
pthreadpool_t): nnp_status 387 +nim nnp_fully_connected_inference_f16f32 nnpack.html#nnp_fully_connected_inference_f16f32,csize_t,csize_t,ptr.cfloat,pointer,ptr.cfloat,pthreadpool_t proc nnp_fully_connected_inference_f16f32(input_channels: csize_t;\n output_channels: csize_t;\n input: ptr cfloat; kernel: pointer;\n output: ptr cfloat;\n threadpool: pthreadpool_t): nnp_status 403 +nim nnp_max_pooling_output nnpack.html#nnp_max_pooling_output,csize_t,csize_t,nnp_size,nnp_padding,nnp_size,nnp_size,ptr.cfloat,ptr.cfloat,pthreadpool_t proc nnp_max_pooling_output(batch_size: csize_t; channels: csize_t;\n input_size: nnp_size; input_padding: nnp_padding;\n pooling_size: nnp_size; pooling_stride: nnp_size;\n input: ptr cfloat; output: ptr cfloat;\n threadpool: pthreadpool_t): nnp_status 430 +nim nnp_softmax_output nnpack.html#nnp_softmax_output,csize_t,csize_t,ptr.cfloat,ptr.cfloat,pthreadpool_t proc nnp_softmax_output(batch_size: csize_t; channels: csize_t; input: ptr cfloat;\n output: ptr cfloat; threadpool: pthreadpool_t): nnp_status 448 +nim nnp_relu_output nnpack.html#nnp_relu_output,csize_t,csize_t,ptr.cfloat,ptr.cfloat,cfloat,pthreadpool_t proc nnp_relu_output(batch_size: csize_t; channels: csize_t; input: ptr cfloat;\n output: ptr cfloat; negative_slope: cfloat;\n threadpool: pthreadpool_t): nnp_status 463 +nim nnp_relu_input_gradient nnpack.html#nnp_relu_input_gradient,csize_t,csize_t,ptr.cfloat,ptr.cfloat,ptr.cfloat,cfloat,pthreadpool_t proc nnp_relu_input_gradient(batch_size: csize_t; channels: csize_t;\n grad_output: ptr cfloat; input: ptr cfloat;\n grad_input: ptr cfloat; negative_slope: cfloat;\n threadpool: pthreadpool_t): nnp_status 479 diff --git a/nnpack_interface.html b/nnpack_interface.html new file mode 100644 index 000000000..914cf1331 --- /dev/null +++ b/nnpack_interface.html @@ -0,0 +1,472 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/backend/nnpack_interface + + + + + + + + + +Arraymancer - 
src/arraymancer/nn_primitives/backend/nnpack_interface + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/backend/nnpack_interface

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc nnpack_conv2d(input, weight, bias: Tensor[float32]; padding, stride: Size2D): Tensor[
+    float32] {.noinit, ...raises: [ValueError], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc nnpack_conv2d_gradient[T](input, weight: Tensor[float32];
+                               padding, stride: Size2D; grad_output: Tensor[T];
+                               grad_input, grad_weight: var Tensor[T])
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/nnpack_interface.idx b/nnpack_interface.idx new file mode 100644 index 000000000..bb2f4f543 --- /dev/null +++ b/nnpack_interface.idx @@ -0,0 +1,3 @@ +nimTitle nnpack_interface nnpack_interface.html module src/arraymancer/nn_primitives/backend/nnpack_interface 0 +nim nnpack_conv2d nnpack_interface.html#nnpack_conv2d,Tensor[float32],Tensor[float32],Tensor[float32],Size2D,Size2D proc nnpack_conv2d(input, weight, bias: Tensor[float32]; padding, stride: Size2D): Tensor[\n float32] 18 +nim nnpack_conv2d_gradient nnpack_interface.html#nnpack_conv2d_gradient,Tensor[float32],Tensor[float32],Size2D,Size2D,Tensor[T],Tensor[T],Tensor[T] proc nnpack_conv2d_gradient[T](input, weight: Tensor[float32];\n padding, stride: Size2D; grad_output: Tensor[T];\n grad_input, grad_weight: var Tensor[T]) 60 diff --git a/opencl_backend.html b/opencl_backend.html new file mode 100644 index 000000000..911bdb0da --- /dev/null +++ b/opencl_backend.html @@ -0,0 +1,556 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/backend/opencl_backend + + + + + + + + + +Arraymancer - src/arraymancer/tensor/backend/opencl_backend + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/backend/opencl_backend

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc clMalloc[T](size: Natural): ptr UncheckedArray[T] {.inline.}
+
+ + Internal proc. Wrap OpenCL createBuffer +   Source +Edit + +
+
+ +
+
+
+
proc deallocCl[T](p: ref [ptr UncheckedArray[T]]) {.noSideEffect.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc layoutOnDevice[T: SomeFloat](t: ClTensor[T]): ClTensorLayout[T]
+
+ + Store a ClTensor shape, strides, etc information on the GPU +   Source +Edit + +
+
+ +
+
+
+
proc newClStorage[T: SomeFloat](length: int): ClStorage[T]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc toClpointer[T](p: ClStorage[T]): Pmem {.noSideEffect, inline.}
+
+ + +   Source +Edit + +
+
+
+
proc toClpointer[T](p: ClTensor[T]): Pmem {.noSideEffect, inline.}
+
+ + +   Source +Edit + +
+
+
+
proc toClpointer[T](p: ptr T | ptr UncheckedArray[T]): Pmem {.noSideEffect,
+    inline.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ + +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/opencl_backend.idx b/opencl_backend.idx new file mode 100644 index 000000000..9186eedce --- /dev/null +++ b/opencl_backend.idx @@ -0,0 +1,9 @@ +nimTitle opencl_backend opencl_backend.html module src/arraymancer/tensor/backend/opencl_backend 0 +nim toClpointer opencl_backend.html#toClpointer proc toClpointer[T](p: ptr T | ptr UncheckedArray[T]): Pmem 25 +nim toClpointer opencl_backend.html#toClpointer,ClStorage[T] proc toClpointer[T](p: ClStorage[T]): Pmem 28 +nim toClpointer opencl_backend.html#toClpointer,ClTensor[T] proc toClpointer[T](p: ClTensor[T]): Pmem 31 +nim clMalloc opencl_backend.html#clMalloc,Natural proc clMalloc[T](size: Natural): ptr UncheckedArray[T] 34 +nim deallocCl opencl_backend.html#deallocCl,ref. proc deallocCl[T](p: ref [ptr UncheckedArray[T]]) 41 +nim newClStorage opencl_backend.html#newClStorage,int proc newClStorage[T: SomeFloat](length: int): ClStorage[T] 48 +nim layoutOnDevice opencl_backend.html#layoutOnDevice,ClTensor[T: SomeFloat] proc layoutOnDevice[T: SomeFloat](t: ClTensor[T]): ClTensorLayout[T] 74 +nimgrp toclpointer opencl_backend.html#toClpointer-procs-all proc 25 diff --git a/opencl_global_state.html b/opencl_global_state.html new file mode 100644 index 000000000..a1166972d --- /dev/null +++ b/opencl_global_state.html @@ -0,0 +1,455 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/backend/opencl_global_state + + + + + + + + + +Arraymancer - src/arraymancer/tensor/backend/opencl_global_state + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/backend/opencl_global_state

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Lets

+
+
+
clContext0 = tmpTuple_536870916[1]
+
+ + +   Source +Edit + +
+
+
+
clDevice0 = tmpTuple_536870916[0]
+
+ + +   Source +Edit + +
+
+
+
clQueue0 = tmpTuple_536870916[2]
+
+ + +   Source +Edit + +
+
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/opencl_global_state.idx b/opencl_global_state.idx new file mode 100644 index 000000000..0a4e4797a --- /dev/null +++ b/opencl_global_state.idx @@ -0,0 +1,4 @@ +nimTitle opencl_global_state opencl_global_state.html module src/arraymancer/tensor/backend/opencl_global_state 0 +nim clDevice0 opencl_global_state.html#clDevice0 let clDevice0 31 +nim clContext0 opencl_global_state.html#clContext0 let clContext0 31 +nim clQueue0 opencl_global_state.html#clQueue0 let clQueue0 31 diff --git a/openmp.html b/openmp.html new file mode 100644 index 000000000..d5f3db6b1 --- /dev/null +++ b/openmp.html @@ -0,0 +1,587 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/backend/openmp + + + + + + + + + +Arraymancer - src/arraymancer/tensor/backend/openmp + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/backend/openmp

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Templates

+
+
+
+
template omp_get_max_threads(): cint
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template omp_get_num_threads(): cint
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template omp_get_thread_num(): cint
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template omp_parallel_blocks(block_offset, block_size: untyped; size: Natural;
+                             body: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template omp_parallel_countup(i: untyped; size: Natural; body: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template omp_parallel_forup(i: untyped; start, size: Natural; body: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template omp_parallel_reduce_blocks[T](reduced: T;
+                                       block_offset, block_size: untyped;
+                                       size, weight: Natural;
+                                       op_final, op_init, op_middle: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template omp_set_num_threads(x: cint)
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/openmp.idx b/openmp.idx new file mode 100644 index 000000000..79533feae --- /dev/null +++ b/openmp.idx @@ -0,0 +1,9 @@ +nimTitle openmp openmp.html module src/arraymancer/tensor/backend/openmp 0 +nim omp_set_num_threads openmp.html#omp_set_num_threads.t,cint template omp_set_num_threads(x: cint) 31 +nim omp_get_num_threads openmp.html#omp_get_num_threads.t template omp_get_num_threads(): cint 32 +nim omp_get_max_threads openmp.html#omp_get_max_threads.t template omp_get_max_threads(): cint 33 +nim omp_get_thread_num openmp.html#omp_get_thread_num.t template omp_get_thread_num(): cint 34 +nim omp_parallel_countup openmp.html#omp_parallel_countup.t,untyped,Natural,untyped template omp_parallel_countup(i: untyped; size: Natural; body: untyped): untyped 41 +nim omp_parallel_forup openmp.html#omp_parallel_forup.t,untyped,Natural,Natural,untyped template omp_parallel_forup(i: untyped; start, size: Natural; body: untyped): untyped 46 +nim omp_parallel_blocks openmp.html#omp_parallel_blocks.t,untyped,untyped,Natural,untyped template omp_parallel_blocks(block_offset, block_size: untyped; size: Natural;\n body: untyped): untyped 51 +nim omp_parallel_reduce_blocks openmp.html#omp_parallel_reduce_blocks.t,T,untyped,untyped,Natural,Natural,untyped,untyped,untyped template omp_parallel_reduce_blocks[T](reduced: T; block_offset, block_size: untyped;\n size, weight: Natural;\n op_final, op_init, op_middle: untyped): untyped 76 diff --git a/operators_blas_l1.html b/operators_blas_l1.html new file mode 100644 index 000000000..b7adefb34 --- /dev/null +++ b/operators_blas_l1.html @@ -0,0 +1,773 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/operators_blas_l1 + + + + + + + + + +Arraymancer - src/arraymancer/tensor/operators_blas_l1 + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/operators_blas_l1

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc `*`[T: SomeNumber | Complex[float32] | Complex[float64]](a: T; t: Tensor[T]): Tensor[
+    T] {.noinit.}
+
+ + Element-wise multiplication by a scalar +   Source +Edit + +
+
+
+
proc `*`[T: SomeNumber | Complex[float32] | Complex[float64]](t: Tensor[T]; a: T): Tensor[
+    T] {.noinit.}
+
+ + Element-wise multiplication by a scalar +   Source +Edit + +
+
+ +
+
+
+
proc `*=`[T: SomeNumber | Complex[float32] | Complex[float64]](t: var Tensor[T];
+    a: T)
+
+ + Element-wise multiplication by a scalar (in-place) +   Source +Edit + +
+
+ +
+
+
+
proc `+`[T: SomeNumber | Complex[float32] | Complex[float64]](a, b: Tensor[T]): Tensor[
+    T] {.noinit.}
+
+ + Tensor addition +   Source +Edit + +
+
+
+
proc `+`[T: SomeNumber | Complex[float32] | Complex[float64]](a: Tensor[T];
+    val: T): Tensor[T] {.noinit, inline.}
+
+ + Mathematical addition of tensors and scalars is undefined. Must use a broadcasted addition instead +   Source +Edit + +
+
+
+
proc `+`[T: SomeNumber | Complex[float32] | Complex[float64]](val: T;
+    a: Tensor[T]): Tensor[T] {.noinit, inline.}
+
+ + Mathematical addition of tensors and scalars is undefined. Must use a broadcasted addition instead +   Source +Edit + +
+
+ +
+
+
+
proc `+=`[T: SomeNumber | Complex[float32] | Complex[float64]](a: var Tensor[T];
+    b: Tensor[T])
+
+ + Tensor in-place addition +   Source +Edit + +
+
+ +
+
+
+
proc `-`[T: SomeNumber | Complex[float32] | Complex[float64]](a, b: Tensor[T]): Tensor[
+    T] {.noinit.}
+
+ + Tensor substraction +   Source +Edit + +
+
+
+
proc `-`[T: SomeNumber | Complex[float32] | Complex[float64]](a: Tensor[T];
+    val: T): Tensor[T] {.noinit, inline.}
+
+ + Mathematical subtraction of tensors and scalars is undefined. Must use a broadcasted addition instead +   Source +Edit + +
+
+
+
proc `-`[T: SomeNumber | Complex[float32] | Complex[float64]](val: T;
+    a: Tensor[T]): Tensor[T] {.noinit, inline.}
+
+ + Mathematical subtraction of tensors and scalars is undefined. Must use a broadcasted addition instead +   Source +Edit + +
+
+ +
+
+
+
proc `-=`[T: SomeNumber | Complex[float32] | Complex[float64]](a: var Tensor[T];
+    b: Tensor[T])
+
+ + Tensor in-place substraction +   Source +Edit + +
+
+ +
+
+
+
proc `/`[T: SomeNumber | Complex[float32] | Complex[float64]](t: Tensor[T]; a: T): Tensor[
+    T] {.noinit.}
+
+ + Element-wise division by a float scalar +   Source +Edit + +
+
+ +
+
+
+
proc `/=`[T: SomeFloat | Complex[float32] | Complex[float64]](t: var Tensor[T];
+    a: T)
+
+ + Element-wise division by a scalar (in-place) +   Source +Edit + +
+
+
+
proc `/=`[T: SomeInteger](t: var Tensor[T]; a: T)
+
+ + Element-wise division by a scalar (in-place) +   Source +Edit + +
+
+ +
+
+
+
proc `div`[T: SomeInteger](t: Tensor[T]; a: T): Tensor[T] {.noinit.}
+
+ + Element-wise division by an integer +   Source +Edit + +
+
+ +
+
+
+
proc dot[T: SomeFloat](a, b: Tensor[T]): T {.noSideEffect.}
+
+ + Vector to Vector dot (scalar) product +   Source +Edit + +
+
+
+
proc dot[T: SomeInteger](a, b: Tensor[T]): T {.noSideEffect.}
+
+ + Vector to Vector dot (scalar) product +   Source +Edit + +
+
+ +
+
+
+
proc `mod`[T: SomeNumber](t: Tensor[T]; val: T): Tensor[T] {.noinit.}
+
+ + Broadcasted modulo operation +   Source +Edit + +
+
+
+
proc `mod`[T: SomeNumber](val: T; t: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Broadcasted modulo operation +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/operators_blas_l1.idx b/operators_blas_l1.idx new file mode 100644 index 000000000..304effd95 --- /dev/null +++ b/operators_blas_l1.idx @@ -0,0 +1,26 @@ +nimTitle operators_blas_l1 operators_blas_l1.html module src/arraymancer/tensor/operators_blas_l1 0 +nim dot operators_blas_l1.html#dot,Tensor[T: SomeFloat],Tensor[T: SomeFloat] proc dot[T: SomeFloat](a, b: Tensor[T]): T 28 +nim dot operators_blas_l1.html#dot,Tensor[T: SomeInteger],Tensor[T: SomeInteger] proc dot[T: SomeInteger](a, b: Tensor[T]): T 34 +nim `+` operators_blas_l1.html#+,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `+`[T: SomeNumber | Complex[float32] | Complex[float64]](a, b: Tensor[T]): Tensor[\n T] 45 +nim `-` operators_blas_l1.html#-,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `-`[T: SomeNumber | Complex[float32] | Complex[float64]](a, b: Tensor[T]): Tensor[\n T] 49 +nim `+=` operators_blas_l1.html#+=,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `+=`[T: SomeNumber | Complex[float32] | Complex[float64]](a: var Tensor[T];\n b: 
Tensor[T]) 56 +nim `-=` operators_blas_l1.html#-=,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `-=`[T: SomeNumber | Complex[float32] | Complex[float64]](a: var Tensor[T];\n b: Tensor[T]) 60 +nim `*` operators_blas_l1.html#*,T,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `*`[T: SomeNumber | Complex[float32] | Complex[float64]](a: T; t: Tensor[T]): Tensor[\n T] 67 +nim `*` operators_blas_l1.html#*,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `*`[T: SomeNumber | Complex[float32] | Complex[float64]](t: Tensor[T]; a: T): Tensor[\n T] 72 +nim `/` operators_blas_l1.html#/,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `/`[T: SomeNumber | Complex[float32] | Complex[float64]](t: Tensor[T]; a: T): Tensor[\n T] 76 +nim `div` operators_blas_l1.html#div,Tensor[T: SomeInteger],T proc `div`[T: SomeInteger](t: Tensor[T]; a: T): Tensor[T] 84 +nim `mod` operators_blas_l1.html#mod,Tensor[T: SomeNumber],T proc `mod`[T: SomeNumber](t: Tensor[T]; val: T): Tensor[T] 89 +nim `mod` operators_blas_l1.html#mod,T,Tensor[T: SomeNumber] proc `mod`[T: SomeNumber](val: T; t: Tensor[T]): Tensor[T] 94 +nim `+` operators_blas_l1.html#+,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 
or Complex[system.float32] or Complex[system.float64]],T proc `+`[T: SomeNumber | Complex[float32] | Complex[float64]](a: Tensor[T]; val: T): Tensor[\n T] 100 +nim `+` operators_blas_l1.html#+,T,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `+`[T: SomeNumber | Complex[float32] | Complex[float64]](val: T; a: Tensor[T]): Tensor[\n T] 105 +nim `-` operators_blas_l1.html#-,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `-`[T: SomeNumber | Complex[float32] | Complex[float64]](a: Tensor[T]; val: T): Tensor[\n T] 109 +nim `-` operators_blas_l1.html#-,T,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `-`[T: SomeNumber | Complex[float32] | Complex[float64]](val: T; a: Tensor[T]): Tensor[\n T] 114 +nim `*=` operators_blas_l1.html#*=,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `*=`[T: SomeNumber | Complex[float32] | Complex[float64]](t: var Tensor[T]; a: T) 121 +nim `/=` operators_blas_l1.html#/=,Tensor[T: float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `/=`[T: SomeFloat | Complex[float32] | Complex[float64]](t: var Tensor[T]; a: T) 127 +nim `/=` operators_blas_l1.html#/=,Tensor[T: SomeInteger],T proc `/=`[T: SomeInteger](t: var Tensor[T]; a: T) 133 +nimgrp mod operators_blas_l1.html#mod-procs-all proc 89 +nimgrp * operators_blas_l1.html#*-procs-all proc 67 +nimgrp - operators_blas_l1.html#--procs-all proc 49 +nimgrp dot operators_blas_l1.html#dot-procs-all proc 28 +nimgrp /= 
operators_blas_l1.html#/=-procs-all proc 127 +nimgrp + operators_blas_l1.html#+-procs-all proc 45 diff --git a/operators_blas_l1_cuda.html b/operators_blas_l1_cuda.html new file mode 100644 index 000000000..429f77e84 --- /dev/null +++ b/operators_blas_l1_cuda.html @@ -0,0 +1,609 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/operators_blas_l1_cuda + + + + + + + + + +Arraymancer - src/arraymancer/tensor/operators_blas_l1_cuda + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/operators_blas_l1_cuda

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc `*`[T: SomeFloat](a: T; t: CudaTensor[T]): CudaTensor[T] {.noinit, inline.}
+
+ + CudaTensor multiplication by a scalar +   Source +Edit + +
+
+
+
proc `*`[T: SomeFloat](t: CudaTensor[T]; a: T): CudaTensor[T] {.noinit, inline.}
+
+ + CudaTensor multiplication by a scalar +   Source +Edit + +
+
+ +
+
+
+
proc `*=`[T: SomeFloat](t: var CudaTensor[T]; a: T) {.inline.}
+
+ + CudaTensor inplace multiplication by a scalar +   Source +Edit + +
+
+ +
+
+
+
proc `+`[T: SomeFloat](a, b: CudaTensor[T]): CudaTensor[T] {.noinit.}
+
+ + CudaTensor addition +   Source +Edit + +
+
+ +
+
+
+
proc `+=`[T: SomeFloat](a: var CudaTensor[T]; b: CudaTensor[T])
+
+ + CudaTensor in-place addition +   Source +Edit + +
+
+ +
+
+
+
proc `-`[T: SomeFloat](a, b: CudaTensor[T]): CudaTensor[T] {.noinit.}
+
+ + CudaTensor substraction +   Source +Edit + +
+
+ +
+
+
+
proc `-=`[T: SomeFloat](a: var CudaTensor[T]; b: CudaTensor[T])
+
+ + CudaTensor in-place substraction +   Source +Edit + +
+
+ +
+
+
+
proc `/`[T: SomeFloat](t: CudaTensor[T]; val: T): CudaTensor[T] {.noinit.}
+
+ + CudaTensor division by a scalar +   Source +Edit + +
+
+ +
+
+
+
proc `/=`[T: SomeFloat](t: var CudaTensor[T]; a: T) {.inline.}
+
+ + CudaTensor in-place division by a scalar +   Source +Edit + +
+
+ +
+
+
+
proc dot[T: SomeFloat](a, b: CudaTensor[T]): T {.inline.}
+
+ + Vector to Vector dot (scalar) product +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/operators_blas_l1_cuda.idx b/operators_blas_l1_cuda.idx new file mode 100644 index 000000000..4fedd5627 --- /dev/null +++ b/operators_blas_l1_cuda.idx @@ -0,0 +1,12 @@ +nimTitle operators_blas_l1_cuda operators_blas_l1_cuda.html module src/arraymancer/tensor/operators_blas_l1_cuda 0 +nim dot operators_blas_l1_cuda.html#dot,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc dot[T: SomeFloat](a, b: CudaTensor[T]): T 28 +nim `+=` operators_blas_l1_cuda.html#+=,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc `+=`[T: SomeFloat](a: var CudaTensor[T]; b: CudaTensor[T]) 39 +nim `+` operators_blas_l1_cuda.html#+,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc `+`[T: SomeFloat](a, b: CudaTensor[T]): CudaTensor[T] 51 +nim `-=` operators_blas_l1_cuda.html#-=,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc `-=`[T: SomeFloat](a: var CudaTensor[T]; b: CudaTensor[T]) 62 +nim `-` operators_blas_l1_cuda.html#-,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc `-`[T: SomeFloat](a, b: CudaTensor[T]): CudaTensor[T] 75 +nim `*=` operators_blas_l1_cuda.html#*=,CudaTensor[T: SomeFloat],T proc `*=`[T: SomeFloat](t: var CudaTensor[T]; a: T) 84 +nim `*` operators_blas_l1_cuda.html#*,T,CudaTensor[T: SomeFloat] proc `*`[T: SomeFloat](a: T; t: CudaTensor[T]): CudaTensor[T] 92 +nim `*` operators_blas_l1_cuda.html#*,CudaTensor[T: SomeFloat],T proc `*`[T: SomeFloat](t: CudaTensor[T]; a: T): CudaTensor[T] 101 +nim `/=` operators_blas_l1_cuda.html#/=,CudaTensor[T: SomeFloat],T proc `/=`[T: SomeFloat](t: var CudaTensor[T]; a: T) 107 +nim `/` operators_blas_l1_cuda.html#/,CudaTensor[T: SomeFloat],T proc `/`[T: SomeFloat](t: CudaTensor[T]; val: T): CudaTensor[T] 113 +nimgrp * operators_blas_l1_cuda.html#*-procs-all proc 92 diff --git a/operators_blas_l1_opencl.html b/operators_blas_l1_opencl.html new file mode 100644 index 000000000..5a7de0956 --- /dev/null +++ b/operators_blas_l1_opencl.html @@ -0,0 +1,657 @@ + + + + + + + + + + + + + + + + + + 
+src/arraymancer/tensor/operators_blas_l1_opencl + + + + + + + + + +Arraymancer - src/arraymancer/tensor/operators_blas_l1_opencl + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/operators_blas_l1_opencl

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc `+`(a`gensym30, b`gensym30: ClTensor[float32]): ClTensor[float32] {.noinit,
+    ...raises: [ValueError, EOpenCL], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+
+
proc `+`(a`gensym30, b`gensym30: ClTensor[float32]): ClTensor[float32] {.noinit,
+    ...raises: [ValueError, EOpenCL], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+
+
proc `+`(a`gensym101, b`gensym101: ClTensor[float64]): ClTensor[float64] {.
+    noinit, ...raises: [ValueError, EOpenCL], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `+=`(dst`gensym286: var ClTensor[float32];
+          src`gensym286: ClTensor[float32]) {....raises: [ValueError, EOpenCL],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+
+
proc `+=`(dst`gensym286: var ClTensor[float32];
+          src`gensym286: ClTensor[float32]) {....raises: [ValueError, EOpenCL],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+
+
proc `+=`(dst`gensym329: var ClTensor[float64];
+          src`gensym329: ClTensor[float64]) {....raises: [ValueError, EOpenCL],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `-`(a`gensym168, b`gensym168: ClTensor[float32]): ClTensor[float32] {.
+    noinit, ...raises: [ValueError, EOpenCL], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+
+
proc `-`(a`gensym168, b`gensym168: ClTensor[float32]): ClTensor[float32] {.
+    noinit, ...raises: [ValueError, EOpenCL], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+
+
proc `-`(a`gensym227, b`gensym227: ClTensor[float64]): ClTensor[float64] {.
+    noinit, ...raises: [ValueError, EOpenCL], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `-=`(dst`gensym372: var ClTensor[float32];
+          src`gensym372: ClTensor[float32]) {....raises: [ValueError, EOpenCL],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+
+
proc `-=`(dst`gensym372: var ClTensor[float32];
+          src`gensym372: ClTensor[float32]) {....raises: [ValueError, EOpenCL],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+
+
proc `-=`(dst`gensym415: var ClTensor[float64];
+          src`gensym415: ClTensor[float64]) {....raises: [ValueError, EOpenCL],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc dot(a`gensym0, b`gensym0: ClTensor[float32]): float32 {.
+    ...raises: [ValueError, EOpenCL, CLBlastError], tags: [], forbids: [].}
+
+ + Vector to Vector dot (scalar) product +   Source +Edit + +
+
+
+
proc dot(a`gensym15, b`gensym15: ClTensor[float64]): float64 {.
+    ...raises: [ValueError, EOpenCL, CLBlastError], tags: [], forbids: [].}
+
+ + Vector to Vector dot (scalar) product +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/operators_blas_l1_opencl.idx b/operators_blas_l1_opencl.idx new file mode 100644 index 000000000..725d01275 --- /dev/null +++ b/operators_blas_l1_opencl.idx @@ -0,0 +1,20 @@ +nimTitle operators_blas_l1_opencl operators_blas_l1_opencl.html module src/arraymancer/tensor/operators_blas_l1_opencl 0 +nim dot operators_blas_l1_opencl.html#dot,, proc dot(a`gensym0, b`gensym0: ClTensor[float32]): float32 54 +nim dot operators_blas_l1_opencl.html#dot,,_2 proc dot(a`gensym15, b`gensym15: ClTensor[float64]): float64 55 +nim `+` operators_blas_l1_opencl.html#+,, proc `+`(a`gensym30, b`gensym30: ClTensor[float32]): ClTensor[float32] 102 +nim `+` operators_blas_l1_opencl.html#+,,_2 proc `+`(a`gensym30, b`gensym30: ClTensor[float32]): ClTensor[float32] 102 +nim `+` operators_blas_l1_opencl.html#+,,_3 proc `+`(a`gensym101, b`gensym101: ClTensor[float64]): ClTensor[float64] 102 +nim `-` operators_blas_l1_opencl.html#-,, proc `-`(a`gensym168, b`gensym168: ClTensor[float32]): ClTensor[float32] 102 +nim `-` operators_blas_l1_opencl.html#-,,_2 proc `-`(a`gensym168, b`gensym168: ClTensor[float32]): ClTensor[float32] 102 +nim `-` operators_blas_l1_opencl.html#-,,_3 proc `-`(a`gensym227, b`gensym227: ClTensor[float64]): ClTensor[float64] 102 +nim `+=` operators_blas_l1_opencl.html#+=,, proc `+=`(dst`gensym286: var ClTensor[float32]; src`gensym286: ClTensor[float32]) 176 +nim `+=` operators_blas_l1_opencl.html#+=,,_2 proc `+=`(dst`gensym286: var ClTensor[float32]; src`gensym286: ClTensor[float32]) 176 +nim `+=` operators_blas_l1_opencl.html#+=,,_3 proc `+=`(dst`gensym329: var ClTensor[float64]; src`gensym329: ClTensor[float64]) 176 +nim `-=` operators_blas_l1_opencl.html#-=,, proc `-=`(dst`gensym372: var ClTensor[float32]; src`gensym372: ClTensor[float32]) 176 +nim `-=` operators_blas_l1_opencl.html#-=,,_2 proc `-=`(dst`gensym372: var ClTensor[float32]; src`gensym372: ClTensor[float32]) 176 +nim `-=` operators_blas_l1_opencl.html#-=,,_3 proc `-=`(dst`gensym415: var 
ClTensor[float64]; src`gensym415: ClTensor[float64]) 176 +nimgrp += operators_blas_l1_opencl.html#+=-procs-all proc 63 +nimgrp - operators_blas_l1_opencl.html#--procs-all proc 60 +nimgrp dot operators_blas_l1_opencl.html#dot-procs-all proc 30 +nimgrp -= operators_blas_l1_opencl.html#-=-procs-all proc 65 +nimgrp + operators_blas_l1_opencl.html#+-procs-all proc 58 diff --git a/operators_blas_l2l3.html b/operators_blas_l2l3.html new file mode 100644 index 000000000..a27de932e --- /dev/null +++ b/operators_blas_l2l3.html @@ -0,0 +1,553 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/operators_blas_l2l3 + + + + + + + + + +Arraymancer - src/arraymancer/tensor/operators_blas_l2l3 + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/operators_blas_l2l3

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc `*`[T: Complex[float32] or Complex[float64]](a, b: Tensor[T]): Tensor[T] {.
+    noinit.}
+
+ +

Matrix multiplication (Matrix-Matrix and Matrix-Vector)

+

Float and complex operations use optimized BLAS like OpenBLAS, Intel MKL or BLIS.

+ +   Source +Edit + +
+
+
+
proc `*`[T: SomeNumber](a, b: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Matrix multiplication (Matrix-Matrix and Matrix-Vector)

+

Float and complex operations use optimized BLAS like OpenBLAS, Intel MKL or BLIS.

+ +   Source +Edit + +
+
+ +
+
+
+
proc gemm[T: SomeFloat | Complex](alpha: T; A, B: Tensor[T]; beta: T;
+                                  C: var Tensor[T]) {.inline.}
+
+ + +   Source +Edit + +
+
+
+
proc gemm[T: SomeInteger](alpha: T; A, B: Tensor[T]; beta: T; C: var Tensor[T]) {.
+    inline.}
+
+ + +   Source +Edit + +
+
+
+
proc gemm[T: SomeNumber](A, B: Tensor[T]; C: var Tensor[T]) {.
+    ...deprecated: "Use explicit gemm(1, A, B, 0, C) instead".}
+
+
+ Deprecated: Use explicit gemm(1, A, B, 0, C) instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc gemv[T: SomeFloat | Complex](alpha: T; A: Tensor[T]; x: Tensor[T]; beta: T;
+                                  y: var Tensor[T]) {.inline.}
+
+ + General Matrix-Vector multiplication: y <- alpha * A * x + beta * y +   Source +Edit + +
+
+
+
proc gemv[T: SomeInteger](alpha: T; A: Tensor[T]; x: Tensor[T]; beta: T;
+                          y: var Tensor[T]) {.inline.}
+
+ + General Matrix-Vector multiplication: y <- alpha * A * x + beta * y +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/operators_blas_l2l3.idx b/operators_blas_l2l3.idx new file mode 100644 index 000000000..462db6f3a --- /dev/null +++ b/operators_blas_l2l3.idx @@ -0,0 +1,11 @@ +nimTitle operators_blas_l2l3 operators_blas_l2l3.html module src/arraymancer/tensor/operators_blas_l2l3 0 +nim gemv operators_blas_l2l3.html#gemv,T,Tensor[T: float or float32 or float64 or Complex],Tensor[T: float or float32 or float64 or Complex],T,Tensor[T: float or float32 or float64 or Complex] proc gemv[T: SomeFloat | Complex](alpha: T; A: Tensor[T]; x: Tensor[T]; beta: T;\n y: var Tensor[T]) 24 +nim gemv operators_blas_l2l3.html#gemv,T,Tensor[T: SomeInteger],Tensor[T: SomeInteger],T,Tensor[T: SomeInteger] proc gemv[T: SomeInteger](alpha: T; A: Tensor[T]; x: Tensor[T]; beta: T;\n y: var Tensor[T]) 44 +nim gemm operators_blas_l2l3.html#gemm,T,Tensor[T: float or float32 or float64 or Complex],Tensor[T: float or float32 or float64 or Complex],T,Tensor[T: float or float32 or float64 or Complex] proc gemm[T: SomeFloat | Complex](alpha: T; A, B: Tensor[T]; beta: T;\n C: var Tensor[T]) 58 +nim gemm operators_blas_l2l3.html#gemm,T,Tensor[T: SomeInteger],Tensor[T: SomeInteger],T,Tensor[T: SomeInteger] proc gemm[T: SomeInteger](alpha: T; A, B: Tensor[T]; beta: T; C: var Tensor[T]) 73 +nim gemm operators_blas_l2l3.html#gemm,Tensor[T: SomeNumber],Tensor[T: SomeNumber],Tensor[T: SomeNumber] proc gemm[T: SomeNumber](A, B: Tensor[T]; C: var Tensor[T]) 83 +nim `*` operators_blas_l2l3.html#*,Tensor[T: SomeNumber],Tensor[T: SomeNumber] proc `*`[T: SomeNumber](a, b: Tensor[T]): Tensor[T] 88 +nim `*` operators_blas_l2l3.html#*,Tensor[T: Complex[system.float32] or Complex[system.float64]],Tensor[T: Complex[system.float32] or Complex[system.float64]] proc `*`[T: Complex[float32] or Complex[float64]](a, b: Tensor[T]): Tensor[T] 102 +nimgrp gemv operators_blas_l2l3.html#gemv-procs-all proc 24 +nimgrp gemm operators_blas_l2l3.html#gemm-procs-all proc 58 +nimgrp * operators_blas_l2l3.html#*-procs-all proc 88 
diff --git a/operators_blas_l2l3_cuda.html b/operators_blas_l2l3_cuda.html new file mode 100644 index 000000000..f96b7c387 --- /dev/null +++ b/operators_blas_l2l3_cuda.html @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/operators_blas_l2l3_cuda + + + + + + + + + +Arraymancer - src/arraymancer/tensor/operators_blas_l2l3_cuda + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/operators_blas_l2l3_cuda

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc `*`[T: SomeFloat](a, b: CudaTensor[T]): CudaTensor[T]
+
+ + Matrix multiplication (Matrix-Matrix and Matrix-Vector) on CUDA +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/operators_blas_l2l3_cuda.idx b/operators_blas_l2l3_cuda.idx new file mode 100644 index 000000000..985f47789 --- /dev/null +++ b/operators_blas_l2l3_cuda.idx @@ -0,0 +1,2 @@ +nimTitle operators_blas_l2l3_cuda operators_blas_l2l3_cuda.html module src/arraymancer/tensor/operators_blas_l2l3_cuda 0 +nim `*` operators_blas_l2l3_cuda.html#*,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc `*`[T: SomeFloat](a, b: CudaTensor[T]): CudaTensor[T] 74 diff --git a/operators_blas_l2l3_opencl.html b/operators_blas_l2l3_opencl.html new file mode 100644 index 000000000..adcc0a978 --- /dev/null +++ b/operators_blas_l2l3_opencl.html @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/operators_blas_l2l3_opencl + + + + + + + + + +Arraymancer - src/arraymancer/tensor/operators_blas_l2l3_opencl + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/operators_blas_l2l3_opencl

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc `*`[T: SomeFloat](a, b: ClTensor[T]): ClTensor[T]
+
+ + Matrix multiplication (Matrix-Matrix and Matrix-Vector) on CUDA +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/operators_blas_l2l3_opencl.idx b/operators_blas_l2l3_opencl.idx new file mode 100644 index 000000000..51dd98dff --- /dev/null +++ b/operators_blas_l2l3_opencl.idx @@ -0,0 +1,2 @@ +nimTitle operators_blas_l2l3_opencl operators_blas_l2l3_opencl.html module src/arraymancer/tensor/operators_blas_l2l3_opencl 0 +nim `*` operators_blas_l2l3_opencl.html#*,ClTensor[T: SomeFloat],ClTensor[T: SomeFloat] proc `*`[T: SomeFloat](a, b: ClTensor[T]): ClTensor[T] 80 diff --git a/operators_broadcasted.html b/operators_broadcasted.html new file mode 100644 index 000000000..97cc5e0d0 --- /dev/null +++ b/operators_broadcasted.html @@ -0,0 +1,1128 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/operators_broadcasted + + + + + + + + + +Arraymancer - src/arraymancer/tensor/operators_broadcasted + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/operators_broadcasted

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc `*.`[T: SomeNumber | Complex[float32] | Complex[float64]](a, b: Tensor[T]): Tensor[
+    T] {.noinit.}
+
+ +

Element-wise multiplication (Hadamard product).

+

And broadcasted element-wise multiplication.

+ +   Source +Edit + +
+
+
+
proc `*.`[T: SomeNumber | Complex[float32] | Complex[float64]](t: Tensor[T];
+    val: T): Tensor[T] {.noinit.}
+
+ + Broadcasted multiplication for scalar * tensor. +   Source +Edit + +
+
+
+
proc `*.`[T: SomeNumber | Complex[float32] | Complex[float64]](val: T;
+    t: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Broadcasted multiplication for tensor * scalar. +   Source +Edit + +
+
+ +
+
+
+
proc `*.=`[T: SomeNumber | Complex[float32] | Complex[float64]](
+    a: var Tensor[T]; b: Tensor[T])
+
+ +

Tensor broadcasted in-place multiplication (Hadamard product)

+

Only the right hand side tensor can be broadcasted

+ +   Source +Edit + +
+
+
+
proc `*.=`[T: SomeNumber | Complex[float32] | Complex[float64]](
+    t: var Tensor[T]; val: T)
+
+ + Tensor in-place multiplication with a broadcasted scalar. +   Source +Edit + +
+
+ +
+
+
+
proc `+.`[T: SomeNumber | Complex[float32] | Complex[float64]](a, b: Tensor[T]): Tensor[
+    T] {.noinit, inline.}
+
+ + Broadcasted addition for tensors of incompatible but broadcastable shape. +   Source +Edit + +
+
+
+
proc `+.`[T: SomeNumber | Complex[float32] | Complex[float64]](t: Tensor[T];
+    val: T): Tensor[T] {.noinit.}
+
+ + Broadcasted addition for scalar + tensor. +   Source +Edit + +
+
+
+
proc `+.`[T: SomeNumber | Complex[float32] | Complex[float64]](val: T;
+    t: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Broadcasted addition for tensor + scalar. +   Source +Edit + +
+
+ +
+
+
+
proc `+.=`[T: SomeNumber | Complex[float32] | Complex[float64]](
+    a: var Tensor[T]; b: Tensor[T])
+
+ +

Tensor broadcasted in-place addition.

+

Only the right hand side tensor can be broadcasted.

+ +   Source +Edit + +
+
+
+
proc `+.=`[T: SomeNumber | Complex[float32] | Complex[float64]](
+    t: var Tensor[T]; val: T)
+
+ + Tensor in-place addition with a broadcasted scalar. +   Source +Edit + +
+
+ +
+
+
+
proc `-.`[T: SomeNumber | Complex[float32] | Complex[float64]](a, b: Tensor[T]): Tensor[
+    T] {.noinit, inline.}
+
+ + Broadcasted addition for tensors of incompatible but broadcastable shape. +   Source +Edit + +
+
+
+
proc `-.`[T: SomeNumber | Complex[float32] | Complex[float64]](t: Tensor[T];
+    val: T): Tensor[T] {.noinit.}
+
+ + Broadcasted substraction for scalar - tensor. +   Source +Edit + +
+
+
+
proc `-.`[T: SomeNumber | Complex[float32] | Complex[float64]](val: T;
+    t: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Broadcasted substraction for tensor - scalar. +   Source +Edit + +
+
+ +
+
+
+
proc `-.=`[T: SomeNumber | Complex[float32] | Complex[float64]](
+    a: var Tensor[T]; b: Tensor[T])
+
+ +

Tensor broadcasted in-place substraction.

+

Only the right hand side tensor can be broadcasted.

+ +   Source +Edit + +
+
+
+
proc `-.=`[T: SomeNumber | Complex[float32] | Complex[float64]](
+    t: var Tensor[T]; val: T)
+
+ + Tensor in-place substraction with a broadcasted scalar. +   Source +Edit + +
+
+ +
+
+
+
proc `.*`[T](a, b: Tensor[T]): Tensor[T] {.noinit,
+    ...deprecated: "Use `*.` instead".}
+
+
+ Deprecated: Use `*.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.+`[T](a, b: Tensor[T]): Tensor[T] {.noinit, inline,
+    ...deprecated: "Use `+.` instead".}
+
+
+ Deprecated: Use `+.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.-`[T](a, b: Tensor[T]): Tensor[T] {.noinit, inline,
+    ...deprecated: "Use `-.` instead".}
+
+
+ Deprecated: Use `-.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `./`[T](a, b: Tensor[T]): Tensor[T] {.noinit,
+    ...deprecated: "Use `/.` instead".}
+
+
+ Deprecated: Use `/.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.=*`[T](a: var Tensor[T]; b: Tensor[T]) {....deprecated: "Use `*.=` instead".}
+
+
+ Deprecated: Use `*.=` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.=+`[T](a: var Tensor[T]; b: Tensor[T]) {....deprecated: "Use `+.=` instead".}
+
+
+ Deprecated: Use `+.=` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.=-`[T](a: var Tensor[T]; b: Tensor[T]) {....deprecated: "Use `-.=` instead".}
+
+
+ Deprecated: Use `-.=` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.=/`[T](a: var Tensor[T]; b: Tensor[T]) {....deprecated: "Use `/.=` instead".}
+
+
+ Deprecated: Use `/.=` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.^`[T](base: T; t: Tensor[T]): Tensor[T] {.noinit,
+    ...deprecated: "Use `^.` instead".}
+
+
+ Deprecated: Use `^.` instead +
+ + +   Source +Edit + +
+
+
+
proc `.^`[T](t: Tensor[T]; exponent: T): Tensor[T] {.noinit,
+    ...deprecated: "Use `^.` instead".}
+
+
+ Deprecated: Use `^.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.^=`[T](t: var Tensor[T]; exponent: T) {....deprecated: "Use `^.=` instead".}
+
+
+ Deprecated: Use `^.=` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `/.`[T: SomeNumber | Complex[float32] | Complex[float64]](a, b: Tensor[T]): Tensor[
+    T] {.noinit.}
+
+ +

Tensor element-wise division

+

And broadcasted element-wise division.

+ +   Source +Edit + +
+
+
+
proc `/.`[T: SomeNumber | Complex[float32] | Complex[float64]](t: Tensor[T];
+    val: T): Tensor[T] {.noinit.}
+
+ + Broadcasted division +   Source +Edit + +
+
+
+
proc `/.`[T: SomeNumber | Complex[float32] | Complex[float64]](val: T;
+    t: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Broadcasted division +   Source +Edit + +
+
+ +
+
+
+
proc `/.=`[T: SomeNumber | Complex[float32] | Complex[float64]](
+    a: var Tensor[T]; b: Tensor[T])
+
+ +

Tensor broadcasted in-place division.

+

Only the right hand side tensor can be broadcasted.

+ +   Source +Edit + +
+
+
+
proc `/.=`[T: SomeNumber | Complex[float32] | Complex[float64]](
+    t: var Tensor[T]; val: T)
+
+ + Tensor in-place division with a broadcasted scalar. +   Source +Edit + +
+
+ +
+
+
+
proc `^.`[T: SomeFloat | Complex[float32] | Complex[float64]](base: T;
+    t: Tensor[T]): Tensor[T] {.noinit.}
+
+ + Broadcasted exponentiation: scalar ^ tensor. +   Source +Edit + +
+
+
+
proc `^.`[T: SomeFloat | Complex[float32] | Complex[float64]](t: Tensor[T];
+    exponent: T): Tensor[T] {.noinit.}
+
+ + Compute element-wise exponentiation: tensor ^ scalar. +   Source +Edit + +
+
+ +
+
+
+
proc `^.=`[T: SomeFloat | Complex[float32] | Complex[float64]](t: var Tensor[T];
+    exponent: T)
+
+ + Compute in-place element-wise exponentiation +   Source +Edit + +
+
+ +
+
+
+
proc `mod`[T: SomeNumber](a, b: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Tensor element-wise modulo operation

+

And broadcasted element-wise modulo operation.

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/operators_broadcasted.idx b/operators_broadcasted.idx new file mode 100644 index 000000000..700f1b527 --- /dev/null +++ b/operators_broadcasted.idx @@ -0,0 +1,46 @@ +nimTitle operators_broadcasted operators_broadcasted.html module src/arraymancer/tensor/operators_broadcasted 0 +nim `+.` operators_broadcasted.html#+.,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `+.`[T: SomeNumber | Complex[float32] | Complex[float64]](a, b: Tensor[T]): Tensor[\n T] 28 +nim `-.` operators_broadcasted.html#-.,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `-.`[T: SomeNumber | Complex[float32] | Complex[float64]](a, b: Tensor[T]): Tensor[\n T] 33 +nim `*.` operators_broadcasted.html#*.,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `*.`[T: SomeNumber | Complex[float32] | Complex[float64]](a, b: Tensor[T]): Tensor[\n T] 38 +nim `/.` operators_broadcasted.html#/.,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or 
Complex[system.float64]],Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `/.`[T: SomeNumber | Complex[float32] | Complex[float64]](a, b: Tensor[T]): Tensor[\n T] 45 +nim `mod` operators_broadcasted.html#mod,Tensor[T: SomeNumber],Tensor[T: SomeNumber] proc `mod`[T: SomeNumber](a, b: Tensor[T]): Tensor[T] 55 +nim `+.=` operators_broadcasted.html#+.=,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `+.=`[T: SomeNumber | Complex[float32] | Complex[float64]](a: var Tensor[T];\n b: Tensor[T]) 65 +nim `-.=` operators_broadcasted.html#-.=,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `-.=`[T: SomeNumber | Complex[float32] | Complex[float64]](a: var Tensor[T];\n b: Tensor[T]) 74 +nim `*.=` operators_broadcasted.html#*.=,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `*.=`[T: SomeNumber | Complex[float32] | Complex[float64]](a: var Tensor[T];\n b: Tensor[T]) 83 +nim `/.=` 
operators_broadcasted.html#/.=,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `/.=`[T: SomeNumber | Complex[float32] | Complex[float64]](a: var Tensor[T];\n b: Tensor[T]) 92 +nim `+.` operators_broadcasted.html#+.,T,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `+.`[T: SomeNumber | Complex[float32] | Complex[float64]](val: T; t: Tensor[T]): Tensor[\n T] 107 +nim `+.` operators_broadcasted.html#+.,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `+.`[T: SomeNumber | Complex[float32] | Complex[float64]](t: Tensor[T]; val: T): Tensor[\n T] 112 +nim `-.` operators_broadcasted.html#-.,T,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `-.`[T: SomeNumber | Complex[float32] | Complex[float64]](val: T; t: Tensor[T]): Tensor[\n T] 117 +nim `-.` operators_broadcasted.html#-.,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `-.`[T: SomeNumber | Complex[float32] | Complex[float64]](t: Tensor[T]; val: T): Tensor[\n T] 122 +nim `*.` operators_broadcasted.html#*.,T,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] 
or Complex[system.float64]] proc `*.`[T: SomeNumber | Complex[float32] | Complex[float64]](val: T; t: Tensor[T]): Tensor[\n T] 127 +nim `*.` operators_broadcasted.html#*.,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `*.`[T: SomeNumber | Complex[float32] | Complex[float64]](t: Tensor[T]; val: T): Tensor[\n T] 132 +nim `/.` operators_broadcasted.html#/.,T,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `/.`[T: SomeNumber | Complex[float32] | Complex[float64]](val: T; t: Tensor[T]): Tensor[\n T] 137 +nim `/.` operators_broadcasted.html#/.,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `/.`[T: SomeNumber | Complex[float32] | Complex[float64]](t: Tensor[T]; val: T): Tensor[\n T] 145 +nim `^.` operators_broadcasted.html#^.,Tensor[T: float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `^.`[T: SomeFloat | Complex[float32] | Complex[float64]](t: Tensor[T];\n exponent: T): Tensor[T] 153 +nim `^.` operators_broadcasted.html#^.,T,Tensor[T: float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc `^.`[T: SomeFloat | Complex[float32] | Complex[float64]](base: T; t: Tensor[T]): Tensor[\n T] 158 +nim `+.=` operators_broadcasted.html#+.=,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `+.=`[T: SomeNumber | Complex[float32] | Complex[float64]](t: var Tensor[T];\n val: T) 166 +nim `-.=` operators_broadcasted.html#-.=,Tensor[T: int or int8 or int16 or int32 or 
int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `-.=`[T: SomeNumber | Complex[float32] | Complex[float64]](t: var Tensor[T];\n val: T) 172 +nim `^.=` operators_broadcasted.html#^.=,Tensor[T: float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `^.=`[T: SomeFloat | Complex[float32] | Complex[float64]](t: var Tensor[T];\n exponent: T) 178 +nim `*.=` operators_broadcasted.html#*.=,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `*.=`[T: SomeNumber | Complex[float32] | Complex[float64]](t: var Tensor[T];\n val: T) 184 +nim `/.=` operators_broadcasted.html#/.=,Tensor[T: int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64 or float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T proc `/.=`[T: SomeNumber | Complex[float32] | Complex[float64]](t: var Tensor[T];\n val: T) 190 +nim `.+` operators_broadcasted.html#.+,Tensor[T],Tensor[T] proc `.+`[T](a, b: Tensor[T]): Tensor[T] 200 +nim `.-` operators_broadcasted.html#.-,Tensor[T],Tensor[T] proc `.-`[T](a, b: Tensor[T]): Tensor[T] 201 +nim `.*` operators_broadcasted.html#.*,Tensor[T],Tensor[T] proc `.*`[T](a, b: Tensor[T]): Tensor[T] 202 +nim `./` operators_broadcasted.html#./,Tensor[T],Tensor[T] proc `./`[T](a, b: Tensor[T]): Tensor[T] 203 +nim `.^` operators_broadcasted.html#.^,Tensor[T],T proc `.^`[T](t: Tensor[T]; exponent: T): Tensor[T] 153 +nim `.^` operators_broadcasted.html#.^,T,Tensor[T] proc `.^`[T](base: T; t: Tensor[T]): Tensor[T] 204 +nim `.=+` operators_broadcasted.html#.=+,Tensor[T],Tensor[T] proc `.=+`[T](a: var Tensor[T]; b: Tensor[T]) 206 +nim `.=-` operators_broadcasted.html#.=-,Tensor[T],Tensor[T] proc `.=-`[T](a: var Tensor[T]; b: Tensor[T]) 207 +nim `.=*` 
operators_broadcasted.html#.=*,Tensor[T],Tensor[T] proc `.=*`[T](a: var Tensor[T]; b: Tensor[T]) 208 +nim `.=/` operators_broadcasted.html#.=/,Tensor[T],Tensor[T] proc `.=/`[T](a: var Tensor[T]; b: Tensor[T]) 209 +nim `.^=` operators_broadcasted.html#.^=,Tensor[T],T proc `.^=`[T](t: var Tensor[T]; exponent: T) 210 +nimgrp /. operators_broadcasted.html#/.-procs-all proc 45 +nimgrp *. operators_broadcasted.html#*.-procs-all proc 38 +nimgrp -.= operators_broadcasted.html#-.=-procs-all proc 74 +nimgrp ^. operators_broadcasted.html#^.-procs-all proc 153 +nimgrp *.= operators_broadcasted.html#*.=-procs-all proc 83 +nimgrp /.= operators_broadcasted.html#/.=-procs-all proc 92 +nimgrp +.= operators_broadcasted.html#+.=-procs-all proc 65 +nimgrp +. operators_broadcasted.html#+.-procs-all proc 28 +nimgrp -. operators_broadcasted.html#-.-procs-all proc 33 +nimgrp .^ operators_broadcasted.html#.^-procs-all proc 204 diff --git a/operators_broadcasted_cuda.html b/operators_broadcasted_cuda.html new file mode 100644 index 000000000..cc2f3871a --- /dev/null +++ b/operators_broadcasted_cuda.html @@ -0,0 +1,862 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/operators_broadcasted_cuda + + + + + + + + + +Arraymancer - src/arraymancer/tensor/operators_broadcasted_cuda + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/operators_broadcasted_cuda

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc `*.`[T: SomeFloat](a, b: CudaTensor[T]): CudaTensor[T] {.noinit.}
+
+ +

Element-wise multiplication (Hadamard product).

+

And broadcasted element-wise multiplication.

+ +   Source +Edit + +
+
+ +
+
+
+
proc `*.=`[T: SomeFloat](a: var CudaTensor[T]; b: CudaTensor[T])
+
+ +

Tensor broadcasted in-place multiplication (Hadamard product)

+

Only the right hand side tensor can be broadcasted

+ +   Source +Edit + +
+
+ +
+
+
+
proc `+.`[T: SomeFloat](a, b: CudaTensor[T]): CudaTensor[T] {.noinit, inline.}
+
+ + Broadcasted addition for tensors of incompatible but broadcastable shape. +   Source +Edit + +
+
+
+
proc `+.`[T: SomeFloat](t: CudaTensor[T]; val: T): CudaTensor[T] {.noinit.}
+
+ + Broadcasted addition for scalar + tensor. +   Source +Edit + +
+
+
+
proc `+.`[T: SomeFloat](val: T; t: CudaTensor[T]): CudaTensor[T] {.noinit.}
+
+ + Broadcasted addition for tensor + scalar. +   Source +Edit + +
+
+ +
+
+
+
proc `+.=`[T: SomeFloat](a: var CudaTensor[T]; b: CudaTensor[T])
+
+ +

Tensor broadcasted in-place addition.

+

Only the right hand side tensor can be broadcasted.

+ +   Source +Edit + +
+
+
+
proc `+.=`[T: SomeFloat](t: var CudaTensor[T]; val: T)
+
+ + Broadcasted addition for scalar + tensor. +   Source +Edit + +
+
+ +
+
+
+
proc `-.`[T: SomeFloat](a, b: CudaTensor[T]): CudaTensor[T] {.noinit, inline.}
+
+ + Broadcasted addition for tensors of incompatible but broadcastable shape. +   Source +Edit + +
+
+
+
proc `-.`[T: SomeFloat](t: CudaTensor[T]; val: T): CudaTensor[T] {.noinit.}
+
+ + Broadcasted substraction for scalar - tensor. +   Source +Edit + +
+
+
+
proc `-.`[T: SomeFloat](val: T; t: CudaTensor[T]): CudaTensor[T] {.noinit.}
+
+ + Broadcasted substraction for tensor - scalar. +   Source +Edit + +
+
+ +
+
+
+
proc `-.=`[T: SomeFloat](a: var CudaTensor[T]; b: CudaTensor[T])
+
+ +

Tensor broadcasted in-place substraction.

+

Only the right hand side tensor can be broadcasted.

+ +   Source +Edit + +
+
+
+
proc `-.=`[T: SomeFloat](t: var CudaTensor[T]; val: T)
+
+ + Broadcasted substraction for scalar - tensor. +   Source +Edit + +
+
+ +
+
+
+
proc `.*`[T](a, b: CudaTensor[T]): CudaTensor[T] {.noinit,
+    ...deprecated: "Use `*.` instead".}
+
+
+ Deprecated: Use `*.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.+`[T](a, b: CudaTensor[T]): CudaTensor[T] {.noinit, inline,
+    ...deprecated: "Use `+.` instead".}
+
+
+ Deprecated: Use `+.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.-`[T](a, b: CudaTensor[T]): CudaTensor[T] {.noinit, inline,
+    ...deprecated: "Use `-.` instead".}
+
+
+ Deprecated: Use `-.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `./`[T](a, b: CudaTensor[T]): CudaTensor[T] {.noinit,
+    ...deprecated: "Use `/.` instead".}
+
+
+ Deprecated: Use `/.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.=*`[T](a: var CudaTensor[T]; b: CudaTensor[T]) {.
+    ...deprecated: "Use `*.=` instead".}
+
+
+ Deprecated: Use `*.=` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.=+`[T](a: var CudaTensor[T]; b: CudaTensor[T]) {.
+    ...deprecated: "Use `+.=` instead".}
+
+
+ Deprecated: Use `+.=` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.=-`[T](a: var CudaTensor[T]; b: CudaTensor[T]) {.
+    ...deprecated: "Use `-.=` instead".}
+
+
+ Deprecated: Use `-.=` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.=/`[T](a: var CudaTensor[T]; b: CudaTensor[T]) {.
+    ...deprecated: "Use `/.=` instead".}
+
+
+ Deprecated: Use `/.=` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `/.`[T: SomeFloat](a, b: CudaTensor[T]): CudaTensor[T] {.noinit.}
+
+ + CudaTensor substraction +   Source +Edit + +
+
+
+
proc `/.`[T: SomeFloat](val: T; t: CudaTensor[T]): CudaTensor[T] {.noinit.}
+
+ + Broadcasted division of a float by a tensor of floats. +   Source +Edit + +
+
+ +
+
+
+
proc `/.=`[T: SomeFloat](a: var CudaTensor[T]; b: CudaTensor[T])
+
+ +

Tensor broadcasted in-place float division.

+

Only the right hand side tensor can be broadcasted.

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/operators_broadcasted_cuda.idx b/operators_broadcasted_cuda.idx new file mode 100644 index 000000000..4fcdf00e0 --- /dev/null +++ b/operators_broadcasted_cuda.idx @@ -0,0 +1,29 @@ +nimTitle operators_broadcasted_cuda operators_broadcasted_cuda.html module src/arraymancer/tensor/operators_broadcasted_cuda 0 +nim `+.` operators_broadcasted_cuda.html#+.,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc `+.`[T: SomeFloat](a, b: CudaTensor[T]): CudaTensor[T] 33 +nim `-.` operators_broadcasted_cuda.html#-.,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc `-.`[T: SomeFloat](a, b: CudaTensor[T]): CudaTensor[T] 38 +nim `*.` operators_broadcasted_cuda.html#*.,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc `*.`[T: SomeFloat](a, b: CudaTensor[T]): CudaTensor[T] 43 +nim `/.` operators_broadcasted_cuda.html#/.,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc `/.`[T: SomeFloat](a, b: CudaTensor[T]): CudaTensor[T] 53 +nim `+.=` operators_broadcasted_cuda.html#+.=,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc `+.=`[T: SomeFloat](a: var CudaTensor[T]; b: CudaTensor[T]) 68 +nim `-.=` operators_broadcasted_cuda.html#-.=,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc `-.=`[T: SomeFloat](a: var CudaTensor[T]; b: CudaTensor[T]) 77 +nim `*.=` operators_broadcasted_cuda.html#*.=,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc `*.=`[T: SomeFloat](a: var CudaTensor[T]; b: CudaTensor[T]) 86 +nim `/.=` operators_broadcasted_cuda.html#/.=,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] proc `/.=`[T: SomeFloat](a: var CudaTensor[T]; b: CudaTensor[T]) 95 +nim `+.` operators_broadcasted_cuda.html#+.,CudaTensor[T: SomeFloat],T proc `+.`[T: SomeFloat](t: CudaTensor[T]; val: T): CudaTensor[T] 111 +nim `-.` operators_broadcasted_cuda.html#-.,CudaTensor[T: SomeFloat],T proc `-.`[T: SomeFloat](t: CudaTensor[T]; val: T): CudaTensor[T] 116 +nim `+.` operators_broadcasted_cuda.html#+.,T,CudaTensor[T: SomeFloat] proc `+.`[T: SomeFloat](val: 
T; t: CudaTensor[T]): CudaTensor[T] 125 +nim `-.` operators_broadcasted_cuda.html#-.,T,CudaTensor[T: SomeFloat] proc `-.`[T: SomeFloat](val: T; t: CudaTensor[T]): CudaTensor[T] 130 +nim `/.` operators_broadcasted_cuda.html#/.,T,CudaTensor[T: SomeFloat] proc `/.`[T: SomeFloat](val: T; t: CudaTensor[T]): CudaTensor[T] 135 +nim `+.=` operators_broadcasted_cuda.html#+.=,CudaTensor[T: SomeFloat],T proc `+.=`[T: SomeFloat](t: var CudaTensor[T]; val: T) 146 +nim `-.=` operators_broadcasted_cuda.html#-.=,CudaTensor[T: SomeFloat],T proc `-.=`[T: SomeFloat](t: var CudaTensor[T]; val: T) 150 +nim `.+` operators_broadcasted_cuda.html#.+,CudaTensor[T],CudaTensor[T] proc `.+`[T](a, b: CudaTensor[T]): CudaTensor[T] 157 +nim `.-` operators_broadcasted_cuda.html#.-,CudaTensor[T],CudaTensor[T] proc `.-`[T](a, b: CudaTensor[T]): CudaTensor[T] 158 +nim `.*` operators_broadcasted_cuda.html#.*,CudaTensor[T],CudaTensor[T] proc `.*`[T](a, b: CudaTensor[T]): CudaTensor[T] 159 +nim `./` operators_broadcasted_cuda.html#./,CudaTensor[T],CudaTensor[T] proc `./`[T](a, b: CudaTensor[T]): CudaTensor[T] 160 +nim `.=+` operators_broadcasted_cuda.html#.=+,CudaTensor[T],CudaTensor[T] proc `.=+`[T](a: var CudaTensor[T]; b: CudaTensor[T]) 162 +nim `.=-` operators_broadcasted_cuda.html#.=-,CudaTensor[T],CudaTensor[T] proc `.=-`[T](a: var CudaTensor[T]; b: CudaTensor[T]) 163 +nim `.=*` operators_broadcasted_cuda.html#.=*,CudaTensor[T],CudaTensor[T] proc `.=*`[T](a: var CudaTensor[T]; b: CudaTensor[T]) 164 +nim `.=/` operators_broadcasted_cuda.html#.=/,CudaTensor[T],CudaTensor[T] proc `.=/`[T](a: var CudaTensor[T]; b: CudaTensor[T]) 165 +nimgrp /. operators_broadcasted_cuda.html#/.-procs-all proc 53 +nimgrp -.= operators_broadcasted_cuda.html#-.=-procs-all proc 77 +nimgrp +.= operators_broadcasted_cuda.html#+.=-procs-all proc 68 +nimgrp +. operators_broadcasted_cuda.html#+.-procs-all proc 33 +nimgrp -. 
operators_broadcasted_cuda.html#-.-procs-all proc 38 diff --git a/operators_broadcasted_opencl.html b/operators_broadcasted_opencl.html new file mode 100644 index 000000000..c927032f2 --- /dev/null +++ b/operators_broadcasted_opencl.html @@ -0,0 +1,597 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/operators_broadcasted_opencl + + + + + + + + + +Arraymancer - src/arraymancer/tensor/operators_broadcasted_opencl + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/operators_broadcasted_opencl

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc `*.`[T: SomeFloat](a, b: ClTensor[T]): ClTensor[T] {.noinit.}
+
+ +

Element-wise multiplication (Hadamard product).

+

And broadcasted element-wise multiplication.

+ +   Source +Edit + +
+
+ +
+
+
+
proc `+.`[T: SomeFloat](a, b: ClTensor[T]): ClTensor[T] {.noinit, inline.}
+
+ + Broadcasted addition for tensors of incompatible but broadcastable shape. +   Source +Edit + +
+
+ +
+
+
+
proc `-.`[T: SomeFloat](a, b: ClTensor[T]): ClTensor[T] {.noinit, inline.}
+
+ + Broadcasted addition for tensors of incompatible but broadcastable shape. +   Source +Edit + +
+
+ +
+
+
+
proc `.*`[T](a, b: ClTensor[T]): ClTensor[T] {.noinit,
+    ...deprecated: "Use `*.` instead".}
+
+
+ Deprecated: Use `*.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.+`[T](a, b: ClTensor[T]): ClTensor[T] {.noinit, inline,
+    ...deprecated: "Use `+.` instead".}
+
+
+ Deprecated: Use `+.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.-`[T](a, b: ClTensor[T]): ClTensor[T] {.noinit, inline,
+    ...deprecated: "Use `-.` instead".}
+
+
+ Deprecated: Use `-.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `./`[T](a, b: ClTensor[T]): ClTensor[T] {.noinit,
+    ...deprecated: "Use `/.` instead".}
+
+
+ Deprecated: Use `/.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `/.`[T: SomeFloat](a, b: ClTensor[T]): ClTensor[T] {.noinit.}
+
+ +

Element-wise multiplication (Hadamard product).

+

And broadcasted element-wise multiplication.

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/operators_broadcasted_opencl.idx b/operators_broadcasted_opencl.idx new file mode 100644 index 000000000..acb44745c --- /dev/null +++ b/operators_broadcasted_opencl.idx @@ -0,0 +1,9 @@ +nimTitle operators_broadcasted_opencl operators_broadcasted_opencl.html module src/arraymancer/tensor/operators_broadcasted_opencl 0 +nim `+.` operators_broadcasted_opencl.html#+.,ClTensor[T: SomeFloat],ClTensor[T: SomeFloat] proc `+.`[T: SomeFloat](a, b: ClTensor[T]): ClTensor[T] 27 +nim `-.` operators_broadcasted_opencl.html#-.,ClTensor[T: SomeFloat],ClTensor[T: SomeFloat] proc `-.`[T: SomeFloat](a, b: ClTensor[T]): ClTensor[T] 32 +nim `*.` operators_broadcasted_opencl.html#*.,ClTensor[T: SomeFloat],ClTensor[T: SomeFloat] proc `*.`[T: SomeFloat](a, b: ClTensor[T]): ClTensor[T] 42 +nim `/.` operators_broadcasted_opencl.html#/.,ClTensor[T: SomeFloat],ClTensor[T: SomeFloat] proc `/.`[T: SomeFloat](a, b: ClTensor[T]): ClTensor[T] 49 +nim `.+` operators_broadcasted_opencl.html#.+,ClTensor[T],ClTensor[T] proc `.+`[T](a, b: ClTensor[T]): ClTensor[T] 60 +nim `.-` operators_broadcasted_opencl.html#.-,ClTensor[T],ClTensor[T] proc `.-`[T](a, b: ClTensor[T]): ClTensor[T] 61 +nim `.*` operators_broadcasted_opencl.html#.*,ClTensor[T],ClTensor[T] proc `.*`[T](a, b: ClTensor[T]): ClTensor[T] 62 +nim `./` operators_broadcasted_opencl.html#./,ClTensor[T],ClTensor[T] proc `./`[T](a, b: ClTensor[T]): ClTensor[T] 63 diff --git a/operators_comparison.html b/operators_comparison.html new file mode 100644 index 000000000..723fd2984 --- /dev/null +++ b/operators_comparison.html @@ -0,0 +1,994 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/operators_comparison + + + + + + + + + +Arraymancer - src/arraymancer/tensor/operators_comparison + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/operators_comparison

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc `!=.`[T](a, b: Tensor[T]): Tensor[bool] {.noinit.}
+
+ +

Tensor element-wise inequality.

+

And broadcasted element-wise inequality.

+

Returns:

+
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+
+
proc `!=.`[T](t: Tensor[T]; value: T): Tensor[bool] {.noinit.}
+
+ + Tensor element-wise inequality with scalar Returns:
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc `.!=`[T](a, b: Tensor[T]): Tensor[bool] {.noinit,
+    ...deprecated: "Use `!=.` instead".}
+
+
+ Deprecated: Use `!=.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.<`[T](a, b: Tensor[T]): Tensor[bool] {.noinit,
+    ...deprecated: "Use `<.` instead".}
+
+
+ Deprecated: Use `<.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.<=`[T](a, b: Tensor[T]): Tensor[bool] {.noinit,
+    ...deprecated: "Use `<=.` instead".}
+
+
+ Deprecated: Use `<=.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.==`[T](a, b: Tensor[T]): Tensor[bool] {.noinit,
+    ...deprecated: "Use `==.` instead".}
+
+
+ Deprecated: Use `==.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.>`[T](a, b: Tensor[T]): Tensor[bool] {.noinit,
+    ...deprecated: "Use `>.` instead".}
+
+
+ Deprecated: Use `>.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `.>=`[T](a, b: Tensor[T]): Tensor[bool] {.noinit,
+    ...deprecated: "Use `>=.` instead".}
+
+
+ Deprecated: Use `>=.` instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc `<.`[T](a, b: Tensor[T]): Tensor[bool] {.noinit.}
+
+ +

Tensor element-wise lesser than.

+

And broadcasted element-wise lesser than.

+

Returns:

+
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+
+
proc `<.`[T](t: Tensor[T]; value: T): Tensor[bool] {.noinit.}
+
+ + Tensor element-wise lesser than a scalar Returns:
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc `<=.`[T](a, b: Tensor[T]): Tensor[bool] {.noinit.}
+
+ +

Tensor element-wise lesser or equal.

+

And broadcasted element-wise lesser or equal.

+

Returns:

+
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+
+
proc `<=.`[T](t: Tensor[T]; value: T): Tensor[bool] {.noinit.}
+
+ + Tensor element-wise lesser or equal with scalar Returns:
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc `==`[T](a, b: Tensor[T]): bool {.noSideEffect.}
+
+ + Tensor comparison +   Source +Edit + +
+
+ +
+
+
+
proc `==.`[T](a, b: Tensor[T]): Tensor[bool] {.noinit.}
+
+ +

Tensor element-wise equality.

+

And broadcasted element-wise equality.

+

Returns:

+
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+
+
proc `==.`[T](t: Tensor[T]; value: T): Tensor[bool] {.noinit.}
+
+ + Tensor element-wise equality with scalar Returns:
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc `>.`[T](a, b: Tensor[T]): Tensor[bool] {.noinit.}
+
+ +

Tensor element-wise greater than.

+

And broadcasted element-wise greater than.

+

Returns:

+
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+
+
proc `>.`[T](t: Tensor[T]; value: T): Tensor[bool] {.noinit.}
+
+ + Tensor element-wise greater than a scalar Returns:
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc `>=.`[T](a, b: Tensor[T]): Tensor[bool] {.noinit.}
+
+ +

Tensor element-wise greater or equal.

+

And broadcasted element-wise greater or equal.

+

Returns:

+
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+
+
proc `>=.`[T](t: Tensor[T]; value: T): Tensor[bool] {.noinit.}
+
+ + Tensor element-wise greater or equal than a scalar Returns:
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc isNaN(t: Tensor[SomeFloat]): Tensor[bool]
+
+ + Returns a boolean tensor set to true for each element which is "Not-a-number" or set to false otherwise +   Source +Edit + +
+
+ +
+
+
+
proc isNotNaN(t: Tensor[SomeFloat]): Tensor[bool]
+
+ + Returns a boolean tensor set to false for each element which is "Not-a-number" or set to true otherwise +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template `!=.`[T](value: T; t: Tensor[T]): Tensor[bool]
+
+ + Element-wise scalar inequality with tensor Returns:
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
template `<.`[T](value: T; t: Tensor[T]): Tensor[bool]
+
+ + Element-wise scalar smaller than tensor Returns:
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
template `<=.`[T](value: T; t: Tensor[T]): Tensor[bool]
+
+ + Element-wise scalar smaller or equal than tensor Returns:
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
template `==.`[T](value: T; t: Tensor[T]): Tensor[bool]
+
+ + Element-wise scalar equality with tensor Returns:
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
template `>.`[T](value: T; t: Tensor[T]): Tensor[bool]
+
+ + Element-wise scalar greater than tensor Returns:
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
template `>=.`[T](value: T; t: Tensor[T]): Tensor[bool]
+
+ + Element-wise scalar greater or equal than tensor Returns:
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/operators_comparison.idx b/operators_comparison.idx new file mode 100644 index 000000000..5b02806b0 --- /dev/null +++ b/operators_comparison.idx @@ -0,0 +1,34 @@ +nimTitle operators_comparison operators_comparison.html module src/arraymancer/tensor/operators_comparison 0 +nim `==` operators_comparison.html#==,Tensor[T],Tensor[T] proc `==`[T](a, b: Tensor[T]): bool 21 +nim `==.` operators_comparison.html#==.,Tensor[T],Tensor[T] proc `==.`[T](a, b: Tensor[T]): Tensor[bool] 41 +nim `!=.` operators_comparison.html#!=.,Tensor[T],Tensor[T] proc `!=.`[T](a, b: Tensor[T]): Tensor[bool] 51 +nim `<=.` operators_comparison.html#<=.,Tensor[T],Tensor[T] proc `<=.`[T](a, b: Tensor[T]): Tensor[bool] 60 +nim `<.` operators_comparison.html#<.,Tensor[T],Tensor[T] proc `<.`[T](a, b: Tensor[T]): Tensor[bool] 69 +nim `>=.` operators_comparison.html#>=.,Tensor[T],Tensor[T] proc `>=.`[T](a, b: Tensor[T]): Tensor[bool] 78 +nim `>.` operators_comparison.html#>.,Tensor[T],Tensor[T] proc `>.`[T](a, b: Tensor[T]): Tensor[bool] 87 +nim `.==` operators_comparison.html#.==,Tensor[T],Tensor[T] proc `.==`[T](a, b: Tensor[T]): Tensor[bool] 96 +nim `.!=` operators_comparison.html#.!=,Tensor[T],Tensor[T] proc `.!=`[T](a, b: Tensor[T]): Tensor[bool] 97 +nim `.<=` operators_comparison.html#.<=,Tensor[T],Tensor[T] proc `.<=`[T](a, b: Tensor[T]): Tensor[bool] 98 +nim `.<` operators_comparison.html#.<,Tensor[T],Tensor[T] proc `.<`[T](a, b: Tensor[T]): Tensor[bool] 99 +nim `.>=` operators_comparison.html#.>=,Tensor[T],Tensor[T] proc `.>=`[T](a, b: Tensor[T]): Tensor[bool] 100 +nim `.>` operators_comparison.html#.>,Tensor[T],Tensor[T] proc `.>`[T](a, b: Tensor[T]): Tensor[bool] 101 +nim `==.` operators_comparison.html#==.,Tensor[T],T proc `==.`[T](t: Tensor[T]; value: T): Tensor[bool] 111 +nim `!=.` operators_comparison.html#!=.,Tensor[T],T proc `!=.`[T](t: Tensor[T]; value: T): Tensor[bool] 117 +nim `<=.` operators_comparison.html#<=.,Tensor[T],T proc `<=.`[T](t: Tensor[T]; value: T): 
Tensor[bool] 123 +nim `<.` operators_comparison.html#<.,Tensor[T],T proc `<.`[T](t: Tensor[T]; value: T): Tensor[bool] 129 +nim `>=.` operators_comparison.html#>=.,Tensor[T],T proc `>=.`[T](t: Tensor[T]; value: T): Tensor[bool] 135 +nim `>.` operators_comparison.html#>.,Tensor[T],T proc `>.`[T](t: Tensor[T]; value: T): Tensor[bool] 141 +nim `==.` operators_comparison.html#==..t,T,Tensor[T] template `==.`[T](value: T; t: Tensor[T]): Tensor[bool] 151 +nim `!=.` operators_comparison.html#!=..t,T,Tensor[T] template `!=.`[T](value: T; t: Tensor[T]): Tensor[bool] 157 +nim `<=.` operators_comparison.html#<=..t,T,Tensor[T] template `<=.`[T](value: T; t: Tensor[T]): Tensor[bool] 163 +nim `<.` operators_comparison.html#<..t,T,Tensor[T] template `<.`[T](value: T; t: Tensor[T]): Tensor[bool] 169 +nim `>=.` operators_comparison.html#>=..t,T,Tensor[T] template `>=.`[T](value: T; t: Tensor[T]): Tensor[bool] 175 +nim `>.` operators_comparison.html#>..t,T,Tensor[T] template `>.`[T](value: T; t: Tensor[T]): Tensor[bool] 181 +nim isNaN operators_comparison.html#isNaN,Tensor[SomeFloat] proc isNaN(t: Tensor[SomeFloat]): Tensor[bool] 191 +nim isNotNaN operators_comparison.html#isNotNaN,Tensor[SomeFloat] proc isNotNaN(t: Tensor[SomeFloat]): Tensor[bool] 198 +nimgrp !=. operators_comparison.html#!=.-procs-all proc 51 +nimgrp <. operators_comparison.html#<.-procs-all proc 69 +nimgrp ==. operators_comparison.html#==.-procs-all proc 41 +nimgrp >=. operators_comparison.html#>=.-procs-all proc 78 +nimgrp >. operators_comparison.html#>.-procs-all proc 87 +nimgrp <=. operators_comparison.html#<=.-procs-all proc 60 diff --git a/operators_logical.html b/operators_logical.html new file mode 100644 index 000000000..2c409cc1a --- /dev/null +++ b/operators_logical.html @@ -0,0 +1,521 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/operators_logical + + + + + + + + + +Arraymancer - src/arraymancer/tensor/operators_logical + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/operators_logical

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc `and`(a, b: Tensor[bool]): Tensor[bool] {.noinit, ...raises: [ValueError],
+    tags: [], forbids: [].}
+
+ +

Tensor element-wise boolean and.

+

Returns:

+
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc `not`(a: Tensor[bool]): Tensor[bool] {.noinit, ...raises: [], tags: [],
+    forbids: [].}
+
+ +

Tensor element-wise boolean and.

+

Returns:

+
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc `or`(a, b: Tensor[bool]): Tensor[bool] {.noinit, ...raises: [ValueError],
+    tags: [], forbids: [].}
+
+ +

Tensor element-wise boolean or.

+

Returns:

+
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc `xor`(a, b: Tensor[bool]): Tensor[bool] {.noinit, ...raises: [ValueError],
+    tags: [], forbids: [].}
+
+ +

Tensor element-wise boolean xor.

+

Returns:

+
  • A tensor of boolean
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/operators_logical.idx b/operators_logical.idx new file mode 100644 index 000000000..30dd95f13 --- /dev/null +++ b/operators_logical.idx @@ -0,0 +1,5 @@ +nimTitle operators_logical operators_logical.html module src/arraymancer/tensor/operators_logical 0 +nim `and` operators_logical.html#and,Tensor[bool],Tensor[bool] proc `and`(a, b: Tensor[bool]): Tensor[bool] 25 +nim `or` operators_logical.html#or,Tensor[bool],Tensor[bool] proc `or`(a, b: Tensor[bool]): Tensor[bool] 32 +nim `xor` operators_logical.html#xor,Tensor[bool],Tensor[bool] proc `xor`(a, b: Tensor[bool]): Tensor[bool] 39 +nim `not` operators_logical.html#not,Tensor[bool] proc `not`(a: Tensor[bool]): Tensor[bool] 46 diff --git a/optim_ops_fusion.html b/optim_ops_fusion.html new file mode 100644 index 000000000..030525967 --- /dev/null +++ b/optim_ops_fusion.html @@ -0,0 +1,533 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/optim_ops_fusion + + + + + + + + + +Arraymancer - src/arraymancer/tensor/optim_ops_fusion + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/optim_ops_fusion

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Templates

+
+
+
+
template rewriteTensor_AddMultiply{
+  C + `*`(A, B)
+}[T](A, B, C: Tensor[T]): auto
+
+ +

Fuse C + A * B into a single operation.

+

Operation fusion leverage the Nim compiler and should not be called explicitly.

+ +   Source +Edit + +
+
+ +
+
+
+
template rewriteTensor_MultiplyAdd{
+  `*`(A, B) + C
+}[T](A, B, C: Tensor[T]): auto
+
+ +

Fuse A*B + C into a single operation.

+

Operation fusion leverage the Nim compiler and should not be called explicitly.

+ +   Source +Edit + +
+
+ +
+
+
+
template rewriteTensor_MultiplyAdd_inplace{
+  C += `*`(A, B)
+}[T](A, B: Tensor[T]; C: var Tensor[T])
+
+ +

Fuse C+=A*B into a single operation.

+

Operation fusion leverage the Nim compiler and should not be called explicitly.

+ +   Source +Edit + +
+
+ +
+
+
+
template rewriteToTensorReshape{
+  reshape(toTensor(oa, dummy_bugfix), shape)
+}(oa: openArray; shape: varargs[int]; dummy_bugfix: static[int]): auto
+
+ +

Fuse sequence.toTensor.reshape(new_shape) into a single operation.

+

Operation fusion leverage the Nim compiler and should not be called explicitly.

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/optim_ops_fusion.idx b/optim_ops_fusion.idx new file mode 100644 index 000000000..bd0a10c13 --- /dev/null +++ b/optim_ops_fusion.idx @@ -0,0 +1,5 @@ +nimTitle optim_ops_fusion optim_ops_fusion.html module src/arraymancer/tensor/optim_ops_fusion 0 +nim rewriteTensor_MultiplyAdd optim_ops_fusion.html#rewriteTensor_MultiplyAdd.t,Tensor[T],Tensor[T],Tensor[T] template rewriteTensor_MultiplyAdd{\n `*`(A, B) + C\n}[T](A, B, C: Tensor[T]): auto 50 +nim rewriteTensor_AddMultiply optim_ops_fusion.html#rewriteTensor_AddMultiply.t,Tensor[T],Tensor[T],Tensor[T] template rewriteTensor_AddMultiply{\n C + `*`(A, B)\n}[T](A, B, C: Tensor[T]): auto 57 +nim rewriteTensor_MultiplyAdd_inplace optim_ops_fusion.html#rewriteTensor_MultiplyAdd_inplace.t,Tensor[T],Tensor[T],Tensor[T] template rewriteTensor_MultiplyAdd_inplace{\n C += `*`(A, B)\n}[T](A, B: Tensor[T]; C: var Tensor[T]) 65 +nim rewriteToTensorReshape optim_ops_fusion.html#rewriteToTensorReshape.t,openArray,varargs[int],static[int] template rewriteToTensorReshape{\n reshape(toTensor(oa, dummy_bugfix), shape)\n}(oa: openArray; shape: varargs[int]; dummy_bugfix: static[int]): auto 101 diff --git a/optimizers.html b/optimizers.html new file mode 100644 index 000000000..c781a9442 --- /dev/null +++ b/optimizers.html @@ -0,0 +1,778 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/optimizers/optimizers + + + + + + + + + +Arraymancer - src/arraymancer/nn/optimizers/optimizers + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/optimizers/optimizers

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
Adam[TT] = object
+  ## Learnable weights
+  ## Decays on first and second moment
+  ## Current decay
+  ## Exponential moving averages (mean estimation)
+  ## Exponential moving averages squared (uncentered variance)
+  ## Epsilon for numerical stability when dividing
+  
+
+ + Adaptative Moment Estimation +   Source +Edit + +
+
+
+
Optimizer[TT] = SGD[TT] or Adam[TT]
+
+ + +   Source +Edit + +
+
+
+
SGD[TT] = object
+  params*: seq[Variable[TT]]
+  lr*: TT.T
+
+
+ + Stochastic gradient descent without momentum. +   Source +Edit + +
+
+
+
SGDMomentum[TT] = object
+  params*: seq[Variable[TT]]
+  lr*: TT.T                  ## Learning rate
+  momentum*: TT.T            ## Value of the momentum
+  ## Moments for momentum
+  ## Learning rate decay
+  ## Flag for Nesterov momentum
+  
+
+ + Stochastic gradient descent with momentum. Details on Nesterov momentum can be found in Sutskever et. al. 2013 +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc newSGD[T](params: varargs[Variable[Tensor[T]]]; learning_rate: T): SGD[
+    Tensor[T]] {....deprecated: "Use the optimizer macro instead".}
+
+
+ Deprecated: Use the optimizer macro instead +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc optimizer[M, T](model: M; OptimizerKind: typedesc[Adam];
+                     learning_rate: T = T(0.001); beta1: T = T(0.9);
+                     beta2: T = T(0.999); eps: T = T(1e-8)): Adam[Tensor[T]]
+
+ + Create a Adam optimizer that will update the model weight +   Source +Edit + +
+
+
+
func optimizer[M, T](model: M; OptimizerKind: typedesc[SGD]; learning_rate: T): SGD[
+    Tensor[T]]
+
+ +

Create a SGD optimizer that will update the model weight

+

Parameters:

+
  • model Model to optimize.
  • +
  • learning_rate Learning rate.
  • +
+

Returns:

+
  • A SGD optimizer with the given learning rate.
  • +
+ +   Source +Edit + +
+
+
+
proc optimizer[M, T](model: M; OptimizerKind: typedesc[SGDMomentum];
+                     learning_rate: T; momentum: T = T(0.0); decay: T = T(0.0);
+                     nesterov = false): SGDMomentum[Tensor[T]]
+
+ +

Create a SGD optimizer with optional momentum that will update the model weight

+

Parameters:

+
  • model Model to optimize.
  • +
  • learning_rate Learning rate.
  • +
  • momentum Momentum.
  • +
  • decay How much the learning rate will decay each update.
  • +
  • nesterov Whether to use Nesterov momentum or not.
  • +
+

Returns:

+
  • A SGD optimizer with momentum with the given parameters.
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc optimizerAdam[M, T](model: M; learning_rate: T; beta1: T = T(0.9);
+                         beta2: T = T(0.999); eps: T = T(1e-8)): Adam[Tensor[T]] {.
+    ...deprecated: "Use optimizer(model, SGDMomentum, learning_rate) instead.".}
+
+
+ Deprecated: Use optimizer(model, SGDMomentum, learning_rate) instead. +
+ + +   Source +Edit + +
+
+ +
+
+
+
func optimizerSGD[M, T](model: M; learning_rate: T): SGD[Tensor[T]] {.
+    ...deprecated: "Use optimizer(model, SGD, learning_rate) instead.".}
+
+
+ Deprecated: Use optimizer(model, SGD, learning_rate) instead. +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc optimizerSGDMomentum[M, T](model: M; learning_rate: T; momentum = T(0.0);
+                                decay = T(0.0); nesterov = false): SGDMomentum[
+    Tensor[T]] {....deprecated: "Use optimizer(model, SGDMomentum, learning_rate) instead.".}
+
+
+ Deprecated: Use optimizer(model, SGDMomentum, learning_rate) instead. +
+ + +   Source +Edit + +
+
+ +
+
+
+
proc update(self: SGD)
+
+ +

Performs an optimization update.

+

Parameters:

+
  • self A SGD optimizer to update.
  • +
+

This proc will update the weights in the model associated with the input optimizer according to the following rule: w = w - lr * gradient

+ +   Source +Edit + +
+
+
+
proc update(self: var Adam)
+
+ + +   Source +Edit + +
+
+
+
proc update(self: var SGDMomentum)
+
+ +

Performs an optimization update.

+

Parameters:

+
  • self A SGDMomentum optimizer to update.
  • +
+

This proc will update the weights in the model associated with the input optimizer according to the following rule: w = w - lr * gradient + m * moment If nesterov is set to true then the following rule is applied instead: w = w - lr * gradient + m * v

+

v = - lr * gradient + m * moment

+

Where in both cases the moment is the gradient change applied in the previous update step and m is the momentum.

+

If decay is greater than 0, the learning rate will be modified each call according to the following: lr = lr * 1/(1 + decay)

+ +   Source +Edit + +
+
+ +
+
+
+
proc zeroGrads(o: Optimizer)
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/optimizers.idx b/optimizers.idx new file mode 100644 index 000000000..606b0a513 --- /dev/null +++ b/optimizers.idx @@ -0,0 +1,18 @@ +nimTitle optimizers optimizers.html module src/arraymancer/nn/optimizers/optimizers 0 +nim SGD optimizers.html#SGD object SGD 27 +nim newSGD optimizers.html#newSGD,varargs[Variable[Tensor[T]]],T proc newSGD[T](params: varargs[Variable[Tensor[T]]]; learning_rate: T): SGD[Tensor[T]] 32 +nim update optimizers.html#update,SGD proc update(self: SGD) 35 +nim optimizer optimizers.html#optimizer,M,typedesc[SGD],T proc optimizer[M, T](model: M; OptimizerKind: typedesc[SGD]; learning_rate: T): SGD[\n Tensor[T]] 55 +nim optimizerSGD optimizers.html#optimizerSGD,M,T proc optimizerSGD[M, T](model: M; learning_rate: T): SGD[Tensor[T]] 80 +nim SGDMomentum optimizers.html#SGDMomentum object SGDMomentum 90 +nim update optimizers.html#update,SGDMomentum proc update(self: var SGDMomentum) 101 +nim optimizer optimizers.html#optimizer,M,typedesc[SGDMomentum],T,T,T proc optimizer[M, T](model: M; OptimizerKind: typedesc[SGDMomentum];\n learning_rate: T; momentum: T = T(0.0); decay: T = T(0.0);\n nesterov = false): SGDMomentum[Tensor[T]] 149 +nim optimizerSGDMomentum optimizers.html#optimizerSGDMomentum,M,T,typeof(T(0.0)),typeof(T(0.0)) proc optimizerSGDMomentum[M, T](model: M; learning_rate: T; momentum = T(0.0);\n decay = T(0.0); nesterov = false): SGDMomentum[\n Tensor[T]] 184 +nim Adam optimizers.html#Adam object Adam 194 +nim update optimizers.html#update,Adam proc update(self: var Adam) 204 +nim optimizer optimizers.html#optimizer,M,typedesc[Adam],T,T,T,T proc optimizer[M, T](model: M; OptimizerKind: typedesc[Adam];\n learning_rate: T = T(0.001); beta1: T = T(0.9);\n beta2: T = T(0.999); eps: T = T(1e-8)): Adam[Tensor[T]] 230 +nim optimizerAdam optimizers.html#optimizerAdam,M,T,T,T,T proc optimizerAdam[M, T](model: M; learning_rate: T; beta1: T = T(0.9);\n beta2: T = T(0.999); eps: T = T(1e-8)): Adam[Tensor[T]] 259 +nim Optimizer 
optimizers.html#Optimizer type Optimizer 269 +nim zeroGrads optimizers.html#zeroGrads,Optimizer proc zeroGrads(o: Optimizer) 271 +nimgrp update optimizers.html#update-procs-all proc 35 +nimgrp optimizer optimizers.html#optimizer-procs-all proc 55 diff --git a/overload.html b/overload.html new file mode 100644 index 000000000..81046cd27 --- /dev/null +++ b/overload.html @@ -0,0 +1,435 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra/helpers/overload + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra/helpers/overload + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/linear_algebra/helpers/overload

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Macros

+
+
+
+
macro overload(overloaded_name: untyped; lapack_name: typed{nkSym}): untyped
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/overload.idx b/overload.idx new file mode 100644 index 000000000..840f08f69 --- /dev/null +++ b/overload.idx @@ -0,0 +1,2 @@ +nimTitle overload overload.html module src/arraymancer/linear_algebra/helpers/overload 0 +nim overload overload.html#overload.m,untyped, macro overload(overloaded_name: untyped; lapack_name: typed{nkSym}): untyped 5 diff --git a/p_accessors.html b/p_accessors.html new file mode 100644 index 000000000..63ce95b28 --- /dev/null +++ b/p_accessors.html @@ -0,0 +1,744 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/p_accessors + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/p_accessors + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/p_accessors

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

Iterators

+ +
+

Types

+
+
+
IterKind = enum
+  Values, Iter_Values, Offset_Values
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc atIndex[T](t: Tensor[T]; idx: varargs[int]): T {.noSideEffect, inline.}
+
+ + Get the value at input coordinates This used to be [] before slicing was implemented +   Source +Edit + +
+
+
+
proc atIndex[T](t: var Tensor[T]; idx: varargs[int]): var T {.noSideEffect,
+    inline.}
+
+ + Get the value at input coordinates This allows inplace operators t1,2 += 10 syntax +   Source +Edit + +
+
+ +
+
+
+
proc atIndexMut[T](t: var Tensor[T]; idx: varargs[int]; val: T) {.noSideEffect,
+    inline.}
+
+ + Set the value at input coordinates This used to be []= before slicing was implemented +   Source +Edit + +
+
+ +
+
+
+
proc getContiguousIndex[T](t: Tensor[T]; idx: int): int {.noSideEffect, inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc getIndex[T](t: Tensor[T]; idx: varargs[int]): int {.noSideEffect, inline.}
+
+ + Convert i, j, k, l ... to the proper index. +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template advanceStridedIteration(coord, backstrides, iter_pos, t, iter_offset,
+                                 iter_size: typed): untyped
+
+ + Computing the next position +   Source +Edit + +
+
+ +
+
+
+
template dualStridedIteration(strider: IterKind;
+                              t1, t2, iter_offset, iter_size: typed): untyped
+
+ + Iterate over two Tensors, displaying data as in C order, whatever the strides. +   Source +Edit + +
+
+ +
+
+
+
template dualStridedIterationYield(strider: IterKind; t1data, t2data, i,
+    t1_iter_pos, t2_iter_pos: typed)
+
+ + Iterator the return value +   Source +Edit + +
+
+ +
+
+
+
template initStridedIteration(coord, backstrides, iter_pos: untyped;
+                              t, iter_offset, iter_size: typed): untyped
+
+ + Iterator init +   Source +Edit + +
+
+ +
+
+
+
template stridedCoordsIteration(t, iter_offset, iter_size: typed): untyped
+
+ + Iterate over a Tensor, displaying data as in C order, whatever the strides. (coords) +   Source +Edit + +
+
+ +
+
+
+
template stridedIteration(strider: IterKind; t, iter_offset, iter_size: typed): untyped
+
+ + Iterate over a Tensor, displaying data as in C order, whatever the strides. +   Source +Edit + +
+
+ +
+
+
+
template stridedIterationYield(strider: IterKind; data, i, iter_pos: typed)
+
+ + Iterator the return value +   Source +Edit + +
+
+ +
+
+
+
template tripleStridedIteration(strider: IterKind;
+                                t1, t2, t3, iter_offset, iter_size: typed): untyped
+
+ + Iterate over two Tensors, displaying data as in C order, whatever the strides. +   Source +Edit + +
+
+ +
+
+
+
template tripleStridedIterationYield(strider: IterKind; t1data, t2data, t3data,
+    i, t1_iter_pos, t2_iter_pos, t3_iter_pos: typed)
+
+ + Iterator the return value +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_accessors.idx b/p_accessors.idx new file mode 100644 index 000000000..2d2026b2c --- /dev/null +++ b/p_accessors.idx @@ -0,0 +1,20 @@ +nimTitle p_accessors p_accessors.html module src/arraymancer/tensor/private/p_accessors 0 +nim getIndex p_accessors.html#getIndex,Tensor[T],varargs[int] proc getIndex[T](t: Tensor[T]; idx: varargs[int]): int 60 +nim getContiguousIndex p_accessors.html#getContiguousIndex,Tensor[T],int proc getContiguousIndex[T](t: Tensor[T]; idx: int): int 69 +nim atIndex p_accessors.html#atIndex,Tensor[T],varargs[int] proc atIndex[T](t: Tensor[T]; idx: varargs[int]): T 78 +nim atIndex p_accessors.html#atIndex,Tensor[T],varargs[int]_2 proc atIndex[T](t: var Tensor[T]; idx: varargs[int]): var T 86 +nim atIndexMut p_accessors.html#atIndexMut,Tensor[T],varargs[int],T proc atIndexMut[T](t: var Tensor[T]; idx: varargs[int]; val: T) 94 +nim Values p_accessors.html#Values IterKind.Values 131 +nim Iter_Values p_accessors.html#Iter_Values IterKind.Iter_Values 131 +nim Offset_Values p_accessors.html#Offset_Values IterKind.Offset_Values 131 +nim IterKind p_accessors.html#IterKind enum IterKind 131 +nim initStridedIteration p_accessors.html#initStridedIteration.t,untyped,untyped,untyped,typed,typed,typed template initStridedIteration(coord, backstrides, iter_pos: untyped;\n t, iter_offset, iter_size: typed): untyped 134 +nim advanceStridedIteration p_accessors.html#advanceStridedIteration.t,typed,typed,typed,typed,typed,typed template advanceStridedIteration(coord, backstrides, iter_pos, t, iter_offset, iter_size: typed): untyped 152 +nim stridedIterationYield p_accessors.html#stridedIterationYield.t,IterKind,typed,typed,typed template stridedIterationYield(strider: IterKind; data, i, iter_pos: typed) 163 +nim stridedIteration p_accessors.html#stridedIteration.t,IterKind,typed,typed,typed template stridedIteration(strider: IterKind; t, iter_offset, iter_size: typed): untyped 169 +nim stridedCoordsIteration 
p_accessors.html#stridedCoordsIteration.t,typed,typed,typed template stridedCoordsIteration(t, iter_offset, iter_size: typed): untyped 189 +nim dualStridedIterationYield p_accessors.html#dualStridedIterationYield.t,IterKind,typed,typed,typed,typed,typed template dualStridedIterationYield(strider: IterKind;\n t1data, t2data, i, t1_iter_pos, t2_iter_pos: typed) 205 +nim dualStridedIteration p_accessors.html#dualStridedIteration.t,IterKind,typed,typed,typed,typed template dualStridedIteration(strider: IterKind; t1, t2, iter_offset, iter_size: typed): untyped 211 +nim tripleStridedIterationYield p_accessors.html#tripleStridedIterationYield.t,IterKind,typed,typed,typed,typed,typed,typed,typed template tripleStridedIterationYield(strider: IterKind; t1data, t2data, t3data, i,\n t1_iter_pos, t2_iter_pos, t3_iter_pos: typed) 248 +nim tripleStridedIteration p_accessors.html#tripleStridedIteration.t,IterKind,typed,typed,typed,typed,typed template tripleStridedIteration(strider: IterKind;\n t1, t2, t3, iter_offset, iter_size: typed): untyped 254 +nimgrp atindex p_accessors.html#atIndex-procs-all proc 78 diff --git a/p_accessors_macros_desugar.html b/p_accessors_macros_desugar.html new file mode 100644 index 000000000..cec99b79a --- /dev/null +++ b/p_accessors_macros_desugar.html @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/p_accessors_macros_desugar + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/p_accessors_macros_desugar + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/p_accessors_macros_desugar

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Macros

+
+
+
+
macro desugar(args: untyped): void
+
+ + Transform all syntactic sugar in arguments to integer or SteppedSlices It will then be dispatched to "atIndex" (if specific integers) or "slicer" if there are SteppedSlices +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_accessors_macros_desugar.idx b/p_accessors_macros_desugar.idx new file mode 100644 index 000000000..11c0fc5c3 --- /dev/null +++ b/p_accessors_macros_desugar.idx @@ -0,0 +1,2 @@ +nimTitle p_accessors_macros_desugar p_accessors_macros_desugar.html module src/arraymancer/tensor/private/p_accessors_macros_desugar 0 +nim desugar p_accessors_macros_desugar.html#desugar.m,untyped macro desugar(args: untyped): void ## Traverse top tree nodes and one-hot-encode the different conditions 30 diff --git a/p_accessors_macros_read.html b/p_accessors_macros_read.html new file mode 100644 index 000000000..7c257eecf --- /dev/null +++ b/p_accessors_macros_read.html @@ -0,0 +1,651 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/p_accessors_macros_read + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/p_accessors_macros_read + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/p_accessors_macros_read

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
FancySelectorKind = enum
+  FancyNone, FancyIndex, FancyMaskFull, FancyMaskAxis, FancyUnknownFull,
+  FancyUnknownAxis
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc getFancySelector(ast: NimNode; axis: var int; selector: var NimNode): FancySelectorKind {.
+    ...raises: [], tags: [], forbids: [].}
+
+ + Detect indexing in the form +

or with the index selector being a tensor

+ +   Source +Edit + +
+
+ +
+
+
+
proc sliceDispatchImpl(result: NimNode; args: NimNode; isRead: bool) {.
+    ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc slicer[T](t: AnyTensor[T]; ellipsis: Ellipsis;
+               slices: openArray[SteppedSlice]): AnyTensor[T] {.noinit,
+    noSideEffect.}
+
+ + Take a Tensor, Ellipsis and SteppedSlices Returns: A copy of the original Tensor Offset and strides are changed to achieve the desired effect. +   Source +Edit + +
+
+
+
proc slicer[T](t: AnyTensor[T]; slices1: openArray[SteppedSlice];
+               ellipsis: Ellipsis; slices2: openArray[SteppedSlice]): AnyTensor[
+    T] {.noinit, noSideEffect.}
+
+ + Take a Tensor, Ellipsis and SteppedSlices Returns: A copy of the original Tensor Offset and strides are changed to achieve the desired effect. +   Source +Edit + +
+
+
+
proc slicer[T](t: AnyTensor[T]; slices: openArray[SteppedSlice]): AnyTensor[T] {.
+    noinit, noSideEffect.}
+
+ + Take a Tensor and SteppedSlices Returns: A copy of the original Tensor Offset and strides are changed to achieve the desired effect. +   Source +Edit + +
+
+
+
proc slicer[T](t: AnyTensor[T]; slices: openArray[SteppedSlice];
+               ellipsis: Ellipsis): AnyTensor[T] {.noinit, noSideEffect.}
+
+ + Take a Tensor, SteppedSlices and Ellipsis Returns: A copy of the original Tensor Offset and strides are changed to achieve the desired effect. +   Source +Edit + +
+
+
+
proc slicer[T](t: Tensor[T]; slices: ArrayOfSlices): Tensor[T] {.noinit,
+    noSideEffect.}
+
+ + Take a Tensor and SteppedSlices Returns: A view of the original Tensor Offset and strides are changed to achieve the desired effect. Warning: mutating the result will mutate the original As such a var Tensor is required +   Source +Edit + +
+
+ +
+ +
+
+
+

Macros

+
+
+
+
macro slice_typed_dispatch(t: typed; args: varargs[typed]): untyped
+
+ + Typed macro so that isAllInt has typed context and we can dispatch. If args are all int, we dispatch to atIndex and return T Else, all ints are converted to SteppedSlices and we return a Tensor. Note, normal slices and _ were already converted in the [] macro TODO in total we do 3 passes over the list of arguments :/. It is done only at compile time though +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template slicerImpl[T](result: AnyTensor[T] | var AnyTensor[T];
+                       slices: ArrayOfSlices): untyped
+
+ + Slicing routine +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_accessors_macros_read.idx b/p_accessors_macros_read.idx new file mode 100644 index 000000000..fee6add9f --- /dev/null +++ b/p_accessors_macros_read.idx @@ -0,0 +1,18 @@ +nimTitle p_accessors_macros_read p_accessors_macros_read.html module src/arraymancer/tensor/private/p_accessors_macros_read 0 +nim slicerImpl p_accessors_macros_read.html#slicerImpl.t,,ArrayOfSlices template slicerImpl[T](result: AnyTensor[T] | var AnyTensor[T]; slices: ArrayOfSlices): untyped 28 +nim slicer p_accessors_macros_read.html#slicer,AnyTensor[T],openArray[SteppedSlice] proc slicer[T](t: AnyTensor[T]; slices: openArray[SteppedSlice]): AnyTensor[T] 60 +nim slicer p_accessors_macros_read.html#slicer,AnyTensor[T],openArray[SteppedSlice],Ellipsis proc slicer[T](t: AnyTensor[T]; slices: openArray[SteppedSlice]; ellipsis: Ellipsis): AnyTensor[\n T] 69 +nim slicer p_accessors_macros_read.html#slicer,AnyTensor[T],Ellipsis,openArray[SteppedSlice] proc slicer[T](t: AnyTensor[T]; ellipsis: Ellipsis; slices: openArray[SteppedSlice]): AnyTensor[\n T] 81 +nim slicer p_accessors_macros_read.html#slicer,AnyTensor[T],openArray[SteppedSlice],Ellipsis,openArray[SteppedSlice] proc slicer[T](t: AnyTensor[T]; slices1: openArray[SteppedSlice]; ellipsis: Ellipsis;\n slices2: openArray[SteppedSlice]): AnyTensor[T] 94 +nim slicer p_accessors_macros_read.html#slicer,Tensor[T],ArrayOfSlices proc slicer[T](t: Tensor[T]; slices: ArrayOfSlices): Tensor[T] 110 +nim FancyNone p_accessors_macros_read.html#FancyNone FancySelectorKind.FancyNone 124 +nim FancyIndex p_accessors_macros_read.html#FancyIndex FancySelectorKind.FancyIndex 124 +nim FancyMaskFull p_accessors_macros_read.html#FancyMaskFull FancySelectorKind.FancyMaskFull 124 +nim FancyMaskAxis p_accessors_macros_read.html#FancyMaskAxis FancySelectorKind.FancyMaskAxis 124 +nim FancyUnknownFull p_accessors_macros_read.html#FancyUnknownFull FancySelectorKind.FancyUnknownFull 124 +nim FancyUnknownAxis p_accessors_macros_read.html#FancyUnknownAxis 
FancySelectorKind.FancyUnknownAxis 124 +nim FancySelectorKind p_accessors_macros_read.html#FancySelectorKind enum FancySelectorKind 124 +nim getFancySelector p_accessors_macros_read.html#getFancySelector,NimNode,int,NimNode proc getFancySelector(ast: NimNode; axis: var int; selector: var NimNode): FancySelectorKind 133 +nim sliceDispatchImpl p_accessors_macros_read.html#sliceDispatchImpl,NimNode,NimNode,bool proc sliceDispatchImpl(result: NimNode; args: NimNode; isRead: bool) 198 +nim slice_typed_dispatch p_accessors_macros_read.html#slice_typed_dispatch.m,typed,varargs[typed] macro slice_typed_dispatch(t: typed; args: varargs[typed]): untyped 225 +nimgrp slicer p_accessors_macros_read.html#slicer-procs-all proc 60 diff --git a/p_accessors_macros_write.html b/p_accessors_macros_write.html new file mode 100644 index 000000000..53e11f9bf --- /dev/null +++ b/p_accessors_macros_write.html @@ -0,0 +1,672 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/p_accessors_macros_write + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/p_accessors_macros_write + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/p_accessors_macros_write

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc slicerMut[T](t: var Tensor[T]; ellipsis: Ellipsis;
+                  slices: openArray[SteppedSlice]; oa: openArray) {.noSideEffect.}
+
+ + Assign value from openArrays The openArray must have the same shape as the slice +   Source +Edit + +
+
+
+
proc slicerMut[T](t: var Tensor[T]; ellipsis: Ellipsis;
+                  slices: openArray[SteppedSlice]; t2: Tensor[T])
+
+ + Assign the value to the whole slice +   Source +Edit + +
+
+
+
proc slicerMut[T](t: var Tensor[T]; ellipsis: Ellipsis;
+                  slices: openArray[SteppedSlice]; val: T) {.noSideEffect.}
+
+ + Take a var Tensor, SteppedSlices, Ellipsis and a value Assign the value to the whole slice +   Source +Edit + +
+
+
+
proc slicerMut[T](t: var Tensor[T]; slices1: openArray[SteppedSlice];
+                  ellipsis: Ellipsis; slices2: openArray[SteppedSlice];
+                  oa: openArray) {.noSideEffect.}
+
+ + Assign value from openArrays The openArray must have the same shape as the slice +   Source +Edit + +
+
+
+
proc slicerMut[T](t: var Tensor[T]; slices1: openArray[SteppedSlice];
+                  ellipsis: Ellipsis; slices2: openArray[SteppedSlice];
+                  t2: Tensor[T])
+
+ + Assign the value to the whole slice +   Source +Edit + +
+
+
+
proc slicerMut[T](t: var Tensor[T]; slices1: openArray[SteppedSlice];
+                  ellipsis: Ellipsis; slices2: openArray[SteppedSlice]; val: T) {.
+    noSideEffect.}
+
+ + Take a var Tensor, Ellipsis, SteppedSlices, Ellipsis and a value Assign the value to the whole slice +   Source +Edit + +
+
+
+
proc slicerMut[T](t: var Tensor[T]; slices: openArray[SteppedSlice];
+                  ellipsis: Ellipsis; oa: openArray) {.noSideEffect.}
+
+ + Assign value from openArrays The openArray must have the same shape as the slice +   Source +Edit + +
+
+
+
proc slicerMut[T](t: var Tensor[T]; slices: openArray[SteppedSlice];
+                  ellipsis: Ellipsis; t2: Tensor[T])
+
+ + Assign the value to the whole slice +   Source +Edit + +
+
+
+
proc slicerMut[T](t: var Tensor[T]; slices: openArray[SteppedSlice];
+                  ellipsis: Ellipsis; val: T) {.noSideEffect.}
+
+ + Take a var Tensor, SteppedSlices, Ellipsis and a value Assign the value to the whole slice +   Source +Edit + +
+
+
+
proc slicerMut[T](t: var Tensor[T]; slices: openArray[SteppedSlice];
+                  oa: openArray) {.noSideEffect.}
+
+ + Assign value from openArrays The openArray must have the same shape as the slice +   Source +Edit + +
+
+
+
proc slicerMut[T](t: var Tensor[T]; slices: openArray[SteppedSlice];
+                  t2: Tensor[T])
+
+ + Assign the value to the whole slice +   Source +Edit + +
+
+
+
proc slicerMut[T](t: var Tensor[T]; slices: openArray[SteppedSlice]; val: T) {.
+    noSideEffect.}
+
+ + Assign the value to the whole slice +   Source +Edit + +
+
+ +
+ +
+
+
+

Macros

+
+
+
+
macro slice_typed_dispatch_mut(t: typed; args: varargs[typed]; val: typed): untyped
+
+ + Assign val to Tensor T at slice/position args +   Source +Edit + +
+
+ +
+
+
+
macro slice_typed_dispatch_var(t: typed; args: varargs[typed]): untyped
+
+ + Typed macro so that isAllInt has typed context and we can dispatch. If args are all int, we dispatch to atIndex and return T Else, all ints are converted to SteppedSlices and we return a Tensor. Note, normal slices and _ were already converted in the [] macro TODO in total we do 3 passes over the list of arguments :/. It is done only at compile time though +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_accessors_macros_write.idx b/p_accessors_macros_write.idx new file mode 100644 index 000000000..54a68031f --- /dev/null +++ b/p_accessors_macros_write.idx @@ -0,0 +1,16 @@ +nimTitle p_accessors_macros_write p_accessors_macros_write.html module src/arraymancer/tensor/private/p_accessors_macros_write 0 +nim slicerMut p_accessors_macros_write.html#slicerMut,Tensor[T],openArray[SteppedSlice],T proc slicerMut[T](t: var Tensor[T]; slices: openArray[SteppedSlice]; val: T) 35 +nim slicerMut p_accessors_macros_write.html#slicerMut,Tensor[T],openArray[SteppedSlice],Ellipsis,T proc slicerMut[T](t: var Tensor[T]; slices: openArray[SteppedSlice];\n ellipsis: Ellipsis; val: T) 39 +nim slicerMut p_accessors_macros_write.html#slicerMut,Tensor[T],Ellipsis,openArray[SteppedSlice],T proc slicerMut[T](t: var Tensor[T]; ellipsis: Ellipsis;\n slices: openArray[SteppedSlice]; val: T) 50 +nim slicerMut p_accessors_macros_write.html#slicerMut,Tensor[T],openArray[SteppedSlice],Ellipsis,openArray[SteppedSlice],T proc slicerMut[T](t: var Tensor[T]; slices1: openArray[SteppedSlice];\n ellipsis: Ellipsis; slices2: openArray[SteppedSlice]; val: T) 61 +nim slicerMut p_accessors_macros_write.html#slicerMut,Tensor[T],openArray[SteppedSlice],openArray proc slicerMut[T](t: var Tensor[T]; slices: openArray[SteppedSlice]; oa: openArray) 95 +nim slicerMut p_accessors_macros_write.html#slicerMut,Tensor[T],openArray[SteppedSlice],Ellipsis,openArray proc slicerMut[T](t: var Tensor[T]; slices: openArray[SteppedSlice];\n ellipsis: Ellipsis; oa: openArray) 100 +nim slicerMut p_accessors_macros_write.html#slicerMut,Tensor[T],Ellipsis,openArray[SteppedSlice],openArray proc slicerMut[T](t: var Tensor[T]; ellipsis: Ellipsis;\n slices: openArray[SteppedSlice]; oa: openArray) 110 +nim slicerMut p_accessors_macros_write.html#slicerMut,Tensor[T],openArray[SteppedSlice],Ellipsis,openArray[SteppedSlice],openArray proc slicerMut[T](t: var Tensor[T]; slices1: openArray[SteppedSlice];\n ellipsis: 
Ellipsis; slices2: openArray[SteppedSlice]; oa: openArray) 121 +nim slicerMut p_accessors_macros_write.html#slicerMut,Tensor[T],openArray[SteppedSlice],Tensor[T] proc slicerMut[T](t: var Tensor[T]; slices: openArray[SteppedSlice]; t2: Tensor[T]) 148 +nim slicerMut p_accessors_macros_write.html#slicerMut,Tensor[T],openArray[SteppedSlice],Ellipsis,Tensor[T] proc slicerMut[T](t: var Tensor[T]; slices: openArray[SteppedSlice];\n ellipsis: Ellipsis; t2: Tensor[T]) 152 +nim slicerMut p_accessors_macros_write.html#slicerMut,Tensor[T],Ellipsis,openArray[SteppedSlice],Tensor[T] proc slicerMut[T](t: var Tensor[T]; ellipsis: Ellipsis;\n slices: openArray[SteppedSlice]; t2: Tensor[T]) 161 +nim slicerMut p_accessors_macros_write.html#slicerMut,Tensor[T],openArray[SteppedSlice],Ellipsis,openArray[SteppedSlice],Tensor[T] proc slicerMut[T](t: var Tensor[T]; slices1: openArray[SteppedSlice];\n ellipsis: Ellipsis; slices2: openArray[SteppedSlice]; t2: Tensor[T]) 170 +nim slice_typed_dispatch_mut p_accessors_macros_write.html#slice_typed_dispatch_mut.m,typed,varargs[typed],typed macro slice_typed_dispatch_mut(t: typed; args: varargs[typed]; val: typed): untyped 185 +nim slice_typed_dispatch_var p_accessors_macros_write.html#slice_typed_dispatch_var.m,typed,varargs[typed] macro slice_typed_dispatch_var(t: typed; args: varargs[typed]): untyped 314 +nimgrp slicermut p_accessors_macros_write.html#slicerMut-procs-all proc 35 diff --git a/p_activation.html b/p_activation.html new file mode 100644 index 000000000..113ce9201 --- /dev/null +++ b/p_activation.html @@ -0,0 +1,435 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/private/p_activation + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/private/p_activation + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/private/p_activation

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Procs

+
+
+
+
proc sigmoid[T: SomeFloat](x: T): T {.inline, noSideEffect.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_activation.idx b/p_activation.idx new file mode 100644 index 000000000..5d8c976f3 --- /dev/null +++ b/p_activation.idx @@ -0,0 +1,2 @@ +nimTitle p_activation p_activation.html module src/arraymancer/nn_primitives/private/p_activation 0 +nim sigmoid p_activation.html#sigmoid,T proc sigmoid[T: SomeFloat](x: T): T 17 diff --git a/p_checks.html b/p_checks.html new file mode 100644 index 000000000..4b355a62c --- /dev/null +++ b/p_checks.html @@ -0,0 +1,772 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/p_checks + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/p_checks + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/p_checks

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
func check_axis_index(t: AnyTensor; axis, index, len: Natural) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func check_concat(t1, t2: Tensor; axis: int) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func check_contiguous_index(t: Tensor; idx: int) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func check_dot_prod(a, b: AnyTensor) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc check_elementwise[T, U](a: ClTensor[T]; b: ClTensor[U]) {.noSideEffect,
+    inline.}
+
+ + Check if element-wise operations can be applied to 2 Tensors +   Source +Edit + +
+
+
+
proc check_elementwise[T, U](a: CudaTensor[T]; b: CudaTensor[U]) {.noSideEffect,
+    inline.}
+
+ + Check if element-wise operations can be applied to 2 Tensors +   Source +Edit + +
+
+
+
func check_elementwise[T, U](a: Tensor[T]; b: Tensor[U]) {.inline.}
+
+ + Check if element-wise operations can be applied to 2 Tensors +   Source +Edit + +
+
+ +
+
+
+
func check_index(t: Tensor; idx: varargs[int]) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func check_matmat(a, b: AnyTensor) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func check_matvec(a, b: AnyTensor) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func check_nested_elements(shape: Metadata; len: int) {.inline, ...raises: [],
+    tags: [], forbids: [].}
+
+ + Compare the detected shape from flatten with the real length of the data Input: -- A shape (sequence of int) -- A length (int) +   Source +Edit + +
+
+ +
+
+
+
func check_reshape(t: AnyTensor; new_shape: Metadata) {.inline.}
+
+ + +   Source +Edit + +
+
+
+
func check_reshape(t: AnyTensor; new_shape: varargs[int]) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func check_shape(a: Tensor; b: Tensor | openArray) {.inline.}
+
+ + Compare shape +   Source +Edit + +
+
+ +
+
+
+
func check_size[T, U](a: Tensor[T]; b: Tensor[U]) {.inline.}
+
+ + Check if the total number of elements match +   Source +Edit + +
+
+ +
+
+
+
func check_squeezeAxis(t: AnyTensor; axis: int) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func check_start_end(a, b: int; dim_size: int) {.inline, ...raises: [], tags: [],
+    forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func check_steps(a, b, step: int) {.inline, ...raises: [], tags: [], forbids: [].}
+
+ + Though it might be convenient to automatically step in the correct direction like in Python I choose not to do it as this might introduce the typical silent bugs typechecking/Nim is helping avoid. +   Source +Edit + +
+
+ +
+
+
+
func check_unsqueezeAxis(t: AnyTensor; axis: int) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_checks.idx b/p_checks.idx new file mode 100644 index 000000000..bd19f8980 --- /dev/null +++ b/p_checks.idx @@ -0,0 +1,22 @@ +nimTitle p_checks p_checks.html module src/arraymancer/tensor/private/p_checks 0 +nim check_elementwise p_checks.html#check_elementwise,CudaTensor[T],CudaTensor[U] proc check_elementwise[T, U](a: CudaTensor[T]; b: CudaTensor[U]) 15 +nim check_elementwise p_checks.html#check_elementwise,ClTensor[T],ClTensor[U] proc check_elementwise[T, U](a: ClTensor[T]; b: ClTensor[U]) 15 +nim check_nested_elements p_checks.html#check_nested_elements,Metadata,int proc check_nested_elements(shape: Metadata; len: int) 24 +nim check_index p_checks.html#check_index,Tensor,varargs[int] proc check_index(t: Tensor; idx: varargs[int]) 32 +nim check_contiguous_index p_checks.html#check_contiguous_index,Tensor,int proc check_contiguous_index(t: Tensor; idx: int) 48 +nim check_elementwise p_checks.html#check_elementwise,Tensor[T],Tensor[U] proc check_elementwise[T, U](a: Tensor[T]; b: Tensor[U]) 55 +nim check_size p_checks.html#check_size,Tensor[T],Tensor[U] proc check_size[T, U](a: Tensor[T]; b: Tensor[U]) 61 +nim check_steps p_checks.html#check_steps,int,int,int proc check_steps(a, b, step: int) 70 +nim check_start_end p_checks.html#check_start_end,int,int,int proc check_start_end(a, b: int; dim_size: int) 87 +nim check_shape p_checks.html#check_shape,Tensor, proc check_shape(a: Tensor; b: Tensor | openArray) 96 +nim check_reshape p_checks.html#check_reshape,AnyTensor,Metadata proc check_reshape(t: AnyTensor; new_shape: Metadata) 109 +nim check_reshape p_checks.html#check_reshape,AnyTensor,varargs[int] proc check_reshape(t: AnyTensor; new_shape: varargs[int]) 117 +nim check_concat p_checks.html#check_concat,Tensor,Tensor,int proc check_concat(t1, t2: Tensor; axis: int) 126 +nim check_squeezeAxis p_checks.html#check_squeezeAxis,AnyTensor,int proc check_squeezeAxis(t: AnyTensor; axis: int) 133 +nim check_unsqueezeAxis 
p_checks.html#check_unsqueezeAxis,AnyTensor,int proc check_unsqueezeAxis(t: AnyTensor; axis: int) 137 +nim check_dot_prod p_checks.html#check_dot_prod,AnyTensor,AnyTensor proc check_dot_prod(a, b: AnyTensor) 141 +nim check_matmat p_checks.html#check_matmat,AnyTensor,AnyTensor proc check_matmat(a, b: AnyTensor) 145 +nim check_matvec p_checks.html#check_matvec,AnyTensor,AnyTensor proc check_matvec(a, b: AnyTensor) 155 +nim check_axis_index p_checks.html#check_axis_index,AnyTensor,Natural,Natural,Natural proc check_axis_index(t: AnyTensor; axis, index, len: Natural) 165 +nimgrp checkelementwise p_checks.html#check_elementwise-procs-all proc 15 +nimgrp checkreshape p_checks.html#check_reshape-procs-all proc 109 diff --git a/p_complex.html b/p_complex.html new file mode 100644 index 000000000..c1ccd7fa5 --- /dev/null +++ b/p_complex.html @@ -0,0 +1,513 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/p_complex + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/p_complex + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/p_complex

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Converters

+
+
+
+
converter Complex32[T: SomeNumber](x: T): Complex[float32]
+
+ + +   Source +Edit + +
+
+ +
+
+
+
converter Complex64[T: SomeNumber](x: T): Complex[float64]
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template numberOne(T: type Complex[float32]): Complex[float32]
+
+ + +   Source +Edit + +
+
+
+
template numberOne(T: type Complex[float64]): Complex[float64]
+
+ + +   Source +Edit + +
+
+
+
template numberOne(T: type SomeNumber): SomeNumber
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_complex.idx b/p_complex.idx new file mode 100644 index 000000000..ac5828e3b --- /dev/null +++ b/p_complex.idx @@ -0,0 +1,7 @@ +nimTitle p_complex p_complex.html module src/arraymancer/tensor/private/p_complex 0 +nim numberOne p_complex.html#numberOne.t,typeSomeNumber template numberOne(T: type SomeNumber): SomeNumber 3 +nim numberOne p_complex.html#numberOne.t,typeComplex[float32] template numberOne(T: type Complex[float32]): Complex[float32] 4 +nim numberOne p_complex.html#numberOne.t,typeComplex[float64] template numberOne(T: type Complex[float64]): Complex[float64] 5 +nim Complex64 p_complex.html#Complex64.c,T converter Complex64[T: SomeNumber](x: T): Complex[float64] 7 +nim Complex32 p_complex.html#Complex32.c,T converter Complex32[T: SomeNumber](x: T): Complex[float32] 11 +nimgrp numberone p_complex.html#numberOne-templates-all template 3 diff --git a/p_display.html b/p_display.html new file mode 100644 index 000000000..a03c39d5b --- /dev/null +++ b/p_display.html @@ -0,0 +1,469 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/p_display + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/p_display + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/p_display

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
func disp2d[T](t: Tensor[T]; alignBy = 6; alignSpacing = 3; precision = -1): string
+
+ + Display a 2D-tensor +   Source +Edit + +
+
+ +
+
+
+
proc prettyImpl[T](t: Tensor[T]; inputRank = 0; alignBy = 0; alignSpacing = 4;
+                   precision = -1): string
+
+ +

Pretty printing implementation that aligns N dimensional tensors as a table. Odd dimensions are stacked horizontally and even dimensions vertically.

+

inputRank is used to keep track of the original tensor's rank. alignBy is the spacing given each column in a sub-tensor. alignSpacing is the amount of space we want at least between the different columns. It's given separately for the special case of first columns (as they are left aligned and all others right aligned).

+

precision sets the floating point precision.

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_display.idx b/p_display.idx new file mode 100644 index 000000000..4990aae6c --- /dev/null +++ b/p_display.idx @@ -0,0 +1,3 @@ +nimTitle p_display p_display.html module src/arraymancer/tensor/private/p_display 0 +nim disp2d p_display.html#disp2d,Tensor[T],int,int,int proc disp2d[T](t: Tensor[T]; alignBy = 6; alignSpacing = 3; precision = -1): string 81 +nim prettyImpl p_display.html#prettyImpl,Tensor[T],int,int,int,int proc prettyImpl[T](t: Tensor[T]; inputRank = 0; alignBy = 0; alignSpacing = 4;\n precision = -1): string 163 diff --git a/p_empty_tensors.html b/p_empty_tensors.html new file mode 100644 index 000000000..937ec10e6 --- /dev/null +++ b/p_empty_tensors.html @@ -0,0 +1,477 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/p_empty_tensors + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/p_empty_tensors + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/p_empty_tensors

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Macros

+
+
+
+
macro returnEmptyIfEmpty(tensors: varargs[untyped]): untyped
+
+ + If any of the argument tensors are empty return an empty tensor +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template skipIfEmpty(t: typed): untyped
+
+ + Skip the iteration of a "for-loop" or "while-loop" if the tensor is empty +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_empty_tensors.idx b/p_empty_tensors.idx new file mode 100644 index 000000000..ab793af09 --- /dev/null +++ b/p_empty_tensors.idx @@ -0,0 +1,3 @@ +nimTitle p_empty_tensors p_empty_tensors.html module src/arraymancer/tensor/private/p_empty_tensors 0 +nim skipIfEmpty p_empty_tensors.html#skipIfEmpty.t,typed template skipIfEmpty(t: typed): untyped 32 +nim returnEmptyIfEmpty p_empty_tensors.html#returnEmptyIfEmpty.m,varargs[untyped] macro returnEmptyIfEmpty(tensors: varargs[untyped]): untyped 38 diff --git a/p_init_cuda.html b/p_init_cuda.html new file mode 100644 index 000000000..27160e892 --- /dev/null +++ b/p_init_cuda.html @@ -0,0 +1,464 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/p_init_cuda + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/p_init_cuda + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/p_init_cuda

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc newCudaTensor[T: SomeFloat](shape: Metadata; layout: OrderType = colMajor): CudaTensor[
+    T] {.noinit, noSideEffect.}
+
+ + +   Source +Edit + +
+
+
+
proc newCudaTensor[T: SomeFloat](shape: varargs[int];
+                                 layout: OrderType = colMajor): CudaTensor[T] {.
+    noinit, noSideEffect.}
+
+ + Internal proc Allocate a CudaTensor WARNING: The Cuda memory is not initialized to 0 +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_init_cuda.idx b/p_init_cuda.idx new file mode 100644 index 000000000..18c35ce98 --- /dev/null +++ b/p_init_cuda.idx @@ -0,0 +1,4 @@ +nimTitle p_init_cuda p_init_cuda.html module src/arraymancer/tensor/private/p_init_cuda 0 +nim newCudaTensor p_init_cuda.html#newCudaTensor,varargs[int],OrderType proc newCudaTensor[T: SomeFloat](shape: varargs[int]; layout: OrderType = colMajor): CudaTensor[\n T] 29 +nim newCudaTensor p_init_cuda.html#newCudaTensor,Metadata,OrderType proc newCudaTensor[T: SomeFloat](shape: Metadata; layout: OrderType = colMajor): CudaTensor[\n T] 42 +nimgrp newcudatensor p_init_cuda.html#newCudaTensor-procs-all proc 29 diff --git a/p_init_opencl.html b/p_init_opencl.html new file mode 100644 index 000000000..591fa738c --- /dev/null +++ b/p_init_opencl.html @@ -0,0 +1,463 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/p_init_opencl + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/p_init_opencl + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/p_init_opencl

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc newClTensor[T: SomeFloat](shape: Metadata; layout: OrderType = rowMajor): ClTensor[
+    T] {.noinit.}
+
+ + +   Source +Edit + +
+
+
+
proc newClTensor[T: SomeFloat](shape: varargs[int]; layout: OrderType = rowMajor): ClTensor[
+    T] {.noinit.}
+
+ + Internal proc Allocate a ClTensor WARNING: The OpenCL memory is not initialized to 0 +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_init_opencl.idx b/p_init_opencl.idx new file mode 100644 index 000000000..0d1c38836 --- /dev/null +++ b/p_init_opencl.idx @@ -0,0 +1,4 @@ +nimTitle p_init_opencl p_init_opencl.html module src/arraymancer/tensor/private/p_init_opencl 0 +nim newClTensor p_init_opencl.html#newClTensor,varargs[int],OrderType proc newClTensor[T: SomeFloat](shape: varargs[int]; layout: OrderType = rowMajor): ClTensor[\n T] 29 +nim newClTensor p_init_opencl.html#newClTensor,Metadata,OrderType proc newClTensor[T: SomeFloat](shape: Metadata; layout: OrderType = rowMajor): ClTensor[\n T] 38 +nimgrp newcltensor p_init_opencl.html#newClTensor-procs-all proc 29 diff --git a/p_kernels_interface_cuda.html b/p_kernels_interface_cuda.html new file mode 100644 index 000000000..746a7e689 --- /dev/null +++ b/p_kernels_interface_cuda.html @@ -0,0 +1,643 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/p_kernels_interface_cuda + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/p_kernels_interface_cuda + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/p_kernels_interface_cuda

+
+ +
+   Source +Edit + +
+ +

+ +
+

Templates

+
+
+
+
template cuda_assign_call[T: SomeFloat](kernel_name: untyped;
+                                        destination: var CudaTensor[T];
+                                        source: CudaTensor[T]): untyped
+
+ + Does the heavy-lifting to format the tensors for the cuda call +   Source +Edit + +
+
+ +
+
+
+
template cuda_assign_glue(kernel_name, op_name: string; binding_name: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template cuda_assignscal_call[T: SomeFloat](kernel_name: untyped;
+    destination: var CudaTensor[T]; val: T): untyped
+
+ + Does the heavy-lifting to format the tensors for the cuda call +   Source +Edit + +
+
+ +
+
+
+
template cuda_assignscal_glue(kernel_name, op_name: string;
+                              binding_name: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template cuda_binary_call[T: SomeFloat](kernel_name: untyped;
+                                        destination: var CudaTensor[T];
+                                        a, b: CudaTensor[T]): untyped
+
+ + Does the heavy-lifting to format the tensors for the cuda call +   Source +Edit + +
+
+ +
+
+
+
template cuda_binary_glue(kernel_name, op_name: string; binding_name: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template cuda_lscal_call[T: SomeFloat](kernel_name: untyped;
+                                       destination: var CudaTensor[T]; alpha: T;
+                                       source: CudaTensor[T]): untyped
+
+ + Does the heavy-lifting to format the tensors for the cuda call +   Source +Edit + +
+
+ +
+
+
+
template cuda_lscal_glue(kernel_name, op_name: string; binding_name: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template cuda_rscal_call[T: SomeFloat](kernel_name: untyped;
+                                       destination: var CudaTensor[T];
+                                       source: CudaTensor[T]; beta: T): untyped
+
+ + Does the heavy-lifting to format the tensors for the cuda call +   Source +Edit + +
+
+ +
+
+
+
template cuda_rscal_glue(kernel_name, op_name: string; binding_name: untyped): untyped
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_kernels_interface_cuda.idx b/p_kernels_interface_cuda.idx new file mode 100644 index 000000000..9e0164048 --- /dev/null +++ b/p_kernels_interface_cuda.idx @@ -0,0 +1,11 @@ +nimTitle p_kernels_interface_cuda p_kernels_interface_cuda.html module src/arraymancer/tensor/private/p_kernels_interface_cuda 0 +nim cuda_assign_glue p_kernels_interface_cuda.html#cuda_assign_glue.t,string,string,untyped template cuda_assign_glue(kernel_name, op_name: string; binding_name: untyped): untyped 40 +nim cuda_assign_call p_kernels_interface_cuda.html#cuda_assign_call.t,untyped,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] template cuda_assign_call[T: SomeFloat](kernel_name: untyped;\n destination: var CudaTensor[T];\n source: CudaTensor[T]): untyped 75 +nim cuda_binary_glue p_kernels_interface_cuda.html#cuda_binary_glue.t,string,string,untyped template cuda_binary_glue(kernel_name, op_name: string; binding_name: untyped): untyped 115 +nim cuda_binary_call p_kernels_interface_cuda.html#cuda_binary_call.t,untyped,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat] template cuda_binary_call[T: SomeFloat](kernel_name: untyped;\n destination: var CudaTensor[T];\n a, b: CudaTensor[T]): untyped 158 +nim cuda_rscal_glue p_kernels_interface_cuda.html#cuda_rscal_glue.t,string,string,untyped template cuda_rscal_glue(kernel_name, op_name: string; binding_name: untyped): untyped 203 +nim cuda_rscal_call p_kernels_interface_cuda.html#cuda_rscal_call.t,untyped,CudaTensor[T: SomeFloat],CudaTensor[T: SomeFloat],T template cuda_rscal_call[T: SomeFloat](kernel_name: untyped;\n destination: var CudaTensor[T];\n source: CudaTensor[T]; beta: T): untyped 241 +nim cuda_lscal_glue p_kernels_interface_cuda.html#cuda_lscal_glue.t,string,string,untyped template cuda_lscal_glue(kernel_name, op_name: string; binding_name: untyped): untyped 283 +nim cuda_lscal_call p_kernels_interface_cuda.html#cuda_lscal_call.t,untyped,CudaTensor[T: SomeFloat],T,CudaTensor[T: 
SomeFloat] template cuda_lscal_call[T: SomeFloat](kernel_name: untyped;\n destination: var CudaTensor[T]; alpha: T;\n source: CudaTensor[T]): untyped 321 +nim cuda_assignscal_glue p_kernels_interface_cuda.html#cuda_assignscal_glue.t,string,string,untyped template cuda_assignscal_glue(kernel_name, op_name: string; binding_name: untyped): untyped 361 +nim cuda_assignscal_call p_kernels_interface_cuda.html#cuda_assignscal_call.t,untyped,CudaTensor[T: SomeFloat],T template cuda_assignscal_call[T: SomeFloat](kernel_name: untyped;\n destination: var CudaTensor[T]; val: T): untyped 392 diff --git a/p_kernels_interface_opencl.html b/p_kernels_interface_opencl.html new file mode 100644 index 000000000..a1d592a12 --- /dev/null +++ b/p_kernels_interface_opencl.html @@ -0,0 +1,520 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/p_kernels_interface_opencl + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/p_kernels_interface_opencl + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/p_kernels_interface_opencl

+
+ +
+   Source +Edit + +
+ +

+
+

Templates

+
+
+
+
template gen_cl_apply2(kern_name, ctype, op: string): string
+
+ + Generates an OpenCL kernel for an elementwise in-place binary infix operation (like +=, -=, *.= or /.=) Input:
  • The C type
  • +
  • The C kernel name (this only helps debugging the C code)
  • +
  • The C operation (+=, -=, *.= or /.=)
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
template gen_cl_apply3(kern_name, ctype, op: string): string
+
+ + Generates an OpenCL kernel for an elementwise binary infix operations (like +, -, ...) Input:
  • The C type
  • +
  • The C kernel name (this only helps debugging the C code)
  • +
  • The C operation (+, -, ...)
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
template genClInfixOp(T: typedesc; ctype: string; procName: untyped;
+                      cName: string; cInfixOp: string;
+                      exported: static[bool] = true): untyped
+
+ + Generates binding to an OpenCL kernel for an elementwise binary infix operation (like +, -, ...) Input:
  • The Nim type of the elements of the input tensors
  • +
  • The equivalent C type
  • +
  • The Nim identifier of the resulting proc
  • +
  • The C kernel name (this only helps debugging the C code)
  • +
  • The C operation (+, -, ...)
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
template genClInPlaceOp(T: typedesc; ctype: string; procName: untyped;
+                        cName: string; cInfixOp: string;
+                        exported: static[bool] = true): untyped
+
+ + Generates an OpenCL kernel for an elementwise in-place binary infix operation (like +=, -=, *.= or /.=) Input:
  • The Nim type of the elements of the input tensors
  • +
  • The equivalent C type
  • +
  • The Nim identifier of the resulting proc
  • +
  • The C kernel name (this only helps debugging the C code)
  • +
  • The C operation (+=, -=, *.= or /.=)
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_kernels_interface_opencl.idx b/p_kernels_interface_opencl.idx new file mode 100644 index 000000000..2bdfe8a4f --- /dev/null +++ b/p_kernels_interface_opencl.idx @@ -0,0 +1,5 @@ +nimTitle p_kernels_interface_opencl p_kernels_interface_opencl.html module src/arraymancer/tensor/private/p_kernels_interface_opencl 0 +nim gen_cl_apply3 p_kernels_interface_opencl.html#gen_cl_apply3.t,string,string,string template gen_cl_apply3(kern_name, ctype, op: string): string 42 +nim genClInfixOp p_kernels_interface_opencl.html#genClInfixOp.t,typedesc,string,untyped,string,string,static[bool] template genClInfixOp(T: typedesc; ctype: string; procName: untyped; cName: string;\n cInfixOp: string; exported: static[bool] = true): untyped 88 +nim gen_cl_apply2 p_kernels_interface_opencl.html#gen_cl_apply2.t,string,string,string template gen_cl_apply2(kern_name, ctype, op: string): string 128 +nim genClInPlaceOp p_kernels_interface_opencl.html#genClInPlaceOp.t,typedesc,string,untyped,string,string,static[bool] template genClInPlaceOp(T: typedesc; ctype: string; procName: untyped; cName: string;\n cInfixOp: string; exported: static[bool] = true): untyped 161 diff --git a/p_logsumexp.html b/p_logsumexp.html new file mode 100644 index 000000000..603f6875f --- /dev/null +++ b/p_logsumexp.html @@ -0,0 +1,499 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/private/p_logsumexp + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/private/p_logsumexp + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/private/p_logsumexp

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Procs

+
+
+
+
proc logsumexp[T: SomeFloat](t: Tensor[T]): T
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc stable_softmax[T](x, max, sumexp: T): T {.noSideEffect, inline.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc streaming_max_sumexp[T](t: Tensor[T]): tuple[max: T, sumexp: T] {.
+    noSideEffect, inline.}
+
+ + +   Source +Edit + +
+
+
+
proc streaming_max_sumexp[T](t: Tensor[T]; axis: int): Tensor[
+    tuple[max: T, sumexp: T]] {.noinit.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_logsumexp.idx b/p_logsumexp.idx new file mode 100644 index 000000000..9c67c167f --- /dev/null +++ b/p_logsumexp.idx @@ -0,0 +1,6 @@ +nimTitle p_logsumexp p_logsumexp.html module src/arraymancer/nn_primitives/private/p_logsumexp 0 +nim streaming_max_sumexp p_logsumexp.html#streaming_max_sumexp,Tensor[T] proc streaming_max_sumexp[T](t: Tensor[T]): tuple[max: T, sumexp: T] 30 +nim streaming_max_sumexp p_logsumexp.html#streaming_max_sumexp,Tensor[T],int proc streaming_max_sumexp[T](t: Tensor[T]; axis: int): Tensor[\n tuple[max: T, sumexp: T]] 41 +nim stable_softmax p_logsumexp.html#stable_softmax,T,T,T proc stable_softmax[T](x, max, sumexp: T): T 57 +nim logsumexp p_logsumexp.html#logsumexp,Tensor[T: SomeFloat] proc logsumexp[T: SomeFloat](t: Tensor[T]): T 61 +nimgrp streamingmaxsumexp p_logsumexp.html#streaming_max_sumexp-procs-all proc 30 diff --git a/p_nnp_checks.html b/p_nnp_checks.html new file mode 100644 index 000000000..af7eaf630 --- /dev/null +++ b/p_nnp_checks.html @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/private/p_nnp_checks + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/private/p_nnp_checks + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/private/p_nnp_checks

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Procs

+
+
+
+
proc check_input_target[T](input, target: Tensor[T]) {.noSideEffect, inline.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_nnp_checks.idx b/p_nnp_checks.idx new file mode 100644 index 000000000..811282903 --- /dev/null +++ b/p_nnp_checks.idx @@ -0,0 +1,2 @@ +nimTitle p_nnp_checks p_nnp_checks.html module src/arraymancer/nn_primitives/private/p_nnp_checks 0 +nim check_input_target p_nnp_checks.html#check_input_target,Tensor[T],Tensor[T] proc check_input_target[T](input, target: Tensor[T]) 17 diff --git a/p_nnp_types.html b/p_nnp_types.html new file mode 100644 index 000000000..ab3268bc6 --- /dev/null +++ b/p_nnp_types.html @@ -0,0 +1,509 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn_primitives/private/p_nnp_types + + + + + + + + + +Arraymancer - src/arraymancer/nn_primitives/private/p_nnp_types + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn_primitives/private/p_nnp_types

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Types

+
+
+
Size2D = tuple[height: int, width: int]
+
+ + Tuple of height and width This is notably used to specify padding and stride parameters for Convolution2D. +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc nchw_channels[T](input: Tensor[T]): int {.inline.}
+
+ + Return number of channels of a Tensor in NCWH layout +   Source +Edit + +
+
+ +
+
+
+
proc nchw_height[T](input: Tensor[T]): int {.inline.}
+
+ + Return height of a Tensor in NCWH layout +   Source +Edit + +
+
+ +
+
+
+
proc nchw_width[T](input: Tensor[T]): int {.inline.}
+
+ + Return width of a Tensor in NCWH layout +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_nnp_types.idx b/p_nnp_types.idx new file mode 100644 index 000000000..449bf97a5 --- /dev/null +++ b/p_nnp_types.idx @@ -0,0 +1,5 @@ +nimTitle p_nnp_types p_nnp_types.html module src/arraymancer/nn_primitives/private/p_nnp_types 0 +nim Size2D p_nnp_types.html#Size2D tuple Size2D 4 +nim nchw_channels p_nnp_types.html#nchw_channels,Tensor[T] proc nchw_channels[T](input: Tensor[T]): int 10 +nim nchw_height p_nnp_types.html#nchw_height,Tensor[T] proc nchw_height[T](input: Tensor[T]): int 14 +nim nchw_width p_nnp_types.html#nchw_width,Tensor[T] proc nchw_width[T](input: Tensor[T]): int 18 diff --git a/p_operator_blas_l2l3.html b/p_operator_blas_l2l3.html new file mode 100644 index 000000000..6f4184e8a --- /dev/null +++ b/p_operator_blas_l2l3.html @@ -0,0 +1,491 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/p_operator_blas_l2l3 + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/p_operator_blas_l2l3 + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/p_operator_blas_l2l3

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc blasMM_C_eq_aAB_p_bC[T: SomeFloat | Complex[float32] | Complex[float64]](
+    alpha: T; a, b: Tensor[T]; beta: T; c: var Tensor[T])
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc blasMV_y_eq_aAx_p_by[T: SomeFloat | Complex[float32] | Complex[float64]](
+    alpha: T; a, x: Tensor[T]; beta: T; y: var Tensor[T])
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc fallbackMM_C_eq_aAB_p_bC[T: SomeInteger](alpha: T; a, b: Tensor[T];
+    beta: T; c: var Tensor[T]) {.inline.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_operator_blas_l2l3.idx b/p_operator_blas_l2l3.idx new file mode 100644 index 000000000..a8b6e65ce --- /dev/null +++ b/p_operator_blas_l2l3.idx @@ -0,0 +1,4 @@ +nimTitle p_operator_blas_l2l3 p_operator_blas_l2l3.html module src/arraymancer/tensor/private/p_operator_blas_l2l3 0 +nim blasMV_y_eq_aAx_p_by p_operator_blas_l2l3.html#blasMV_y_eq_aAx_p_by,T,Tensor[T: float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],Tensor[T: float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T,Tensor[T: float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc blasMV_y_eq_aAx_p_by[T: SomeFloat | Complex[float32] | Complex[float64]](\n alpha: T; a, x: Tensor[T]; beta: T; y: var Tensor[T]) 52 +nim fallbackMM_C_eq_aAB_p_bC p_operator_blas_l2l3.html#fallbackMM_C_eq_aAB_p_bC,T,Tensor[T: SomeInteger],Tensor[T: SomeInteger],T,Tensor[T: SomeInteger] proc fallbackMM_C_eq_aAB_p_bC[T: SomeInteger](alpha: T; a, b: Tensor[T]; beta: T;\n c: var Tensor[T]) 110 +nim blasMM_C_eq_aAB_p_bC p_operator_blas_l2l3.html#blasMM_C_eq_aAB_p_bC,T,Tensor[T: float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],Tensor[T: float or float32 or float64 or Complex[system.float32] or Complex[system.float64]],T,Tensor[T: float or float32 or float64 or Complex[system.float32] or Complex[system.float64]] proc blasMM_C_eq_aAB_p_bC[T: SomeFloat | Complex[float32] | Complex[float64]](\n alpha: T; a, b: Tensor[T]; beta: T; c: var Tensor[T]) 141 diff --git a/p_shapeshifting.html b/p_shapeshifting.html new file mode 100644 index 000000000..883ac85ab --- /dev/null +++ b/p_shapeshifting.html @@ -0,0 +1,640 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/private/p_shapeshifting + + + + + + + + + +Arraymancer - src/arraymancer/tensor/private/p_shapeshifting + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/private/p_shapeshifting

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc broadcast2Impl[T](a, b: AnyTensor[T]; result: var tuple[a, b: AnyTensor[T]]) {.
+    noSideEffect.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc broadcastImpl(t: var AnyTensor; shape: varargs[int] | Metadata) {.
+    noSideEffect.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc contiguousImpl[T](t: Tensor[T]; layout: OrderType; result: var Tensor[T])
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc exch_dim[T](t: Tensor[T]; dim1, dim2: int): Tensor[T] {.noinit,
+    noSideEffect.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc permuteImpl[T](result: var Tensor[T]; dims: varargs[int]) {.noSideEffect.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc reshape_no_copy(t: AnyTensor; new_shape: varargs[int] | Metadata;
+                     result: var AnyTensor; layout: OrderType) {.noSideEffect.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc reshape_with_copy[T](t: Tensor[T]; new_shape: varargs[int] | Metadata;
+                          result: var Tensor[T])
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc reshapeImpl(t: AnyTensor; new_shape: varargs[int] | Metadata;
+                 result: var AnyTensor)
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc squeezeImpl(t: var AnyTensor) {.noSideEffect.}
+
+ + +   Source +Edit + +
+
+
+
proc squeezeImpl(t: var AnyTensor; axis: int) {.noSideEffect.}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc unsqueezeImpl(t: var AnyTensor; axis: int) {.noSideEffect.}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/p_shapeshifting.idx b/p_shapeshifting.idx new file mode 100644 index 000000000..88140d387 --- /dev/null +++ b/p_shapeshifting.idx @@ -0,0 +1,13 @@ +nimTitle p_shapeshifting p_shapeshifting.html module src/arraymancer/tensor/private/p_shapeshifting 0 +nim contiguousImpl p_shapeshifting.html#contiguousImpl,Tensor[T],OrderType,Tensor[T] proc contiguousImpl[T](t: Tensor[T]; layout: OrderType; result: var Tensor[T]) 21 +nim reshape_with_copy p_shapeshifting.html#reshape_with_copy,Tensor[T],,Tensor[T] proc reshape_with_copy[T](t: Tensor[T]; new_shape: varargs[int] | Metadata;\n result: var Tensor[T]) 31 +nim reshape_no_copy p_shapeshifting.html#reshape_no_copy,AnyTensor,,AnyTensor,OrderType proc reshape_no_copy(t: AnyTensor; new_shape: varargs[int] | Metadata;\n result: var AnyTensor; layout: OrderType) 35 +nim reshapeImpl p_shapeshifting.html#reshapeImpl,AnyTensor,,AnyTensor proc reshapeImpl(t: AnyTensor; new_shape: varargs[int] | Metadata;\n result: var AnyTensor) 40 +nim broadcastImpl p_shapeshifting.html#broadcastImpl,AnyTensor, proc broadcastImpl(t: var AnyTensor; shape: varargs[int] | Metadata) 53 +nim broadcast2Impl p_shapeshifting.html#broadcast2Impl,AnyTensor[T],AnyTensor[T],tuple[AnyTensor[T],AnyTensor[T]] proc broadcast2Impl[T](a, b: AnyTensor[T]; result: var tuple[a, b: AnyTensor[T]]) 73 +nim exch_dim p_shapeshifting.html#exch_dim,Tensor[T],int,int proc exch_dim[T](t: Tensor[T]; dim1, dim2: int): Tensor[T] 114 +nim permuteImpl p_shapeshifting.html#permuteImpl,Tensor[T],varargs[int] proc permuteImpl[T](result: var Tensor[T]; dims: varargs[int]) 122 +nim squeezeImpl p_shapeshifting.html#squeezeImpl,AnyTensor proc squeezeImpl(t: var AnyTensor) 134 +nim squeezeImpl p_shapeshifting.html#squeezeImpl,AnyTensor,int proc squeezeImpl(t: var AnyTensor; axis: int) 148 +nim unsqueezeImpl p_shapeshifting.html#unsqueezeImpl,AnyTensor,int proc unsqueezeImpl(t: var AnyTensor; axis: int) 156 +nimgrp squeezeimpl p_shapeshifting.html#squeezeImpl-procs-all proc 
134 diff --git a/pca.html b/pca.html new file mode 100644 index 000000000..50eec0c9e --- /dev/null +++ b/pca.html @@ -0,0 +1,597 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/ml/dimensionality_reduction/pca + + + + + + + + + +Arraymancer - src/arraymancer/ml/dimensionality_reduction/pca + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/ml/dimensionality_reduction/pca

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
PCA_Detailed[T] = object
+  n_observations*: int
+  n_features*: int
+  n_components*: int
+  projected*: Tensor[T]
+  components*: Tensor[T]
+  mean*: Tensor[T]
+  explained_variance*: Tensor[T]
+  explained_variance_ratio*: Tensor[T]
+  singular_values*: Tensor[T]
+  noise_variance*: T
+
+
+ +

Principal Component Analysis (PCA) object with full details

+

Contains the full PCA details from an input matrix of shape n_observations, n_features

+
  • n_observations: The number of observations/samples from an input matrix of shape n_observations, n_features
  • +
  • n_features: The number of features from the input matrix of shape n_observations, n_features
  • +
  • n_components: The number of principal components asked in pca_detailed
  • +
  • projected: The result of the PCA of shape n_observations, n_components in descending order of explained variance
  • +
  • components: a matrix of shape n_features, n_components to project new data on the same orthogonal basis
  • +
  • mean: Per-feature empirical mean, equal to input.mean(axis=0)
  • +
  • explained_variance: a vector of shape n_components in descending order. Represents the amount of variance explained by each components It is equal to n_components largest eigenvalues of the covariance matrix of X.
  • +
  • explained_variance_ratio: a vector of shape n_components in descending order. Represents the percentage of variance explained by each components
  • +
  • singular_values: a vector of shape n_components in descending order. The singular values corresponding to each components. The singular values are equal to the 2-norms of the n_components cariables in the lower-dimensional space
  • +
  • noise_variance: The estimated noise covariance following the Probabilistic PCA model from Tipping and Bishop 1999. See "Pattern Recognition and Machine Learning" by C. Bishop, 12.2.1 p. 574 or http://www.miketipping.com/papers/met-mppca.pdf. It is required to compute the estimated data covariance and score samples.## Equal to the average of (min(n_features, n_samples) - n_components) smallest eigenvalues of the covariance matrix of X.
  • +
+

The outputs mean, explained_variance, explained_variance_ratio, singular_values are squeezed to 1D and matches the features column vectors

+ +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc `$`(pca: PCA_Detailed): string
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc pca[T: SomeFloat](X: Tensor[T]; n_components = 2;
+                       center: static bool = true; n_oversamples = 5;
+                       n_power_iters = 2): tuple[projected: Tensor[T],
+    components: Tensor[T]] {.noinit.}
+
+ +

Principal Component Analysis (PCA)

+

Project the input data X of shape Observations, Features into a new coordinate system where axes (principal components) are in descending order of explained variance of the original X data i.e. the first axis explains most of the variance.

+

The rotated components cmatrix can be used to project new observations onto the same base: X' * loadings, with X' of shape Observations', Features. X' must be mean centered Its transposed can be use to reconstruct the original X: X ~= projected * components.transpose()

+

PCA requires:

+
  • mean-centered features. This procedure does the centering by default. You can pass "center = false", if your preprocessing leads to centering.
  • +
  • Features of the same scale/amplitude. Some alternatives include min-max scaling, mean normalization, standardization (mean = 0 and unit variance), rescaling column to unit-length.
  • +
+

Note: PCA without centering is also called truncated SVD, which is useful when centering is costly, for example in the case of sparse matrices from parsing text.

+

Inputs:

+ +

Returns:

+ + +   Source +Edit + +
+
+ +
+
+
+
proc pca_detailed[T: SomeFloat](X: Tensor[T]; n_components = 2;
+                                center: static bool = true; n_oversamples = 5;
+                                n_power_iters = 2): PCA_Detailed[T] {.noinit.}
+
+ +

Principal Component Analysis (PCA) with full details

+

Project the input data X of shape Observations, Features into a new coordinate system where axes (principal components) are in descending order of explained variance of the original X data i.e. the first axis explains most of the variance.

+

The rotated components cmatrix can be used to project new observations onto the same base: X' * loadings, with X' of shape Observations', Features. X' must be mean centered Its transposed can be use to reconstruct the original X: X ~= projected * components.transpose()

+

PCA requires:

+
  • mean-centered features. This procedure does the centering by default. You can pass "center = false", if your preprocessing leads to centering.
  • +
  • Features of the same scale/amplitude. Some alternatives include min-max scaling, mean normalization, standardization (mean = 0 and unit variance), rescaling column to unit-length.
  • +
+

Note: PCA without centering is also called truncated SVD, which is useful when centering is costly, for example in the case of sparse matrices from parsing text.

+

Inputs:

+ +

Returns a "Principal Component Analysis" object with the following fields

+
  • n_observations: The number of observations/samples from an input matrix of shape n_observations, n_features
  • +
  • n_features: The number of features from the input matrix of shape n_observations, n_features
  • +
  • n_components: The number of principal components asked in pca_detailed
  • +
  • projected: The result of the PCA of shape n_observations, n_components in descending order of explained variance
  • +
  • components: a matrix of shape n_features, n_components to project new data on the same orthogonal basis
  • +
  • mean: Per-feature empirical mean, equal to input.mean(axis=0)
  • +
  • explained_variance: a vector of shape n_components in descending order. Represents the amount of variance explained by each components It is equal to n_components largest eigenvalues of the covariance matrix of X.
  • +
  • explained_variance_ratio: a vector of shape n_components in descending order. Represents the percentage of variance explained by each components
  • +
  • singular_values: a vector of shape n_components in descending order. The singular values corresponding to each components. The singular values are equal to the 2-norms of the n_components cariables in the lower-dimensional space
  • +
  • noise_variance: The estimated noise covariance following the Probabilistic PCA model from Tipping and Bishop 1999. See "Pattern Recognition and Machine Learning" by C. Bishop, 12.2.1 p. 574 or http://www.miketipping.com/papers/met-mppca.pdf. It is required to compute the estimated data covariance and score samples.## Equal to the average of (min(n_features, n_samples) - n_components) smallest eigenvalues of the covariance matrix of X.
  • +
+

The outputs mean, explained_variance, explained_variance_ratio, singular_values are squeezed to 1D and matches the features column vectors

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/pca.idx b/pca.idx new file mode 100644 index 000000000..8d0066f48 --- /dev/null +++ b/pca.idx @@ -0,0 +1,5 @@ +nimTitle pca pca.html module src/arraymancer/ml/dimensionality_reduction/pca 0 +nim pca pca.html#pca,Tensor[T: SomeFloat],int,staticbool,int,int proc pca[T: SomeFloat](X: Tensor[T]; n_components = 2; center: static bool = true;\n n_oversamples = 5; n_power_iters = 2): tuple[\n projected: Tensor[T], components: Tensor[T]] 8 +nim PCA_Detailed pca.html#PCA_Detailed object PCA_Detailed 56 +nim `$` pca.html#$,PCA_Detailed proc `$`(pca: PCA_Detailed): string 96 +nim pca_detailed pca.html#pca_detailed,Tensor[T: SomeFloat],int,staticbool,int,int proc pca_detailed[T: SomeFloat](X: Tensor[T]; n_components = 2;\n center: static bool = true; n_oversamples = 5;\n n_power_iters = 2): PCA_Detailed[T] 112 diff --git a/relu.html b/relu.html new file mode 100644 index 000000000..04d24da22 --- /dev/null +++ b/relu.html @@ -0,0 +1,474 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/activation/relu + + + + + + + + + +Arraymancer - src/arraymancer/nn/activation/relu + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/activation/relu

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
ReluActivation[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc relu[TT](a: Variable[TT]): Variable[TT]
+
+ + Input:
  • A variable
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/relu.idx b/relu.idx new file mode 100644 index 000000000..53894274c --- /dev/null +++ b/relu.idx @@ -0,0 +1,3 @@ +nimTitle relu relu.html module src/arraymancer/nn/activation/relu 0 +nim ReluActivation relu.html#ReluActivation type ReluActivation 19 +nim relu relu.html#relu,Variable[TT] proc relu[TT](a: Variable[TT]): Variable[TT] 47 diff --git a/selectors.html b/selectors.html new file mode 100644 index 000000000..538f2fddc --- /dev/null +++ b/selectors.html @@ -0,0 +1,742 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/selectors + + + + + + + + + +Arraymancer - src/arraymancer/tensor/selectors + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/selectors

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc index_fill[T; Idx: byte or char or SomeInteger](t: var Tensor[T];
+    axis: int; indices: openArray[Idx]; value: T)
+
+ + Replace elements of t indicated by their indices along axis with value This is equivalent to Numpy put. +   Source +Edit + +
+
+
+
proc index_fill[T; Idx: byte or char or SomeInteger](t: var Tensor[T];
+    axis: int; indices: Tensor[Idx]; value: T)
+
+ + Replace elements of t indicated by their indices along axis with value This is equivalent to Numpy put. +   Source +Edit + +
+
+ +
+
+
+
proc index_select[T; Idx: byte or char or SomeInteger](t: Tensor[T]; axis: int;
+    indices: openArray[Idx]): Tensor[T] {.noinit.}
+
+ + Take elements from a tensor along an axis using the indices Tensor. This is equivalent to NumPy take. The result does not share the input storage, there are copies. The tensors containing the indices can be an integer, byte or char tensor. +   Source +Edit + +
+
+
+
proc index_select[T; Idx: byte or char or SomeInteger](t: Tensor[T]; axis: int;
+    indices: Tensor[Idx]): Tensor[T] {.noinit.}
+
+ + Take elements from a tensor along an axis using the indices Tensor. This is equivalent to NumPy take. The result does not share the input storage, there are copies. The tensors containing the indices can be an integer, byte or char tensor. +   Source +Edit + +
+
+ +
+
+
+
proc masked_axis_fill[T](t: var Tensor[T]; mask: openArray[bool]; axis: int;
+                         value: T or Tensor[T])
+
+ +

Take a 1D boolean mask tensor with size equal to the t.shape[axis] The axis indexes that are set to true in the mask will be filled with value

+

Limitation: If value is a Tensor, only filling via broadcastable tensors is supported at the moment for example if filling axis of a tensor t of shape 4, 3 the corresponding shapes are valid 4, 3.masked_axis_fill(mask = 1, 3, axis = 1, value = 4, 1)

+

with values t = [ 4, 99, 2, 3, 4, 99, 1, 8, 7, 8, 6, 8].toTensor() mask = false, true, true value = [10, 20, 30, 40].toTensor()

+

result = [ 4, 10, 10, 3, 20, 20, 1, 30, 30, 8, 40, 40].toTensor()

+ +   Source +Edit + +
+
+
+
proc masked_axis_fill[T](t: var Tensor[T]; mask: Tensor[bool]; axis: int;
+                         value: T or Tensor[T])
+
+ +

Take a 1D boolean mask tensor with size equal to the t.shape[axis] The axis indexes that are set to true in the mask will be filled with value

+

Limitation: If value is a Tensor, only filling via broadcastable tensors is supported at the moment for example if filling axis of a tensor t of shape 4, 3 the corresponding shapes are valid 4, 3.masked_axis_fill(mask = 1, 3, axis = 1, value = 4, 1)

+

with values t = [ 4, 99, 2, 3, 4, 99, 1, 8, 7, 8, 6, 8].toTensor() mask = false, true, true value = [10, 20, 30, 40].toTensor()

+

result = [ 4, 10, 10, 3, 20, 20, 1, 30, 30, 8, 40, 40].toTensor()

+ +   Source +Edit + +
+
+ +
+
+
+
proc masked_axis_select[T](t: Tensor[T]; mask: openArray[bool]; axis: int): Tensor[
+    T] {.noinit.}
+
+ +

Take elements from a tensor according to the provided boolean mask. The mask must be a 1D tensor and is applied along an axis, by default 0.

+

The result will be the concatenation of values for which the mask is true.

+

For example, for a 1D tensor t t.masked_select(t > 0) will return a tensor with only the positive values of t.

+

The result does not share input storage.

+ +   Source +Edit + +
+
+
+
proc masked_axis_select[T](t: Tensor[T]; mask: Tensor[bool]; axis: int): Tensor[
+    T] {.noinit.}
+
+ +

Take elements from a tensor according to the provided boolean mask. The mask must be a 1D tensor and is applied along an axis, by default 0.

+

The result will be the concatenation of values for which the mask is true.

+

For example, for a 1D tensor t t.masked_select(t > 0) will return a tensor with only the positive values of t.

+

The result does not share input storage.

+ +   Source +Edit + +
+
+ +
+
+
+
proc masked_fill[T](t: var Tensor[T]; mask: openArray; value: T)
+
+ +

For each element t[index] of the input tensor t with index index, check if mask[index] is true. If so, fill it value. Otherwise leave it untouched.

+

Example:

+

t.masked_fill(true, false, true, true, -1)

+

or alternatively:

+

t.masked_fill(true, false, true, true): -1

+

In this version of this procedure the boolean mask, which must have the same size as the input tensor t, is an openArray of bools, i.e.:

+
  • an array or sequence of bools
  • +
  • an array of arrays of bools,
  • +
  • ...
  • +
+ +   Source +Edit + +
+
+
+
proc masked_fill[T](t: var Tensor[T]; mask: openArray; value: Tensor[T])
+
+ +

For each element t[index] of the input tensor t with index index, check if mask[index] is true. If so fill it with the _next element from the value tensor. Otherwise leave it untouched.

+

Note that this does _not fill t[index] with value[index], but with the n-th element of value where n is the number of true elements in the mask before and including the index-th mask element. Because of this, the value tensor must have at least as many elements as the number of true elements in the mask. If that is not the case an IndexDefect exception will be raised at runtime. The value tensor can have even more values which will simply be ignored.

+

Example:

+

t.masked_fill(true, false, true, true, 3, 4, -1.toTensor)

+

In this version of this procedure the boolean mask, which must have the same size as the input tensor t, is an openArray of bools, i.e.:

+
  • an array or sequence of bools
  • +
  • an array of arrays of bools,
  • +
  • ...
  • +
+ +   Source +Edit + +
+
+
+
proc masked_fill[T](t: var Tensor[T]; mask: Tensor[bool]; value: T)
+
+ +

For each element t[index] of the input tensor t with index index, check if mask[index] is true. If so, fill it value. Otherwise leave it untouched.

+

Example:

+

t.masked_fill(t > 0, -1)

+

or alternatively:

+

t.masked_fill(t > 0): -1

+

In this version of this procedure the boolean mask is a Tensor[bool] with the same size as the input tensor t.

+ +   Source +Edit + +
+
+
+
proc masked_fill[T](t: var Tensor[T]; mask: Tensor[bool]; value: Tensor[T])
+
+ +

For each element t[index] of the input tensor t with index index, check if mask[index] is true. If so fill it with the _next element from the value tensor. Otherwise leave it untouched.

+

Note that this does _not fill t[index] with value[index], but with the n-th element of value where n is the number of true elements in the mask before and including the index-th mask element. Because of this, the value tensor must have at least as many elements as the number of true elements in the mask. If that is not the case an IndexDefect exception will be raised at runtime. The value tensor can have even more values which will simply be ignored.

+

Example:

+

t.masked_fill(t > 0, 3, 4, -1.toTensor)

+

In this version of this procedure the boolean mask is a Tensor[bool] with the same size as the input tensor t.

+ +   Source +Edit + +
+
+ +
+
+
+
proc masked_fill_along_axis[T](t: var Tensor[T]; mask: Tensor[bool]; axis: int;
+                               value: T)
+
+ + Take a boolean mask tensor and for each slice of t along the axis Set the slice elements to value if their mask is true +   Source +Edit + +
+
+ +
+
+
+
proc masked_select[T](t: Tensor[T]; mask: openArray): Tensor[T] {.noinit.}
+
+ +

Take elements from a tensor according to the provided boolean mask

+

The boolean mask must be

+
  • an array or sequence of bools
  • +
  • an array of arrays of bools,
  • +
  • ...
  • +
+

Returns a flattened tensor which is the concatenation of values for which the mask is true.

+

The result does not share input storage.

+ +   Source +Edit + +
+
+
+
proc masked_select[T](t: Tensor[T]; mask: Tensor[bool]): Tensor[T] {.noinit.}
+
+ +

Take elements from a tensor according to the provided boolean mask

+

Returns a flattened tensor which is the concatenation of values for which the mask is true.

+

The result does not share input storage.

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/selectors.idx b/selectors.idx new file mode 100644 index 000000000..9befecd16 --- /dev/null +++ b/selectors.idx @@ -0,0 +1,22 @@ +nimTitle selectors selectors.html module src/arraymancer/tensor/selectors 0 +nim index_select selectors.html#index_select,Tensor[T],int,Tensor[Idx: byte or char or int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64] proc index_select[T; Idx: byte or char or SomeInteger](t: Tensor[T]; axis: int;\n indices: Tensor[Idx]): Tensor[T] 27 +nim index_select selectors.html#index_select,Tensor[T],int,openArray[Idx] proc index_select[T; Idx: byte or char or SomeInteger](t: Tensor[T]; axis: int;\n indices: openArray[Idx]): Tensor[T] 46 +nim index_fill selectors.html#index_fill,Tensor[T],int,Tensor[Idx: byte or char or int or int8 or int16 or int32 or int64 or uint or uint8 or uint16 or uint32 or uint64],T proc index_fill[T; Idx: byte or char or SomeInteger](t: var Tensor[T]; axis: int;\n indices: Tensor[Idx]; value: T) 63 +nim index_fill selectors.html#index_fill,Tensor[T],int,openArray[Idx],T proc index_fill[T; Idx: byte or char or SomeInteger](t: var Tensor[T]; axis: int;\n indices: openArray[Idx]; value: T) 73 +nim masked_select selectors.html#masked_select,Tensor[T],Tensor[bool] proc masked_select[T](t: Tensor[T]; mask: Tensor[bool]): Tensor[T] 86 +nim masked_select selectors.html#masked_select,Tensor[T],openArray proc masked_select[T](t: Tensor[T]; mask: openArray): Tensor[T] 114 +nim masked_fill selectors.html#masked_fill,Tensor[T],Tensor[bool],T proc masked_fill[T](t: var Tensor[T]; mask: Tensor[bool]; value: T) 127 +nim masked_fill selectors.html#masked_fill,Tensor[T],openArray,T proc masked_fill[T](t: var Tensor[T]; mask: openArray; value: T) 161 +nim masked_fill selectors.html#masked_fill,Tensor[T],Tensor[bool],Tensor[T] proc masked_fill[T](t: var Tensor[T]; mask: Tensor[bool]; value: Tensor[T]) 183 +nim masked_fill selectors.html#masked_fill,Tensor[T],openArray,Tensor[T] proc 
masked_fill[T](t: var Tensor[T]; mask: openArray; value: Tensor[T]) 243 +nim masked_axis_select selectors.html#masked_axis_select,Tensor[T],Tensor[bool],int proc masked_axis_select[T](t: Tensor[T]; mask: Tensor[bool]; axis: int): Tensor[T] 309 +nim masked_axis_select selectors.html#masked_axis_select,Tensor[T],openArray[bool],int proc masked_axis_select[T](t: Tensor[T]; mask: openArray[bool]; axis: int): Tensor[T] 324 +nim masked_axis_fill selectors.html#masked_axis_fill,Tensor[T],Tensor[bool],int, proc masked_axis_fill[T](t: var Tensor[T]; mask: Tensor[bool]; axis: int;\n value: T or Tensor[T]) 361 +nim masked_axis_fill selectors.html#masked_axis_fill,Tensor[T],openArray[bool],int, proc masked_axis_fill[T](t: var Tensor[T]; mask: openArray[bool]; axis: int;\n value: T or Tensor[T]) 395 +nim masked_fill_along_axis selectors.html#masked_fill_along_axis,Tensor[T],Tensor[bool],int,T proc masked_fill_along_axis[T](t: var Tensor[T]; mask: Tensor[bool]; axis: int;\n value: T) 430 +nimgrp indexselect selectors.html#index_select-procs-all proc 27 +nimgrp indexfill selectors.html#index_fill-procs-all proc 63 +nimgrp maskedaxisselect selectors.html#masked_axis_select-procs-all proc 309 +nimgrp maskedfill selectors.html#masked_fill-procs-all proc 127 +nimgrp maskedselect selectors.html#masked_select-procs-all proc 86 +nimgrp maskedaxisfill selectors.html#masked_axis_fill-procs-all proc 361 diff --git a/sequninit.html b/sequninit.html new file mode 100644 index 000000000..38ff46df2 --- /dev/null +++ b/sequninit.html @@ -0,0 +1,435 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/private/sequninit + + + + + + + + + +Arraymancer - src/arraymancer/private/sequninit + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/private/sequninit

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Procs

+
+
+
+
func newSeqUninit[T](len: Natural): seq[T] {.inline.}
+
+ + Creates an uninitialzed seq. Contrary to newSequnitialized in system.nim this works for any subtype T +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/sequninit.idx b/sequninit.idx new file mode 100644 index 000000000..402cab194 --- /dev/null +++ b/sequninit.idx @@ -0,0 +1,2 @@ +nimTitle sequninit sequninit.html module src/arraymancer/private/sequninit 0 +nim newSeqUninit sequninit.html#newSeqUninit,Natural proc newSeqUninit[T](len: Natural): seq[T] 18 diff --git a/shapeshifting.html b/shapeshifting.html new file mode 100644 index 000000000..ab498fc76 --- /dev/null +++ b/shapeshifting.html @@ -0,0 +1,1046 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/shapeshifting + + + + + + + + + +Arraymancer - src/arraymancer/tensor/shapeshifting + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/shapeshifting

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc append[T](t: Tensor[T]; values: openArray[T]): Tensor[T] {.noinit.}
+
+ +

Create a copy of an rank-1 input tensor with values appended to its end

+

Inputs:

+
  • Rank-1 tensor of type T
  • +
  • An open array of values of type T
  • +
+

Returns:

+
  • A copy of the input tensor t with the extra values appended at the end.
  • +
+

Notes: Append does not occur in-place (a new tensor is allocated and filled). Compared to numpy's append, this proc requires that you explicitly flatten the input tensor if its rank is greater than 1. It also does not support the axis parameter. If you want to append values along a specific axis, you should use concat instead. Examples:

echo append(1, 2, 3.toTensor, 4, 5, 6, 7) # Tensorsystem.int of shape "9" on backend "Cpu" # 1 2 3 4 5 6 7

+

echo append([1, 2, 3, 4, 5, 6].toTensor, 7, 8, 9) # Error: unhandled exception: t.rank == 1 append only works # on rank-1 tensors but first input tensor has rank 2 AssertionDefect

+

echo append([1, 2, 3, 4, 5, 6].toTensor.flatten, 7, 8, 9) # 1 2 3 4 5 6 7 8 9

+

+

+ +   Source +Edit + +
+
+
+
proc append[T](t: Tensor[T]; values: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Create a copy of an rank-1 input tensor with values appended to its end

+

Inputs:

+
  • Rank-1 tensor
  • +
  • Rank-1 tensor of extra values to append
  • +
+

Returns:

+
  • A copy of the input tensor t with the extra values appended at the end.
  • +
+

Notes: Append does not occur in-place (a new tensor is allocated and filled). To concatenate more than one tensor or tensors that you must use concat. Compared to numpy's append, this proc requires that you explicitly flatten the inputs if they are not rank-1 tensors. It also does not support the axis parameter. If you want to append the values along a specific axis, you should use concat instead. Examples:

echo append(1, 2, 3.toTensor, 4, 5, 6, 7.toTensor) # Tensorsystem.int of shape "9" on backend "Cpu" # 1 2 3 4 5 6 7

+

echo append(1, 2, 3.toTensor, [4, 5, 6, 7, 8, 9].toTensor) # Error: unhandled exception: values.rank == 1 append only works # on rank-1 tensors but extra values tensor has rank 2 AssertionDefect

+

echo append(1, 2, 3.toTensor, [4, 5, 6, 7, 8, 9].toTensor.flatten) # 1 2 3 4 5 6 7 8 9

+

+

+ +   Source +Edit + +
+
+ +
+
+
+
proc asContiguous[T](t: Tensor[T]; layout: OrderType = rowMajor;
+                     force: bool = false): Tensor[T] {.noinit.}
+
+ +

Transform a tensor with general striding to a Tensor with contiguous layout.

+

By default tensor will be rowMajor.

+

The layout is kept if the tensor is already contiguous (C Major or F major) The "force" parameter can force re-ordering to a specific layout.

+

Result is always a fully packed tensor even if the input is a contiguous slice.

+ +   Source +Edit + +
+
+ +
+
+
+
proc broadcast[T: SomeNumber](val: T; shape: Metadata): Tensor[T] {.noinit,
+    noSideEffect.}
+
+ +

Broadcast a number

+

Input:

+
  • a number to be broadcasted
  • +
  • a tensor shape that will be broadcasted to
  • +
+

Returns:

+
  • a tensor with the broadcasted shape where all elements has the broadcasted value
  • +
+

The broadcasting is made using tensor data of size 1 and 0 strides, i.e. the operation is memory efficient.

+

Warning โš : A broadcasted tensor should not be modified and only used for computation. Modifying any value from this broadcasted tensor will change all its values.

+ +   Source +Edit + +
+
+
+
proc broadcast[T: SomeNumber](val: T; shape: varargs[int]): Tensor[T] {.noinit.}
+
+ +

Broadcast a number

+

Input:

+
  • a number to be broadcasted
  • +
  • a tensor shape that will be broadcasted to
  • +
+

Returns:

+
  • a tensor with the broadcasted shape where all elements has the broadcasted value
  • +
+

The broadcasting is made using tensor data of size 1 and 0 strides, i.e. the operation is memory efficient.

+

Warning โš : A broadcasted tensor should not be modified and only used for computation. Modifying any value from this broadcasted tensor will change all its values.

+ +   Source +Edit + +
+
+
+
proc broadcast[T](t: Tensor[T]; shape: Metadata): Tensor[T] {.noinit,
+    noSideEffect.}
+
+ +

Explicitly broadcast a tensor to the specified shape.

+

Dimension(s) of size 1 can be expanded to arbitrary size by replicating values along that dimension.

+

Warning โš : A broadcasted tensor should not be modified and only used for computation.

+ +   Source +Edit + +
+
+
+
proc broadcast[T](t: Tensor[T]; shape: varargs[int]): Tensor[T] {.noinit,
+    noSideEffect.}
+
+ +

Explicitly broadcast a tensor to the specified shape.

+

Dimension(s) of size 1 can be expanded to arbitrary size by replicating values along that dimension.

+

Warning โš : A broadcasted tensor should not be modified and only used for computation.

+ +   Source +Edit + +
+
+ +
+
+
+
proc broadcast2[T](a, b: Tensor[T]): tuple[a, b: Tensor[T]] {.noSideEffect,
+    noinit.}
+
+ +

Broadcast 2 tensors so they have compatible shapes for element-wise computations.

+

Tensors in the tuple can be accessed with output.a and output.b

+

The returned broadcasted Tensors share the underlying data with the input.

+

Dimension(s) of size 1 can be expanded to arbitrary size by replicating values along that dimension.

+

Warning โš : This is a no-copy operation, data is shared with the input. This proc does not guarantee that a let value is immutable. A broadcasted tensor should not be modified and only used for computation.

+ +   Source +Edit + +
+
+ +
+
+
+
func chunk[T](t: Tensor[T]; nb_chunks: Positive; axis: Natural): seq[Tensor[T]] {.
+    noinit.}
+
+ +

Splits a Tensor into n chunks along the specified axis.

+

In case a tensor cannot be split evenly, with la == length_axis, n = n_chunks it returns la mod n subtensors of size (la div n) + 1 the rest of size la div n.

+

This is consistent with numpy array_split

+ +   Source +Edit + +
+
+ +
+
+
+
proc concat[T](t_list: varargs[Tensor[T]]; axis: int): Tensor[T] {.noinit.}
+
+ + Concatenate tensors Input:
  • Tensors
  • +
  • An axis (dimension)
  • +
+

Returns:

+
  • a tensor
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc flatten(t: Tensor): Tensor {.noinit, inline.}
+
+ +

Flatten a tensor, returning a rank-1 tensor with the same data as the input.

+

This is the same as t.reshape([t.size.int]). Therefore, if possible no data copy is done and the returned tensor shares data with the input. If input is not contiguous, this is not possible and a copy will be made.

+

Input:

+
  • a tensor
  • +
+

Returns:

+
  • a tensor rank-1 tensor with the same data as the input.
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc moveaxis(t: Tensor; initial: Natural; target: Natural): Tensor {.noinit.}
+
+ + Move one of the axes of a tensor into a new position Input:
  • a tensor
  • +
  • the initial position of the axes to move
  • +
  • the target position of the axes to move
  • +
+

Returns:

+
  • a tensor with moved axes but sharing the same data
  • +
+

See also:

+
  • permute
  • +
+

Usage: .. code:: nim # move dim 0 to position 2, which makes # dim 1 become dim 0 and dim 2 become dim 1 a.moveaxis(0, 2) Notes: Call .clone() if you want to make a copy of the data, otherwise changes to the data of returned tensor will affect the input tensor.

+ +   Source +Edit + +
+
+ +
+
+
+
proc permute(t: Tensor; dims: varargs[int]): Tensor {.noinit, noSideEffect.}
+
+ + Permute the dimensions of a tensor into a different order Input:
  • a tensor
  • +
  • the new dimension order
  • +
+

Returns:

+
  • a tensor with re-ordered dimensions but sharing the same data
  • +
+

See also:

+
  • moveaxis
  • +
+

Usage: .. code:: nim # keep dim 0 at position 0 and swap dims 1 and 2 a.permute(0,2,1) Notes: Call .clone() if you want to make a copy of the data, otherwise changes to the data of returned tensor will affect the input tensor.

+ +   Source +Edit + +
+
+ +
+
+
+
proc reshape(t: Tensor; new_shape: Metadata): Tensor {.noinit.}
+
+ +

Reshape a tensor. If possible no data copy is done and the returned tensor shares data with the input. If input is not contiguous, this is not possible and a copy will be made.

+

Input:

+
  • a tensor
  • +
  • a new shape. Number of elements must be the same
  • +
+

Returns:

+
  • a tensor with the same data but reshaped.
  • +
+ +   Source +Edit + +
+
+
+
proc reshape(t: Tensor; new_shape: varargs[int]): Tensor {.noinit.}
+
+ +

Reshape a tensor. If possible no data copy is done and the returned tensor shares data with the input. If input is not contiguous, this is not possible and a copy will be made.

+

Input:

+
  • a tensor
  • +
  • a new shape. Number of elements must be the same
  • +
+

Returns:

+
  • a tensor with the same data but reshaped.
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc roll[T](t: Tensor[T]; shift: int): Tensor[T] {.noinit.}
+
+ +

Roll elements of tensor "globally" (i.e. across all axes).

+

This takes a tensor, flattens it, rolls the elements shift positions (taking the last shift elements of the flattened tensor and putting them at the beginning of the flattened tensor), and then reshapes the rolled tensor back to the original shape.

+

This is different from the version of this proc that accepts an axis, which rolls _slices of a tensor taken along the selected axis.

+

Input:

+
  • t: Input tensor.
  • +
  • shift: Integer number of places by which elements are shifted.
  • +
+

Return:

+
  • Output tensor, with the same shape as a.
  • +
+

Examples:

let x = arange(5) echo x.roll(2) Tensorsystem.int of shape "5" on backend "Cpu" 3 4 0 1 2 echo x.roll(-2) Tensorsystem.int of shape "5" on backend "Cpu" 2 3 4 0 1 let x2 = arange(5).reshape(2, 5) echo x2 # Tensorsystem.int of shape "2, 5" on backend "Cpu" # |0 1 2 3 4| # |5 6 7 8 9| echo roll(x2, 1) # Tensorsystem.int of shape "2, 5" on backend "Cpu" # |9 0 1 2 3| # |4 5 6 7 8| echo roll(x2, -1) # Tensorsystem.int of shape "2, 5" on backend "Cpu" # |1 2 3 4 5| # |6 7 8 9 0|

+

+ +   Source +Edit + +
+
+
+
proc roll[T](t: Tensor[T]; shift: int; axis: Natural): Tensor[T] {.noinit.}
+
+ +

Roll slices of a tensor along a given axis.

+

Slices that roll beyond the last position are re-introduced at the first.

+

Note that calling this proc with a rank-1 tensor, will simply check that axis == 0 and then call the (axis-less) version of this proc.

+

Input:

+
  • t : Input tensor.
  • +
  • shift : Integer number of places by which elements are shifted.
  • +
  • axis : an axis (dimension).
  • +
+

Return:

+
  • Output tensor, with the same shape as t.
  • +
+

Notes:

+
  • numpy's roll also supports passing a list of shifts and axis, while this proc doesn't. However, you can achieve the same effect by calling roll multiple times in a row (i.e. np.roll(t, [1, 2], axis=[0, 1]) is equivalent to t.roll(1, axis=0).roll(2, axis=1) which is arguably more clear).
  • +
+

Examples:

let x = arange(5) echo x.roll(2, axis=0) Tensorsystem.int of shape "5" on backend "Cpu" 3 4 0 1 2 echo x.roll(-2, axis=0) Tensorsystem.int of shape "5" on backend "Cpu" 2 3 4 0 1

+

+

let x2 = arange(5).reshape(2, 5) echo x2 # Tensorsystem.int of shape "2, 5" on backend "Cpu" # |0 1 2 3 4| # |5 6 7 8 9| echo roll(x2, 1, axis=0) # Tensorsystem.int of shape "2, 5" on backend "Cpu" # |5 6 7 8 9| # |0 1 2 3 4| echo roll(x2, -1, axis=0) # Tensorsystem.int of shape "2, 5" on backend "Cpu" # |5 6 7 8 9| # |0 1 2 3 4| echo roll(x2, 1, axis=1) # Tensorsystem.int of shape "2, 5" on backend "Cpu" # |4 0 1 2 3| # |9 5 6 7 8| echo roll(x2, -1, axis=1) # Tensorsystem.int of shape "2, 5" on backend "Cpu" # |1 2 3 4 0| # |6 7 8 9 5| echo x2.roll(1, axis=0).roll(2, axis=1) Tensorsystem.int of shape "2, 5" on backend "Cpu"|8 9 5 6 7| |3 4 0 1 2|

+ +   Source +Edit + +
+
+ +
+
+
+
func split[T](t: Tensor[T]; chunk_size: Positive; axis: Natural): seq[Tensor[T]] {.
+    noinit.}
+
+ + Split the tensor into chunks of size chunk_size along the specified axis. Last chunk size will equal the remainder if the specified axis length is not divisible by chunk_size +   Source +Edit + +
+
+ +
+
+
+
func squeeze(t: AnyTensor): AnyTensor {.noinit.}
+
+ + Squeeze tensors. For example a Tensor of shape 4,1,3 will become 4,3 Input:
  • a tensor
  • +
+

Returns:

+
  • a tensor with singleton dimensions collapsed
  • +
+ +   Source +Edit + +
+
+
+
func squeeze(t: Tensor; axis: Natural): Tensor {.noinit.}
+
+ + Collapse the given axis, if the dimension is not 1, it does nothing. Input:
  • a tensor
  • +
  • an axis (dimension)
  • +
+

Returns:

+
  • a tensor with that axis collapsed, if it was a singleton dimension
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc stack[T](tensors: varargs[Tensor[T]]; axis: Natural = 0): Tensor[T] {.
+    noinit.}
+
+ + Join a sequence of tensors along a new axis into a new tensor. Input:
  • a tensor
  • +
  • an axis (dimension)
  • +
+

Returns:

+
  • a new stacked tensor along the new axis
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc transpose(t: Tensor): Tensor {.noinit, noSideEffect, inline.}
+
+ +

Transpose a Tensor.

+

For N-d Tensor with shape (0, 1, 2 ... n-1) the resulting tensor will have shape (n-1, ... 2, 1, 0)

+

Data is not copied or modified, only metadata is modified.

+ +   Source +Edit + +
+
+ +
+
+
+
func unsqueeze(t: Tensor; axis: Natural): Tensor {.noinit.}
+
+ + Insert a new axis just before the given axis, increasing the tensor dimension (rank) by 1 Input:
  • a tensor
  • +
  • an axis (dimension)
  • +
+

Returns:

+
  • a tensor with that new axis
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template bc(t: (Tensor | SomeNumber); shape: Metadata): untyped
+
+ + Alias for broadcast +   Source +Edit + +
+
+
+
template bc(t: (Tensor | SomeNumber); shape: varargs[int]): untyped
+
+ + Alias for broadcast +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/shapeshifting.idx b/shapeshifting.idx new file mode 100644 index 000000000..f47e89e42 --- /dev/null +++ b/shapeshifting.idx @@ -0,0 +1,32 @@ +nimTitle shapeshifting shapeshifting.html module src/arraymancer/tensor/shapeshifting 0 +nim transpose shapeshifting.html#transpose,Tensor proc transpose(t: Tensor): Tensor 27 +nim asContiguous shapeshifting.html#asContiguous,Tensor[T],OrderType,bool proc asContiguous[T](t: Tensor[T]; layout: OrderType = rowMajor; force: bool = false): Tensor[\n T] 38 +nim reshape shapeshifting.html#reshape,Tensor,varargs[int] proc reshape(t: Tensor; new_shape: varargs[int]): Tensor 59 +nim reshape shapeshifting.html#reshape,Tensor,Metadata proc reshape(t: Tensor; new_shape: Metadata): Tensor 71 +nim flatten shapeshifting.html#flatten,Tensor proc flatten(t: Tensor): Tensor 83 +nim broadcast shapeshifting.html#broadcast,Tensor[T],varargs[int] proc broadcast[T](t: Tensor[T]; shape: varargs[int]): Tensor[T] 96 +nim broadcast shapeshifting.html#broadcast,Tensor[T],Metadata proc broadcast[T](t: Tensor[T]; shape: Metadata): Tensor[T] 108 +nim broadcast shapeshifting.html#broadcast,T,varargs[int] proc broadcast[T: SomeNumber](val: T; shape: varargs[int]): Tensor[T] 120 +nim broadcast shapeshifting.html#broadcast,T,Metadata proc broadcast[T: SomeNumber](val: T; shape: Metadata): Tensor[T] 141 +nim bc shapeshifting.html#bc.t,,varargs[int] template bc(t: (Tensor | SomeNumber); shape: varargs[int]): untyped 162 +nim bc shapeshifting.html#bc.t,,Metadata template bc(t: (Tensor | SomeNumber); shape: Metadata): untyped 166 +nim broadcast2 shapeshifting.html#broadcast2,Tensor[T],Tensor[T] proc broadcast2[T](a, b: Tensor[T]): tuple[a, b: Tensor[T]] 170 +nim permute shapeshifting.html#permute,Tensor,varargs[int] proc permute(t: Tensor; dims: varargs[int]): Tensor 191 +nim moveaxis shapeshifting.html#moveaxis,Tensor,Natural,Natural proc moveaxis(t: Tensor; initial: Natural; target: Natural): Tensor 212 +nim concat 
shapeshifting.html#concat,varargs[Tensor[T]],int proc concat[T](t_list: varargs[Tensor[T]]; axis: int): Tensor[T] 243 +nim append shapeshifting.html#append,Tensor[T],Tensor[T] proc append[T](t: Tensor[T]; values: Tensor[T]): Tensor[T] 279 +nim append shapeshifting.html#append,Tensor[T],openArray[T] proc append[T](t: Tensor[T]; values: openArray[T]): Tensor[T] 316 +nim squeeze shapeshifting.html#squeeze,AnyTensor proc squeeze(t: AnyTensor): AnyTensor 349 +nim squeeze shapeshifting.html#squeeze,Tensor,Natural proc squeeze(t: Tensor; axis: Natural): Tensor 358 +nim unsqueeze shapeshifting.html#unsqueeze,Tensor,Natural proc unsqueeze(t: Tensor; axis: Natural): Tensor 368 +nim stack shapeshifting.html#stack,varargs[Tensor[T]],Natural proc stack[T](tensors: varargs[Tensor[T]]; axis: Natural = 0): Tensor[T] 379 +nim split shapeshifting.html#split,Tensor[T],Positive,Natural proc split[T](t: Tensor[T]; chunk_size: Positive; axis: Natural): seq[Tensor[T]] 389 +nim chunk shapeshifting.html#chunk,Tensor[T],Positive,Natural proc chunk[T](t: Tensor[T]; nb_chunks: Positive; axis: Natural): seq[Tensor[T]] 405 +nim roll shapeshifting.html#roll,Tensor[T],int proc roll[T](t: Tensor[T]; shift: int): Tensor[T] 435 +nim roll shapeshifting.html#roll,Tensor[T],int,Natural proc roll[T](t: Tensor[T]; shift: int; axis: Natural): Tensor[T] 475 +nimgrp squeeze shapeshifting.html#squeeze-procs-all proc 349 +nimgrp roll shapeshifting.html#roll-procs-all proc 435 +nimgrp broadcast shapeshifting.html#broadcast-procs-all proc 96 +nimgrp append shapeshifting.html#append-procs-all proc 279 +nimgrp reshape shapeshifting.html#reshape-procs-all proc 59 +nimgrp bc shapeshifting.html#bc-templates-all template 162 diff --git a/shapeshifting_cuda.html b/shapeshifting_cuda.html new file mode 100644 index 000000000..bba8d66ce --- /dev/null +++ b/shapeshifting_cuda.html @@ -0,0 +1,604 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/shapeshifting_cuda + + + + + + + + + +Arraymancer - 
src/arraymancer/tensor/shapeshifting_cuda + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/shapeshifting_cuda

+
+ +
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc asContiguous[T: SomeFloat](t: CudaTensor[T]; layout: OrderType = colMajor;
+                                force: bool = false): CudaTensor[T] {.
+    noSideEffect.}
+
+ +

Transform a tensor with general striding to a Tensor with contiguous layout.

+

By default CudaTensor will be colMajor (contrary to a cpu tensor).

+

By default nothing is done if the tensor is already contiguous (C Major or F major) The "force" parameter can force re-ordering to a specific layout

+ +   Source +Edit + +
+
+ +
+
+
+
proc broadcast(t: CudaTensor; shape: Metadata): CudaTensor {.noSideEffect.}
+
+ +

Explicitly broadcast a CudaTensor to the specified shape. The returned broadcasted CudaTensor share the underlying data with the input.

+

Dimension(s) of size 1 can be expanded to arbitrary size by replicating values along that dimension.

+

Warning โš : This is a no-copy operation, data is shared with the input. This proc does not guarantee that a let value is immutable. A broadcasted tensor should not be modified and only used for computation.

+ +   Source +Edit + +
+
+
+
proc broadcast(t: CudaTensor; shape: varargs[int]): CudaTensor {.noSideEffect.}
+
+ +

Explicitly broadcast a CudaTensor to the specified shape. The returned broadcasted CudaTensor share the underlying data with the input.

+

Dimension(s) of size 1 can be expanded to arbitrary size by replicating values along that dimension.

+

Warning โš : This is a no-copy operation, data is shared with the input. This proc does not guarantee that a let value is immutable. A broadcasted tensor should not be modified and only used for computation.

+ +   Source +Edit + +
+
+ +
+
+
+
proc broadcast2[T](a, b: CudaTensor[T]): tuple[a, b: CudaTensor[T]] {.
+    noSideEffect.}
+
+ +

Broadcast 2 tensors so they have compatible shapes for element-wise computations.

+

Tensors in the tuple can be accessed with output.a and output.b

+

The returned broadcasted Tensors share the underlying data with the input.

+

Dimension(s) of size 1 can be expanded to arbitrary size by replicating values along that dimension.

+

Warning โš : This is a no-copy operation, data is shared with the input. This proc does not guarantee that a let value is immutable. A broadcasted tensor should not be modified and only used for computation.

+ +   Source +Edit + +
+
+ +
+
+
+
proc reshape(t: CudaTensor; new_shape: varargs[int]): CudaTensor
+
+ +

Reshape a CudaTensor without copy.

+

โš  Reshaping without copy is only possible on contiguous rowMajor Tensors

+ +   Source +Edit + +
+
+ +
+
+
+
proc squeeze(t: CudaTensor; axis: int): CudaTensor {.noSideEffect.}
+
+ + Collapse the given axis, if the dimension is not 1; it does nothing Input:
  • a CudaTensor
  • +
  • an axis (dimension)
  • +
+

Returns:

+
  • a CudaTensor with singleton dimensions collapsed
  • +
+

Warning โš : This is a no-copy operation, data is shared with the input. This proc does not guarantee that a let value is immutable.

+ +   Source +Edit + +
+
+ +
+
+
+
proc transpose(t: CudaTensor): CudaTensor {.noSideEffect.}
+
+ +

Transpose a Tensor.

+

For N-d Tensor with shape (0, 1, 2 ... n-1) the resulting tensor will have shape (n-1, ... 2, 1, 0)

+ +   Source +Edit + +
+
+ +
+
+
+
proc unsqueeze(t: CudaTensor; axis: int): CudaTensor {.noSideEffect.}
+
+ + Insert a new axis just before the given axis, increasing the CudaTensor dimension (rank) by 1
  • a tensor with that new axis
  • +
+

Warning โš : This is a no-copy operation, data is shared with the input. This proc does not guarantee that a let value is immutable.

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/shapeshifting_cuda.idx b/shapeshifting_cuda.idx new file mode 100644 index 000000000..b1e04028f --- /dev/null +++ b/shapeshifting_cuda.idx @@ -0,0 +1,10 @@ +nimTitle shapeshifting_cuda shapeshifting_cuda.html module src/arraymancer/tensor/shapeshifting_cuda 0 +nim transpose shapeshifting_cuda.html#transpose,CudaTensor proc transpose(t: CudaTensor): CudaTensor 24 +nim asContiguous shapeshifting_cuda.html#asContiguous,CudaTensor[T: SomeFloat],OrderType,bool proc asContiguous[T: SomeFloat](t: CudaTensor[T]; layout: OrderType = colMajor;\n force: bool = false): CudaTensor[T] 36 +nim reshape shapeshifting_cuda.html#reshape,CudaTensor,varargs[int] proc reshape(t: CudaTensor; new_shape: varargs[int]): CudaTensor 57 +nim broadcast shapeshifting_cuda.html#broadcast,CudaTensor,varargs[int] proc broadcast(t: CudaTensor; shape: varargs[int]): CudaTensor 65 +nim broadcast shapeshifting_cuda.html#broadcast,CudaTensor,Metadata proc broadcast(t: CudaTensor; shape: Metadata): CudaTensor 79 +nim broadcast2 shapeshifting_cuda.html#broadcast2,CudaTensor[T],CudaTensor[T] proc broadcast2[T](a, b: CudaTensor[T]): tuple[a, b: CudaTensor[T]] 93 +nim squeeze shapeshifting_cuda.html#squeeze,CudaTensor,int proc squeeze(t: CudaTensor; axis: int): CudaTensor 113 +nim unsqueeze shapeshifting_cuda.html#unsqueeze,CudaTensor,int proc unsqueeze(t: CudaTensor; axis: int): CudaTensor 126 +nimgrp broadcast shapeshifting_cuda.html#broadcast-procs-all proc 65 diff --git a/shapeshifting_opencl.html b/shapeshifting_opencl.html new file mode 100644 index 000000000..cf6f9bfd3 --- /dev/null +++ b/shapeshifting_opencl.html @@ -0,0 +1,450 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/shapeshifting_opencl + + + + + + + + + +Arraymancer - src/arraymancer/tensor/shapeshifting_opencl + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/shapeshifting_opencl

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc broadcast2[T](a, b: ClTensor[T]): tuple[a, b: ClTensor[T]] {.noSideEffect,
+    noinit.}
+
+ +

Broadcast 2 tensors so they have compatible shapes for element-wise computations.

+

Tensors in the tuple can be accessed with output.a and output.b

+

The returned broadcasted Tensors share the underlying data with the input.

+

Dimension(s) of size 1 can be expanded to arbitrary size by replicating values along that dimension.

+

Warning โš : This is a no-copy operation, data is shared with the input. This proc does not guarantee that a let value is immutable. A broadcasted tensor should not be modified and only used for computation.

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/shapeshifting_opencl.idx b/shapeshifting_opencl.idx new file mode 100644 index 000000000..ab66a16a7 --- /dev/null +++ b/shapeshifting_opencl.idx @@ -0,0 +1,2 @@ +nimTitle shapeshifting_opencl shapeshifting_opencl.html module src/arraymancer/tensor/shapeshifting_opencl 0 +nim broadcast2 shapeshifting_opencl.html#broadcast2,ClTensor[T],ClTensor[T] proc broadcast2[T](a, b: ClTensor[T]): tuple[a, b: ClTensor[T]] 18 diff --git a/sigmoid.html b/sigmoid.html new file mode 100644 index 000000000..bc8feeff0 --- /dev/null +++ b/sigmoid.html @@ -0,0 +1,474 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/activation/sigmoid + + + + + + + + + +Arraymancer - src/arraymancer/nn/activation/sigmoid + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/activation/sigmoid

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
SigmoidActivation[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc sigmoid[TT](a: Variable[TT]): Variable[TT]
+
+ + Input:
  • A variable
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/sigmoid.idx b/sigmoid.idx new file mode 100644 index 000000000..4a76d61a5 --- /dev/null +++ b/sigmoid.idx @@ -0,0 +1,3 @@ +nimTitle sigmoid sigmoid.html module src/arraymancer/nn/activation/sigmoid 0 +nim SigmoidActivation sigmoid.html#SigmoidActivation type SigmoidActivation 20 +nim sigmoid sigmoid.html#sigmoid,Variable[TT] proc sigmoid[TT](a: Variable[TT]): Variable[TT] 48 diff --git a/simd.html b/simd.html new file mode 100644 index 000000000..556f6ab8d --- /dev/null +++ b/simd.html @@ -0,0 +1,3940 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/laser/simd + + + + + + + + + +Arraymancer - src/arraymancer/laser/simd + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/laser/simd

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Types

+
+
+
m128 {.importc: "__m128", bycopy, header: "<x86intrin.h>".} = object
+  
+
+ + +   Source +Edit + +
+
+
+
m128d {.importc: "__m128d", bycopy, header: "<x86intrin.h>".} = object
+  
+
+ + +   Source +Edit + +
+
+
+
m128i {.importc: "__m128i", bycopy, header: "<x86intrin.h>".} = object
+  
+
+ + +   Source +Edit + +
+
+
+
m256 {.importc: "__m256", bycopy, header: "<x86intrin.h>".} = object
+  
+
+ + +   Source +Edit + +
+
+
+
m256d {.importc: "__m256d", bycopy, header: "<x86intrin.h>".} = object
+  
+
+ + +   Source +Edit + +
+
+
+
m256i {.importc: "__m256i", bycopy, header: "<x86intrin.h>".} = object
+  
+
+ + +   Source +Edit + +
+
+
+
m512 {.importc: "__m512", bycopy, header: "<x86intrin.h>".} = object
+  
+
+ + +   Source +Edit + +
+
+
+
m512d {.importc: "__m512d", bycopy, header: "<x86intrin.h>".} = object
+  
+
+ + +   Source +Edit + +
+
+
+
m512i {.importc: "__m512i", bycopy, header: "<x86intrin.h>".} = object
+  
+
+ + +   Source +Edit + +
+
+
+
mmask16 {.importc: "__mmask16", bycopy, header: "<x86intrin.h>".} = distinct uint16
+
+ + +   Source +Edit + +
+
+
+
mmask64 {.importc: "__mmask64", bycopy, header: "<x86intrin.h>".} = distinct uint64
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
func cvtmask64_u64(a: mmask64): uint64 {.importc: "_cvtmask64_u64", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_add_epi8(a, b: m256i): m256i {.importc: "_mm256_add_epi8", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_add_epi16(a, b: m256i): m256i {.importc: "_mm256_add_epi16", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_add_epi32(a, b: m256i): m256i {.importc: "_mm256_add_epi32", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_add_epi64(a, b: m256i): m256i {.importc: "_mm256_add_epi64", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_add_pd(a, b: m256d): m256d {.importc: "_mm256_add_pd", nodecl,
+                                        header: "<x86intrin.h>", ...raises: [],
+                                        tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_add_ps(a, b: m256): m256 {.importc: "_mm256_add_ps", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_and_ps(a, b: m256): m256 {.importc: "_mm256_and_ps", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + Bitwise and +   Source +Edit + +
+
+ +
+
+
+
func mm256_and_si256(a, b: m256i): m256i {.importc: "_mm256_and_si256", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Bitwise and +   Source +Edit + +
+
+ +
+
+
+
func mm256_castps256_ps128(a: m256): m128 {.importc: "_mm256_castps256_ps128",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Returns the lower part of a m256 in a m128 +   Source +Edit + +
+
+ +
+
+
+
func mm256_castps_si256(a: m256): m256i {.importc: "_mm256_castps_si256",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Cast a float32x8 vectors into a 256-bit int vector with the same bit pattern +   Source +Edit + +
+
+ +
+
+
+
func mm256_castsi256_ps(a: m256i): m256 {.importc: "_mm256_castsi256_ps",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Cast a 256-bit int vector into a float32x8 vector with the same bit pattern +   Source +Edit + +
+
+ +
+
+
+
func mm256_cmpgt_epi32(a, b: m256i): m256i {.importc: "_mm256_cmpgt_epi32",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Compare a greater than b +   Source +Edit + +
+
+ +
+
+
+
func mm256_cvtepi32_ps(a: m256i): m256 {.importc: "_mm256_cvtepi32_ps", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Convert a int32x8 to float32x8 +   Source +Edit + +
+
+ +
+
+
+
func mm256_cvtps_epi32(a: m256): m256i {.importc: "_mm256_cvtps_epi32", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Convert a float32x8 to int32x8 +   Source +Edit + +
+
+ +
+
+
+
func mm256_extractf128_ps(v: m256; m: cint{lit}): m128 {.
+    importc: "_mm256_extractf128_ps", nodecl, header: "<x86intrin.h>",
+    ...raises: [], tags: [], forbids: [].}
+
+ + Extracts the low part (m = 0) or high part (m = 1) of a m256 into a m128 m must be a literal +   Source +Edit + +
+
+ +
+
+
+
func mm256_fmadd_pd(a, b, c: m256d): m256d {.importc: "_mm256_fmadd_pd", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_fmadd_ps(a, b, c: m256): m256 {.importc: "_mm256_fmadd_ps", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_i32gather_epi32(m: ptr (uint32 or int32); i: m256i; s: int32): m256i {.
+    importc: "_mm256_i32gather_epi32", nodecl, header: "<x86intrin.h>",
+    ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_load_pd(aligned_mem_addr: ptr float64): m256d {.
+    importc: "_mm256_load_pd", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_load_ps(aligned_mem_addr: ptr float32): m256 {.
+    importc: "_mm256_load_ps", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_load_si256(mem_addr: ptr m256i): m256i {.
+    importc: "_mm256_load_si256", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_loadu_pd(mem_addr: ptr float64): m256d {.importc: "_mm256_loadu_pd",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_loadu_ps(mem_addr: ptr float32): m256 {.importc: "_mm256_loadu_ps",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_loadu_si256(mem_addr: ptr m256i): m256i {.
+    importc: "_mm256_loadu_si256", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_max_ps(a, b: m256): m256 {.importc: "_mm256_max_ps", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_min_ps(a, b: m256): m256 {.importc: "_mm256_min_ps", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_movemask_epi8(a: m256i): int32 {.importc: "_mm256_movemask_epi8",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Returns the most significant bit of each 8-bit elements in a +   Source +Edit + +
+
+ +
+
+
+
func mm256_mul_epu32(a: m256i; b: m256i): m256i {.importc: "_mm256_mul_epu32",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ +

From a = {a3_hi, a3_lo, a2_hi, a2_lo, a1_hi, a1_lo, a0_hi, a0_lo} with a3, a2, a1, a0 being 64-bit number and b = {b3_hi, b3_lo, b2_hi, b2_lo, b1_hi, b1_lo, b0_hi, b0_lo}

+

Result = {a3_lo * b3_lo, a2_lo * b2_lo, a1_lo * b1_lo, a0_lo * b0_lo}. This is an extended precision multiplication 32x32 -> 64

+ +   Source +Edit + +
+
+ +
+
+
+
func mm256_mul_pd(a, b: m256d): m256d {.importc: "_mm256_mul_pd", nodecl,
+                                        header: "<x86intrin.h>", ...raises: [],
+                                        tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_mul_ps(a, b: m256): m256 {.importc: "_mm256_mul_ps", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_mullo_epi16(a, b: m256i): m256i {.importc: "_mm256_mullo_epi16",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Multiply element-wise 2 vectors of 16 16-bit ints into intermediate 16 32-bit ints, and keep the low 16-bit parts +   Source +Edit + +
+
+ +
+
+
+
func mm256_mullo_epi32(a, b: m256i): m256i {.importc: "_mm256_mullo_epi32",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Multiply element-wise 2 vectors of 8x 32-bit ints into intermediate 8x 64-bit ints, and keep the low 32-bit parts +   Source +Edit + +
+
+ +
+
+
+
func mm256_or_ps(a, b: m256): m256 {.importc: "_mm256_or_ps", nodecl,
+                                     header: "<x86intrin.h>", ...raises: [],
+                                     tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_set1_epi8(a: int8 or uint8): m256i {.importc: "_mm256_set1_epi8",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_set1_epi16(a: int16 or uint16): m256i {.importc: "_mm256_set1_epi16",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_set1_epi32(a: int32 or uint32): m256i {.importc: "_mm256_set1_epi32",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_set1_epi64x(a: int64 or uint64): m256i {.
+    importc: "_mm256_set1_epi64x", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_set1_pd(a: float64): m256d {.importc: "_mm256_set1_pd", nodecl,
+                                        header: "<x86intrin.h>", ...raises: [],
+                                        tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_set1_ps(a: float32): m256 {.importc: "_mm256_set1_ps", nodecl,
+                                       header: "<x86intrin.h>", ...raises: [],
+                                       tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_setzero_pd(): m256d {.importc: "_mm256_setzero_pd", nodecl,
+                                 header: "<x86intrin.h>", ...raises: [], tags: [],
+                                 forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_setzero_ps(): m256 {.importc: "_mm256_setzero_ps", nodecl,
+                                header: "<x86intrin.h>", ...raises: [], tags: [],
+                                forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_setzero_si256(): m256i {.importc: "_mm256_setzero_si256", nodecl,
+                                    header: "<x86intrin.h>", ...raises: [],
+                                    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_shuffle_epi32(a: m256i; imm8: cint): m256i {.
+    importc: "_mm256_shuffle_epi32", nodecl, header: "<x86intrin.h>",
+    ...raises: [], tags: [], forbids: [].}
+
+ + Shuffle 32-bit integers in a according to the control in imm8 Formula is in big endian representation a = {hi[a7, a6, a5, a4, loa3, a2, a1, a0} dst = {d7, d6, d5, d4, d3, d2, d1, d0} imm8 = {bits76, bits54, bits32, bits10} d0 will refer a.lobits10 d1 a.lobits32 ... d4 will refer a.hibits10 d5 a.hibits32 +   Source +Edit + +
+
+ +
+
+
+
func mm256_slli_epi32(a: m256i; count: int32): m256i {.
+    importc: "_mm256_slli_epi32", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_srli_epi32(a: m256i; count: int32): m256i {.
+    importc: "_mm256_srli_epi32", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_srli_epi64(a: m256i; imm8: cint): m256i {.
+    importc: "_mm256_srli_epi64", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + Logical right shift +   Source +Edit + +
+
+ +
+
+
+
func mm256_store_pd(mem_addr: ptr float64; a: m256d) {.
+    importc: "_mm256_store_pd", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_store_ps(mem_addr: ptr float32; a: m256) {.
+    importc: "_mm256_store_ps", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_storeu_pd(mem_addr: ptr float64; a: m256d) {.
+    importc: "_mm256_storeu_pd", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_storeu_ps(mem_addr: ptr float32; a: m256) {.
+    importc: "_mm256_storeu_ps", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_storeu_si256(mem_addr: ptr m256i; a: m256i) {.
+    importc: "_mm256_storeu_si256", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm256_sub_ps(a, b: m256): m256 {.importc: "_mm256_sub_ps", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_add_epi8(a, b: m512i): m512i {.importc: "_mm512_add_epi8", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_add_epi16(a, b: m512i): m512i {.importc: "_mm512_add_epi16", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_add_epi32(a, b: m512i): m512i {.importc: "_mm512_add_epi32", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_add_epi64(a, b: m512i): m512i {.importc: "_mm512_add_epi64", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_add_pd(a, b: m512d): m512d {.importc: "_mm512_add_pd", nodecl,
+                                        header: "<x86intrin.h>", ...raises: [],
+                                        tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_add_ps(a, b: m512): m512 {.importc: "_mm512_add_ps", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_and_si512(a, b: m512i): m512i {.importc: "_mm512_and_si512", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Bitwise and +   Source +Edit + +
+
+ +
+
+
+
func mm512_castps_si512(a: m512): m512i {.importc: "_mm512_castps_si512",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Cast a float32x16 vectors into a 512-bit int vector with the same bit pattern +   Source +Edit + +
+
+ +
+
+
+
func mm512_castsi512_ps(a: m512i): m512 {.importc: "_mm512_castsi512_ps",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Cast a 512-bit int vector into a float32x16 vector with the same bit pattern +   Source +Edit + +
+
+ +
+
+
+
func mm512_cmpgt_epi32_mask(a, b: m512i): mmask16 {.
+    importc: "_mm512_cmpgt_epi32_mask", nodecl, header: "<x86intrin.h>",
+    ...raises: [], tags: [], forbids: [].}
+
+ + Compare a greater than b, returns a 16-bit mask +   Source +Edit + +
+
+ +
+
+
+
func mm512_cvtepi32_ps(a: m512i): m512 {.importc: "_mm512_cvtepi32_ps", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Convert a int32x8 to float32x16 +   Source +Edit + +
+
+ +
+
+
+
func mm512_cvtps_epi32(a: m512): m512i {.importc: "_mm512_cvtps_epi32", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Convert a float32x16 to int32x8 +   Source +Edit + +
+
+ +
+
+
+
func mm512_fmadd_pd(a, b, c: m512d): m512d {.importc: "_mm512_fmadd_pd", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_fmadd_ps(a, b, c: m512): m512 {.importc: "_mm512_fmadd_ps", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_i32gather_epi32(i: m512i; m: ptr (uint32 or int32); s: int32): m512i {.
+    importc: "_mm512_i32gather_epi32", nodecl, header: "<x86intrin.h>",
+    ...raises: [], tags: [], forbids: [].}
+
+ + ย Warning โš : Argument are switched compared to mm256_i32gather_epi32 +   Source +Edit + +
+
+ +
+
+
+
func mm512_load_pd(aligned_mem_addr: ptr float64): m512d {.
+    importc: "_mm512_load_pd", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_load_ps(aligned_mem_addr: ptr float32): m512 {.
+    importc: "_mm512_load_ps", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_load_si512(mem_addr: ptr SomeInteger): m512i {.
+    importc: "_mm512_load_si512", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_loadu_pd(mem_addr: ptr float64): m512d {.importc: "_mm512_loadu_pd",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_loadu_ps(mem_addr: ptr float32): m512 {.importc: "_mm512_loadu_ps",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_loadu_si512(mem_addr: ptr SomeInteger): m512i {.
+    importc: "_mm512_loadu_si512", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_maskz_set1_epi32(k: mmask16; a: cint): m512i {.
+    importc: "_mm512_maskz_set1_epi32", nodecl, header: "<x86intrin.h>",
+    ...raises: [], tags: [], forbids: [].}
+
+ + Compare a greater than b Broadcast 32-bit integer a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +   Source +Edit + +
+
+ +
+
+
+
func mm512_max_ps(a, b: m512): m512 {.importc: "_mm512_max_ps", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_min_ps(a, b: m512): m512 {.importc: "_mm512_min_ps", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_movepi8_mask(a: m512i): mmask64 {.importc: "_mm512_movepi8_mask",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Returns the most significant bit of each 8-bit elements in a +   Source +Edit + +
+
+ +
+
+
+
func mm512_movm_epi32(a: mmask16): m512i {.importc: "_mm512_movm_epi32", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_mul_pd(a, b: m512d): m512d {.importc: "_mm512_mul_pd", nodecl,
+                                        header: "<x86intrin.h>", ...raises: [],
+                                        tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_mul_ps(a, b: m512): m512 {.importc: "_mm512_mul_ps", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_mullo_epi32(a, b: m512i): m512i {.importc: "_mm512_mullo_epi32",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Multiply element-wise 2 vectors of 16 32-bit ints into intermediate 16 32-bit ints, and keep the low 32-bit parts +   Source +Edit + +
+
+ +
+
+
+
func mm512_mullo_epi64(a, b: m512i): m512i {.importc: "_mm512_mullo_epi64",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Multiply element-wise 2 vectors of 8x 64-bit ints into intermediate 8x 64-bit ints, and keep the low 64-bit parts +   Source +Edit + +
+
+ +
+
+
+
func mm512_or_ps(a, b: m512): m512 {.importc: "_mm512_or_ps", nodecl,
+                                     header: "<x86intrin.h>", ...raises: [],
+                                     tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_set1_epi8(a: int8 or uint8): m512i {.importc: "_mm512_set1_epi8",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_set1_epi16(a: int16 or uint16): m512i {.importc: "_mm512_set1_epi16",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_set1_epi32(a: int32 or uint32): m512i {.importc: "_mm512_set1_epi32",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_set1_epi64(a: int64 or uint64): m512i {.importc: "_mm512_set1_epi64",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_set1_pd(a: float64): m512d {.importc: "_mm512_set1_pd", nodecl,
+                                        header: "<x86intrin.h>", ...raises: [],
+                                        tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_set1_ps(a: float32): m512 {.importc: "_mm512_set1_ps", nodecl,
+                                       header: "<x86intrin.h>", ...raises: [],
+                                       tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_setzero_pd(): m512d {.importc: "_mm512_setzero_pd", nodecl,
+                                 header: "<x86intrin.h>", ...raises: [], tags: [],
+                                 forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_setzero_ps(): m512 {.importc: "_mm512_setzero_ps", nodecl,
+                                header: "<x86intrin.h>", ...raises: [], tags: [],
+                                forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_setzero_si512(): m512i {.importc: "_mm512_setzero_si512", nodecl,
+                                    header: "<x86intrin.h>", ...raises: [],
+                                    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_slli_epi32(a: m512i; count: int32): m512i {.
+    importc: "_mm512_slli_epi32", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_srli_epi32(a: m512i; count: int32): m512i {.
+    importc: "_mm512_srli_epi32", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_store_pd(mem_addr: ptr float64; a: m512d) {.
+    importc: "_mm512_store_pd", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_store_ps(mem_addr: ptr float32; a: m512) {.
+    importc: "_mm512_store_ps", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_storeu_pd(mem_addr: ptr float64; a: m512d) {.
+    importc: "_mm512_storeu_pd", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_storeu_ps(mem_addr: ptr float32; a: m512) {.
+    importc: "_mm512_storeu_ps", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_storeu_si512(mem_addr: ptr SomeInteger; a: m512i) {.
+    importc: "_mm512_storeu_si512", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm512_sub_ps(a, b: m512): m512 {.importc: "_mm512_sub_ps", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_add_epi8(a, b: m128i): m128i {.importc: "_mm_add_epi8", nodecl,
+                                       header: "<x86intrin.h>", ...raises: [],
+                                       tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_add_epi16(a, b: m128i): m128i {.importc: "_mm_add_epi16", nodecl,
+                                        header: "<x86intrin.h>", ...raises: [],
+                                        tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_add_epi32(a, b: m128i): m128i {.importc: "_mm_add_epi32", nodecl,
+                                        header: "<x86intrin.h>", ...raises: [],
+                                        tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_add_epi64(a, b: m128i): m128i {.importc: "_mm_add_epi64", nodecl,
+                                        header: "<x86intrin.h>", ...raises: [],
+                                        tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_add_pd(a, b: m128d): m128d {.importc: "_mm_add_pd", nodecl,
+                                     header: "<x86intrin.h>", ...raises: [],
+                                     tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_add_ps(a, b: m128): m128 {.importc: "_mm_add_ps", nodecl,
+                                   header: "<x86intrin.h>", ...raises: [],
+                                   tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_add_ss(a, b: m128): m128 {.importc: "_mm_add_ss", nodecl,
+                                   header: "<x86intrin.h>", ...raises: [],
+                                   tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_and_si128(a, b: m128i): m128i {.importc: "_mm_and_si128", nodecl,
+                                        header: "<x86intrin.h>", ...raises: [],
+                                        tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_castps_si128(a: m128): m128i {.importc: "_mm_castps_si128", nodecl,
+                                       header: "<x86intrin.h>", ...raises: [],
+                                       tags: [], forbids: [].}
+
+ + Cast a float32x4 vectors into a 128-bit int vector with the same bit pattern +   Source +Edit + +
+
+ +
+
+
+
func mm_castsi128_ps(a: m128i): m128 {.importc: "_mm_castsi128_ps", nodecl,
+                                       header: "<x86intrin.h>", ...raises: [],
+                                       tags: [], forbids: [].}
+
+ + Cast a 128-bit int vector into a float32x8 vector with the same bit pattern +   Source +Edit + +
+
+ +
+
+
+
func mm_cmpgt_epi32(a, b: m128i): m128i {.importc: "_mm_cmpgt_epi32", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Compare a greater than b +   Source +Edit + +
+
+ +
+
+
+
func mm_cvtepi32_ps(a: m128i): m128 {.importc: "_mm_cvtepi32_ps", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + Convert a int32x4 to float32x4 +   Source +Edit + +
+
+ +
+
+
+
func mm_cvtps_epi32(a: m128): m128i {.importc: "_mm_cvtps_epi32", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + Convert a float32x4 to int32x4 +   Source +Edit + +
+
+ +
+
+
+
func mm_cvtsi128_si32(a: m128i): cint {.importc: "_mm_cvtsi128_si32", nodecl,
+                                        header: "<x86intrin.h>", ...raises: [],
+                                        tags: [], forbids: [].}
+
+ + Copy the low part of a to int32 +   Source +Edit + +
+
+ +
+
+
+
func mm_cvtss_f32(a: m128): float32 {.importc: "_mm_cvtss_f32", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + Extract the low part of the input Input: { A0, A1, A2, A3 } Result: A0 +   Source +Edit + +
+
+ +
+
+
+
func mm_extract_epi16(a: m128i; imm8: cint): cint {.
+    importc: "_mm_extract_epi16", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + Extract an int16 from a, selected with imm8 ย and store it in the lower part of destination (padded with zeroes) +   Source +Edit + +
+
+ +
+
+
+
func mm_i32gather_epi32(m: ptr (uint32 or int32); i: m128i; s: int32): m128i {.
+    importc: "_mm_i32gather_epi32", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_load_pd(aligned_mem_addr: ptr float64): m128d {.importc: "_mm_load_pd",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_load_ps(aligned_mem_addr: ptr float32): m128 {.importc: "_mm_load_ps",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_load_si128(mem_addr: ptr m128i): m128i {.importc: "_mm_load_si128",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_load_ss(aligned_mem_addr: ptr float32): m128 {.importc: "_mm_load_ss",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_loadu_pd(mem_addr: ptr float64): m128d {.importc: "_mm_loadu_pd",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_loadu_ps(data: ptr float32): m128 {.importc: "_mm_loadu_ps", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_loadu_si128(mem_addr: ptr m128i): m128i {.importc: "_mm_loadu_si128",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_max_ps(a, b: m128): m128 {.importc: "_mm_max_ps", nodecl,
+                                   header: "<x86intrin.h>", ...raises: [],
+                                   tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_max_ss(a, b: m128): m128 {.importc: "_mm_max_ss", nodecl,
+                                   header: "<x86intrin.h>", ...raises: [],
+                                   tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_min_ps(a, b: m128): m128 {.importc: "_mm_min_ps", nodecl,
+                                   header: "<x86intrin.h>", ...raises: [],
+                                   tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_min_ss(a, b: m128): m128 {.importc: "_mm_min_ss", nodecl,
+                                   header: "<x86intrin.h>", ...raises: [],
+                                   tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_movehdup_ps(a: m128): m128 {.importc: "_mm_movehdup_ps", nodecl,
+                                     header: "<x86intrin.h>", ...raises: [],
+                                     tags: [], forbids: [].}
+
+ + Duplicates high parts of the input Input: { A0, A1, A2, A3 } Result: { A1, A1, A3, A3 } +   Source +Edit + +
+
+ +
+
+
+
func mm_movehl_ps(a, b: m128): m128 {.importc: "_mm_movehl_ps", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + Input: { A0, A1, A2, A3 }, { B0, B1, B2, B3 } Result: { B2, B3, A2, A3 } +   Source +Edit + +
+
+ +
+
+
+
func mm_moveldup_ps(a: m128): m128 {.importc: "_mm_moveldup_ps", nodecl,
+                                     header: "<x86intrin.h>", ...raises: [],
+                                     tags: [], forbids: [].}
+
+ + Duplicates low parts of the input Input: { A0, A1, A2, A3 } Result: { A0, A0, A2, A2 } +   Source +Edit + +
+
+ +
+
+
+
func mm_movelh_ps(a, b: m128): m128 {.importc: "_mm_movelh_ps", nodecl,
+                                      header: "<x86intrin.h>", ...raises: [],
+                                      tags: [], forbids: [].}
+
+ + Input: { A0, A1, A2, A3 }, { B0, B1, B2, B3 } Result: { A0, A1, B0, B1 } +   Source +Edit + +
+
+ +
+
+
+
func mm_movemask_epi8(a: m128i): int32 {.importc: "_mm_movemask_epi8", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Returns the most significant bit ย of each 8-bit elements in a +   Source +Edit + +
+
+ +
+
+
+
func mm_mul_epu32(a: m128i; b: m128i): m128i {.importc: "_mm_mul_epu32", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ +

From a = {a1_hi, a1_lo, a0_hi, a0_lo} with a1 and a0 being 64-bit number and b = {b1_hi, b1_lo, b0_hi, b0_lo}

+

Result = {a1_lo * b1_lo, a0_lo * b0_lo}. This is an extended precision multiplication 32x32 -> 64

+ +   Source +Edit + +
+
+ +
+
+
+
func mm_mul_pd(a, b: m128d): m128d {.importc: "_mm_mul_pd", nodecl,
+                                     header: "<x86intrin.h>", ...raises: [],
+                                     tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_mul_ps(a, b: m128): m128 {.importc: "_mm_mul_ps", nodecl,
+                                   header: "<x86intrin.h>", ...raises: [],
+                                   tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_mullo_epi16(a, b: m128i): m128i {.importc: "_mm_mullo_epi16", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Multiply element-wise 2 vectors of 8 16-bit ints into intermediate 8 32-bit ints, and keep the low 16-bit parts +   Source +Edit + +
+
+ +
+
+
+
func mm_mullo_epi32(a, b: m128i): m128i {.importc: "_mm_mullo_epi32", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Multiply element-wise 2 vectors of 4 32-bit ints into intermediate 4 64-bit ints, and keep the low 32-bit parts +   Source +Edit + +
+
+ +
+
+
+
func mm_or_ps(a, b: m128): m128 {.importc: "_mm_or_ps", nodecl,
+                                  header: "<x86intrin.h>", ...raises: [], tags: [],
+                                  forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_or_si128(a, b: m128i): m128i {.importc: "_mm_or_si128", nodecl,
+                                       header: "<x86intrin.h>", ...raises: [],
+                                       tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_set1_epi8(a: int8 or uint8): m128i {.importc: "_mm_set1_epi8", nodecl,
+    header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_set1_epi16(a: int16 or uint16): m128i {.importc: "_mm_set1_epi16",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_set1_epi32(a: int32 or uint32): m128i {.importc: "_mm_set1_epi32",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_set1_epi64x(a: int64 or uint64): m128i {.importc: "_mm_set1_epi64x",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_set1_pd(a: float64): m128d {.importc: "_mm_set1_pd", nodecl,
+                                     header: "<x86intrin.h>", ...raises: [],
+                                     tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_set1_ps(a: float32): m128 {.importc: "_mm_set1_ps", nodecl,
+                                    header: "<x86intrin.h>", ...raises: [],
+                                    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_set_epi32(e3, e2, e1, e0: cint): m128i {.importc: "_mm_set_epi32",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Initialize m128i with {e3, e2, e1, e0} (big endian order) Storing it will yield e0, e1, e2, e3 +   Source +Edit + +
+
+ +
+
+
+
func mm_setzero_pd(): m128d {.importc: "_mm_setzero_pd", nodecl,
+                              header: "<x86intrin.h>", ...raises: [], tags: [],
+                              forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_setzero_ps(): m128 {.importc: "_mm_setzero_ps", nodecl,
+                             header: "<x86intrin.h>", ...raises: [], tags: [],
+                             forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_setzero_si128(): m128i {.importc: "_mm_setzero_si128", nodecl,
+                                 header: "<x86intrin.h>", ...raises: [], tags: [],
+                                 forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_shuffle_epi32(a: m128i; imm8: cint): m128i {.
+    importc: "_mm_shuffle_epi32", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + Shuffle 32-bit integers in a according to the control in imm8 Formula is in big endian representation a = {a3, a2, a1, a0} dst = {d3, d2, d1, d0} imm8 = {bits76, bits54, bits32, bits10} d0 will refer abits10 d1 abits32 +   Source +Edit + +
+
+ +
+
+
+
func mm_slli_epi32(a: m128i; count: int32): m128i {.importc: "_mm_slli_epi32",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_slli_epi64(a: m128i; imm8: cint): m128i {.importc: "_mm_slli_epi64",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Shift 2xint64 left +   Source +Edit + +
+
+ +
+
+
+
func mm_srli_epi32(a: m128i; count: int32): m128i {.importc: "_mm_srli_epi32",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_srli_epi64(a: m128i; imm8: cint): m128i {.importc: "_mm_srli_epi64",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + Shift 2xint64 right +   Source +Edit + +
+
+ +
+
+
+
func mm_store_pd(mem_addr: ptr float64; a: m128d) {.importc: "_mm_store_pd",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_store_ps(mem_addr: ptr float32; a: m128) {.importc: "_mm_store_ps",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_storeu_pd(mem_addr: ptr float64; a: m128d) {.importc: "_mm_storeu_pd",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_storeu_ps(mem_addr: ptr float32; a: m128) {.importc: "_mm_storeu_ps",
+    nodecl, header: "<x86intrin.h>", ...raises: [], tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_storeu_si128(mem_addr: ptr m128i; a: m128i) {.
+    importc: "_mm_storeu_si128", nodecl, header: "<x86intrin.h>", ...raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_sub_pd(a, b: m128d): m128d {.importc: "_mm_sub_pd", nodecl,
+                                     header: "<x86intrin.h>", ...raises: [],
+                                     tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
func mm_sub_ps(a, b: m128): m128 {.importc: "_mm_sub_ps", nodecl,
+                                   header: "<x86intrin.h>", ...raises: [],
+                                   tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/simd.idx b/simd.idx new file mode 100644 index 000000000..41407255b --- /dev/null +++ b/simd.idx @@ -0,0 +1,175 @@ +nimTitle simd simd.html module src/arraymancer/laser/simd 0 +nim m128 simd.html#m128 object m128 21 +nim m128d simd.html#m128d object m128d 23 +nim m128i simd.html#m128i object m128i 25 +nim m256 simd.html#m256 object m256 27 +nim m256d simd.html#m256d object m256d 29 +nim m256i simd.html#m256i object m256i 31 +nim m512 simd.html#m512 object m512 33 +nim m512d simd.html#m512d object m512d 35 +nim m512i simd.html#m512i object m512i 37 +nim mmask16 simd.html#mmask16 type mmask16 39 +nim mmask64 simd.html#mmask64 type mmask64 40 +nim mm_setzero_ps simd.html#mm_setzero_ps proc mm_setzero_ps(): m128 48 +nim mm_set1_ps simd.html#mm_set1_ps,float32 proc mm_set1_ps(a: float32): m128 49 +nim mm_load_ps simd.html#mm_load_ps,ptr.float32 proc mm_load_ps(aligned_mem_addr: ptr float32): m128 50 +nim mm_loadu_ps simd.html#mm_loadu_ps,ptr.float32 proc mm_loadu_ps(data: ptr float32): m128 51 +nim mm_store_ps simd.html#mm_store_ps,ptr.float32,m128 proc mm_store_ps(mem_addr: ptr float32; a: m128) 52 +nim mm_storeu_ps simd.html#mm_storeu_ps,ptr.float32,m128 proc mm_storeu_ps(mem_addr: ptr float32; a: m128) 53 +nim mm_add_ps simd.html#mm_add_ps,m128,m128 proc mm_add_ps(a, b: m128): m128 54 +nim mm_sub_ps simd.html#mm_sub_ps,m128,m128 proc mm_sub_ps(a, b: m128): m128 55 +nim mm_mul_ps simd.html#mm_mul_ps,m128,m128 proc mm_mul_ps(a, b: m128): m128 56 +nim mm_max_ps simd.html#mm_max_ps,m128,m128 proc mm_max_ps(a, b: m128): m128 57 +nim mm_min_ps simd.html#mm_min_ps,m128,m128 proc mm_min_ps(a, b: m128): m128 58 +nim mm_or_ps simd.html#mm_or_ps,m128,m128 proc mm_or_ps(a, b: m128): m128 59 +nim mm_load_ss simd.html#mm_load_ss,ptr.float32 proc mm_load_ss(aligned_mem_addr: ptr float32): m128 67 +nim mm_add_ss simd.html#mm_add_ss,m128,m128 proc mm_add_ss(a, b: m128): m128 68 +nim mm_max_ss simd.html#mm_max_ss,m128,m128 proc mm_max_ss(a, b: m128): m128 69 +nim 
mm_min_ss simd.html#mm_min_ss,m128,m128 proc mm_min_ss(a, b: m128): m128 70 +nim mm_cvtss_f32 simd.html#mm_cvtss_f32,m128 proc mm_cvtss_f32(a: m128): float32 72 +nim mm_movehl_ps simd.html#mm_movehl_ps,m128,m128 proc mm_movehl_ps(a, b: m128): m128 79 +nim mm_movelh_ps simd.html#mm_movelh_ps,m128,m128 proc mm_movelh_ps(a, b: m128): m128 84 +nim mm_setzero_pd simd.html#mm_setzero_pd proc mm_setzero_pd(): m128d 96 +nim mm_set1_pd simd.html#mm_set1_pd,float64 proc mm_set1_pd(a: float64): m128d 97 +nim mm_load_pd simd.html#mm_load_pd,ptr.float64 proc mm_load_pd(aligned_mem_addr: ptr float64): m128d 98 +nim mm_loadu_pd simd.html#mm_loadu_pd,ptr.float64 proc mm_loadu_pd(mem_addr: ptr float64): m128d 99 +nim mm_store_pd simd.html#mm_store_pd,ptr.float64,m128d proc mm_store_pd(mem_addr: ptr float64; a: m128d) 100 +nim mm_storeu_pd simd.html#mm_storeu_pd,ptr.float64,m128d proc mm_storeu_pd(mem_addr: ptr float64; a: m128d) 101 +nim mm_add_pd simd.html#mm_add_pd,m128d,m128d proc mm_add_pd(a, b: m128d): m128d 102 +nim mm_sub_pd simd.html#mm_sub_pd,m128d,m128d proc mm_sub_pd(a, b: m128d): m128d 103 +nim mm_mul_pd simd.html#mm_mul_pd,m128d,m128d proc mm_mul_pd(a, b: m128d): m128d 104 +nim mm_setzero_si128 simd.html#mm_setzero_si128 proc mm_setzero_si128(): m128i 112 +nim mm_set1_epi8 simd.html#mm_set1_epi8 proc mm_set1_epi8(a: int8 or uint8): m128i 113 +nim mm_set1_epi16 simd.html#mm_set1_epi16 proc mm_set1_epi16(a: int16 or uint16): m128i 114 +nim mm_set1_epi32 simd.html#mm_set1_epi32 proc mm_set1_epi32(a: int32 or uint32): m128i 115 +nim mm_set1_epi64x simd.html#mm_set1_epi64x proc mm_set1_epi64x(a: int64 or uint64): m128i 116 +nim mm_load_si128 simd.html#mm_load_si128,ptr.m128i proc mm_load_si128(mem_addr: ptr m128i): m128i 117 +nim mm_loadu_si128 simd.html#mm_loadu_si128,ptr.m128i proc mm_loadu_si128(mem_addr: ptr m128i): m128i 118 +nim mm_storeu_si128 simd.html#mm_storeu_si128,ptr.m128i,m128i proc mm_storeu_si128(mem_addr: ptr m128i; a: m128i) 119 +nim mm_add_epi8 
simd.html#mm_add_epi8,m128i,m128i proc mm_add_epi8(a, b: m128i): m128i 120 +nim mm_add_epi16 simd.html#mm_add_epi16,m128i,m128i proc mm_add_epi16(a, b: m128i): m128i 121 +nim mm_add_epi32 simd.html#mm_add_epi32,m128i,m128i proc mm_add_epi32(a, b: m128i): m128i 122 +nim mm_add_epi64 simd.html#mm_add_epi64,m128i,m128i proc mm_add_epi64(a, b: m128i): m128i 123 +nim mm_or_si128 simd.html#mm_or_si128,m128i,m128i proc mm_or_si128(a, b: m128i): m128i 125 +nim mm_and_si128 simd.html#mm_and_si128,m128i,m128i proc mm_and_si128(a, b: m128i): m128i 126 +nim mm_slli_epi64 simd.html#mm_slli_epi64,m128i,cint proc mm_slli_epi64(a: m128i; imm8: cint): m128i 127 +nim mm_srli_epi64 simd.html#mm_srli_epi64,m128i,cint proc mm_srli_epi64(a: m128i; imm8: cint): m128i 129 +nim mm_srli_epi32 simd.html#mm_srli_epi32,m128i,int32 proc mm_srli_epi32(a: m128i; count: int32): m128i 131 +nim mm_slli_epi32 simd.html#mm_slli_epi32,m128i,int32 proc mm_slli_epi32(a: m128i; count: int32): m128i 132 +nim mm_mullo_epi16 simd.html#mm_mullo_epi16,m128i,m128i proc mm_mullo_epi16(a, b: m128i): m128i 134 +nim mm_shuffle_epi32 simd.html#mm_shuffle_epi32,m128i,cint proc mm_shuffle_epi32(a: m128i; imm8: cint): m128i 138 +nim mm_mul_epu32 simd.html#mm_mul_epu32,m128i,m128i proc mm_mul_epu32(a: m128i; b: m128i): m128i 147 +nim mm_set_epi32 simd.html#mm_set_epi32,cint,cint,cint,cint proc mm_set_epi32(e3, e2, e1, e0: cint): m128i 154 +nim mm_castps_si128 simd.html#mm_castps_si128,m128 proc mm_castps_si128(a: m128): m128i 158 +nim mm_castsi128_ps simd.html#mm_castsi128_ps,m128i proc mm_castsi128_ps(a: m128i): m128 160 +nim mm_cvtps_epi32 simd.html#mm_cvtps_epi32,m128 proc mm_cvtps_epi32(a: m128): m128i 162 +nim mm_cvtepi32_ps simd.html#mm_cvtepi32_ps,m128i proc mm_cvtepi32_ps(a: m128i): m128 164 +nim mm_cmpgt_epi32 simd.html#mm_cmpgt_epi32,m128i,m128i proc mm_cmpgt_epi32(a, b: m128i): m128i 167 +nim mm_cvtsi128_si32 simd.html#mm_cvtsi128_si32,m128i proc mm_cvtsi128_si32(a: m128i): cint 170 +nim mm_extract_epi16 
simd.html#mm_extract_epi16,m128i,cint proc mm_extract_epi16(a: m128i; imm8: cint): cint 173 +nim mm_movemask_epi8 simd.html#mm_movemask_epi8,m128i proc mm_movemask_epi8(a: m128i): int32 177 +nim mm_movehdup_ps simd.html#mm_movehdup_ps,m128 proc mm_movehdup_ps(a: m128): m128 187 +nim mm_moveldup_ps simd.html#mm_moveldup_ps,m128 proc mm_moveldup_ps(a: m128): m128 193 +nim mm_mullo_epi32 simd.html#mm_mullo_epi32,m128i,m128i proc mm_mullo_epi32(a, b: m128i): m128i 206 +nim mm256_setzero_ps simd.html#mm256_setzero_ps proc mm256_setzero_ps(): m256 216 +nim mm256_set1_ps simd.html#mm256_set1_ps,float32 proc mm256_set1_ps(a: float32): m256 217 +nim mm256_load_ps simd.html#mm256_load_ps,ptr.float32 proc mm256_load_ps(aligned_mem_addr: ptr float32): m256 218 +nim mm256_loadu_ps simd.html#mm256_loadu_ps,ptr.float32 proc mm256_loadu_ps(mem_addr: ptr float32): m256 219 +nim mm256_store_ps simd.html#mm256_store_ps,ptr.float32,m256 proc mm256_store_ps(mem_addr: ptr float32; a: m256) 220 +nim mm256_storeu_ps simd.html#mm256_storeu_ps,ptr.float32,m256 proc mm256_storeu_ps(mem_addr: ptr float32; a: m256) 221 +nim mm256_add_ps simd.html#mm256_add_ps,m256,m256 proc mm256_add_ps(a, b: m256): m256 222 +nim mm256_mul_ps simd.html#mm256_mul_ps,m256,m256 proc mm256_mul_ps(a, b: m256): m256 223 +nim mm256_sub_ps simd.html#mm256_sub_ps,m256,m256 proc mm256_sub_ps(a, b: m256): m256 224 +nim mm256_and_ps simd.html#mm256_and_ps,m256,m256 proc mm256_and_ps(a, b: m256): m256 226 +nim mm256_or_ps simd.html#mm256_or_ps,m256,m256 proc mm256_or_ps(a, b: m256): m256 228 +nim mm256_min_ps simd.html#mm256_min_ps,m256,m256 proc mm256_min_ps(a, b: m256): m256 230 +nim mm256_max_ps simd.html#mm256_max_ps,m256,m256 proc mm256_max_ps(a, b: m256): m256 231 +nim mm256_castps256_ps128 simd.html#mm256_castps256_ps128,m256 proc mm256_castps256_ps128(a: m256): m128 232 +nim mm256_extractf128_ps simd.html#mm256_extractf128_ps,m256, proc mm256_extractf128_ps(v: m256; m: cint{lit}): m128 234 +nim mm256_setzero_pd 
simd.html#mm256_setzero_pd proc mm256_setzero_pd(): m256d 244 +nim mm256_set1_pd simd.html#mm256_set1_pd,float64 proc mm256_set1_pd(a: float64): m256d 245 +nim mm256_load_pd simd.html#mm256_load_pd,ptr.float64 proc mm256_load_pd(aligned_mem_addr: ptr float64): m256d 246 +nim mm256_loadu_pd simd.html#mm256_loadu_pd,ptr.float64 proc mm256_loadu_pd(mem_addr: ptr float64): m256d 247 +nim mm256_store_pd simd.html#mm256_store_pd,ptr.float64,m256d proc mm256_store_pd(mem_addr: ptr float64; a: m256d) 248 +nim mm256_storeu_pd simd.html#mm256_storeu_pd,ptr.float64,m256d proc mm256_storeu_pd(mem_addr: ptr float64; a: m256d) 249 +nim mm256_add_pd simd.html#mm256_add_pd,m256d,m256d proc mm256_add_pd(a, b: m256d): m256d 250 +nim mm256_mul_pd simd.html#mm256_mul_pd,m256d,m256d proc mm256_mul_pd(a, b: m256d): m256d 251 +nim mm256_fmadd_ps simd.html#mm256_fmadd_ps,m256,m256,m256 proc mm256_fmadd_ps(a, b, c: m256): m256 259 +nim mm256_fmadd_pd simd.html#mm256_fmadd_pd,m256d,m256d,m256d proc mm256_fmadd_pd(a, b, c: m256d): m256d 260 +nim mm256_setzero_si256 simd.html#mm256_setzero_si256 proc mm256_setzero_si256(): m256i 268 +nim mm256_set1_epi8 simd.html#mm256_set1_epi8 proc mm256_set1_epi8(a: int8 or uint8): m256i 269 +nim mm256_set1_epi16 simd.html#mm256_set1_epi16 proc mm256_set1_epi16(a: int16 or uint16): m256i 270 +nim mm256_set1_epi32 simd.html#mm256_set1_epi32 proc mm256_set1_epi32(a: int32 or uint32): m256i 271 +nim mm256_set1_epi64x simd.html#mm256_set1_epi64x proc mm256_set1_epi64x(a: int64 or uint64): m256i 272 +nim mm256_load_si256 simd.html#mm256_load_si256,ptr.m256i proc mm256_load_si256(mem_addr: ptr m256i): m256i 273 +nim mm256_loadu_si256 simd.html#mm256_loadu_si256,ptr.m256i proc mm256_loadu_si256(mem_addr: ptr m256i): m256i 274 +nim mm256_storeu_si256 simd.html#mm256_storeu_si256,ptr.m256i,m256i proc mm256_storeu_si256(mem_addr: ptr m256i; a: m256i) 275 +nim mm256_castps_si256 simd.html#mm256_castps_si256,m256 proc mm256_castps_si256(a: m256): m256i 277 +nim 
mm256_castsi256_ps simd.html#mm256_castsi256_ps,m256i proc mm256_castsi256_ps(a: m256i): m256 279 +nim mm256_cvtps_epi32 simd.html#mm256_cvtps_epi32,m256 proc mm256_cvtps_epi32(a: m256): m256i 281 +nim mm256_cvtepi32_ps simd.html#mm256_cvtepi32_ps,m256i proc mm256_cvtepi32_ps(a: m256i): m256 283 +nim mm256_add_epi8 simd.html#mm256_add_epi8,m256i,m256i proc mm256_add_epi8(a, b: m256i): m256i 292 +nim mm256_add_epi16 simd.html#mm256_add_epi16,m256i,m256i proc mm256_add_epi16(a, b: m256i): m256i 293 +nim mm256_add_epi32 simd.html#mm256_add_epi32,m256i,m256i proc mm256_add_epi32(a, b: m256i): m256i 294 +nim mm256_add_epi64 simd.html#mm256_add_epi64,m256i,m256i proc mm256_add_epi64(a, b: m256i): m256i 295 +nim mm256_and_si256 simd.html#mm256_and_si256,m256i,m256i proc mm256_and_si256(a, b: m256i): m256i 297 +nim mm256_srli_epi64 simd.html#mm256_srli_epi64,m256i,cint proc mm256_srli_epi64(a: m256i; imm8: cint): m256i 299 +nim mm256_mullo_epi16 simd.html#mm256_mullo_epi16,m256i,m256i proc mm256_mullo_epi16(a, b: m256i): m256i 302 +nim mm256_mullo_epi32 simd.html#mm256_mullo_epi32,m256i,m256i proc mm256_mullo_epi32(a, b: m256i): m256i 306 +nim mm256_shuffle_epi32 simd.html#mm256_shuffle_epi32,m256i,cint proc mm256_shuffle_epi32(a: m256i; imm8: cint): m256i 310 +nim mm256_mul_epu32 simd.html#mm256_mul_epu32,m256i,m256i proc mm256_mul_epu32(a: m256i; b: m256i): m256i 322 +nim mm256_movemask_epi8 simd.html#mm256_movemask_epi8,m256i proc mm256_movemask_epi8(a: m256i): int32 330 +nim mm256_cmpgt_epi32 simd.html#mm256_cmpgt_epi32,m256i,m256i proc mm256_cmpgt_epi32(a, b: m256i): m256i 334 +nim mm256_srli_epi32 simd.html#mm256_srli_epi32,m256i,int32 proc mm256_srli_epi32(a: m256i; count: int32): m256i 337 +nim mm256_slli_epi32 simd.html#mm256_slli_epi32,m256i,int32 proc mm256_slli_epi32(a: m256i; count: int32): m256i 338 +nim mm_i32gather_epi32 simd.html#mm_i32gather_epi32,ptr.,m128i,int32 proc mm_i32gather_epi32(m: ptr (uint32 or int32); i: m128i; s: int32): m128i 340 +nim 
mm256_i32gather_epi32 simd.html#mm256_i32gather_epi32,ptr.,m256i,int32 proc mm256_i32gather_epi32(m: ptr (uint32 or int32); i: m256i; s: int32): m256i 341 +nim mm512_setzero_ps simd.html#mm512_setzero_ps proc mm512_setzero_ps(): m512 349 +nim mm512_set1_ps simd.html#mm512_set1_ps,float32 proc mm512_set1_ps(a: float32): m512 350 +nim mm512_load_ps simd.html#mm512_load_ps,ptr.float32 proc mm512_load_ps(aligned_mem_addr: ptr float32): m512 351 +nim mm512_loadu_ps simd.html#mm512_loadu_ps,ptr.float32 proc mm512_loadu_ps(mem_addr: ptr float32): m512 352 +nim mm512_store_ps simd.html#mm512_store_ps,ptr.float32,m512 proc mm512_store_ps(mem_addr: ptr float32; a: m512) 353 +nim mm512_storeu_ps simd.html#mm512_storeu_ps,ptr.float32,m512 proc mm512_storeu_ps(mem_addr: ptr float32; a: m512) 354 +nim mm512_add_ps simd.html#mm512_add_ps,m512,m512 proc mm512_add_ps(a, b: m512): m512 355 +nim mm512_sub_ps simd.html#mm512_sub_ps,m512,m512 proc mm512_sub_ps(a, b: m512): m512 356 +nim mm512_mul_ps simd.html#mm512_mul_ps,m512,m512 proc mm512_mul_ps(a, b: m512): m512 357 +nim mm512_fmadd_ps simd.html#mm512_fmadd_ps,m512,m512,m512 proc mm512_fmadd_ps(a, b, c: m512): m512 358 +nim mm512_min_ps simd.html#mm512_min_ps,m512,m512 proc mm512_min_ps(a, b: m512): m512 360 +nim mm512_max_ps simd.html#mm512_max_ps,m512,m512 proc mm512_max_ps(a, b: m512): m512 361 +nim mm512_or_ps simd.html#mm512_or_ps,m512,m512 proc mm512_or_ps(a, b: m512): m512 363 +nim mm512_setzero_pd simd.html#mm512_setzero_pd proc mm512_setzero_pd(): m512d 371 +nim mm512_set1_pd simd.html#mm512_set1_pd,float64 proc mm512_set1_pd(a: float64): m512d 372 +nim mm512_load_pd simd.html#mm512_load_pd,ptr.float64 proc mm512_load_pd(aligned_mem_addr: ptr float64): m512d 373 +nim mm512_loadu_pd simd.html#mm512_loadu_pd,ptr.float64 proc mm512_loadu_pd(mem_addr: ptr float64): m512d 374 +nim mm512_store_pd simd.html#mm512_store_pd,ptr.float64,m512d proc mm512_store_pd(mem_addr: ptr float64; a: m512d) 375 +nim mm512_storeu_pd 
simd.html#mm512_storeu_pd,ptr.float64,m512d proc mm512_storeu_pd(mem_addr: ptr float64; a: m512d) 376 +nim mm512_add_pd simd.html#mm512_add_pd,m512d,m512d proc mm512_add_pd(a, b: m512d): m512d 377 +nim mm512_mul_pd simd.html#mm512_mul_pd,m512d,m512d proc mm512_mul_pd(a, b: m512d): m512d 378 +nim mm512_fmadd_pd simd.html#mm512_fmadd_pd,m512d,m512d,m512d proc mm512_fmadd_pd(a, b, c: m512d): m512d 379 +nim mm512_setzero_si512 simd.html#mm512_setzero_si512 proc mm512_setzero_si512(): m512i 387 +nim mm512_set1_epi8 simd.html#mm512_set1_epi8 proc mm512_set1_epi8(a: int8 or uint8): m512i 388 +nim mm512_set1_epi16 simd.html#mm512_set1_epi16 proc mm512_set1_epi16(a: int16 or uint16): m512i 389 +nim mm512_set1_epi32 simd.html#mm512_set1_epi32 proc mm512_set1_epi32(a: int32 or uint32): m512i 390 +nim mm512_set1_epi64 simd.html#mm512_set1_epi64 proc mm512_set1_epi64(a: int64 or uint64): m512i 391 +nim mm512_load_si512 simd.html#mm512_load_si512,ptr.SomeInteger proc mm512_load_si512(mem_addr: ptr SomeInteger): m512i 392 +nim mm512_loadu_si512 simd.html#mm512_loadu_si512,ptr.SomeInteger proc mm512_loadu_si512(mem_addr: ptr SomeInteger): m512i 393 +nim mm512_storeu_si512 simd.html#mm512_storeu_si512,ptr.SomeInteger,m512i proc mm512_storeu_si512(mem_addr: ptr SomeInteger; a: m512i) 394 +nim mm512_add_epi8 simd.html#mm512_add_epi8,m512i,m512i proc mm512_add_epi8(a, b: m512i): m512i 396 +nim mm512_add_epi16 simd.html#mm512_add_epi16,m512i,m512i proc mm512_add_epi16(a, b: m512i): m512i 397 +nim mm512_add_epi32 simd.html#mm512_add_epi32,m512i,m512i proc mm512_add_epi32(a, b: m512i): m512i 398 +nim mm512_add_epi64 simd.html#mm512_add_epi64,m512i,m512i proc mm512_add_epi64(a, b: m512i): m512i 399 +nim mm512_mullo_epi32 simd.html#mm512_mullo_epi32,m512i,m512i proc mm512_mullo_epi32(a, b: m512i): m512i 401 +nim mm512_mullo_epi64 simd.html#mm512_mullo_epi64,m512i,m512i proc mm512_mullo_epi64(a, b: m512i): m512i 405 +nim mm512_and_si512 simd.html#mm512_and_si512,m512i,m512i proc 
mm512_and_si512(a, b: m512i): m512i 409 +nim mm512_cmpgt_epi32_mask simd.html#mm512_cmpgt_epi32_mask,m512i,m512i proc mm512_cmpgt_epi32_mask(a, b: m512i): mmask16 412 +nim mm512_maskz_set1_epi32 simd.html#mm512_maskz_set1_epi32,mmask16,cint proc mm512_maskz_set1_epi32(k: mmask16; a: cint): m512i 415 +nim mm512_movm_epi32 simd.html#mm512_movm_epi32,mmask16 proc mm512_movm_epi32(a: mmask16): m512i 420 +nim mm512_movepi8_mask simd.html#mm512_movepi8_mask,m512i proc mm512_movepi8_mask(a: m512i): mmask64 422 +nim mm512_srli_epi32 simd.html#mm512_srli_epi32,m512i,int32 proc mm512_srli_epi32(a: m512i; count: int32): m512i 426 +nim mm512_slli_epi32 simd.html#mm512_slli_epi32,m512i,int32 proc mm512_slli_epi32(a: m512i; count: int32): m512i 427 +nim mm512_i32gather_epi32 simd.html#mm512_i32gather_epi32,m512i,ptr.,int32 proc mm512_i32gather_epi32(i: m512i; m: ptr (uint32 or int32); s: int32): m512i 429 +nim mm512_castps_si512 simd.html#mm512_castps_si512,m512 proc mm512_castps_si512(a: m512): m512i 432 +nim mm512_castsi512_ps simd.html#mm512_castsi512_ps,m512i proc mm512_castsi512_ps(a: m512i): m512 434 +nim mm512_cvtps_epi32 simd.html#mm512_cvtps_epi32,m512 proc mm512_cvtps_epi32(a: m512): m512i 436 +nim mm512_cvtepi32_ps simd.html#mm512_cvtepi32_ps,m512i proc mm512_cvtepi32_ps(a: m512i): m512 438 +nim cvtmask64_u64 simd.html#cvtmask64_u64,mmask64 proc cvtmask64_u64(a: mmask64): uint64 441 diff --git a/softmax.html b/softmax.html new file mode 100644 index 000000000..f5136eb8b --- /dev/null +++ b/softmax.html @@ -0,0 +1,474 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/activation/softmax + + + + + + + + + +Arraymancer - src/arraymancer/nn/activation/softmax + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/activation/softmax

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
SoftmaxActivation[TT] = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc softmax[TT](a: Variable[TT]): Variable[TT]
+
+ + Input:
  • A variable
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/softmax.idx b/softmax.idx new file mode 100644 index 000000000..30994d691 --- /dev/null +++ b/softmax.idx @@ -0,0 +1,3 @@ +nimTitle softmax softmax.html module src/arraymancer/nn/activation/softmax 0 +nim SoftmaxActivation softmax.html#SoftmaxActivation type SoftmaxActivation 19 +nim softmax softmax.html#softmax,Variable[TT] proc softmax[TT](a: Variable[TT]): Variable[TT] 47 diff --git a/solve_lapack.html b/solve_lapack.html new file mode 100644 index 000000000..4dcfa8d64 --- /dev/null +++ b/solve_lapack.html @@ -0,0 +1,446 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra/helpers/solve_lapack + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra/helpers/solve_lapack + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/linear_algebra/helpers/solve_lapack

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ overload, tensor +
+
+
+

Procs

+
+
+
+
proc gesv[T: SomeFloat](a, b: var Tensor[T]; pivot_indices: var seq[int32])
+
+ +

Wrapper for LAPACK *gesv routines Solve AX = B for general matrix

+

In-place version, this will overwrite a and b

+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/solve_lapack.idx b/solve_lapack.idx new file mode 100644 index 000000000..66eea5569 --- /dev/null +++ b/solve_lapack.idx @@ -0,0 +1,2 @@ +nimTitle solve_lapack solve_lapack.html module src/arraymancer/linear_algebra/helpers/solve_lapack 0 +nim gesv solve_lapack.html#gesv,Tensor[T: SomeFloat],Tensor[T: SomeFloat],seq[int32] proc gesv[T: SomeFloat](a, b: var Tensor[T]; pivot_indices: var seq[int32]) 17 diff --git a/special_matrices.html b/special_matrices.html new file mode 100644 index 000000000..156e31b7c --- /dev/null +++ b/special_matrices.html @@ -0,0 +1,846 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra/special_matrices + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra/special_matrices + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/linear_algebra/special_matrices

+
+ +
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
MeshGridIndexing = enum
+  xygrid, ijgrid
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc diag[T](d: Tensor[T]; k = 0; anti = false): Tensor[T] {.noInit.}
+
+ +

Creates new square diagonal matrix from an rank-1 input tensor

+

Input:

+
- Rank-1 tensor containg the elements of the diagonal
+- k: The index of the diagonal that will be set. The default is 0.
+  Use k>0 for diagonals above the main diagonal, and k<0 for diagonals below the main diagonal.
+- anti: If true, set the k-th "anti-diagonal" instead of the k-th regular diagonal.
+

Result:

+
- The constructed, square diagonal matrix
+ +   Source +Edit + +
+
+ +
+
+
+
proc diagonal[T](a: Tensor[T]; k = 0; anti = false): Tensor[T] {.noInit.}
+
+ +

Gets the k-th diagonal (or anti-diagonal) of a matrix

+

Input:

+
- A matrix (which can be rectangular)
+- k: The index k of the diagonal that will be extracted. The default is 0 (i.e. the main diagonal).
+  Use k>0 for diagonals above the main diagonal, and k<0 for diagonals below the main diagonal.
+- anti: If true, get the k-th "anti-diagonal" instead of the k-th regular diagonal.
+

Result:

+
- A copy of the diagonal elements as a rank-1 tensor
+ +   Source +Edit + +
+
+ +
+
+
+
proc eye[T](shape: varargs[int]): Tensor[T] {.noInit.}
+
+ +

Return a 2-D tensor with ones on the diagonal and zeros elsewhere

+

Input:

+
- The shape of the output matrix
+

Result:

+
- The constructed, rank-2 diagonal tensor
+ +   Source +Edit + +
+
+ +
+
+
+
proc hilbert(n: int; T: typedesc[SomeFloat]): Tensor[T]
+
+ + Generates an Hilbert matrix of shape N, N +   Source +Edit + +
+
+ +
+
+
+
proc identity[T](n: int): Tensor[T] {.noInit.}
+
+ +

Return an identity matrix (i.e. 2-D tensor) of size n

+

The identity matrix is a square 2-D tensor with ones on the main diagonal and zeros elsewhere. This is basically the same as calling eye(n, n).

+

Input:

+
- Number of rows / columns in the output.
+

Result:

+
- The constructed indentity 2-D tensor
+ +   Source +Edit + +
+
+ +
+
+
+
proc meshgrid[T](t_list: varargs[Tensor[T]]; indexing = MeshGridIndexing.xygrid): seq[
+    Tensor[T]] {.noinit.}
+
+ +

Return a sequence of coordinate matrices from coordinate vectors.

+

Make N-D coordinate tensors for vectorized evaluations of N-D scalar/vector fields over N-D grids, given one-dimensional coordinate tensors x1, x2,..., xn.

+

Inputs:

+
  • xi: The coordinate tensors. Each vector must be a rank-1 tensor.
  • +
  • indexing: Cartesian (xygrid, default) or matrix (ijgrid) indexing of the output. The indexing mode only affects the first 2 output Tensors. In the 2-D case with inputs of length M and N, the outputs are of shape (N, M) for xygrid indexing and (M, N) for ijgrid indexing. In the 3-D case with inputs of length M, N and P, outputs are of shape (N, M, P) for xygrid indexing and (M, N, P) for ijgrid indexing.
  • +
+

Result:

+
  • List of N meshgrid N-dimensional Tensors For tensors x1, x2,..., xn with lengths Ni=len(xi), returns (N1, N2, N3,..., Nn) shaped tensors if indexing=ijgrid or (N2, N1, N3,..., Nn) shaped tensors if indexing=xygrid with the elements of xi repeated to fill the matrix along the first dimension for x1, the second for x2 and so on.
  • +
+

Notes:

+
  • This function follows and implements the numpy.meshgrid API.
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc set_diagonal[T](a: var Tensor[T]; d: Tensor[T]; k = 0; anti = false)
+
+ +

Sets a diagonal of a matrix (in place)

+

Input:

+
- The matrix that will be changed in place.
+- Rank-1 tensor containg the elements that will be copied into the selected diagonal.
+- k: The index k of the diagonal that will be changed. The default is 0 (i.e. the main diagonal).
+  Use k>0 for diagonals above the main diagonal, and k<0 for diagonals below the main diagonal.
+- anti: If true, set the k-th "anti-diagonal" instead of the k-th regular diagonal.
+ +   Source +Edit + +
+
+ +
+
+
+
proc tri[T](shape: Metadata; k: static int = 0; upper: static bool = false): Tensor[
+    T] {.noInit.}
+
+ +

Return a 2-D tensor with ones at and below the given diagonal and zeros elsewhere

+

Inputs:

+
- The (rank-2) shape of the output matrix.
+- k: The sub-diagonal at and below which the tensor will be filled with ones.
+     The default is 0.
+- upper: If true, the tensor will be filled with ones at and above the given
+         diagonal. The default is false.
+

Result:

+
- The constructed, rank-2 triangular tensor.
+ +   Source +Edit + +
+
+
+
proc tri[T](shape_ax1, shape_ax0: int; k: static int = 0;
+            upper: static bool = false): Tensor[T] {.noInit, inline.}
+
+ +

Return a 2-D tensor with ones at and below the given diagonal and zeros elsewhere

+

Inputs:

+
- The shape of the output matrix.
+- k: The sub-diagonal at and below which the tensor will be filled with ones.
+     The default is 0.
+- upper: If true, the tensor will be filled with ones at and above the given
+         diagonal. The default is false.
+

Result:

+
- The constructed, rank-2 triangular tensor
+ +   Source +Edit + +
+
+ +
+
+
+
proc vander(order: int = -1; increasing = false): Tensor[float] {.inline,
+    ...raises: [ValueError], tags: [], forbids: [].}
+
+ +

Same as the square vandermonde but with increasing set to false by default

+

This procedure is meant for compatibility with numpy, whose vander() function defaults to increasing = false (as opposed to Arraymancer's vandermonde, which defaults to increasing = true).

+

See also: vandermonde

+ +   Source +Edit + +
+
+
+
proc vander[T](x: Tensor[T]; order: int = -1; increasing = false): Tensor[float] {.
+    inline.}
+
+ +

Same as vandermonde but with increasing set to false by default

+

This procedure is meant for compatibility with numpy, whose vander() function defaults to increasing = false (as opposed to Arraymancer's vandermonde, which defaults to increasing = true).

+

See also: vandermonde

+ +   Source +Edit + +
+
+ +
+
+
+
proc vandermonde(order: int; increasing = true): Tensor[float] {.inline,
+    ...raises: [ValueError], tags: [], forbids: [].}
+
+ +

Returns a "square" Vandermonde matrix of the given order

+

A square Vandermonde matrix is a Vandermonde matrix of the given order whose input tensor is arange(order).

+

V_ij = x_i ^ order_j

+

where order_j runs from 0 to order-1 or from order-1 down to 0.

+

Inputs:

+
  • order: the order of the Vandermonde matrix.
  • +
  • increasing: If true, the powers of x_i will run from 0 to order-1, otherwise they will run from order-1 down to 0.
  • +
+

Result:

+
  • The constructed Vandermonde matrix
  • +
+ +   Source +Edit + +
+
+
+
proc vandermonde[T](x: Tensor[T]; order: int = -1; increasing = true): Tensor[
+    float]
+
+ +

Returns a Vandermonde matrix of the input x up to the given order

+

A Vandermonde matrix consists of the input x split into multiple rows where each row contains all powers of x_i from 0 to order-1 (by default) or from order-1 down to 0 (if increasing is set to false).

+

V_ij = x_i ^ order_j

+

where order_j runs from 0 to order-1 or from order-1 down to 0.

+

Inputs:

+
  • x: The input tensor x (which must be a rank-1 tensor)
  • +
  • order: the order of the Vandermonde matrix. If not provided, (or non positive) the order is set to the size of x.
  • +
  • increasing: If true, the powers of x_i will run from 0 to order-1, otherwise they will run from order-1 down to 0.
  • +
+

Result:

+
  • The constructed Vandermonde matrix
  • +
+ +   Source +Edit + +
+
+
+
proc vandermonde[T](x: Tensor[T]; orders: Tensor[SomeNumber]): Tensor[float]
+
+ +

Returns a "Generalized" Vandermonde matrix of the input x over the given orders

+

A "generalized" Vandermonde matrix consists of the input x split into multiple rows where each row contains the powers of x_i elevated to each of the elements of the orders tensor.

+

V_ij = x_i ^ order_j

+

Inputs:

+
  • x: The input tensor x (which must be a rank-1 tensor)
  • +
  • orders: The "exponents" tensor (which must also be a rank-1 tensor)
  • +
+

Result:

+
  • The constructed Vandermonde matrix
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc with_diagonal[T](a: Tensor[T]; d: Tensor[T]; k = 0; anti = false): Tensor[T] {.
+    noInit.}
+
+ +

Copy the input matrix, changing one of its diagonals into the elements of the rank-1 input tensor d

+

Input:

+
- The matrix that will copied into the output.
+- Rank-1 tensor containg the elements that will be copied into the selected diagonal.
+- k: The index k of the diagonal that will be changed. The default is 0 (i.e. the main diagonal).
+  Use k>0 for diagonals above the main diagonal, and k<0 for diagonals below the main diagonal.
+- anti: If true, set the k-th "anti-diagonal" instead of the k-th regular diagonal.
+ +   Source +Edit + +
+
+ +
+ +
+
+
+

Exports

+
+ tril, triu +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/special_matrices.idx b/special_matrices.idx new file mode 100644 index 000000000..ad1207a28 --- /dev/null +++ b/special_matrices.idx @@ -0,0 +1,22 @@ +nimTitle special_matrices special_matrices.html module src/arraymancer/linear_algebra/special_matrices 0 +nim hilbert special_matrices.html#hilbert,int,typedesc[SomeFloat] proc hilbert(n: int; T: typedesc[SomeFloat]): Tensor[T] 9 +nim vandermonde special_matrices.html#vandermonde,Tensor[T],Tensor[SomeNumber] proc vandermonde[T](x: Tensor[T]; orders: Tensor[SomeNumber]): Tensor[float] 37 +nim vandermonde special_matrices.html#vandermonde,Tensor[T],int proc vandermonde[T](x: Tensor[T]; order: int = -1; increasing = true): Tensor[float] 57 +nim vandermonde special_matrices.html#vandermonde,int proc vandermonde(order: int; increasing = true): Tensor[float] 83 +nim vander special_matrices.html#vander,Tensor[T],int proc vander[T](x: Tensor[T]; order: int = -1; increasing = false): Tensor[float] 102 +nim vander special_matrices.html#vander,int proc vander(order: int = -1; increasing = false): Tensor[float] 112 +nim diagonal special_matrices.html#diagonal,Tensor[T],int proc diagonal[T](a: Tensor[T]; k = 0; anti = false): Tensor[T] 122 +nim set_diagonal special_matrices.html#set_diagonal,Tensor[T],Tensor[T],int proc set_diagonal[T](a: var Tensor[T]; d: Tensor[T]; k = 0; anti = false) 161 +nim with_diagonal special_matrices.html#with_diagonal,Tensor[T],Tensor[T],int proc with_diagonal[T](a: Tensor[T]; d: Tensor[T]; k = 0; anti = false): Tensor[T] 201 +nim diag special_matrices.html#diag,Tensor[T],int proc diag[T](d: Tensor[T]; k = 0; anti = false): Tensor[T] 213 +nim identity special_matrices.html#identity,int proc identity[T](n: int): Tensor[T] 228 +nim eye special_matrices.html#eye,varargs[int] proc eye[T](shape: varargs[int]): Tensor[T] 240 +nim tri special_matrices.html#tri,Metadata,staticint,staticbool proc tri[T](shape: Metadata; k: static int = 0; upper: static bool = false): Tensor[T] 251 +nim tri 
special_matrices.html#tri,int,int,staticint,staticbool proc tri[T](shape_ax1, shape_ax0: int; k: static int = 0; upper: static bool = false): Tensor[\n T] 272 +nim xygrid special_matrices.html#xygrid MeshGridIndexing.xygrid 289 +nim ijgrid special_matrices.html#ijgrid MeshGridIndexing.ijgrid 289 +nim MeshGridIndexing special_matrices.html#MeshGridIndexing enum MeshGridIndexing 289 +nim meshgrid special_matrices.html#meshgrid,varargs[Tensor[T]] proc meshgrid[T](t_list: varargs[Tensor[T]]; indexing = MeshGridIndexing.xygrid): seq[\n Tensor[T]] 291 +nimgrp tri special_matrices.html#tri-procs-all proc 251 +nimgrp vandermonde special_matrices.html#vandermonde-procs-all proc 37 +nimgrp vander special_matrices.html#vander-procs-all proc 102 diff --git a/stats.html b/stats.html new file mode 100644 index 000000000..849685b71 --- /dev/null +++ b/stats.html @@ -0,0 +1,449 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/stats/stats + + + + + + + + + +Arraymancer - src/arraymancer/stats/stats + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/stats/stats

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Procs

+
+
+
+
proc covariance_matrix[T: SomeFloat](x, y: Tensor[T]): Tensor[T]
+
+ + Input:
  • 2 tensors of shape Nb observations, features Note: contrary to Numpy default each row is an observations while echo column represent a feature/variable observed.
  • +
+

Returns:

+
  • The unbiased covariance (normalized by the number of observations - 1) in the shape features, features
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/stats.idx b/stats.idx new file mode 100644 index 000000000..cdd73a16b --- /dev/null +++ b/stats.idx @@ -0,0 +1,2 @@ +nimTitle stats stats.html module src/arraymancer/stats/stats 0 +nim covariance_matrix stats.html#covariance_matrix,Tensor[T: SomeFloat],Tensor[T: SomeFloat] proc covariance_matrix[T: SomeFloat](x, y: Tensor[T]): Tensor[T] 7 diff --git a/std_version_types.html b/std_version_types.html new file mode 100644 index 000000000..9e7f2b235 --- /dev/null +++ b/std_version_types.html @@ -0,0 +1,399 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/std_version_types + + + + + + + + + +Arraymancer - src/arraymancer/std_version_types + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/std_version_types

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ +
+
+   Source +Edit + +
+ +

+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/std_version_types.idx b/std_version_types.idx new file mode 100644 index 000000000..a73cdb925 --- /dev/null +++ b/std_version_types.idx @@ -0,0 +1 @@ +nimTitle std_version_types std_version_types.html module src/arraymancer/std_version_types 0 diff --git a/syntactic_sugar.html b/syntactic_sugar.html new file mode 100644 index 000000000..ddb1300ba --- /dev/null +++ b/syntactic_sugar.html @@ -0,0 +1,492 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/syntactic_sugar + + + + + + + + + +Arraymancer - src/arraymancer/tensor/syntactic_sugar + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/syntactic_sugar

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Templates

+
+
+
+
template at[T](t: Tensor[T]; args: varargs[untyped]): untyped
+
+ +

Slice a Tensor and collapse singleton dimension.

+

Input:

+
  • a Tensor
  • +
  • and:
    • specific coordinates (varargs[int])
    • +
    • or a slice (cf. tutorial)
    • +
    +
  • +
+

Returns:

+
  • a value or a view of the Tensor corresponding to the slice Singleton dimension are collapsed
  • +
+

Usage: See the [] macro

+ +   Source +Edit + +
+
+ +
+
+
+
template at_mut[T](t: var Tensor[T]; args: varargs[untyped]): untyped
+
+ +

Slice a Tensor, collapse singleton dimension, returning a mutable slice of the input

+

This can be useful, for example, when assigning a value into a chain of slice operations which are usually considered immutable even if the original tensor is mutable. For example, this lets you do:

+
var x = arange(12).reshape([4, 3])
+let condition = [[true, false, true], [true, false, true]].toTensor
+# The code `x[1..2, _][condition] = 1000` would fail with
+# a `a slice of an immutable tensor cannot be assigned to` error
+# Instead, using `at_mut` allows assignment to the slice
+x.at_mut(1..2, _)[condition] = 1000

Input:

+
  • a Tensor
  • +
  • and:
    • specific coordinates (varargs[int])
    • +
    • or a slice (cf. tutorial)
    • +
    +
  • +
+

Returns:

+
  • a mutable value or view of the Tensor corresponding to the slice Singleton dimension are collapsed
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/syntactic_sugar.idx b/syntactic_sugar.idx new file mode 100644 index 000000000..d6a9cc887 --- /dev/null +++ b/syntactic_sugar.idx @@ -0,0 +1,3 @@ +nimTitle syntactic_sugar syntactic_sugar.html module src/arraymancer/tensor/syntactic_sugar 0 +nim at syntactic_sugar.html#at.t,Tensor[T],varargs[untyped] template at[T](t: Tensor[T]; args: varargs[untyped]): untyped 18 +nim at_mut syntactic_sugar.html#at_mut.t,Tensor[T],varargs[untyped] template at_mut[T](t: var Tensor[T]; args: varargs[untyped]): untyped 33 diff --git a/tanh.html b/tanh.html new file mode 100644 index 000000000..294a9aa0b --- /dev/null +++ b/tanh.html @@ -0,0 +1,474 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nn/activation/tanh + + + + + + + + + +Arraymancer - src/arraymancer/nn/activation/tanh + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nn/activation/tanh

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Types

+
+
+
TanhActivation[TT] {.final.} = ref object of Gate[TT]
+  
+
+ + +   Source +Edit + +
+
+ +
+
+
+

Procs

+
+
+
+
proc tanh[TT](a: Variable[TT]): Variable[TT]
+
+ + Input:
  • A variable
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/tanh.idx b/tanh.idx new file mode 100644 index 000000000..c32d1eb5d --- /dev/null +++ b/tanh.idx @@ -0,0 +1,3 @@ +nimTitle tanh tanh.html module src/arraymancer/nn/activation/tanh 0 +nim TanhActivation tanh.html#TanhActivation type TanhActivation 20 +nim tanh tanh.html#tanh,Variable[TT] proc tanh[TT](a: Variable[TT]): Variable[TT] 48 diff --git a/tensor.html b/tensor.html new file mode 100644 index 000000000..ae79639e7 --- /dev/null +++ b/tensor.html @@ -0,0 +1,420 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor + + + + + + + + + +Arraymancer - src/arraymancer/tensor + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Exports

+
+ max, []=, setLen, &, copyFrom, items, ==, ==, delete, zip, &, $, mitems, add, mpairs, LASER_MAXRANK, insert, reversed, copyFrom, [], low, @, high, product, DynamicStackArray, concat, reversed, [], pairs, [], data=, get_offset_ptr, cpuStorageFromBuffer, get_offset_ptr, allocCpuStorage, reversed, CudaTensor, mpairs, LASER_MAXRANK, $, product, get_data_ptr, get_offset_ptr, AnyTensor, [], rank, reversed, get_data_ptr, size, unsafe_raw_offset, setLen, ClStorage, is_F_contiguous, unsafe_raw_offset, Metadata, copyFrom, ==, delete, is_C_contiguous, []=, Tensor, raw_data_unaligned, mitems, add, zip, &, low, @, initMetadataArray, unsafe_raw_offset, toMetadataArray, rank, unsafe_raw_buf, CpuStorage, unsafe_raw_buf, ClTensor, KnownSupportsCopyMem, copyFrom, [], RawMutableView, size, ==, isContiguous, unsafe_raw_buf, concat, max, MetadataArray, is_C_contiguous, dataArray, CudaStorage, items, RawImmutableView, DynamicStackArray, [], &, []=, shape_to_strides, high, insert, pairs, get_data_ptr, [], dataArray, [], newTensorUninit, zeros_like, item, fromBuffer, zeros, newTensor, newTensorUninit, initTensorMetadata, newTensorWith, ones, ones, toTensor, arange, toUnsafeView, item, randomTensor, fromBuffer, setZero, fromBuffer, randomTensor, ones_like, toMetadata, geomspace, arange, randomNormalTensor, deepCopy, linspace, copyFrom, newTensorWith, arange, copyFromRaw, randomTensor, zeros, randomTensor, newTensor, fromBuffer, newTensorUninit, toMetadata, initTensorMetadata, logspace, clone, enumerateAxis, enumerateAxis, enumerate, axis, mzip, enumerate, items, mzip, items, zip, menumerateZip, zip, axis, mitems, atContiguousIndex, mpairs, mzip, pairs, mitems, menumerate, atContiguousIndex, enumerateZip, enumerateZip, zipAxis, menumerateZip, mzip, enumerateZip, zip, menumerate, enumerateZip, zip, atAxisIndex, _, Ellipsis, |+, ..<, .., ..., SteppedSlice, Step, ArrayOfSlices, |+, ^, ^, |, |-, |-, |+, |-, initSpanSlices, ..^, |, toArrayOfSlices, |, [], []=, []=, <=., <=., >=., >=., .<, <., 
==., ==, >., <., isNotNaN, <., .>, >=., !=., .==, .!=, !=., .>=, ==., <=., .<=, >., !=., isNaN, ==., >., map2, map2, map2_inline, apply2, apply, apply, apply2_inline, map, apply3_inline, map3_inline, apply2_inline, apply2, apply_inline, map_inline, apply_inline, fold, reduce_inline, fold_inline, fold_axis_inline, reduce, reduce_axis_inline, reduce, fold, squeeze, broadcast, roll, reshape, moveaxis, chunk, broadcast, broadcast, permute, broadcast, squeeze, roll, bc, flatten, reshape, append, unsqueeze, transpose, bc, split, stack, broadcast2, asContiguous, concat, append, index_fill, index_fill, masked_axis_fill, masked_fill_along_axis, masked_select, masked_fill, index_select, masked_axis_fill, masked_fill, masked_select, index_select, masked_axis_select, masked_axis_select, masked_fill, masked_fill, $, pretty, sqrt, cosh, arccos, lgamma, arcsinh, round, cbrt, radToDeg, ln, erf, ceil, sin, makeUniversalLocal, asType, tan, arccosh, degToRad, fac, log10, gamma, arcsin, exp, arctan, sinh, arctanh, makeUniversal, erfc, tanh, trunc, asType, cos, log2, floor, isNaN, /, *, *, div, -, -, *=, /=, -, /=, -=, +, dot, +, +, +=, mod, dot, mod, gemv, *, gemm, gemm, *, gemv, gemm, .+, +.=, -., /., ./, .=*, .=/, +.=, /., .^=, .=-, +., ^., .-, *.=, *.=, -., *., -., .^, +., .*, -.=, /., ^., *., /.=, /.=, .=+, +., *., .^, -.=, ^.=, mod, or, xor, not, and, imag=, real=, real=, conjugate, imag, imag=, complex, real, CorrelateMode, sgn, classify, mmax, negate, mnegate, abs, elwise_mul, mreciprocal, mmin, -, convolve, reciprocal, ConvolveMode, sinc, melwise_mul, mabs, mclamp, floorMod, mmin, copySign, min, mreciprocal, max, floorMod, mmax, phase, correlate, mcopySign, reciprocal, max, floorMod, abs, square, square, melwise_div, melwise_div, clamp, elwise_div, sinc, min, abs, phase, elwise_div, correlate, min, max, std, sum, argmin_min, percentile, product, sum, mean, unwrap_period, argmax, max, iqr, variance, nonzero, all, cumprod, mean, diff_discrete, variance, argmax_max, cumsum, 
median, product, mean, min, std, any, mean, argmin, mean, mean, sorted, argsort, sort, frobenius_inner_prod, rewriteToTensorReshape, rewriteTensor_AddMultiply, rewriteTensor_MultiplyAdd_inplace, rewriteTensor_MultiplyAdd, at, at_mut, toSeq5D, export_tensor, toFlatSeq, toSeq4D, toSeq1D, toRawSeq, toSeq2D, toSeq3D, .+, /, broadcast, -., pretty, ./, clone, +.=, reshape, .-, /=, *., broadcast, -., -., $, +., broadcast2, +=, .*, -, /., *=, -=, .=*, zeros_like, .=+, +, cuda, dot, unsqueeze, squeeze, .=/, +.=, /., .=-, cpu, +., ones_like, *, *, -.=, *.=, transpose, /.=, asContiguous, +., *, -.=, $, pretty, .+, -, -., .*, -=, opencl, -, *, .-, /., +, zeros_like, +, +=, dot, -=, *., cpu, ones_like, dot, ./, +=, +. +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/tensor.idx b/tensor.idx new file mode 100644 index 000000000..b4ae71f3b --- /dev/null +++ b/tensor.idx @@ -0,0 +1 @@ +nimTitle tensor tensor.html module src/arraymancer/tensor 0 diff --git a/tensor_compare_helper.html b/tensor_compare_helper.html new file mode 100644 index 000000000..13462cdc2 --- /dev/null +++ b/tensor_compare_helper.html @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/spatial/tensor_compare_helper + + + + + + + + + +Arraymancer - src/arraymancer/spatial/tensor_compare_helper + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/spatial/tensor_compare_helper

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Procs

+
+
+
+
proc `<`[T](s1, s2: Tensor[T]): bool
+
+ + just an internal comparison of two Tensors, which assumes that the order of two seqs matters. This proc is in an extra file, because the proc using it (queryImpl) is generic and we need to call bind for this. If it was defined in the same file, we can't bind it for some reason. Further we do not want to export such a procedure as obviously in the general context this comparison doesn't make sense. But as we use a HeapQueue of tensors, we need a < comparison operator. +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/tensor_compare_helper.idx b/tensor_compare_helper.idx new file mode 100644 index 000000000..1480570ca --- /dev/null +++ b/tensor_compare_helper.idx @@ -0,0 +1,2 @@ +nimTitle tensor_compare_helper tensor_compare_helper.html module src/arraymancer/spatial/tensor_compare_helper 0 +nim `<` tensor_compare_helper.html#<,Tensor[T],Tensor[T] proc `<`[T](s1, s2: Tensor[T]): bool 2 diff --git a/tensor_cuda.html b/tensor_cuda.html new file mode 100644 index 000000000..bfb541cba --- /dev/null +++ b/tensor_cuda.html @@ -0,0 +1,420 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/tensor_cuda + + + + + + + + + +Arraymancer - src/arraymancer/tensor/tensor_cuda + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/tensor_cuda

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+ +
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/tensor_cuda.idx b/tensor_cuda.idx new file mode 100644 index 000000000..0692924d5 --- /dev/null +++ b/tensor_cuda.idx @@ -0,0 +1 @@ +nimTitle tensor_cuda tensor_cuda.html module src/arraymancer/tensor/tensor_cuda 0 diff --git a/tensor_opencl.html b/tensor_opencl.html new file mode 100644 index 000000000..5cb24bcb0 --- /dev/null +++ b/tensor_opencl.html @@ -0,0 +1,420 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/tensor_opencl + + + + + + + + + +Arraymancer - src/arraymancer/tensor/tensor_opencl + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/tensor_opencl

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+ +
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/tensor_opencl.idx b/tensor_opencl.idx new file mode 100644 index 000000000..0c41a23d3 --- /dev/null +++ b/tensor_opencl.idx @@ -0,0 +1 @@ +nimTitle tensor_opencl tensor_opencl.html module src/arraymancer/tensor/tensor_opencl 0 diff --git a/theindex.html b/theindex.html new file mode 100644 index 000000000..187978832 --- /dev/null +++ b/theindex.html @@ -0,0 +1,6415 @@ + + + + + + + + + + + + + + + + + + +Index + + + + + + + + + +Arraymancer - Index + + + + + + + +Fork me on GitHub + + +
+
+

Index

+ Modules: accessors, accessors_macros_read, accessors_macros_syntax, accessors_macros_write, accuracy_score, aggregate, algebra, algorithms, align_unroller, ast_utils, autograd, autograd_common, auxiliary_blas, auxiliary_lapack, blas_l3_gemm, blis, common_error_functions, compiler_optim_hints, complex, conv, conv2D, cpuinfo_x86, cross_entropy_losses, cublas, cuda, cuda_global_state, cudnn, cudnn_conv_interface, data_structure, datatypes, dbscan, decomposition, decomposition_lapack, decomposition_rand, deprecate, display, display_cuda, distances, distributions, dynamic_stack_arrays, einsum, embedding, exporting, filling_data, flatten, foreach, foreach_common, foreach_staged, functional, gates_basic, gates_blas, gates_hadamard, gates_reduce, gates_shapeshifting_concat_split, gates_shapeshifting_views, gcn, gemm, gemm_packing, gemm_prepacked, gemm_tiling, gemm_ukernel_avx, gemm_ukernel_avx2, gemm_ukernel_avx512, gemm_ukernel_avx_fma, gemm_ukernel_dispatch, gemm_ukernel_generator, gemm_ukernel_generic, gemm_ukernel_sse, gemm_ukernel_sse2, gemm_ukernel_sse4_1, gemm_utils, global_config, gru, higher_order_applymap, higher_order_foldreduce, imdb, incl_accessors_cuda, incl_higher_order_cuda, incl_kernels_cuda, init, init_colmajor, init_copy_cpu, init_copy_cuda, init_cpu, init_cuda, init_opencl, initialization, io, io_csv, io_hdf5, io_image, io_npy, io_stream_readers, kde, kdtree, kmeans, lapack, least_squares, least_squares_lapack, linear, linear_algebra, linear_systems, math_functions, math_ops_fusion, maxpool2D, mean_square_error_loss, memory, memory_optimization_hints, ml, mnist, naive_l2_gemv, neighbors, nested_containers, nlp, nn, nn_dsl, nn_primitives, nnp_activation, nnp_conv2d_cudnn, nnp_convolution, nnp_embedding, nnp_gru, nnp_linear, nnp_maxpooling, nnp_numerical_gradient, nnp_sigmoid_cross_entropy, nnp_softmax, nnp_softmax_cross_entropy, nnpack, nnpack_interface, opencl_backend, opencl_global_state, openmp, operators_blas_l1, operators_blas_l1_cuda, 
operators_blas_l1_opencl, operators_blas_l2l3, operators_blas_l2l3_cuda, operators_blas_l2l3_opencl, operators_broadcasted, operators_broadcasted_cuda, operators_broadcasted_opencl, operators_comparison, operators_logical, optim_ops_fusion, optimizers, overload, p_accessors, p_accessors_macros_desugar, p_accessors_macros_read, p_accessors_macros_write, p_activation, p_checks, p_complex, p_display, p_empty_tensors, p_init_cuda, p_init_opencl, p_kernels_interface_cuda, p_kernels_interface_opencl, p_logsumexp, p_nnp_checks, p_nnp_types, p_operator_blas_l2l3, p_shapeshifting, pca, relu, selectors, sequninit, shapeshifting, shapeshifting_cuda, shapeshifting_opencl, sigmoid, simd, softmax, solve_lapack, special_matrices, stats, std_version_types, syntactic_sugar, tanh, tensor, tensor_compare_helper, tensor_cuda, tensor_opencl, tokenizers, triangular, ufunc, util.

API symbols

+
`!=.`:
+
`$`:
+
`&`:
+
`*.=`:
+
`*.`:
+
`*=`:
+
`*`:
+
`+.=`:
+
`+.`:
+
`+=`:
+
`+`:
+
`-.=`:
+
`-.`:
+
`-=`:
+
`-`:
+
`.!=`:
+
`.*`:
+
`.+`:
+
`.-`:
+
`...`:
+
`..<`:
+
`..^`:
+
`..`:
+
`./`:
+
`.<=`:
+
`.<`:
+
`.=*`:
+
`.=+`:
+
`.=-`:
+
`.=/`:
+
`.==`:
+
`.>=`:
+
`.>`:
+
`.^=`:
+
`.^`:
+
`/.=`:
+
`/.`:
+
`/=`:
+
`/`:
+
`<.`:
+
`<=.`:
+
`<`:
+
`==.`:
+
`==`:
+
`>.`:
+
`>=.`:
+
`@`:
+
`[]=`:
+
`[]`:
+
`^.=`:
+
`^.`:
+
`^`:
+
`_`:
+
`and`:
+
`div`:
+
`mod`:
+
`not`:
+
`or`:
+
`xor`:
+
`|+`:
+
`|-`:
+
`|`:
+
AAt:
+
abs:
+
absolute_error:
+
accuracy_score:
+
Adam:
+
add:
+
AddGate:
+
address:
+
advanceStridedIteration:
+
align_raw_data:
+
all:
+
allocCpuStorage:
+
any:
+
AnyMetric:
+
AnyTensor:
+
append:
+
apply:
+
apply2:
+
apply2_inline:
+
apply3_inline:
+
apply_inline:
+
arange:
+
arccos:
+
arccosh:
+
arcsin:
+
arcsinh:
+
arctan:
+
arctanh:
+
argmax:
+
argmax_max:
+
argmin:
+
argmin_min:
+
argsort:
+
ArrayOfSlices:
+
asContiguous:
+
asCudnnType:
+
assume_aligned:
+
asType:
+
at:
+
AtA:
+
atAxisIndex:
+
atContiguousIndex:
+
atIndex:
+
atIndexMut:
+
at_mut:
+
axis:
+
backprop:
+
Backward:
+
bc:
+
blasMM_C_eq_aAB_p_bC:
+
blasMV_y_eq_aAx_p_by:
+
box:
+
boxKernel:
+
broadcast:
+
broadcast2:
+
broadcast2Impl:
+
broadcastImpl:
+
cbrt:
+
ceil:
+
check_axis_index:
+
check_concat:
+
check_contiguous_index:
+
check_ctx:
+
check_dot_prod:
+
check_elementwise:
+
check_index:
+
check_input_target:
+
check_matmat:
+
check_matvec:
+
check_nested_elements:
+
check_reshape:
+
check_shape:
+
check_size:
+
check_squeezeAxis:
+
check_start_end:
+
check_steps:
+
check_unsqueezeAxis:
+
chunk:
+
ChunkSplitGate:
+
clamp:
+
classify:
+
clContext0:
+
clDevice0:
+
clMalloc:
+
clone:
+
clQueue0:
+
ClStorage:
+
ClTensor:
+
col2im:
+
complex:
+
Complex32:
+
Complex64:
+
concat:
+
concatMap:
+
conjugate:
+
Context:
+
contiguousImpl:
+
Conv2D:
+
conv2d:
+
Conv2DAlgorithm:
+
conv2d_backward:
+
Conv2DGate:
+
ConvAlgoSpace:
+
conv_bwd_data_algo_workspace:
+
conv_bwd_kernel_algo_workspace:
+
ConvConfig:
+
convolve:
+
ConvolveMode:
+
convOutDims:
+
copyFrom:
+
copy_from:
+
copyFrom:
+
copyFromRaw:
+
copySign:
+
correlate:
+
CorrelateMode:
+
cos:
+
cosh:
+
covariance_matrix:
+
cpu:
+
CPUFeatureX86:
+
CpuStorage:
+
cpuStorageFromBuffer:
+
create_cache_dirs_if_necessary:
+
cublas_axpy:
+
cublas_copy:
+
cublas_dot:
+
cublas_geam:
+
cublas_gemm:
+
cublas_gemmStridedBatched:
+
cublas_gemv:
+
cublasHandle0:
+
cublas_scal:
+
cuda:
+
cuda_assign_call:
+
cuda_assign_glue:
+
cuda_assignscal_call:
+
cuda_assignscal_glue:
+
cuda_binary_call:
+
cuda_binary_glue:
+
CUDA_HOF_BPG:
+
CUDA_HOF_TPB:
+
cuda_lscal_call:
+
cuda_lscal_glue:
+
cudaMalloc:
+
cuda_rscal_call:
+
cuda_rscal_glue:
+
CudaStorage:
+
cudaStream0:
+
CudaTensor:
+
cudnnHandle0:
+
cumprod:
+
cumsum:
+
CustomMetric:
+
cvtmask64_u64:
+
data=:
+
dataArray:
+
dbscan:
+
deallocCl:
+
deallocCuda:
+
deepCopy:
+
degToRad:
+
delete:
+
desugar:
+
diag:
+
diagonal:
+
diff_discrete:
+
disp2d:
+
distance:
+
distanceMatrix:
+
dot:
+
dualStridedIteration:
+
dualStridedIterationYield:
+
DynamicStackArray:
+
einsum:
+
Ellipsis:
+
elwise_div:
+
elwise_mul:
+
Embedding:
+
embedding:
+
embedding_backward:
+
EmbeddingGate:
+
enumerate:
+
enumerateAxis:
+
enumerateZip:
+
epanechnikov:
+
epanechnikovKernel:
+
erf:
+
erfc:
+
Euclidean:
+
exch_dim:
+
exp:
+
expm1:
+
export_tensor:
+
extract_cpu_simd:
+
extract_c_unit_stride:
+
extract_mr:
+
extract_nb_scalars:
+
extract_nb_vecs_nr:
+
extract_nr:
+
extract_pt:
+
eye:
+
fac:
+
fallbackMM_C_eq_aAB_p_bC:
+
FancyIndex:
+
FancyMaskAxis:
+
FancyMaskFull:
+
FancyNone:
+
FancySelectorKind:
+
FancyUnknownAxis:
+
FancyUnknownFull:
+
flatIter:
+
Flatten:
+
flatten:
+
floor:
+
floorMod:
+
fold:
+
fold_axis_inline:
+
fold_inline:
+
forEach:
+
forEachContiguous:
+
forEachContiguousSerial:
+
forEachSerial:
+
forEachStaged:
+
forEachStrided:
+
forEachStridedSerial:
+
forward:
+
frobenius_inner_prod:
+
fromBuffer:
+
full:
+
gamma:
+
Gate:
+
gauss:
+
gaussKernel:
+
gcn:
+
GCNGate:
+
GCNLayer:
+
gebb_ukernel:
+
gebb_ukernel_edge:
+
gebb_ukernel_edge_epilogue:
+
gebb_ukernel_edge_fallback:
+
gebb_ukernel_edge_float32_x86_AVX:
+
gebb_ukernel_edge_float32_x86_AVX512:
+
gebb_ukernel_edge_float32_x86_AVX_FMA:
+
gebb_ukernel_edge_float32_x86_SSE:
+
gebb_ukernel_edge_float64_x86_AVX:
+
gebb_ukernel_edge_float64_x86_AVX512:
+
gebb_ukernel_edge_float64_x86_AVX_FMA:
+
gebb_ukernel_edge_float64_x86_SSE2:
+
gebb_ukernel_edge_int32_x86_AVX2:
+
gebb_ukernel_edge_int32_x86_AVX512:
+
gebb_ukernel_edge_int32_x86_SSE2:
+
gebb_ukernel_edge_int32_x86_SSE4_1:
+
gebb_ukernel_edge_int64_x86_AVX512:
+
gebb_ukernel_edge_int64_x86_SSE2:
+
gebb_ukernel_epilogue_fallback:
+
gebb_ukernel_fallback:
+
gebb_ukernel_float32_x86_AVX:
+
gebb_ukernel_float32_x86_AVX512:
+
gebb_ukernel_float32_x86_AVX_FMA:
+
gebb_ukernel_float32_x86_SSE:
+
gebb_ukernel_float64_x86_AVX:
+
gebb_ukernel_float64_x86_AVX512:
+
gebb_ukernel_float64_x86_AVX_FMA:
+
gebb_ukernel_float64_x86_SSE2:
+
gebb_ukernel_int32_x86_AVX2:
+
gebb_ukernel_int32_x86_AVX512:
+
gebb_ukernel_int32_x86_SSE2:
+
gebb_ukernel_int32_x86_SSE4_1:
+
gebb_ukernel_int64_x86_AVX512:
+
gebb_ukernel_int64_x86_SSE2:
+
gebp_mkernel:
+
gelsd:
+
gemm:
+
gemm_nn_fallback:
+
gemm_packed:
+
gemm_prepackA:
+
gemm_prepackA_mem_required:
+
gemm_prepackA_mem_required_impl:
+
gemm_prepackB:
+
gemm_prepackB_mem_required:
+
gemm_prepackB_mem_required_impl:
+
gemm_strided:
+
gemv:
+
gen_cl_apply2:
+
gen_cl_apply3:
+
genClInfixOp:
+
genClInPlaceOp:
+
geomspace:
+
geqrf:
+
gesdd:
+
gesv:
+
get_cache_dir:
+
getContiguousIndex:
+
get_data_ptr:
+
getFancySelector:
+
getIndex:
+
get_num_tiles:
+
get_offset_ptr:
+
getrf:
+
getShape:
+
getSubType:
+
gru:
+
gru_backward:
+
gru_cell_backward:
+
gru_cell_forward:
+
gru_cell_inference:
+
gru_forward:
+
GRUGate:
+
gru_inference:
+
GRULayer:
+
HadamardGate:
+
has3DNow:
+
has3DNowEnhanced:
+
hasAbm:
+
hasAdx:
+
hasAes:
+
hasAmdv:
+
hasAvx:
+
hasAvx2:
+
hasAvx512bfloat16:
+
hasAvx512bitalg:
+
hasAvx512bw:
+
hasAvx512cd:
+
hasAvx512dq:
+
hasAvx512er:
+
hasAvx512f:
+
hasAvx512fmaps4:
+
hasAvx512ifma:
+
hasAvx512pf:
+
hasAvx512vbmi:
+
hasAvx512vbmi2:
+
hasAvx512vl:
+
hasAvx512vnni:
+
hasAvx512vnniw4:
+
hasAvx512vp2intersect:
+
hasAvx512vpopcntdq:
+
hasBmi1:
+
hasBmi2:
+
hasCas16B:
+
hasCas8B:
+
hasClflush:
+
hasClflushOpt:
+
hasClwb:
+
hasFloat16c:
+
hasFma3:
+
hasFma4:
+
hasGfni:
+
hasIntelVtx:
+
hasMmx:
+
hasMmxExt:
+
hasMovBigEndian:
+
hasMpx:
+
hasNxBit:
+
hasPclmulqdq:
+
hasPopcnt:
+
hasPrefetch:
+
hasPrefetchWT1:
+
hasRdrand:
+
hasRdseed:
+
hasSgx:
+
hasSha:
+
hasSimultaneousMultithreading:
+
hasSse:
+
hasSse2:
+
hasSse3:
+
hasSse41:
+
hasSse42:
+
hasSse4a:
+
hasSsse3:
+
hasTsxHle:
+
hasTsxRtm:
+
hasType:
+
hasVaes:
+
hasVpclmulqdq:
+
hasX87fpu:
+
hasXop:
+
high:
+
hilbert:
+
identity:
+
ijgrid:
+
im2col:
+
Im2ColGEMM:
+
im2colgemm_conv2d:
+
im2colgemm_conv2d_gradient:
+
imag:
+
imag=:
+
implDeprecatedBy:
+
index_fill:
+
index_select:
+
init:
+
initForEach:
+
initMetadataArray:
+
initSpanSlices:
+
initStridedIteration:
+
initTensorMetadata:
+
insert:
+
inShape:
+
iqr:
+
isAllInt:
+
isBool:
+
is_C_contiguous:
+
isContiguous:
+
is_F_contiguous:
+
is_grad_needed:
+
isHypervisorPresent:
+
isInt:
+
isNaN:
+
isNotNaN:
+
isOpenArray:
+
item:
+
items:
+
IterKind:
+
Iter_Values:
+
Jaccard:
+
kaiming_normal:
+
kaiming_uniform:
+
kde:
+
KDTree:
+
kdTree:
+
KernelFunc:
+
KernelKind:
+
kmeans:
+
knBox:
+
knCustom:
+
knEpanechnikov:
+
knGauss:
+
KnownSupportsCopyMem:
+
knTriangular:
+
knTrig:
+
LASER_MAXRANK:
+
LASER_MEM_ALIGN:
+
laswp:
+
layoutOnDevice:
+
least_squares_solver:
+
letsGoDeeper:
+
lgamma:
+
Linear:
+
linear:
+
linear_backward:
+
LinearGate:
+
linspace:
+
ln:
+
ln1p:
+
load_imdb:
+
load_mnist:
+
log10:
+
log2:
+
logspace:
+
logsumexp:
+
low:
+
lu_permuted:
+
m128:
+
m128d:
+
m128i:
+
m256:
+
m256d:
+
m256i:
+
m512:
+
m512d:
+
m512i:
+
mabs:
+
makeKernel:
+
makeUniversal:
+
makeUniversalLocal:
+
Manhattan:
+
map:
+
map2:
+
map2_inline:
+
map3_inline:
+
map_inline:
+
masked_axis_fill:
+
masked_axis_select:
+
masked_fill:
+
masked_fill_along_axis:
+
masked_select:
+
MatMulGate:
+
MatrixKind:
+
MatrixView:
+
max:
+
MaxPool2D:
+
maxpool2d:
+
maxpool2d_backward:
+
MaxPool2DGate:
+
MAXRANK:
+
mclamp:
+
mcopySign:
+
mean:
+
mean_absolute_error:
+
MeanGate:
+
mean_relative_error:
+
mean_squared_error:
+
median:
+
melwise_div:
+
melwise_mul:
+
menumerate:
+
menumerateZip:
+
meshgrid:
+
MeshGridIndexing:
+
Metadata:
+
MetadataArray:
+
MicroKernel:
+
min:
+
Minkowski:
+
mitems:
+
mkGenBand:
+
mkGeneral:
+
mkGenTriDiag:
+
mkPosDef:
+
mkPosDefBand:
+
mkPosDefTriDiag:
+
mkSymmetric:
+
mm256_add_epi16:
+
mm256_add_epi32:
+
mm256_add_epi64:
+
mm256_add_epi8:
+
mm256_add_pd:
+
mm256_add_ps:
+
mm256_and_ps:
+
mm256_and_si256:
+
mm256_castps256_ps128:
+
mm256_castps_si256:
+
mm256_castsi256_ps:
+
mm256_cmpgt_epi32:
+
mm256_cvtepi32_ps:
+
mm256_cvtps_epi32:
+
mm256_extractf128_ps:
+
mm256_fmadd_pd:
+
mm256_fmadd_ps:
+
mm256_i32gather_epi32:
+
mm256_load_pd:
+
mm256_load_ps:
+
mm256_load_si256:
+
mm256_loadu_pd:
+
mm256_loadu_ps:
+
mm256_loadu_si256:
+
mm256_max_ps:
+
mm256_min_ps:
+
mm256_movemask_epi8:
+
mm256_mul_epu32:
+
mm256_mullo_epi16:
+
mm256_mullo_epi32:
+
mm256_mul_pd:
+
mm256_mul_ps:
+
mm256_or_ps:
+
mm256_set1_epi16:
+
mm256_set1_epi32:
+
mm256_set1_epi64x:
+
mm256_set1_epi8:
+
mm256_set1_pd:
+
mm256_set1_ps:
+
mm256_setzero_pd:
+
mm256_setzero_ps:
+
mm256_setzero_si256:
+
mm256_shuffle_epi32:
+
mm256_slli_epi32:
+
mm256_srli_epi32:
+
mm256_srli_epi64:
+
mm256_store_pd:
+
mm256_store_ps:
+
mm256_storeu_pd:
+
mm256_storeu_ps:
+
mm256_storeu_si256:
+
mm256_sub_ps:
+
mm512_add_epi16:
+
mm512_add_epi32:
+
mm512_add_epi64:
+
mm512_add_epi8:
+
mm512_add_pd:
+
mm512_add_ps:
+
mm512_and_si512:
+
mm512_castps_si512:
+
mm512_castsi512_ps:
+
mm512_cmpgt_epi32_mask:
+
mm512_cvtepi32_ps:
+
mm512_cvtps_epi32:
+
mm512_fmadd_pd:
+
mm512_fmadd_ps:
+
mm512_i32gather_epi32:
+
mm512_load_pd:
+
mm512_load_ps:
+
mm512_load_si512:
+
mm512_loadu_pd:
+
mm512_loadu_ps:
+
mm512_loadu_si512:
+
mm512_maskz_set1_epi32:
+
mm512_max_ps:
+
mm512_min_ps:
+
mm512_movepi8_mask:
+
mm512_movm_epi32:
+
mm512_mullo_epi32:
+
mm512_mullo_epi64:
+
mm512_mul_pd:
+
mm512_mul_ps:
+
mm512_or_ps:
+
mm512_set1_epi16:
+
mm512_set1_epi32:
+
mm512_set1_epi64:
+
mm512_set1_epi8:
+
mm512_set1_pd:
+
mm512_set1_ps:
+
mm512_setzero_pd:
+
mm512_setzero_ps:
+
mm512_setzero_si512:
+
mm512_slli_epi32:
+
mm512_srli_epi32:
+
mm512_store_pd:
+
mm512_store_ps:
+
mm512_storeu_pd:
+
mm512_storeu_ps:
+
mm512_storeu_si512:
+
mm512_sub_ps:
+
mm_add_epi16:
+
mm_add_epi32:
+
mm_add_epi64:
+
mm_add_epi8:
+
mm_add_pd:
+
mm_add_ps:
+
mm_add_ss:
+
mm_and_si128:
+
mmask16:
+
mmask64:
+
mmax:
+
mm_castps_si128:
+
mm_castsi128_ps:
+
mm_cmpgt_epi32:
+
mm_cvtepi32_ps:
+
mm_cvtps_epi32:
+
mm_cvtsi128_si32:
+
mm_cvtss_f32:
+
mm_extract_epi16:
+
mm_i32gather_epi32:
+
mmin:
+
mm_load_pd:
+
mm_load_ps:
+
mm_load_si128:
+
mm_load_ss:
+
mm_loadu_pd:
+
mm_loadu_ps:
+
mm_loadu_si128:
+
mm_max_ps:
+
mm_max_ss:
+
mm_min_ps:
+
mm_min_ss:
+
mm_movehdup_ps:
+
mm_movehl_ps:
+
mm_moveldup_ps:
+
mm_movelh_ps:
+
mm_movemask_epi8:
+
mm_mul_epu32:
+
mm_mullo_epi16:
+
mm_mullo_epi32:
+
mm_mul_pd:
+
mm_mul_ps:
+
mm_or_ps:
+
mm_or_si128:
+
mm_set1_epi16:
+
mm_set1_epi32:
+
mm_set1_epi64x:
+
mm_set1_epi8:
+
mm_set1_pd:
+
mm_set1_ps:
+
mm_set_epi32:
+
mm_setzero_pd:
+
mm_setzero_ps:
+
mm_setzero_si128:
+
mm_shuffle_epi32:
+
mm_slli_epi32:
+
mm_slli_epi64:
+
mm_srli_epi32:
+
mm_srli_epi64:
+
mm_store_pd:
+
mm_store_ps:
+
mm_storeu_pd:
+
mm_storeu_ps:
+
mm_storeu_si128:
+
mm_sub_pd:
+
mm_sub_ps:
+
mnegate:
+
moveaxis:
+
mpairs:
+
mreciprocal:
+
mrelu:
+
MSELoss:
+
mse_loss:
+
msigmoid:
+
mtanh:
+
mzip:
+
naive_gemv_fallback:
+
nchw_channels:
+
nchw_height:
+
nchw_width:
+
nearestNeighbors:
+
negate:
+
network:
+
newClStorage:
+
newClTensor:
+
newContext:
+
newConv2dDesc:
+
newConvAlgoSpace:
+
newCudaStorage:
+
newCudaTensor:
+
newCudnn4DTensorDesc:
+
newCudnnConvKernelDesc:
+
newDiffs:
+
newMatrixUninitColMajor:
+
newParents:
+
newSeqUninit:
+
newSGD:
+
newTensor:
+
newTensorUninit:
+
newTensorWith:
+
newTiles:
+
NNPackAuto:
+
nnpack_conv2d:
+
nnpack_conv2d_gradient:
+
nnp_activation:
+
nnp_convolution_algorithm:
+
nnp_convolution_inference:
+
nnp_convolution_input_gradient:
+
nnp_convolution_kernel_gradient:
+
nnp_convolution_output:
+
nnp_convolution_transform_strategy:
+
nnp_convolution_transform_strategy_block_based:
+
nnp_convolution_transform_strategy_tuple_based:
+
nnp_deinitialize:
+
nnp_fully_connected_inference:
+
nnp_fully_connected_inference_f16f32:
+
nnp_fully_connected_output:
+
nnp_initialize:
+
nnp_max_pooling_output:
+
nnp_padding:
+
nnp_profile:
+
nnp_relu_input_gradient:
+
nnp_relu_output:
+
nnp_size:
+
nnp_softmax_output:
+
nnp_status:
+
nnp_status_invalid_activation:
+
nnp_status_invalid_activation_parameters:
+
nnp_status_invalid_output_subsampling:
+
Node:
+
no_grad_mode:
+
nonzero:
+
numberOne:
+
numerical_gradient:
+
Offset_Values:
+
OMP_FOR_THRESHOLD:
+
omp_get_max_threads:
+
omp_get_num_threads:
+
omp_get_thread_num:
+
OMP_MAX_REDUCE_BLOCKS:
+
omp_parallel_blocks:
+
omp_parallel_countup:
+
omp_parallel_forup:
+
omp_parallel_reduce_blocks:
+
omp_set_num_threads:
+
ones:
+
ones_like:
+
opencl:
+
Optimizer:
+
optimizer:
+
optimizerAdam:
+
optimizerSGD:
+
optimizerSGDMomentum:
+
orgqr:
+
ormqr:
+
outShape:
+
overload:
+
pack_A_mc_kc:
+
pack_B_kc_nc:
+
pairs:
+
pairwiseDistances:
+
partitionMNK:
+
Payload:
+
PayloadKind:
+
pca:
+
PCA_Detailed:
+
pca_detailed:
+
percentile:
+
permute:
+
permuteImpl:
+
phase:
+
pinv:
+
pkSeq:
+
pkVar:
+
pop:
+
prefetch:
+
PrefetchLocality:
+
PrefetchRW:
+
pretty:
+
prettyImpl:
+
product:
+
pthreadpool_t:
+
qr:
+
query:
+
query_ball_point:
+
radToDeg:
+
randomNormalTensor:
+
randomTensor:
+
rank:
+
raw_data_unaligned:
+
RawImmutableView:
+
RawMutableView:
+
read_csv:
+
readFloat32BE:
+
readFloat32LE:
+
readFloat64BE:
+
readFloat64LE:
+
read_hdf5:
+
read_image:
+
readInt32BE:
+
readInt32LE:
+
readInt64BE:
+
readInt64LE:
+
read_mnist_images:
+
read_mnist_labels:
+
read_npy:
+
readUInt16LE:
+
readUInt32BE:
+
readUInt32LE:
+
readUInt64BE:
+
readUInt64LE:
+
real:
+
real=:
+
reciprocal:
+
reduce:
+
reduce_axis_inline:
+
reduce_inline:
+
register_node:
+
relative_error:
+
relu:
+
ReluActivation:
+
relu_backward:
+
replaceNodes:
+
replaceSymsByIdents:
+
reshape:
+
ReshapeGate:
+
reshapeImpl:
+
reshape_no_copy:
+
reshape_with_copy:
+
returnEmptyIfEmpty:
+
reversed:
+
rewriteTensor_AddMultiply:
+
rewriteTensor_MultiplyAdd:
+
rewriteTensor_MultiplyAdd_inplace:
+
rewriteToTensorReshape:
+
roll:
+
round:
+
round_step_down:
+
round_step_up:
+
same:
+
set_diagonal:
+
setLen:
+
setZero:
+
SGD:
+
SGDMomentum:
+
sgn:
+
shape_to_strides:
+
sigmoid:
+
SigmoidActivation:
+
sigmoid_backward:
+
sigmoid_cross_entropy:
+
sigmoid_cross_entropy_backward:
+
SigmoidCrossEntropyLoss:
+
sin:
+
sinc:
+
sinh:
+
size:
+
Size2D:
+
SizeHW:
+
skipIfEmpty:
+
sliceDispatchImpl:
+
slicer:
+
slicerImpl:
+
slicerMut:
+
slice_typed_dispatch:
+
slice_typed_dispatch_mut:
+
slice_typed_dispatch_var:
+
SmallDiffs:
+
softmax:
+
SoftmaxActivation:
+
softmax_cross_entropy:
+
softmax_cross_entropy_backward:
+
SoftmaxCrossEntropyLoss:
+
solve:
+
sort:
+
sorted:
+
sparse_softmax_cross_entropy:
+
sparse_softmax_cross_entropy_backward:
+
SparseSoftmaxCrossEntropyLoss:
+
split:
+
sqrt:
+
square:
+
squared_error:
+
squeeze:
+
squeezeImpl:
+
stable_softmax:
+
stack:
+
std:
+
Step:
+
SteppedSlice:
+
streaming_max_sumexp:
+
stride:
+
stridedBodyTemplate:
+
stridedChunkOffset:
+
stridedCoordsIteration:
+
stridedIteration:
+
stridedIterationYield:
+
stridedVarsSetup:
+
SubGate:
+
sum:
+
SumGate:
+
SupportedDecomposition:
+
svd:
+
svd_randomized:
+
syevr:
+
symeig:
+
syrk:
+
SyrkKind:
+
tan:
+
tanh:
+
TanhActivation:
+
tanh_backward:
+
Tensor:
+
Tiles:
+
tnInner:
+
tnLeaf:
+
toArrayOfSlices:
+
toClpointer:
+
to_csv:
+
toFlatSeq:
+
toMatrixView:
+
toMetadata:
+
toMetadataArray:
+
to_ptr:
+
toRawSeq:
+
toSeq1D:
+
toSeq2D:
+
toSeq3D:
+
toSeq4D:
+
toSeq5D:
+
toTensor:
+
toUnsafeView:
+
transpose:
+
TreeNodeKind:
+
tri:
+
triangular:
+
triangularKernel:
+
trigonometric:
+
trigonometricKernel:
+
tril:
+
tril_unit_diag:
+
tril_unit_diag_mut:
+
tripleStridedIteration:
+
tripleStridedIterationYield:
+
triu:
+
trunc:
+
ukernel_generator:
+
ukernel_generic_impl:
+
ukernel_simd_impl:
+
unsafe_raw_buf:
+
unsafe_raw_offset:
+
unsqueeze:
+
unsqueezeImpl:
+
unwrap_period:
+
update:
+
valid:
+
Values:
+
vander:
+
vandermonde:
+
Variable:
+
variable:
+
variance:
+
whitespaceTokenizer:
+
withCompilerOptimHints:
+
with_diagonal:
+
withMemoryOptimHints:
+
write_bmp:
+
write_hdf5:
+
write_jpg:
+
write_npy:
+
write_png:
+
write_tga:
+
x86_AVX:
+
x86_AVX2:
+
x86_AVX512:
+
x86_AVX_FMA:
+
x86_Generic:
+
x86only:
+
x86_SSE:
+
x86_SSE2:
+
x86_SSE4_1:
+
x86_ukernel:
+
xavier_normal:
+
xavier_uniform:
+
xygrid:
+
yann_normal:
+
yann_uniform:
+
zeroGrads:
+
zeros:
+
zeros_like:
+
zip:
+
zipAxis:
+
+
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/tokenizers.html b/tokenizers.html new file mode 100644 index 000000000..c95191c3b --- /dev/null +++ b/tokenizers.html @@ -0,0 +1,445 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/nlp/tokenizers + + + + + + + + + +Arraymancer - src/arraymancer/nlp/tokenizers + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/nlp/tokenizers

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Imports

+
+ tensor +
+
+
+

Iterators

+
+
+
+
iterator whitespaceTokenizer(input: Tensor[string]): seq[string] {....raises: [],
+    tags: [], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/tokenizers.idx b/tokenizers.idx new file mode 100644 index 000000000..ec538ee3f --- /dev/null +++ b/tokenizers.idx @@ -0,0 +1,2 @@ +nimTitle tokenizers tokenizers.html module src/arraymancer/nlp/tokenizers 0 +nim whitespaceTokenizer tokenizers.html#whitespaceTokenizer.i,Tensor[string] iterator whitespaceTokenizer(input: Tensor[string]): seq[string] 9 diff --git a/triangular.html b/triangular.html new file mode 100644 index 000000000..da5f4ed8e --- /dev/null +++ b/triangular.html @@ -0,0 +1,521 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/linear_algebra/helpers/triangular + + + + + + + + + +Arraymancer - src/arraymancer/linear_algebra/helpers/triangular + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/linear_algebra/helpers/triangular

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+ +
+

Procs

+
+
+
+
proc tril[T](a: Tensor[T]; k: static int = 0): Tensor[T]
+
+ +

Lower triangle of a matrix.

+

Return a copy of a matrix with the elements above the k-th diagonal zeroed.

+

Inputs:

+
  • a: the input matrix (i.e. a rank-2 tensor)
  • +
  • k: the diagonal above which elements will be zeroed (default = 0, i.e. the main diagonal)
  • +
+

Result:

+
  • A copy of the input matrix with the elements above the k-th diagonal zeroed.
  • +
+ +   Source +Edit + +
+
+ +
+
+
+
proc tril_unit_diag[T](a: Tensor[T]): Tensor[T]
+
+ + Lower-triangular matrix with unit diagonal For use with getrf which returns LU matrices with L a unit diagonal (not returned) and U a non-unit diagonal (present) +   Source +Edit + +
+
+ +
+
+
+
proc tril_unit_diag_mut[T](a: var Tensor[T])
+
+ +

Lower-triangular matrix with unit diagonal For use with getrf which returns LU matrices

+

The input upper-half is overwritten with 0 The input diagonal is overwritten with 1 Input must be column major

+ +   Source +Edit + +
+
+ +
+
+
+
proc triu[T](a: Tensor[T]; k: static int = 0): Tensor[T]
+
+ +

Upper triangle of a matrix.

+

Return a copy of a matrix with the elements below the k-th diagonal zeroed.

+

Inputs:

+
  • a: the input matrix (i.e. a rank-2 tensor)
  • +
  • k: the diagonal below which elements will be zeroed (default = 0, i.e. the main diagonal)
  • +
+

Result:

+
  • A copy of the input matrix with the elements below the k-th diagonal zeroed.
  • +
+ +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/triangular.idx b/triangular.idx new file mode 100644 index 000000000..5aaa93a9f --- /dev/null +++ b/triangular.idx @@ -0,0 +1,5 @@ +nimTitle triangular triangular.html module src/arraymancer/linear_algebra/helpers/triangular 0 +nim triu triangular.html#triu,Tensor[T],staticint proc triu[T](a: Tensor[T]; k: static int = 0): Tensor[T] 64 +nim tril triangular.html#tril,Tensor[T],staticint proc tril[T](a: Tensor[T]; k: static int = 0): Tensor[T] 77 +nim tril_unit_diag triangular.html#tril_unit_diag,Tensor[T] proc tril_unit_diag[T](a: Tensor[T]): Tensor[T] 90 +nim tril_unit_diag_mut triangular.html#tril_unit_diag_mut,Tensor[T] proc tril_unit_diag_mut[T](a: var Tensor[T]) 137 diff --git a/tuto.aggregate_stats.html b/tuto.aggregate_stats.html new file mode 100644 index 000000000..0e8340c45 --- /dev/null +++ b/tuto.aggregate_stats.html @@ -0,0 +1,364 @@ + + + + + + + + + + + + + + + + + + +Tutorial: Aggregate and statistics + + + + + + + + + +Arraymancer - Tutorial: Aggregate and statistics + + + + + + + +Fork me on GitHub + + +
+
+

Tutorial: Aggregate and statistics

+

sum and mean functions are available to compute the sum and mean of a tensor. sum and mean can also be computed along an axis with the axis argument.

+

Generic aggregates on the whole tensor or along an axis can be computed with agg and agg_inplace functions.

+ + + +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/tuto.broadcasting.html b/tuto.broadcasting.html new file mode 100644 index 000000000..89fee43f8 --- /dev/null +++ b/tuto.broadcasting.html @@ -0,0 +1,376 @@ + + + + + + + + + + + + + + + + + + +Tutorial: Broadcasting + + + + + + + + + +Arraymancer - Tutorial: Broadcasting + + + + + + + +Fork me on GitHub + + +
+
+

Tutorial: Broadcasting

+

Arraymancer supports explicit broadcasting with broadcast and its alias bc. And supports implicit broadcasting with operations beginning with a dot:

+
let j = [0, 10, 20, 30].toTensor.reshape(4,1)
+let k = [0, 1, 2].toTensor.reshape(1,3)
+
+echo j +. k
+# Tensor[int] of shape "[4, 3]" on backend "Cpu"
+# |0       1       2|
+# |10     11      12|
+# |20     21      22|
+# |30     31      32|
  • +.,-.,
  • +
  • *.: broadcasted element-wise matrix multiplication also called Hadamard product)
  • +
  • ./: broadcasted element-wise division or integer-division
  • +
  • +.=, -.=, *.=, /.=: in-place versions. Only the right operand is broadcastable.
  • +
+ + + +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/tuto.first_steps.html b/tuto.first_steps.html new file mode 100644 index 000000000..d57f14e03 --- /dev/null +++ b/tuto.first_steps.html @@ -0,0 +1,493 @@ + + + + + + + + + + + + + + + + + + +Tutorial: First steps + + + + + + + + + +Arraymancer - Tutorial: First steps + + + + + + + +Fork me on GitHub + + +
+
+

Tutorial: First steps

+ +

Tensor properties

Tensors have the following properties: - rank: - 0 for scalar (unfortunately cannot be stored) - 1 for vector - 2 for matrices - N for N-dimension array - shape: a sequence of the tensor dimensions along each axis.

+

Next properties are technical and there for completeness - strides: a sequence of numbers of steps to get the next item along a dimension. - offset: the first element of the tensor

+
import arraymancer
+
+let d = [[1, 2, 3], [4, 5, 6]].toTensor()
+
+echo d
+# Tensor[int] of shape "[2, 3]" on backend "Cpu"
+# |1      2       3|
+# |4      5       6|
+
+echo d.rank # 2
+echo d.shape # @[2, 3]
+echo d.strides # @[3, 1] => Next row is 3 elements away in memory while next column is 1 element away.
+echo d.offset # 0
+

Tensor creation

The canonical way to initialize a tensor is by converting a seq of seq of โ€ฆ or an array of array of โ€ฆ into a tensor using toTensor.

+

toTensor supports deep nested sequences and arrays, even sequence of arrays of sequences.

+
import arraymancer
+
+let c = [
+          [
+            [1,2,3],
+            [4,5,6]
+          ],
+          [
+            [11,22,33],
+            [44,55,66]
+          ],
+          [
+            [111,222,333],
+            [444,555,666]
+          ],
+          [
+            [1111,2222,3333],
+            [4444,5555,6666]
+          ]
+        ].toTensor()
+echo c
+
+# Tensor[system.int] of shape "[4, 2, 3]" on backend "Cpu"
+#           0                      1                      2                      3
+# |1          2       3| |11        22      33| |111      222     333| |1111    2222    3333|
+# |4          5       6| |44        55      66| |444      555     666| |4444    5555    6666|

newTensor procedure can be used to initialize a tensor of a specific shape with a default value. (0 for numbers, false for bool โ€ฆ)

+

zeros and ones procedures create a new tensor filled with 0 and 1 respectively.

+

zeros_like and ones_like take an input tensor and output a tensor of the same shape but filled with 0 and 1 respectively.

+
let e = newTensor[bool]([2, 3])
+# Tensor[bool] of shape "[2, 3]" on backend "Cpu"
+# |false  false   false|
+# |false  false   false|
+
+let f = zeros[float]([4, 3])
+# Tensor[float] of shape "[4, 3]" on backend "Cpu"
+# |0.0    0.0     0.0|
+# |0.0    0.0     0.0|
+# |0.0    0.0     0.0|
+# |0.0    0.0     0.0|
+
+let g = ones[float]([4, 3])
+# Tensor[float] of shape "[4, 3]" on backend "Cpu"
+# |1.0    1.0     1.0|
+# |1.0    1.0     1.0|
+# |1.0    1.0     1.0|
+# |1.0    1.0     1.0|
+
+let tmp = [[1,2],[3,4]].toTensor()
+let h = tmp.zeros_like
+# Tensor[int] of shape "[2, 2]" on backend "Cpu"
+# |0      0|
+# |0      0|
+
+let i = tmp.ones_like
+# Tensor[int] of shape "[2, 2]" on backend "Cpu"
+# |1      1|
+# |1      1|
+

Accessing and modifying a value

Tensors value can be retrieved or set with array brackets.

+
var a = toSeq(1..24).toTensor().reshape(2,3,4)
+
+echo a
+# Tensor[system.int] of shape "[2, 3, 4]" on backend "Cpu"
+#           0                      1
+# |1      2     3     4| |13    14    15    16|
+# |5      6     7     8| |17    18    19    20|
+# |9     10    11    12| |21    22    23    24|
+
+echo a[1, 1, 1]
+# 18
+
+a[1, 1, 1] = 999
+echo a
+# Tensor[system.int] of shape "[2, 3, 4]" on backend "Cpu"
+#             0                          1
+# |1        2      3      4| |13      14     15     16|
+# |5        6      7      8| |17     999     19     20|
+# |9       10     11     12| |21      22     23     24|
+

Copying

Warning โš : When you do the following, both tensors a and b will share data. Full copy must be explicitly requested via the clone function.

+
let a = toSeq(1..24).toTensor().reshape(2,3,4)
+var b = a

Here modifying b WILL modify a. This behaviour is the same as Numpy and Julia, reasons can be found in the following under the hood article.

+ +

Tensor printing

As already seen in the examples above, printing of tensors of arbitrary dimensionality is supported. For dimensions larger than 2 we need to pick a way to represent them on a 2D screen.

+

We pick a representation that is possibly the most "natural" generalization of pretty printing up to 2 dimensions. Consider the following:

+
  • A scalar is of "even" rank (0) and is printed as a 1x1 grid.
  • +
  • A vector (odd rank 1) is represented by a row of scalars. That is a stacking of dimension N - 1 along the horizontal axis.
  • +
  • A matrix (even rank 2) is represented by stacking rows of vectors. That is we extend along the vertical axis of elements of dimension N - 1.
  • +
+

From here we continue along the pattern:

+
  • Odd dimensions N are horizontal stacks of the pretty print of N - 1
  • +
  • Even dimensions N are vertical stacks of the pretty print of N - 1
  • +
+

To help with visibility separators | and - are applied between stacks of different dimensions.

+

This yields a final 2D table of numbers where the dimension "increases" from outside to inside.

+

If we have a tensor of shape [2, 3, 4, 3, 2] the most "outer" layer is the first 2. As it is an odd dimension, this rank will be stacked horizontally. The next dimension 3 will be a stack in vertical. Inside of that are 4 horizontal stacks again until we reach the last two dimensions [3, 2], which are simply printed as expected for a 2D tensor.

+

To help with readability, the index of each of these dimensions is printed on the top (odd dimension) / left (even dimension) of the layer.

+

Take a look at the printing result of the aforementioned shape and try to understand the indexing shown on the top / right and how it relates to the different dimensions:

+
let t1 = toSeq(1..144).toTensor().reshape(2,3,4,3,2)
+# Tensor[system.int] of shape "[2, 3, 4, 3, 2]" on backend "Cpu"
+#                           0                            |                            1
+#        0            1            2            3        |         0            1            2            3
+#   |1        2| |7        8| |13      14| |19      20|  |    |73      74| |79      80| |85      86| |91      92|
+# 0 |3        4| |9       10| |15      16| |21      22|  |  0 |75      76| |81      82| |87      88| |93      94|
+#   |5        6| |11      12| |17      18| |23      24|  |    |77      78| |83      84| |89      90| |95      96|
+#   ---------------------------------------------------  |    ---------------------------------------------------
+#        0            1            2            3        |         0            1            2            3
+#   |25      26| |31      32| |37      38| |43      44|  |    |97      98| |103    104| |109    110| |115    116|
+# 1 |27      28| |33      34| |39      40| |45      46|  |  1 |99     100| |105    106| |111    112| |117    118|
+#   |29      30| |35      36| |41      42| |47      48|  |    |101    102| |107    108| |113    114| |119    120|
+#   ---------------------------------------------------  |    ---------------------------------------------------
+#        0            1            2            3        |         0            1            2            3
+#   |49      50| |55      56| |61      62| |67      68|  |    |121    122| |127    128| |133    134| |139    140|
+# 2 |51      52| |57      58| |63      64| |69      70|  |  2 |123    124| |129    130| |135    136| |141    142|
+#   |53      54| |59      60| |65      66| |71      72|  |    |125    126| |131    132| |137    138| |143    144|
+#   ---------------------------------------------------  |    ---------------------------------------------------
+ + +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/tuto.iterators.html b/tuto.iterators.html new file mode 100644 index 000000000..6b87d9f39 --- /dev/null +++ b/tuto.iterators.html @@ -0,0 +1,400 @@ + + + + + + + + + + + + + + + + + + +Tutorial: Iterators + + + + + + + + + +Arraymancer - Tutorial: Iterators + + + + + + + +Fork me on GitHub + + +
+
+

Tutorial: Iterators

+

Tensors can be iterated in the proper order. Arraymancer provides:

+
  • items and pairs. pairs returns the coordinates of the tensor.
  • +
+
import ../arraymancer, sequtils
+
+let a = toSeq(1..24).toTensor.reshape(2,3,4)
+# Tensor[system.int] of shape "[2, 3, 4]" on backend "Cpu"
+#           0                      1
+# |1      2     3     4| |13    14    15    16|
+# |5      6     7     8| |17    18    19    20|
+# |9     10    11    12| |21    22    23    24|
+
+for v in a:
+  echo v
+
+for coord, v in a:
+  echo coord
+  echo v
+# @[0, 0, 0]
+# 1
+# @[0, 0, 1]
+# 2
+# @[0, 0, 2]
+# 3
+# @[0, 0, 3]
+# 4
+# @[0, 1, 0]
+# 5
+# @[0, 1, 1]
+# 6
+# @[0, 1, 2]
+# 7
+# @[0, 1, 3]
+# 8
+# @[0, 2, 0]
+# 9
+# ...

For convenience a values closure iterator is available for iterator chaining. values is equivalent to items.

+

A mitems iterator is available to directly mutate elements while iterating. An axis iterator is available to iterate along an axis.

+ + + +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/tuto.linear_algebra.html b/tuto.linear_algebra.html new file mode 100644 index 000000000..e5c286d7d --- /dev/null +++ b/tuto.linear_algebra.html @@ -0,0 +1,378 @@ + + + + + + + + + + + + + + + + + + +Tutorial: Matrix and vectors operations + + + + + + + + + +Arraymancer - Tutorial: Matrix and vectors operations + + + + + + + +Fork me on GitHub + + +
+
+

Tutorial: Matrix and vectors operations

+

The following linear algebra operations are supported for tensors of rank 1 (vectors) and 2 (matrices):

+
  • dot product (Vector to Vector) using dot
  • +
  • addition and substraction (any rank) using + and -
  • +
  • in-place addition and substraction (any-rank) using += and -=
  • +
  • multiplication or division by a scalar using * and /
  • +
  • matrix-matrix multiplication using *
  • +
  • matrix-vector multiplication using *
  • +
  • element-wise multiplication (Hadamard product) using *.
  • +
+

Note: Matrix operations for floats are accelerated using BLAS (Intel MKL, OpenBLAS, Apple Accelerate โ€ฆ). Unfortunately there is no acceleration routine for integers. Integer matrix-matrix and matrix-vector multiplications are implemented via semi-optimized routines, see the benchmarks section.

+
echo foo_float * foo_float # Accelerated Matrix-Matrix multiplication (needs float)
+# Tensor[float] of shape "[5, 5]" on backend "Cpu"
+# |15.0         55.0      225.0       979.0       4425.0|
+# |258.0      1146.0     5274.0     24810.0     118458.0|
+# |1641.0     7653.0    36363.0    174945.0     849171.0|
+# |6372.0    30340.0   146244.0    710980.0    3478212.0|
+# |18555.0   89355.0   434205.0   2123655.0   10436805.0|
+ + +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/tuto.map_reduce.html b/tuto.map_reduce.html new file mode 100644 index 000000000..d5233bf4e --- /dev/null +++ b/tuto.map_reduce.html @@ -0,0 +1,384 @@ + + + + + + + + + + + + + + + + + + +Tutorial: Higher-order functions (Map, Reduce, Fold) + + + + + + + + + +Arraymancer - Tutorial: Higher-order functions (Map, Reduce, Fold) + + + + + + + +Fork me on GitHub + + +
+
+

Tutorial: Higher-order functions (Map, Reduce, Fold)

+

Arraymancer supports efficient higher-order functions on the whole tensor or on an axis.

+ +

map, apply, map2, apply2

a.map(x => x+1)

or

+
proc plusone[T](x: T): T =
+  x + 1
+a.map(plusone) # Map the function plusone

Note: for basic operation, you can use implicit broadcasting instead a +. 1

+

apply is the same as map but in-place.

+

map2 and apply2 takes 2 input tensors and respectively, return a new one or modify the first in-place.

+
proc `**`[T](x, y: T): T = # We create a new power `**` function that works on 2 scalars
+  pow(x, y)
+a.map2(`**`, b)
+# Or
+map2(a, `**`, b)
+

reduce on the whole Tensor or along an axis

reduce apply a function like + or max on the whole Tensor[T] returning a single value T.

+

For example: - Reducing with + returns the sum of all elements of the Tensor. - Reducing with max returns the biggest element of the Tensor

+

reduce can be applied along an axis, for example the sum along the rows of a Tensor.

+ +

fold on the whole Tensor or along an axis

fold is a generalization of reduce. Its starting value is not the first element of the Tensor.

+

It can do anything that reduce can, but also has other tricks because it is not constrained by the Tensor type or starting value.

+

For example: - Reducing with was_a_odd_and_what_about_b and a starting value of true returns true if all elements are odd or false otherwise

+

Just in case

+
proc was_a_odd_and_what_about_b[T: SomeInteger](a: bool, b: T): bool =
+  return a and (b mod 2 == 1) # a is the result of previous computations, b is the new integer to check.
+ + +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/tuto.shapeshifting.html b/tuto.shapeshifting.html new file mode 100644 index 000000000..d66b4a782 --- /dev/null +++ b/tuto.shapeshifting.html @@ -0,0 +1,420 @@ + + + + + + + + + + + + + + + + + + +Tutorial: Transposing, Reshaping, Permuting, Concatenating + + + + + + + + + +Arraymancer - Tutorial: Transposing, Reshaping, Permuting, Concatenating + + + + + + + +Fork me on GitHub + + +
+
+

Tutorial: Transposing, Reshaping, Permuting, Concatenating

+ +

Shapeshifting

+

Transposing

The transpose function will reverse the dimensions of a tensor.

+ +

Reshaping

The reshape function will change the shape of a tensor. The number of elements in the new and old shape must be the same.

+

For example:

+
let a = toSeq(1..24).toTensor().reshape(2,3,4)
+
+# Tensor[system.int] of shape "[2, 3, 4]" on backend "Cpu"
+#           0                      1
+# |1      2     3     4| |13    14    15    16|
+# |5      6     7     8| |17    18    19    20|
+# |9     10    11    12| |21    22    23    24|

The 0 and 1 correspond to the index along the first dimension of the reshaped tensor.

+ +

Permuting - Reordering dimension

The permute proc can be used to reorder dimensions. Input is a tensor and the new dimension order

+
let a = toSeq(1..24).toTensor.reshape(2,3,4)
+echo a
+
+# Tensor[system.int] of shape "[2, 3, 4]" on backend "Cpu"
+#           0                      1
+# |1      2     3     4| |13    14    15    16|
+# |5      6     7     8| |17    18    19    20|
+# |9     10    11    12| |21    22    23    24|
+
+echo a.permute(0,2,1) # dim 0 stays at 0, dim 1 becomes dim 2 and
+dim 2 becomes dim 1
+
+# Tensor[system.int] of shape "[2, 4, 3]" on backend "Cpu"
+#        0                1
+# |1      5     9| |13    17    21|
+# |2      6    10| |14    18    22|
+# |3      7    11| |15    19    23|
+# |4      8    12| |16    20    24|
+

Concatenation

Tensors can be concatenated along an axis with the concat proc.

+
import ../arraymancer, sequtils
+
+
+let a = toSeq(1..4).toTensor.reshape(2,2)
+
+let b = toSeq(5..8).toTensor.reshape(2,2)
+
+let c = toSeq(11..16).toTensor
+let c0 = c.reshape(3,2)
+let c1 = c.reshape(2,3)
+
+echo concat(a,b,c0, axis = 0)
+# Tensor[system.int] of shape "[7, 2]" on backend "Cpu"
+# |1       2|
+# |3       4|
+# |5       6|
+# |7       8|
+# |11     12|
+# |13     14|
+# |15     16|
+
+echo concat(a,b,c1, axis = 1)
+# Tensor[system.int] of shape "[2, 7]" on backend "Cpu"
+# |1       2     5     6    11    12    13|
+# |3       4     7     8    14    15    16|
+ + +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/tuto.slicing.html b/tuto.slicing.html new file mode 100644 index 000000000..4bbc1b5bf --- /dev/null +++ b/tuto.slicing.html @@ -0,0 +1,557 @@ + + + + + + + + + + + + + + + + + + +Tutorial: Slicing + + + + + + + + + +Arraymancer - Tutorial: Slicing + + + + + + + +Fork me on GitHub + + +
+
+

Tutorial: Slicing

+

Arraymancer supports the following slicing syntax. It allows for selecting dimension subsets, whole dimensions, stepping (e.g. one out of 2 rows), reversing dimensions and counting from the end.

+
import arraymancer
+
+let foo = vandermonde(arange(1, 6), arange(1, 6)).asType(int)
+
+echo foo
+
+# Tensor[int] of shape "[5, 5]" on backend "Cpu"
+# |1       1        1       1        1|
+# |2       4        8      16       32|
+# |3       9       27      81      243|
+# |4      16       64     256     1024|
+# |5      25      125     625     3125|
+
+echo foo[1..2, 3..4] # slice
+
+# Tensor[int] of shape "[2, 2]" on backend "Cpu"
+# |16      32|
+# |81     243|
+
+echo foo[1..<3, 3..<5] # use "..<" if you do not want to include the end in the slice
+
+# Tensor[int] of shape "[2, 2]" on backend "Cpu"
+# |16      32|
+# |81     243|
+
+echo foo[_, 3..4] # Span slice (i.e. "_") means "all items" in the dimension (in this case "all rows")
+                  # Note that "_" is equivalent (and preferred) to "_.._"
+
+# Tensor[system.int] of shape "[5, 2]" on backend "Cpu"
+# |1          1|
+# |16        32|
+# |81       243|
+# |256     1024|
+# |625     3125|
+
+echo foo[3.._, _] # Partial span slice (".._" means "until the end")
+
+# Tensor[system.int] of shape "[2, 5]" on backend "Cpu"
+# |4         16      64     256    1024|
+# |5         25     125     625    3125|
+
+echo foo[_..2, _] # Partial span slice ("_.." means "from the beginning" and is rarely useful)
+
+# Tensor[system.int] of shape "[3, 5]" on backend "Cpu"
+# |1        1      1      1      1|
+# |2        4      8     16     32|
+# |3        9     27     81    243|
+
+echo foo[1..^3, _] # Slice until the 3rd element from the end (inclusive, consistent with Nim,
+                   # cannot be combined with "..<")
+
+# Tensor[system.int] of shape "[3, 5]" on backend "Cpu"
+# |2        4      8     16     32|
+# |3        9     27     81    243|
+
+echo foo[_|2, _] # Take steps of 2 to get all the rows in the even positions
+
+# Tensor[system.int] of shape "[3, 5]" on backend "Cpu"
+# |1          1       1       1       1|
+# |3          9      27      81     243|
+# |5         25     125     625    3125|
+
+echo foo[1.._|2, _] # Take steps of 2 starting on the second element (i.e. index 1)
+                    # to get all the rows in the odd positions
+
+# Tensor[system.int] of shape "[2, 5]" on backend "Cpu"
+# |2          4       8      16      32|
+# |4         16      64     256    1024|
+
+echo foo[3..1|-2, _] # Negative steps are also supported,
+                     # but require a slice start that is higher than the slice end
+
+# Tensor[system.int] of shape "[2, 5]" on backend "Cpu"
+# |4         16      64     256    1024|
+# |2          4       8      16      32|
+
+echo foo[^1..^3|-1, _] # Combining "^" with negative steps is supported,
+                       # and make it easy to go through a tensor from the back,
+                       # but note the offset of 1 compared to positive steps
+                       # (i.e. ^1 points to the last element, not the second to last)
+
+# Tensor[system.int] of shape "[2, 5]" on backend "Cpu"
+# |5         25     125     625    3125|
+# |4         16      64     256    1024|
+# |3          9      27      81     243|
+
+echo foo[_|-1, _] # Combining "_" with a -1 step is the easiest way to reverse a tensor
+
+# Tensor[int] of shape "[5, 5]" on backend "Cpu"
+# |5      25      125     625     3125|
+# |4      16       64     256     1024|
+# |3       9       27      81      243|
+# |2       4        8      16       32|
+# |1       1        1       1        1|
+
+# Note that while "_" and "_.._" are equivalent to "^1..0"
+# partial slices currently do not work with negative steps
+

Slice mutations

Slices can also be mutated with a single value, a nested seq or array, a tensor or tensor slice.

+

For certain use cases slice mutations can have less than intuitive results, because the mutation happens on the same memory the whole time. See the last mutation shown in the following code block for such an example and the explanation below.

+
import arraymancer
+
+var foo = vandermonde(arange(1, 6), arange(1, 6)).asType(int)
+
+echo foo
+
+# Tensor[int] of shape "[5, 5]" on backend "Cpu"
+# |1       1        1       1        1|
+# |2       4        8      16       32|
+# |3       9       27      81      243|
+# |4      16       64     256     1024|
+# |5      25      125     625     3125|
+
+# Mutation with a single value
+foo[1..2, 3..4] = 999
+
+echo foo
+# Tensor[int] of shape "[5, 5]" on backend "Cpu"
+# |1       1        1       1       1|
+# |2       4        8     999     999|
+# |3       9       27     999     999|
+# |4      16       64     256    1024|
+# |5      25      125     625    3125|
+
+# Mutation with nested array or nested seq
+foo[0..1,0..1] = [[111, 222], [333, 444]]
+
+echo foo
+# Tensor[int] of shape "[5, 5]" on backend "Cpu"
+# |111    222       1       1       1|
+# |333    444       8     999     999|
+# |3        9      27     999     999|
+# |4       16      64     256    1024|
+# |5       25     125     625    3125|
+
+# Mutation with a tensor or tensor slice.
+foo[^2..^1,2..4] = foo[^1..^2|-1, 4..2|-1]
+
+echo foo
+# Tensor[system.int] of shape [5, 5]" on backend "Cpu"
+# |111    222       1      1       1|
+# |333    444       8    999     999|
+# |3        9      27    999     999|
+# |4       16    3125    625     125|
+# |5       25     125    625    3125|

The careful reader might have expected a different result for the final mutation foo[^2..^1,2..4] = foo[^1..^2|-1, 4..2|-1]. Namely, that the bottom right block of the input tensor:

+
# |64      256     1024|
+# |125     625     3125|

might simply be exchanged row-wise and reversed column-wise to give the following result:

+
# |3125    625     125|
+# |1024    256      64|

However, this result would only be obtained, if slicing mutation used a temporary copy of the input tensor. To see what happens exactly, consider the following code. Here foo is foo as it was computed before the final mutation in the full code sample from above.

+
# first let's print the LHS we write to
+echo foo[^2..^1, 2..4]
+# Tensor[system.int] of shape [2, 3]" on backend "Cpu"
+# |64     256     1024|
+# |125    625     3125|
+
+# now print the RHS we read from
+echo foo[^1..^2|-1, 4..2|-1]
+# Tensor[system.int] of shape [2, 3]" on backend "Cpu"
+# |3125   625     125|
+# |1024   256      64|
+
+# this means we first perform this:
+foo[^2, 2..4] = foo[^1, 4..2|-1]
+echo foo
+# Tensor[system.int] of shape [5, 5]" on backend "Cpu"
+# |111    222       1      1       1|
+# |333    444       8    999     999|
+# |3        9      27    999     999|
+# |4       16    3125    625     125|
+# |5       25     125    625    3125|
+
+# and then the following. At this step (compare the output above) nothing appears to change:
+foo[^1, 2..4] = foo[^2, 4..2|-1]
+echo foo
+# Tensor[system.int] of shape [5, 5]" on backend "Cpu"
+# |111    222       1      1       1|
+# |333    444       8    999     999|
+# |3        9      27    999     999|
+# |4       16    3125    625     125|
+# |5       25     125    625    3125|

In effect it makes it seem like the final mutation does not do anything at all! But that is only because the final operation effectively inverts the second-to-last one: it copies the values we had just written to the second-to-last row back, in reverse, into the last row. Since the last row is exactly where those values originated from, nothing "happens".

+ +

Boolean Masks

In addition to regular slicing, boolean masks can be used to select items from a tensor. The mask should have the same shape as the tensor it is used on. However, the result of the mask operation will be a flat, 1-D tensor with the selected items.

+
foo = vandermonde.toTensor()
+echo foo[foo >. 27]
+
+# Tensor[system.int] of shape "[9]" on backend "Cpu"
+#     32      81     243      64     256    1024     125     625    3125

Boolean masks can also be used to mutate a tensor. The tensor is mutated in place, and thus it maintains its original shape.

+
foo = vandermonde.toTensor()
+foo[foo >. 27] = -arange(9)
+
+# Tensor[system.int] of shape "[5, 5]" on backend "Cpu"
+# |1      1     1     1     1|
+# |2      4     8    16     0|
+# |3      9    27    -1    -2|
+# |4     16    -3    -4    -5|
+# |5     25    -6    -7    -8|
+ + +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/ufunc.html b/ufunc.html new file mode 100644 index 000000000..bdf998044 --- /dev/null +++ b/ufunc.html @@ -0,0 +1,1589 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/tensor/ufunc + + + + + + + + + +Arraymancer - src/arraymancer/tensor/ufunc + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/tensor/ufunc

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

NOTE: This should be {.noinit.}, but this is blocked by: https://github.com/nim-lang/Nim/issues/16253. NOTE: This should be {.noinit.}, see above.

+ +
+

Procs

+
+
+
+
proc arccos[T](t`gensym8: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc arccos[T](t`gensym8: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc arccosh[T](t`gensym11: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc arccosh[T](t`gensym11: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc arcsin[T](t`gensym9: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc arcsin[T](t`gensym9: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc arcsinh[T](t`gensym12: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc arcsinh[T](t`gensym12: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc arctan[T](t`gensym10: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc arctan[T](t`gensym10: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc arctanh[T](t`gensym13: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc arctanh[T](t`gensym13: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc asType[T: SomeNumber; U: Complex](t: Tensor[T]; typ: typedesc[U]): Tensor[U]
+
+ + Apply type conversion on the whole tensor +   Source +Edit + +
+
+
+
proc asType[T; U: not Complex](t: Tensor[T]; typ: typedesc[U]): Tensor[U]
+
+ + Apply type conversion on the whole tensor. This is a no-op if T is the same as U. +   Source +Edit + +
+
+ +
+
+
+
proc cbrt[T](t`gensym3: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc cbrt[T](t`gensym3: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc ceil[T](t`gensym25: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc ceil[T](t`gensym25: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc cos[T](t`gensym14: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc cos[T](t`gensym14: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc cosh[T](t`gensym15: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc cosh[T](t`gensym15: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc degToRad[T](t`gensym28: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc degToRad[T](t`gensym28: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc erf[T](t`gensym20: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc erf[T](t`gensym20: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc erfc[T](t`gensym21: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc erfc[T](t`gensym21: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc exp[T](t`gensym7: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc exp[T](t`gensym7: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc fac[T](t`gensym0: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc fac[T](t`gensym0: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc floor[T](t`gensym24: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc floor[T](t`gensym24: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc gamma[T](t`gensym23: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc gamma[T](t`gensym23: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc isNaN[T](t`gensym1: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc isNaN[T](t`gensym1: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc lgamma[T](t`gensym22: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc lgamma[T](t`gensym22: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc ln[T](t`gensym4: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc ln[T](t`gensym4: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc log2[T](t`gensym6: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc log2[T](t`gensym6: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc log10[T](t`gensym5: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc log10[T](t`gensym5: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc radToDeg[T](t`gensym29: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc radToDeg[T](t`gensym29: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc round[T](t`gensym27: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc round[T](t`gensym27: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc sin[T](t`gensym17: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc sin[T](t`gensym17: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc sinh[T](t`gensym16: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc sinh[T](t`gensym16: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc sqrt[T](t`gensym2: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc sqrt[T](t`gensym2: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc tan[T](t`gensym18: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc tan[T](t`gensym18: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc tanh[T](t`gensym19: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc tanh[T](t`gensym19: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+
+
+
proc trunc[T](t`gensym26: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+
+
proc trunc[T](t`gensym26: Tensor[T]): Tensor[T] {.noinit.}
+
+ +

Auto-generated universal version of the function.

+

The function can be used directly on tensors and will work element-wise.

+ +   Source +Edit + +
+
+ +
+ +
+
+
+

Templates

+
+
+
+
template makeUniversal(func_name: untyped)
+
+ + +   Source +Edit + +
+
+ +
+
+
+
template makeUniversalLocal(func_name: untyped)
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/ufunc.idx b/ufunc.idx new file mode 100644 index 000000000..245a811c3 --- /dev/null +++ b/ufunc.idx @@ -0,0 +1,96 @@ +nimTitle ufunc ufunc.html module src/arraymancer/tensor/ufunc 0 +nim asType ufunc.html#asType,Tensor[T],typedesc[U] proc asType[T; U: not Complex](t: Tensor[T]; typ: typedesc[U]): Tensor[U] 22 +nim asType ufunc.html#asType,Tensor[T: SomeNumber],typedesc[U] proc asType[T: SomeNumber; U: Complex](t: Tensor[T]; typ: typedesc[U]): Tensor[U] 32 +nim makeUniversal ufunc.html#makeUniversal.t,untyped template makeUniversal(func_name: untyped) 47 +nim makeUniversalLocal ufunc.html#makeUniversalLocal.t,untyped template makeUniversalLocal(func_name: untyped) 63 +nim fac ufunc.html#fac proc fac[T](t`gensym0: Tensor[T]): Tensor[T] 55 +nim fac ufunc.html#fac_2 proc fac[T](t`gensym0: Tensor[T]): Tensor[T] 55 +nim isNaN ufunc.html#isNaN proc isNaN[T](t`gensym1: Tensor[T]): Tensor[T] 55 +nim isNaN ufunc.html#isNaN_2 proc isNaN[T](t`gensym1: Tensor[T]): Tensor[T] 55 +nim sqrt ufunc.html#sqrt proc sqrt[T](t`gensym2: Tensor[T]): Tensor[T] 55 +nim sqrt ufunc.html#sqrt_2 proc sqrt[T](t`gensym2: Tensor[T]): Tensor[T] 55 +nim cbrt ufunc.html#cbrt proc cbrt[T](t`gensym3: Tensor[T]): Tensor[T] 55 +nim cbrt ufunc.html#cbrt_2 proc cbrt[T](t`gensym3: Tensor[T]): Tensor[T] 55 +nim ln ufunc.html#ln proc ln[T](t`gensym4: Tensor[T]): Tensor[T] 55 +nim ln ufunc.html#ln_2 proc ln[T](t`gensym4: Tensor[T]): Tensor[T] 55 +nim log10 ufunc.html#log10 proc log10[T](t`gensym5: Tensor[T]): Tensor[T] 55 +nim log10 ufunc.html#log10_2 proc log10[T](t`gensym5: Tensor[T]): Tensor[T] 55 +nim log2 ufunc.html#log2 proc log2[T](t`gensym6: Tensor[T]): Tensor[T] 55 +nim log2 ufunc.html#log2_2 proc log2[T](t`gensym6: Tensor[T]): Tensor[T] 55 +nim exp ufunc.html#exp proc exp[T](t`gensym7: Tensor[T]): Tensor[T] 55 +nim exp ufunc.html#exp_2 proc exp[T](t`gensym7: Tensor[T]): Tensor[T] 55 +nim arccos ufunc.html#arccos proc arccos[T](t`gensym8: Tensor[T]): Tensor[T] 55 +nim arccos 
ufunc.html#arccos_2 proc arccos[T](t`gensym8: Tensor[T]): Tensor[T] 55 +nim arcsin ufunc.html#arcsin proc arcsin[T](t`gensym9: Tensor[T]): Tensor[T] 55 +nim arcsin ufunc.html#arcsin_2 proc arcsin[T](t`gensym9: Tensor[T]): Tensor[T] 55 +nim arctan ufunc.html#arctan proc arctan[T](t`gensym10: Tensor[T]): Tensor[T] 55 +nim arctan ufunc.html#arctan_2 proc arctan[T](t`gensym10: Tensor[T]): Tensor[T] 55 +nim arccosh ufunc.html#arccosh proc arccosh[T](t`gensym11: Tensor[T]): Tensor[T] 55 +nim arccosh ufunc.html#arccosh_2 proc arccosh[T](t`gensym11: Tensor[T]): Tensor[T] 55 +nim arcsinh ufunc.html#arcsinh proc arcsinh[T](t`gensym12: Tensor[T]): Tensor[T] 55 +nim arcsinh ufunc.html#arcsinh_2 proc arcsinh[T](t`gensym12: Tensor[T]): Tensor[T] 55 +nim arctanh ufunc.html#arctanh proc arctanh[T](t`gensym13: Tensor[T]): Tensor[T] 55 +nim arctanh ufunc.html#arctanh_2 proc arctanh[T](t`gensym13: Tensor[T]): Tensor[T] 55 +nim cos ufunc.html#cos proc cos[T](t`gensym14: Tensor[T]): Tensor[T] 55 +nim cos ufunc.html#cos_2 proc cos[T](t`gensym14: Tensor[T]): Tensor[T] 55 +nim cosh ufunc.html#cosh proc cosh[T](t`gensym15: Tensor[T]): Tensor[T] 55 +nim cosh ufunc.html#cosh_2 proc cosh[T](t`gensym15: Tensor[T]): Tensor[T] 55 +nim sinh ufunc.html#sinh proc sinh[T](t`gensym16: Tensor[T]): Tensor[T] 55 +nim sinh ufunc.html#sinh_2 proc sinh[T](t`gensym16: Tensor[T]): Tensor[T] 55 +nim sin ufunc.html#sin proc sin[T](t`gensym17: Tensor[T]): Tensor[T] 55 +nim sin ufunc.html#sin_2 proc sin[T](t`gensym17: Tensor[T]): Tensor[T] 55 +nim tan ufunc.html#tan proc tan[T](t`gensym18: Tensor[T]): Tensor[T] 55 +nim tan ufunc.html#tan_2 proc tan[T](t`gensym18: Tensor[T]): Tensor[T] 55 +nim tanh ufunc.html#tanh proc tanh[T](t`gensym19: Tensor[T]): Tensor[T] 55 +nim tanh ufunc.html#tanh_2 proc tanh[T](t`gensym19: Tensor[T]): Tensor[T] 55 +nim erf ufunc.html#erf proc erf[T](t`gensym20: Tensor[T]): Tensor[T] 55 +nim erf ufunc.html#erf_2 proc erf[T](t`gensym20: Tensor[T]): Tensor[T] 55 +nim erfc ufunc.html#erfc 
proc erfc[T](t`gensym21: Tensor[T]): Tensor[T] 55 +nim erfc ufunc.html#erfc_2 proc erfc[T](t`gensym21: Tensor[T]): Tensor[T] 55 +nim lgamma ufunc.html#lgamma proc lgamma[T](t`gensym22: Tensor[T]): Tensor[T] 55 +nim lgamma ufunc.html#lgamma_2 proc lgamma[T](t`gensym22: Tensor[T]): Tensor[T] 55 +nim gamma ufunc.html#gamma proc gamma[T](t`gensym23: Tensor[T]): Tensor[T] 55 +nim gamma ufunc.html#gamma_2 proc gamma[T](t`gensym23: Tensor[T]): Tensor[T] 55 +nim floor ufunc.html#floor proc floor[T](t`gensym24: Tensor[T]): Tensor[T] 55 +nim floor ufunc.html#floor_2 proc floor[T](t`gensym24: Tensor[T]): Tensor[T] 55 +nim ceil ufunc.html#ceil proc ceil[T](t`gensym25: Tensor[T]): Tensor[T] 55 +nim ceil ufunc.html#ceil_2 proc ceil[T](t`gensym25: Tensor[T]): Tensor[T] 55 +nim trunc ufunc.html#trunc proc trunc[T](t`gensym26: Tensor[T]): Tensor[T] 55 +nim trunc ufunc.html#trunc_2 proc trunc[T](t`gensym26: Tensor[T]): Tensor[T] 55 +nim round ufunc.html#round proc round[T](t`gensym27: Tensor[T]): Tensor[T] 55 +nim round ufunc.html#round_2 proc round[T](t`gensym27: Tensor[T]): Tensor[T] 55 +nim degToRad ufunc.html#degToRad proc degToRad[T](t`gensym28: Tensor[T]): Tensor[T] 55 +nim degToRad ufunc.html#degToRad_2 proc degToRad[T](t`gensym28: Tensor[T]): Tensor[T] 55 +nim radToDeg ufunc.html#radToDeg proc radToDeg[T](t`gensym29: Tensor[T]): Tensor[T] 55 +nim radToDeg ufunc.html#radToDeg_2 proc radToDeg[T](t`gensym29: Tensor[T]): Tensor[T] 55 +nimgrp degtorad ufunc.html#degToRad-procs-all proc 110 +nimgrp arccosh ufunc.html#arccosh-procs-all proc 92 +nimgrp arccos ufunc.html#arccos-procs-all proc 89 +nimgrp tanh ufunc.html#tanh-procs-all proc 100 +nimgrp erfc ufunc.html#erfc-procs-all proc 102 +nimgrp erf ufunc.html#erf-procs-all proc 101 +nimgrp radtodeg ufunc.html#radToDeg-procs-all proc 111 +nimgrp tan ufunc.html#tan-procs-all proc 99 +nimgrp arctanh ufunc.html#arctanh-procs-all proc 94 +nimgrp sinh ufunc.html#sinh-procs-all proc 97 +nimgrp fac ufunc.html#fac-procs-all proc 77 +nimgrp 
astype ufunc.html#asType-procs-all proc 22 +nimgrp exp ufunc.html#exp-procs-all proc 88 +nimgrp log10 ufunc.html#log10-procs-all proc 86 +nimgrp sin ufunc.html#sin-procs-all proc 98 +nimgrp lgamma ufunc.html#lgamma-procs-all proc 103 +nimgrp trunc ufunc.html#trunc-procs-all proc 107 +nimgrp round ufunc.html#round-procs-all proc 108 +nimgrp sqrt ufunc.html#sqrt-procs-all proc 83 +nimgrp cosh ufunc.html#cosh-procs-all proc 96 +nimgrp arctan ufunc.html#arctan-procs-all proc 91 +nimgrp cbrt ufunc.html#cbrt-procs-all proc 84 +nimgrp log2 ufunc.html#log2-procs-all proc 87 +nimgrp arcsinh ufunc.html#arcsinh-procs-all proc 93 +nimgrp ceil ufunc.html#ceil-procs-all proc 106 +nimgrp isnan ufunc.html#isNaN-procs-all proc 78 +nimgrp ln ufunc.html#ln-procs-all proc 85 +nimgrp arcsin ufunc.html#arcsin-procs-all proc 90 +nimgrp floor ufunc.html#floor-procs-all proc 105 +nimgrp gamma ufunc.html#gamma-procs-all proc 104 +nimgrp cos ufunc.html#cos-procs-all proc 95 diff --git a/uth.copy_semantics.html b/uth.copy_semantics.html new file mode 100644 index 000000000..52aa96de5 --- /dev/null +++ b/uth.copy_semantics.html @@ -0,0 +1,369 @@ + + + + + + + + + + + + + + + + + + +Under the hood: Copy semantics + + + + + + + + + +Arraymancer - Under the hood: Copy semantics + + + + + + + +Fork me on GitHub + + +
+
+

Under the hood: Copy semantics

+

WORK IN PROGRESS

+

The story and struggles behind Arraymancer current copy semantics is fully detailed in the following issues:

+

+

+

TO BE CONTINUED

+ + + +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/uth.opencl_cuda_nim.html b/uth.opencl_cuda_nim.html new file mode 100644 index 000000000..aaf76445f --- /dev/null +++ b/uth.opencl_cuda_nim.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + +Working with OpenCL and Cuda in Nim + + + + + + + + + +Arraymancer - Working with OpenCL and Cuda in Nim + + + + + + + +Fork me on GitHub + + +
+
+

Working with OpenCL and Cuda in Nim

+

Date: May 6, 2018, by Mamy André-Ratsimbazafy

+

Arraymancer is a tensor library I'm writing from the ground up in Nim. Cuda support was added in v0.3 last December, I just released the new v0.4 with OpenCL support.

+

I'd like to share a bit of my experience on working in OpenCL through Nim. First of all, you have to know that none of the big guys (Google Tensorflow, Facebook PyTorch, Apache/Amazon MxNet, Microsoft CNTK or even Intel/AMD) has first class OpenCL support.

+

Why? Probably because Nvidia is providing superb tools and documentation for framework developers. Also Cuda can leverage a few C++ facilities like generics and function objects that I use heavily for generic code.

+

For example in Nim+Cuda I define element-wise functions like the following and pass it to a higher-order function that will apply it element-wise on 3 tensors:

+
# Binary op
+# Does C[i] = A[i] `op` B[i]
+template cuda_binary_op(op_name, op_symbol: string)=
+  {.emit:["""
+  template<typename T>
+  struct """,op_name,"""{
+  __device__ __forceinline__ void operator()(
+      T *  __restrict__ dst,
+      const T *  __restrict__ A,
+      const T *  __restrict__ B){
+      *dst = __ldg(A)""", op_symbol, """ __ldg(B);
+      }
+  };
+  """].}

You can see here the advantage of C++: typename T to template over int/float/double and higher-order functions/function object for cleaner code. You can also see that Nim can directly inline C++ code with emit and I even templatize the operation_name.

+

Now what about OpenCL? Unfortunately C doesn't offer something similar and requires a lot of boilerplate. The alternative, the C++ official OpenCL API and implementation: SYCL is very experimental and I am not sure how it works on actual GPUs.

+

However thanks to Nim metaprogramming, squashing the C boilerplate is super easy. Here is an example kernel to do C = A op B

+
template gen_cl_apply3*(kern_name, ctype, op: string): string =
+  ## Generates an OpenCL kernel for an elementwise binary infix operations (like +, -, ...)
+  ## Input:
+  ##   - The C type
+  ##   - The C kernel name (this only helps debugging the C code)
+  ##   - The C operation (+, -, ...)
+  
+  
+  opencl_getIndexOfElementID() & """
+  __kernel
+  void """ & kern_name &
+          """(const int rank,
+              const int len,
+              __global const int * restrict dst_shape,
+              __global const int * restrict dst_strides,
+              const int dst_offset,
+              __global       """ & ctype & """ * restrict const dst_data,
+              __global const int * restrict A_shape,
+              __global const int * restrict A_strides,
+              const int A_offset,
+              __global const """ & ctype & """ * restrict const A_data,
+              __global const int * restrict B_shape,
+              __global const int * restrict B_strides,
+              const int B_offset,
+              __global const """ & ctype & """ * restrict const B_data)
+  {
+    // Grid-stride loop
+    for (int elemID = get_global_id(0);
+    elemID < len;
+    elemID += get_global_size(0)) {
+      const int dst_real_idx = opencl_getIndexOfElementID(rank, dst_shape, dst_strides, dst_offset, elemID);
+      const int A_real_idx = opencl_getIndexOfElementID(rank, A_shape, A_strides, A_offset, elemID);
+      const int B_real_idx = opencl_getIndexOfElementID(rank, B_shape, B_strides, B_offset, elemID);
+      
+      dst_data[dst_real_idx] = A_data[A_real_idx] """ & op & """ B_data[B_real_idx];
+    }
+  }
+  """

And write a few generic lines of code to deal with the data on the device (especially opencl_getIndexOfElementID, which converts foo[1, 2, 3] into foo.data[456] depending on the tensor shape).

+

Afterwards, all my operations are easily added in one line:

+
  • kind of function (infix: C = A op B or in-place A += B or A *= B)
  • +
  • Nim type
  • +
  • C type
  • +
  • Nim operator (for operator overloading)
  • +
  • OpenCL kernel name
  • +
  • OpenCL operation
  • +
+
genClInfixOp(float32, "float", `+`, "clAdd", "+")
+genClInfixOp(float64, "double", `+`, "clAdd", "+")
+genClInfixOp(float32, "float", `-`, "clSub", "-")
+genClInfixOp(float64, "double", `-`, "clSub", "-")
+
+genClInPlaceOp(float32, "float", `+=`, "clAdd", "+=")
+genClInPlaceOp(float64, "double", `+=`, "clAdd", "+=")
+genClInPlaceOp(float32, "float", `-=`, "clSub", "-=")
+genClInPlaceOp(float64, "double", `-=`, "clSub", "-=")

Next steps? Create unary operation higher-order functions and add cos/sin/ln/exp in just 2 lines of code each. Furthermore allow lifting any unary operation to operations on whole tensors with a map function, expose it so that OpenCL tensors are easily customizable.

+

After using Nim + OpenCL, I actually realized that using C++ function objects was overengineering.

+

To conclude, at the moment, I am convinced that the best language to work with GPUs is Nim.

+

Oh, and for those who want to see real Nim code for neural networks, here is a Fizzbuzz in Nim using neural networks (I didn't implement it on GPU yet though)

+
# A port to Arraymancer of Joel Grus hilarious FizzBuzz in Tensorflow:
+# http://joelgrus.com/2016/05/23/fizz-buzz-in-tensorflow/
+
+# Interviewer: Welcome, can I get you a coffee or anything? Do you need a break?
+# ...
+# Interviewer: OK, so I need you to print the numbers from 1 to 100,
+#              except that if the number is divisible by 3 print "fizz",
+#              if it's divisible by 5 print "buzz", and if it's divisible by 15 print "fizzbuzz".
+
+# Let's start with standard imports
+import ../src/arraymancer, math, strformat
+
+# We want to input a number and output the correct "fizzbuzz" representation
+# ideally the input is a represented by a vector of real values between 0 and 1
+# One way to do that is by using the binary representation of number
+func binary_encode(i: int, num_digits: int): Tensor[float32] =
+  result = newTensor[float32](1, num_digits)
+  for d in 0 ..< num_digits:
+    result[0, d] = float32(i shr d and 1)
+
+# For the input, we distinguish 4 cases: nothing, fizz, buzz and fizzbuzz.
+func fizz_buzz_encode(i: int): int =
+  if   i mod 15 == 0: return 3 # fizzbuzz
+  elif i mod  5 == 0: return 2 # buzz
+  elif i mod  3 == 0: return 1 # fizz
+  else              : return 0
+
+# Next, let's generate training data, we don't want to train on 1..100, that's our test values
+# We can't tell the neural net the truth values it must discover the logic by itself.
+# so we use values between 101 and 1024 (2^10)
+const NumDigits = 10
+
+var x_train = newTensor[float32](2^NumDigits - 101, NumDigits)
+var y_train = newTensor[int](2^NumDigits - 101)
+
+for i in 101 ..< 2^NumDigits:
+  x_train[i - 101, _] = binary_encode(i, NumDigits)
+  y_train[i - 101] = fizz_buzz_encode(i)
+
+# How many neurons do we need to change a light bulb, sorry do a division? let's pick ...
+const NumHidden = 100
+
+# Let's setup our neural network context, variables and model
+let
+  ctx = newContext Tensor[float32]
+  X   = ctx.variable x_train
+
+network ctx, FizzBuzzNet:
+  layers:
+    hidden: Linear(NumDigits, NumHidden)
+    output: Linear(NumHidden, 4)
+  forward x:
+    x.hidden.relu.output
+
+let model = ctx.init(FizzBuzzNet)
+let optim = model.optimizer(SGD, 0.05'f32)
+
+func fizz_buzz(i: int, prediction: int): string =
+  [$i, "fizz", "buzz", "fizzbuzz"][prediction]
+
+# Phew, finally ready to train, let's pick the batch size and number of epochs
+const BatchSize = 128
+const Epochs    = 2500
+
+# And let's start training the network
+for epoch in 0 ..< Epochs:
+  # Here I should probably shuffle the input data.
+  for start_batch in countup(0, x_train.shape[0]-1, BatchSize):
+    
+    # Pick the minibatch
+    let end_batch = min(x_train.shape[0]-1, start_batch + BatchSize)
+    let X_batch = X[start_batch ..< end_batch, _]
+    let target = y_train[start_batch ..< end_batch]
+    
+    # Go through the model
+    let clf = model.forward(X_batch)
+    
+    # Go through our cost function
+    let loss = clf.sparse_softmax_cross_entropy(target)
+    
+    # Backpropagate the errors and let the optimizer fix them.
+    loss.backprop()
+    optim.update()
+  
+  # Let's see how we fare:
+  ctx.no_grad_mode:
+    echo &"\nEpoch #{epoch} done. Testing accuracy"
+    
+    let y_pred = model
+                  .forward(X)
+                  .value
+                  .softmax
+                  .argmax(axis = 1)
+                  .squeeze
+    
+    let score = y_pred.accuracy_score(y_train)
+    echo &"Accuracy: {score:.3f}%"
+    echo "\n"
+
+
+# Our network is trained, let's see if it's well behaved
+
+# Now let's use what we really want to fizzbuzz, numbers from 1 to 100
+var x_buzz = newTensor[float32](100, NumDigits)
+for i in 1 .. 100:
+  x_buzz[i - 1, _] = binary_encode(i, NumDigits)
+
+# Wrap them for neural net
+let X_buzz = ctx.variable x_buzz
+
+# Pass it through the network
+ctx.no_grad_mode:
+  let y_buzz = model
+                .forward(X_buzz)
+                .value
+                .softmax
+                .argmax(axis = 1)
+                .squeeze
+
+# Extract the answer
+var answer: seq[string] = @[]
+
+for i in 1..100:
+  answer.add fizz_buzz(i, y_buzz[i - 1])
+
+echo answer
+# @["1", "2", "fizz", "4", "buzz", "6", "7", "8", "fizz", "10",
+#   "11", "12", "13", "14", "15", "16", "17", "fizz", "19", "buzz",
+#   "fizz", "22", "23", "24", "buzz", "26", "fizz", "28", "29", "30",
+#   "31", "32", "fizz", "34", "buzz", "36", "37", "38", "39", "40",
+#   "41", "fizz", "43", "44", "fizzbuzz", "46", "47", "fizz", "49", "50",
+#   "fizz", "52","53", "54", "buzz", "56", "fizz", "58", "59", "fizzbuzz",
+#   "61", "62", "63", "64", "buzz", "fizz", "67", "68", "fizz", "buzz",
+#   "71", "fizz", "73", "74", "75", "76", "77","fizz", "79", "buzz",
+#   "fizz", "82", "83", "fizz", "buzz", "86", "fizz", "88", "89", "90",
+#   "91", "92", "fizz", "94", "buzz", "fizz", "97", "98", "fizz", "buzz"]
+
+# I guess 100 neurons are not enough to learn multiplication :/.

Thank you for your attention and your support,

+

Be sure to try Nim and Arraymancer!

+ + + +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/uth.speed.html b/uth.speed.html new file mode 100644 index 000000000..dda150f81 --- /dev/null +++ b/uth.speed.html @@ -0,0 +1,409 @@ + + + + + + + + + + + + + + + + + + +Under the hood: Speed + + + + + + + + + +Arraymancer - Under the hood: Speed + + + + + + + +Fork me on GitHub + + +
+
+

Under the hood: Speed

+ +

Parallelism

Most operations in Arraymancer are parallelized through OpenMP including linear algebra functions, universal functions, map, reduce and fold based operations.

+ +

Parallel loop fusion - YOLO (You Only Loop Once)

Arraymancer provides several constructs for the YOLO™ paradigm (You Only Loop Once).

+

A naïve logistic sigmoid implementation in Numpy would be:

+
import math
+
+proc sigmoid(x):
+  return 1 / (1 + math.exp(-x))

With Numpy broadcasting, all those operations would be done on whole tensors using Numpy C implementation, pretty efficient?

+

Actually no, this would create lots of temporaries and loops across the data: - temp1 = -x - temp2 = math.exp(temp1) - temp3 = 1 + temp2 - temp4 = 1 / temp3

+

So you suddenly get a O(4*n) algorithm.

+

Arraymancer can do the same using the explicit broadcast operators /. and +.. (To avoid name conflict we change the logistic sigmoid name)

+
import arraymancer
+
+proc customSigmoid[T: SomeFloat](t: Tensor[T]): Tensor[T] =
+  result = 1 /. (1 +. exp(-t))

Well, unfortunately, the only thing we gain here is parallelism but we still have 4 loops over the data implicitly. Another way would be to use the loop fusion template map_inline:

+
import arraymancer
+
+proc customSigmoid2[T: SomeFloat](t: Tensor[T]): Tensor[T] =
+  result = map_inline(t):
+    1 / (1 + exp(-x))

Now in a single loop over t, Arraymancer will do 1 / (1 + exp(-x)) for each x found. x is a shorthand for the elements of the first tensor argument.

+

Here is another example with 3 tensors and element-wise fused multiply-add C += A *. B:

+
import arraymancer
+
+proc fusedMultiplyAdd[T: SomeNumber](c: var Tensor[T], a, b: Tensor[T]) =
+  ## Implements C += A *. B, *. is the element-wise multiply
+  apply3_inline(c, a, b):
+    x += y * z

Since the tensors were given in order (c, a, b): - x corresponds to elements of c - y to a - z to b

+

Today Arraymancer offers map_inline, map2_inline, apply_inline, apply2_inline and apply3_inline.

+

Those are also parallelized using OpenMP. In the future, this will be generalized to N inputs.

+

Similarly, reduce_inline and fold_inline are offered for parallel, custom, fused reduction operations.

+ +

Memory allocation

For most operations in machine learning, memory and cache are the bottleneck; for example taking the log of a Tensor can use at most 20% of your theoretical max CPU speed (in GFLOPS) while matrix multiplication can use 70%-90%+ for the best implementations (MKL, OpenBLAS).

+

In the log case, the processor gives a result faster than it can load data into its cache. In the matrix multiplication case, each element of a matrix can be reused several times before loading data again.

+

Arraymancer strives hard to limit memory allocation with the inline version of map, apply, reduce, fold (map_inline, apply_inline, reduce_inline, fold_inline) mentioned above that avoids intermediate results.

+ +

Micro benchmark: Int64 matrix multiplication

Integers seem to be the abandoned children of ndarray and tensor libraries. Everyone is optimising the hell out of floating points. Not so with Arraymancer:

+
Archlinux, i9-9980XE
+(Skylake-X 18 cores, overclocked 4.1GHz all-core turbo, 4.0GHz all-AVX-turbo, 3.5 GHz all-AVX512 turbo)
+Input 1500x1500 random large int64 matrix
+Arraymancer 81777f0 (~v0.6.0, master branch 2020-01-09)
+ + + + +
LanguageSpeedMemory
Nim 1.0.4 (-d:danger) + OpenMP0.14s22.7 MB
Julia v1.3.11.67s246.5 MB
Python 3.8.1 + Numpy-MKL 1.18 compiled from source5.69s75.9 MB

Benchmark setup is in the ./benchmarks folder and similar to (stolen from) Kostya's.

+

Note: Arraymancer, Julia and Numpy have the same speed as each other on float matrix multiplication as they all use Assembly-based BLAS + OpenMP underneath. In the future, pure-Nim backends without Assembly and/or OpenMP may be used to ease deployment, especially on Windows and be free of OpenMP limitations with regards to nested parallelism and load-balancing of generic algorithms. Speed will be competitive at least with OpenBLAS, see the Weave multithreading runtime benchmarks.

+ + + +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/util.html b/util.html new file mode 100644 index 000000000..1a4a44dce --- /dev/null +++ b/util.html @@ -0,0 +1,456 @@ + + + + + + + + + + + + + + + + + + +src/arraymancer/datasets/util + + + + + + + + + +Arraymancer - src/arraymancer/datasets/util + + + + + + + +Fork me on GitHub + + +
+
+

src/arraymancer/datasets/util

+
+
+
+ + +
+ +
+ Search: +
+
+ Group by: + +
+ + +
+
+   Source +Edit + +
+ +

+
+

Procs

+
+
+
+
proc create_cache_dirs_if_necessary() {....raises: [OSError, IOError], tags: [
+    WriteDirEffect, ReadDirEffect, ReadEnvEffect, ReadIOEffect], forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+
+
+
proc get_cache_dir(): string {....raises: [], tags: [ReadEnvEffect, ReadIOEffect],
+                               forbids: [].}
+
+ + +   Source +Edit + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+ Arraymancer + + Technical reference + + + + Tutorial + + + + Spellbook (How-To's) + + + + Under the hood + + +
+ + diff --git a/util.idx b/util.idx new file mode 100644 index 000000000..6f468d6ab --- /dev/null +++ b/util.idx @@ -0,0 +1,3 @@ +nimTitle util util.html module src/arraymancer/datasets/util 0 +nim get_cache_dir util.html#get_cache_dir proc get_cache_dir(): string 20 +nim create_cache_dirs_if_necessary util.html#create_cache_dirs_if_necessary proc create_cache_dirs_if_necessary() 23