1 ;;; GNU Guix --- Functional package management for GNU
2 ;;; Copyright © 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022 Ricardo Wurmus <rekado@elephly.net>
3 ;;; Copyright © 2016, 2020, 2021, 2022 Efraim Flashner <efraim@flashner.co.il>
4 ;;; Copyright © 2016, 2017, 2020 Marius Bakke <mbakke@fastmail.com>
5 ;;; Copyright © 2016 Hartmut Goebel <h.goebel@crazy-compilers.com>
6 ;;; Copyright © 2018, 2019, 2020 Tobias Geerinckx-Rice <me@tobias.gr>
7 ;;; Copyright © 2018 Kei Kebreau <kkebreau@posteo.net>
8 ;;; Copyright © 2018 Mark Meyer <mark@ofosos.org>
9 ;;; Copyright © 2018 Ben Woodcroft <donttrustben@gmail.com>
10 ;;; Copyright © 2018 Fis Trivial <ybbs.daans@hotmail.com>
11 ;;; Copyright © 2018 Julien Lepiller <julien@lepiller.eu>
12 ;;; Copyright © 2018 Björn Höfling <bjoern.hoefling@bjoernhoefling.de>
13 ;;; Copyright © 2019 Nicolas Goaziou <mail@nicolasgoaziou.fr>
14 ;;; Copyright © 2019, 2020 Guillaume Le Vaillant <glv@posteo.net>
15 ;;; Copyright © 2019 Brett Gilio <brettg@gnu.org>
16 ;;; Copyright © 2020 Konrad Hinsen <konrad.hinsen@fastmail.net>
17 ;;; Copyright © 2020 Edouard Klein <edk@beaver-labs.com>
18 ;;; Copyright © 2020, 2021, 2022 Vinicius Monego <monego@posteo.net>
19 ;;; Copyright © 2020, 2021, 2022 Maxim Cournoyer <maxim.cournoyer@gmail.com>
21 ;;; This file is part of GNU Guix.
23 ;;; GNU Guix is free software; you can redistribute it and/or modify it
24 ;;; under the terms of the GNU General Public License as published by
25 ;;; the Free Software Foundation; either version 3 of the License, or (at
26 ;;; your option) any later version.
28 ;;; GNU Guix is distributed in the hope that it will be useful, but
29 ;;; WITHOUT ANY WARRANTY; without even the implied warranty of
30 ;;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
31 ;;; GNU General Public License for more details.
33 ;;; You should have received a copy of the GNU General Public License
34 ;;; along with GNU Guix. If not, see <http://www.gnu.org/licenses/>.
;; Module declaration for machine-learning packages.
;; #:use-module clauses are kept sorted alphabetically within each group
;; ((guix …), (gnu packages …), misc), per the GNU Guix contribution
;; guidelines; import order does not affect behavior here.
(define-module (gnu packages machine-learning)
  #:use-module (guix build-system cmake)
  #:use-module (guix build-system gnu)
  #:use-module (guix build-system ocaml)
  #:use-module (guix build-system python)
  #:use-module (guix build-system r)
  #:use-module (guix download)
  #:use-module (guix gexp)
  #:use-module (guix git-download)
  #:use-module ((guix licenses) #:prefix license:)
  #:use-module (guix packages)
  #:use-module (guix svn-download)
  #:use-module (guix utils)
  #:use-module (gnu packages)
  #:use-module (gnu packages adns)
  #:use-module (gnu packages algebra)
  #:use-module (gnu packages audio)
  #:use-module (gnu packages autotools)
  #:use-module (gnu packages base)
  #:use-module (gnu packages bash)
  #:use-module (gnu packages bdw-gc)
  #:use-module (gnu packages boost)
  #:use-module (gnu packages check)
  #:use-module (gnu packages cmake)
  #:use-module (gnu packages compression)
  #:use-module (gnu packages cpp)
  #:use-module (gnu packages cran)
  #:use-module (gnu packages databases)
  #:use-module (gnu packages dejagnu)
  #:use-module (gnu packages gcc)
  #:use-module (gnu packages gettext)
  #:use-module (gnu packages gl)
  #:use-module (gnu packages glib)
  #:use-module (gnu packages graphviz)
  #:use-module (gnu packages gstreamer)
  #:use-module (gnu packages guile)
  #:use-module (gnu packages haskell-xyz)
  #:use-module (gnu packages image)
  #:use-module (gnu packages image-processing)
  #:use-module (gnu packages imagemagick)
  #:use-module (gnu packages jupyter)
  #:use-module (gnu packages libffi)
  #:use-module (gnu packages linux)
  #:use-module (gnu packages llvm)
  #:use-module (gnu packages maths)
  #:use-module (gnu packages mpi)
  #:use-module (gnu packages ninja)
  #:use-module (gnu packages ocaml)
  #:use-module (gnu packages onc-rpc)
  #:use-module (gnu packages parallel)
  #:use-module (gnu packages perl)
  #:use-module (gnu packages pkg-config)
  #:use-module (gnu packages protobuf)
  #:use-module (gnu packages pulseaudio)
  #:use-module (gnu packages python)
  #:use-module (gnu packages python-build)
  #:use-module (gnu packages python-check)
  #:use-module (gnu packages python-science)
  #:use-module (gnu packages python-web)
  #:use-module (gnu packages python-xyz)
  #:use-module (gnu packages rpc)
  #:use-module (gnu packages serialization)
  #:use-module (gnu packages sphinx)
  #:use-module (gnu packages sqlite)
  #:use-module (gnu packages statistics)
  #:use-module (gnu packages swig)
  #:use-module (gnu packages tls)
  #:use-module (gnu packages video)
  #:use-module (gnu packages web)
  #:use-module (gnu packages xml)
  #:use-module (gnu packages xorg)
  #:use-module (ice-9 match))
110 ;; The last release is >100 commits behind, so we package from git.
111 (let ((commit "d71d54788bee56ba4cf7522801270152da5209d7"))
114 (version (string-append "2.2.0-1." (string-take commit 8)))
118 (url "https://github.com/libfann/fann")
120 (file-name (string-append name "-" version "-checkout"))
123 "0ibwpfrjs6q2lijs8slxjgzb2llcl6rk3v2ski4r6215g5jjhg3x"))))
124 (build-system cmake-build-system)
127 (modify-phases %standard-phases
129 (lambda* (#:key outputs #:allow-other-keys)
130 (let* ((out (assoc-ref outputs "out")))
131 (with-directory-excursion (string-append (getcwd) "/tests")
132 (invoke "./fann_tests"))))))))
133 (home-page "http://leenissen.dk/fann/wp/")
134 (synopsis "Fast Artificial Neural Network")
136 "FANN is a neural network library, which implements multilayer
137 artificial neural networks in C with support for both fully connected and
138 sparsely connected networks.")
139 (license license:lgpl2.1))))
141 (define-public libsvm
148 (uri (string-append "https://www.csie.ntu.edu.tw/~cjlin/libsvm/"
149 name "-" version ".tar.gz"))
151 (base32 "0jpaq0rr92x38p4nk3gjan79ip67m6p80anb28z1d8601miysyi5"))))
152 (build-system gnu-build-system)
154 `(#:tests? #f ; no "check" target
156 (modify-phases %standard-phases
158 (add-after 'build 'build-lib
160 (invoke "make" "lib")))
161 (replace 'install ; no ‘install’ target
162 (lambda* (#:key outputs #:allow-other-keys)
163 (let* ((out (assoc-ref outputs "out"))
164 (bin (string-append out "/bin/"))
165 (lib (string-append out "/lib/"))
166 (inc (string-append out "/include/libsvm")))
168 (for-each (lambda (file)
169 (copy-file file (string-append bin file)))
174 (install-file "libsvm.so.2" lib)
176 (install-file "svm.h" inc)))))))
177 (home-page "https://www.csie.ntu.edu.tw/~cjlin/libsvm/")
178 (synopsis "Library for Support Vector Machines")
180 "LIBSVM is a machine learning library for support vector
181 classification, (C-SVC, nu-SVC), regression (epsilon-SVR, nu-SVR) and
182 distribution estimation (one-class SVM). It supports multi-class
184 (license license:bsd-3)))
186 (define-public python-libsvm
187 (package (inherit libsvm)
188 (name "python-libsvm")
189 (build-system gnu-build-system)
191 `(#:tests? #f ; no "check" target
192 #:make-flags '("-C" "python")
194 (modify-phases %standard-phases
197 'install ; no ‘install’ target
198 (lambda* (#:key inputs outputs #:allow-other-keys)
199 (let ((site (string-append (assoc-ref outputs "out")
203 (assoc-ref inputs "python") 5) 3)
205 (substitute* "python/svm.py"
206 (("../libsvm.so.2") "libsvm.so.2"))
208 (for-each (lambda (file)
209 (copy-file file (string-append site (basename file))))
210 (find-files "python" "\\.py"))
211 (copy-file "libsvm.so.2"
212 (string-append site "libsvm.so.2")))
216 (synopsis "Python bindings of libSVM")))
219 ;; The latest release candidate is several years old, and a couple of fixes
220 ;; have been published since. This is why we download the sources from the SVN
222 (let ((svn-revision 2341))
225 (version (string-append "0.9-rc3-0." (number->string svn-revision)))
229 (url "http://svn.code.sf.net/p/ghmm/code/trunk")
230 (revision svn-revision)))
231 (file-name (string-append name "-" version "-checkout"))
234 "0qbq1rqp94l530f043qzp8aw5lj7dng9wq0miffd7spd1ff638wq"))))
235 (build-system gnu-build-system)
237 `(#:imported-modules (,@%gnu-build-system-modules
238 (guix build python-build-system))
239 #:modules ((guix build python-build-system)
240 ,@%gnu-build-system-modules)
242 (modify-phases %standard-phases
243 (add-after 'unpack 'enter-dir
244 (lambda _ (chdir "ghmm")))
245 (add-after 'enter-dir 'fix-runpath
246 (lambda* (#:key outputs #:allow-other-keys)
247 (substitute* "ghmmwrapper/setup.py"
248 (("^(.*)extra_compile_args = \\[" line indent)
249 (string-append indent
250 "extra_link_args = [\"-Wl,-rpath="
251 (assoc-ref outputs "out") "/lib\"],\n"
254 (assoc-ref outputs "out")
256 (add-after 'enter-dir 'disable-broken-tests
258 (substitute* "tests/Makefile.am"
259 ;; GHMM_SILENT_TESTS is assumed to be a command.
260 (("TESTS_ENVIRONMENT.*") "")
261 ;; Do not build broken tests.
265 (("label_higher_order_test.*$")
266 "label_higher_order_test\n"))
268 ;; These Python unittests are broken as there is no gato.
269 ;; See https://sourceforge.net/p/ghmm/support-requests/3/
270 (substitute* "ghmmwrapper/ghmmunittests.py"
271 (("^(.*)def (testNewXML|testMultipleTransitionClasses|testNewXML)"
273 (string-append indent
274 "@unittest.skip(\"Disabled by Guix\")\n"
277 `(("python" ,python-2) ; only Python 2 is supported
278 ("libxml2" ,libxml2)))
286 (home-page "http://ghmm.org")
287 (synopsis "Hidden Markov Model library")
289 "The General Hidden Markov Model library (GHMM) is a C library with
290 additional Python bindings implementing a wide range of types of @dfn{Hidden
291 Markov Models} (HMM) and algorithms: discrete, continuous emissions, basic
292 training, HMM clustering, HMM mixtures.")
293 (license license:lgpl2.0+))))
295 (define-public guile-aiscm
302 (url "https://github.com/wedesoft/aiscm")
303 (commit "2e16e38391bf1638f1dd9a1cf4b25a25f6626078")))
304 (file-name (git-file-name name version))
307 "1gwqpzl6irpaszkpxaf5wliwq19280632hlgxs3ikjkfg8mkqql0"))))
308 (build-system gnu-build-system)
312 #~(list (string-append "OPENCV_CFLAGS=-I" #$(this-package-input "opencv")
315 (list "aruco" "barcode" "bgsegm" "bioinspired"
316 "calib3d" "ccalib" "core" "datasets" "dnn"
317 "dnn_objdetect" "dnn_superres" "dpm" "face"
318 "features2d" "flann" "freetype" "fuzzy" "hdf"
319 "hfs" "highgui" "img_hash" "imgcodecs" "imgproc"
320 "intensity_transform" "line_descriptor" "mcc"
321 "ml" "objdetect" "optflow" "phase_unwrapping"
322 "photo" "plot" "quality" "rapid" "reg" "rgbd"
323 "saliency" "shape" "stereo" "stitching"
324 "structured_light" "superres" "surface_matching"
325 "text" "tracking" "video" "videoio" "videostab"
326 "wechat_qrcode" "ximgproc" "xobjdetect" "xphoto")))
327 (format #false "OPENCV_LIBS=~{-lopencv_~a~^ ~}" modules)))
329 #~(list (string-append "GUILE_CACHE=" #$output "/lib/guile/3.0/site-ccache")
330 (string-append "GUILE_EXT=" #$output "/lib/guile/3.0/extensions")
331 (string-append "GUILE_SITE=" #$output "/share/guile/site/3.0"))
333 '(modify-phases %standard-phases
334 (add-after 'unpack 'build-reproducibly
336 (substitute* "doc/Makefile.am"
337 (("\\$\\(DATE\\)") "1970-01-01"))))
338 (add-after 'unpack 'find-clearsilver
339 (lambda* (#:key inputs #:allow-other-keys)
340 (substitute* "configure.ac"
341 (("/usr/local/include/ClearSilver")
342 (string-append (assoc-ref inputs "clearsilver")
343 "/include/ClearSilver")))
344 (substitute* "aiscm/Makefile.am"
346 (string-append m " -lstreamhtmlparser")))
347 (setenv "C_INCLUDE_PATH"
348 (string-append (assoc-ref inputs "clearsilver")
349 "/include/ClearSilver:"
350 (or (getenv "C_INCLUDE_PATH") "")))))
351 (add-after 'unpack 'use-llvm-config
353 (substitute* "m4/ax_llvmc.m4"
354 (("llvm-config-11") "llvm-config")
355 ;; For some reason this library is not on the link list.
356 (("(LLVM_LIBS=\"\\$\\(\\$ac_llvm_config_path --libs \\$1\\))\"" _ m)
357 (string-append m " -lLLVMMCJIT\"")))
359 ;; Because of this message:
360 ;; symbol lookup error: ./.libs/libguile-aiscm-core.so: undefined symbol: LLVMInitializeX86TargetInfo
361 ;; This probably needs to differ when building on architectures
362 ;; other than x86_64.
363 (substitute* "aiscm/Makefile.am"
364 (("LLVM_LIBS\\)") "LLVM_LIBS) \
365 -lLLVMX86AsmParser -lLLVMX86CodeGen -lLLVMX86Desc -lLLVMX86Info"))))
366 ;; This test fails because our version of tensorflow is too old
367 ;; to provide tf-string-length.
368 (add-after 'unpack 'disable-broken-test
370 (substitute* "tests/test_tensorflow.scm"
371 (("\\(test-eqv \"determine string length" m)
372 (string-append "#;" m)))))
373 ;; Use Clang instead of GCC.
374 (add-before 'configure 'prepare-build-environment
376 (setenv "AR" "llvm-ar")
377 (setenv "NM" "llvm-nm")
378 (setenv "CC" "clang")
379 (setenv "CXX" "clang++"))))))
410 (home-page "https://wedesoft.github.io/aiscm/")
411 (synopsis "Guile extension for numerical arrays and tensors")
412 (description "AIscm is a Guile extension for numerical arrays and tensors.
413 Performance is achieved by using the LLVM JIT compiler.")
414 (license license:gpl3+)))
416 (define-public guile-aiscm-next
417 (deprecated-package "guile-aiscm-next" guile-aiscm))
426 "http://micans.org/mcl/src/mcl-"
427 (string-replace-substring version "." "-")
431 "15xlax3z31lsn62vlg94hkm75nm40q4679amnfg13jm8m2bnhy5m"))))
432 (build-system gnu-build-system)
434 `(#:configure-flags (list "--enable-blast"
438 (home-page "http://micans.org/mcl/")
439 (synopsis "Clustering algorithm for graphs")
441 "The MCL algorithm is short for the @dfn{Markov Cluster Algorithm}, a
442 fast and scalable unsupervised cluster algorithm for graphs (also known as
443 networks) based on simulation of (stochastic) flow in graphs.")
444 ;; In the LICENCE file and web page it says "The software is licensed
445 ;; under the GNU General Public License, version 3.", but in several of
446 ;; the source code files it suggests GPL3 or later.
447 ;; http://listserver.ebi.ac.uk/pipermail/mcl-users/2016/000376.html
448 (license license:gpl3)))
450 (define-public ocaml-mcl
453 (version "12-068oasis4")
458 (url "https://github.com/fhcrc/mcl")
460 (file-name (git-file-name name version))
463 "0009dc3h2jp3qg5val452wngpqnbfyhbcxylghq0mrjqxx0jdq5p"))))
464 (build-system ocaml-build-system)
467 (modify-phases %standard-phases
468 (add-before 'configure 'patch-paths
470 (substitute* "setup.ml"
472 (string-append "LDFLAGS=-fPIC\"; \"SHELL=" (which "sh")))
473 (("-std=c89") "-std=gnu99 -fcommon")
475 ;; This is a mutable string, which is no longer supported. Use
476 ;; a byte buffer instead.
477 (("String.make \\(String.length s\\)")
478 "Bytes.make (String.length s)")
480 ;; These two belong together.
481 (("OASISString.replace_chars")
482 "Bytes.to_string (OASISString.replace_chars")
485 (substitute* "myocamlbuild.ml"
486 (("std=c89") "std=gnu99 -fcommon"))
487 ;; Since we build with a more recent OCaml, we have to use C99 or
488 ;; later. This causes problems with the old C code.
489 (substitute* "src/impala/matrix.c"
490 (("restrict") "restrict_"))
494 (home-page "https://github.com/fhcrc/mcl")
495 (synopsis "OCaml wrappers around MCL")
497 "This package provides OCaml bindings for the MCL graph clustering
499 (license license:gpl3)))
501 (define-public randomjungle
503 (name "randomjungle")
509 "https://www.imbs.uni-luebeck.de/fileadmin/files/Software"
510 "/randomjungle/randomjungle-" version ".tar_.gz"))
511 (patches (search-patches "randomjungle-disable-static-build.patch"))
514 "12c8rf30cla71swx2mf4ww9mfd8jbdw5lnxd7dxhyw1ygrvg6y4w"))))
515 (build-system gnu-build-system)
518 (list "--disable-static"
519 (string-append "--with-boost="
520 (assoc-ref %build-inputs "boost")))
522 (modify-phases %standard-phases
523 (add-after 'unpack 'fix-compatibility-errors
525 (substitute* "src/library/IAM2WayImportance.h"
526 (("= std::make_pair.*")
527 "= std::minmax(varID1, varID2);"))
528 (substitute* "src/library/DataFrame.h"
530 "if (isFirst) { isFirst = false; } else { os << par.delimiter; }\n"))))
531 (add-before 'configure 'set-CXXFLAGS
532 (lambda _ (setenv "CXXFLAGS" "-fpermissive "))))))
534 (list boost gsl libxml2 zlib))
536 `(("gfortran" ,gfortran)
537 ("gfortran:lib" ,gfortran "lib")))
538 ;; Non-portable assembly instructions are used so building fails on
539 ;; platforms other than x86_64 or i686.
540 (supported-systems '("x86_64-linux" "i686-linux"))
541 (home-page "https://www.imbs.uni-luebeck.de/forschung/software/details.html#c224")
542 (synopsis "Implementation of the Random Forests machine learning method")
544 "Random Jungle is an implementation of Random Forests. It is supposed to
545 analyse high dimensional data. In genetics, it can be used for analysing big
546 Genome Wide Association (GWA) data. Random Forests is a powerful machine
547 learning method. Most interesting features are variable selection, missing
548 value imputation, classifier creation, generalization error estimation and
549 sample proximities between pairs of cases.")
550 (license license:gpl3+)))
552 (define-public openfst
558 (uri (string-append "http://www.openfst.org/twiki/pub/FST/"
559 "FstDownload/openfst-" version ".tar.gz"))
562 "0hlbdmjjf1jgsvi3d2hwni5lz3l9a5bzj6ijpbawa8a7cbrpp66y"))))
563 (build-system gnu-build-system)
564 (arguments '(#:configure-flags '("--enable-ngram-fsts")))
565 (home-page "http://www.openfst.org")
566 (synopsis "Library for weighted finite-state transducers")
567 (description "OpenFst is a library for constructing, combining,
568 optimizing, and searching weighted finite-state transducers (FSTs).")
569 (license license:asl2.0)))
571 ;; This is a temporary addition to bypass upstream issues with the kaldi
573 (define-public openfst-1.7.3
574 (package (inherit openfst)
578 (uri (string-append "http://www.openfst.org/twiki/pub/FST/"
579 "FstDownload/openfst-" version ".tar.gz"))
582 "038a60w7y8qnbxmcrsim9rafz9mihsny8xv50jpzlr7rl166pp5q"))))
583 (arguments '(#:configure-flags '("--enable-ngram-fsts" "CXXFLAGS=-std=c++14")
584 #:make-flags '("CXXFLAGS=-std=c++14")))))
586 (define-public shogun
594 "ftp://shogun-toolbox.org/shogun/releases/"
595 (version-major+minor version)
596 "/sources/shogun-" version ".tar.bz2"))
599 "1rn9skm3nw6hr7mr3lgp2gfqhi7ii0lyxck7qmqnf8avq349s5jp"))
600 (modules '((guix build utils)
604 ;; Remove non-free sources and files referencing them
605 (for-each delete-file
606 (find-files "src/shogun/classifier/svm/"
607 "SVMLight\\.(cpp|h)"))
608 (for-each delete-file
609 (find-files "examples/undocumented/libshogun/"
611 "(classifier_.*svmlight.*|"
612 "evaluation_cross_validation_locked_comparison).cpp")))
613 ;; Remove non-free functions.
614 (define (delete-ifdefs file)
615 (with-atomic-file-replacement file
617 (let loop ((line (read-line in 'concat))
619 (if (eof-object? line)
624 "#endif //USE_SVMLIGHT" line)))
626 "#ifdef USE_SVMLIGHT" line))))
627 (when (or (not skipping?)
628 (and skipping? (not skip-next?)))
630 (loop (read-line in 'concat) skip-next?)))))))
631 (for-each delete-ifdefs
633 (find-files "src/shogun/classifier/mkl"
634 "^MKLClassification\\.cpp")
635 (find-files "src/shogun/classifier/svm"
636 "^SVMLightOneClass\\.(cpp|h)")
637 (find-files "src/shogun/multiclass"
638 "^ScatterSVM\\.(cpp|h)")
639 (find-files "src/shogun/kernel/"
640 "^(Kernel|CombinedKernel|ProductKernel)\\.(cpp|h)")
641 (find-files "src/shogun/regression/svr"
642 "^(MKLRegression|SVRLight)\\.(cpp|h)")
643 (find-files "src/shogun/transfer/domain_adaptation"
644 "^DomainAdaptationSVM\\.(cpp|h)")))
646 (build-system cmake-build-system)
648 '(#:tests? #f ;no check target
650 (modify-phases %standard-phases
651 (add-after 'unpack 'delete-broken-symlinks
653 (for-each delete-file '("applications/arts/data"
654 "applications/asp/data"
655 "applications/easysvm/data"
656 "applications/msplicer/data"
657 "applications/ocr/data"
659 "examples/undocumented/data"))
661 (add-after 'unpack 'change-R-target-path
662 (lambda* (#:key outputs #:allow-other-keys)
663 (substitute* '("src/interfaces/r/CMakeLists.txt"
664 "examples/meta/r/CMakeLists.txt")
665 (("\\$\\{R_COMPONENT_LIB_PATH\\}")
666 (string-append (assoc-ref outputs "out")
669 (add-after 'unpack 'fix-octave-modules
670 (lambda* (#:key outputs #:allow-other-keys)
671 (substitute* "src/interfaces/octave/CMakeLists.txt"
672 (("^include_directories\\(\\$\\{OCTAVE_INCLUDE_DIRS\\}")
673 "include_directories(${OCTAVE_INCLUDE_DIRS} ${OCTAVE_INCLUDE_DIRS}/octave")
674 ;; change target directory
675 (("\\$\\{OCTAVE_OCT_LOCAL_API_FILE_DIR\\}")
676 (string-append (assoc-ref outputs "out")
677 "/share/octave/packages")))
678 (substitute* '("src/interfaces/octave/swig_typemaps.i"
679 "src/interfaces/octave/sg_print_functions.cpp")
680 ;; "octave/config.h" and "octave/oct-obj.h" deprecated in Octave.
681 (("octave/config\\.h") "octave/octave-config.h")
682 (("octave/oct-obj.h") "octave/ovl.h"))
684 (add-after 'unpack 'move-rxcpp
685 (lambda* (#:key inputs #:allow-other-keys)
686 (let ((rxcpp-dir "shogun/third-party/rxcpp"))
688 (install-file (assoc-ref inputs "rxcpp") rxcpp-dir)
690 (add-before 'build 'set-HOME
691 ;; $HOME needs to be set at some point during the build phase
692 (lambda _ (setenv "HOME" "/tmp") #t)))
694 (list "-DCMAKE_BUILD_WITH_INSTALL_RPATH=TRUE"
695 "-DUSE_SVMLIGHT=OFF" ;disable proprietary SVMLIGHT
696 "-DBUILD_META_EXAMPLES=OFF" ;requires unpackaged ctags
697 ;;"-DINTERFACE_JAVA=ON" ;requires unpackaged jblas
698 ;;"-DINTERFACE_RUBY=ON" ;requires unpackaged ruby-narray
699 ;;"-DINTERFACE_PERL=ON" ;"FindPerlLibs" does not exist
700 ;;"-DINTERFACE_LUA=ON" ;fails because lua doesn't build pkgconfig file
701 "-DINTERFACE_OCTAVE=ON"
702 "-DINTERFACE_PYTHON=ON"
703 "-DINTERFACE_R=ON")))
706 ("numpy" ,python-numpy)
707 ("r-minimal" ,r-minimal)
708 ("octave" ,octave-cli)
713 ("arpack" ,arpack-ng)
720 (list pkg-config rxcpp))
721 ;; Non-portable SSE instructions are used so building fails on platforms
722 ;; other than x86_64.
723 (supported-systems '("x86_64-linux"))
724 (home-page "https://shogun-toolbox.org/")
725 (synopsis "Machine learning toolbox")
727 "The Shogun Machine learning toolbox provides a wide range of unified and
728 efficient Machine Learning (ML) methods. The toolbox seamlessly
729 combines multiple data representations, algorithm classes, and general purpose
730 tools. This enables both rapid prototyping of data pipelines and extensibility
731 in terms of new algorithms.")
732 (license license:gpl3+)))
741 (url "https://github.com/onnx/onnx")
742 (commit (string-append "v" version))))
745 "1g9f1hviksbn7gi6fnd0dsm7nf0w3yia0mjj33d9mggklrl0db6x"))
746 (file-name (git-file-name name version))
747 (patches (search-patches "onnx-use-system-googletest.patch"
748 "onnx-shared-libraries.patch"
749 "onnx-skip-model-downloads.patch"))
750 (modules '((guix build utils)))
751 (snippet '(delete-file-recursively "third_party"))))
752 (build-system python-build-system)
754 '(#:phases (modify-phases %standard-phases
755 (add-before 'build 'pass-cmake-arguments
756 (lambda* (#:key outputs #:allow-other-keys)
757 ;; Pass options to the CMake-based build process.
759 (assoc-ref outputs "out"))
762 ;; Copy arguments from 'cmake-build-system', plus ask
763 ;; for shared libraries.
764 (list "-DCMAKE_BUILD_TYPE=RelWithDebInfo"
765 (string-append "-DCMAKE_INSTALL_PREFIX=" out)
766 "-DCMAKE_INSTALL_LIBDIR=lib"
767 "-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=TRUE"
768 (string-append "-DCMAKE_INSTALL_RPATH=" out
770 "-DCMAKE_VERBOSE_MAKEFILE=ON"
772 "-DBUILD_SHARED_LIBS=ON"))
774 ;; This environment variable is honored by 'setup.py',
775 ;; which passes it down to 'cmake'.
776 (setenv "CMAKE_ARGS" (string-join args))
778 ;; This one is honored by 'setup.py' and passed to 'make
781 (number->string (parallel-job-count)))))
782 (add-before 'check 'make-test-directory-writable
784 ;; Make things writable for tests.
785 (setenv "HOME" (getcwd))
786 (for-each make-file-writable
787 (find-files "onnx/examples" "."
788 #:directories? #t))))
789 (add-after 'install 'install-from-cmake
791 ;; Run "make install" in the build tree 'setup.py'
792 ;; created for CMake so that libonnx.so,
793 ;; libonnx_proto.so, etc. are installed.
794 (invoke "make" "install"
795 "-C" ".setuptools-cmake-build"))))))
803 python-pytest-runner))
807 (list python-numpy python-protobuf python-six python-tabulate
808 python-typing-extensions))
809 (home-page "https://onnx.ai/")
810 (synopsis "Open Neural Network Exchange")
812 "@acronym{ONNX, Open Neural Network Exchange} is a format for AI models,
813 both deep learning and traditional @acronym{ML, machine learning}. It defines
814 an extensible computation graph model, as well as definitions of built-in
815 operators and standard data types.")
816 (license license:expat)))
818 (define-public python-onnx
819 ;; This used to be called "python-onnx" because it provided nothing but
820 ;; Python bindings. The package now provides shared libraries and C++
821 ;; headers, hence the name change.
822 (deprecated-package "python-onnx" onnx))
824 (define-public onnx-optimizer
826 (name "onnx-optimizer")
827 ;; Note: 0.2.x is *more* recent than 1.5.0.
829 (home-page "https://github.com/onnx/optimizer")
834 (commit (string-append "v" version))))
837 "1wkqqdxcxpfbf8zpbdfdd3zz5jkw775g31gyykj11z4y6pp659l6"))
838 (file-name (git-file-name name version))
839 (patches (search-patches "onnx-optimizer-system-library.patch"))
840 (modules '((guix build utils)))
841 (snippet '(delete-file-recursively "third_party"))))
842 (build-system python-build-system)
843 (arguments (package-arguments onnx)) ;reuse build system tweaks
845 (list cmake python-pytest python-pytest-runner python-nbval
848 (list onnx protobuf pybind11))
851 (synopsis "Library to optimize ONNX models")
853 "This package provides a C++ and Python library for performing arbitrary
854 optimizations on ONNX models, as well as a growing list of prepackaged
857 Not all possible optimizations can be directly implemented on ONNX graphs---
858 some will need additional backend-specific information---but many can, and the
859 aim is to provide all such passes along with ONNX so that they can be re-used
860 with a single function call.")
861 (license license:expat)))
871 (url "https://github.com/ReactiveX/RxCpp")
872 (commit (string-append "v" version))))
874 (base32 "1rdpa3jlc181jd08nk437aar085h28i45s6nzrv65apb3xyyz0ij"))
875 (file-name (git-file-name name version))))
876 (build-system cmake-build-system)
879 (modify-phases %standard-phases
880 (add-after 'unpack 'remove-werror
882 (substitute* (find-files ".")
887 (invoke "ctest"))))))
889 (list catch-framework))
890 (home-page "http://reactivex.io/")
891 (synopsis "Reactive Extensions for C++")
893 "The Reactive Extensions for C++ (RxCpp) is a library of algorithms for
894 values-distributed-in-time. ReactiveX is a library for composing asynchronous
895 and event-based programs by using observable sequences.
897 It extends the observer pattern to support sequences of data and/or events and
898 adds operators that allow you to compose sequences together declaratively while
899 abstracting away concerns about things like low-level threading,
900 synchronization, thread-safety, concurrent data structures, and non-blocking
902 (license license:asl2.0)))
905 (define-public gemmlowp
906 (let ((commit "f9959600daa42992baace8a49544a00a743ce1b6")
911 (version (git-version version revision commit))
912 (home-page "https://github.com/google/gemmlowp")
915 (uri (git-reference (url home-page) (commit commit)))
916 (file-name (git-file-name name version))
919 "1hzfhlhzcb827aza6a7drydc67dw5fm3qfqilb9ibskan8dsf0c6"))))
922 (list ,@(match (%current-system)
923 ((or "x86_64-linux" "i686-linux")
924 '("-DCMAKE_CXX_FLAGS=-msse2"))
927 (modify-phases %standard-phases
928 ;; This directory contains the CMakeLists.txt.
929 (add-after 'unpack 'chdir
930 (lambda _ (chdir "contrib") #t))
931 ;; There is no install target
933 (lambda* (#:key outputs #:allow-other-keys)
934 (let* ((out (assoc-ref outputs "out"))
935 (lib (string-append out "/lib/"))
936 (inc (string-append out "/include/")))
937 (install-file "../build/libeight_bit_int_gemm.so" lib)
938 (for-each (lambda (dir)
940 (string-append inc "/gemmlowp/" dir)))
941 (for-each (lambda (h)
942 (install-file h target))
943 (find-files (string-append "../" dir)
945 '("meta" "profiling" "public" "fixedpoint"
946 "eight_bit_int_gemm" "internal"))))))))
947 (build-system cmake-build-system)
948 (synopsis "Small self-contained low-precision GEMM library")
950 "This is a small self-contained low-precision @dfn{general matrix
951 multiplication} (GEMM) library. It is not a full linear algebra library.
952 Low-precision means that the input and output matrix entries are integers on
953 at most 8 bits. To avoid overflow, results are internally accumulated on more
954 than 8 bits, and at the end only some significant 8 bits are kept.")
955 (license license:asl2.0))))
957 (define-public gemmlowp-for-tensorflow
958 ;; The commit hash is taken from "tensorflow/workspace.bzl".
959 (let ((commit "38ebac7b059e84692f53e5938f97a9943c120d98")
963 (version (git-version "0" revision commit))
966 (uri (string-append "https://mirror.bazel.build/"
967 "github.com/google/gemmlowp/archive/"
969 (file-name (string-append "gemmlowp-" version ".zip"))
972 "0n56s2g8hrssm4w8qj1v58gfm56a04n9v992ixkmvk6zjiralzxq"))))
974 (substitute-keyword-arguments (package-arguments gemmlowp)
976 `(modify-phases ,phases
978 (lambda* (#:key outputs #:allow-other-keys)
979 (let* ((out (assoc-ref outputs "out"))
980 (lib (string-append out "/lib/"))
981 (inc (string-append out "/include/")))
982 (install-file "../build/libeight_bit_int_gemm.so" lib)
983 (for-each (lambda (dir)
984 ;; Note: Install headers straight into
985 ;; $includedir instead of $includedir/gemmlowp.
986 (let ((target (string-append inc "/" dir)))
987 (for-each (lambda (h)
988 (install-file h target))
989 (find-files (string-append "../" dir)
991 '("meta" "profiling" "public" "fixedpoint"
992 "eight_bit_int_gemm" "internal")))))))))
995 (properties '((hidden? . #t))))))
1004 "http://dlib.net/files/dlib-" version ".tar.bz2"))
1007 "139jyi19qz37wwmmy48gil9d1kkh2r3w3bwdzabha6ayxmba96nz"))
1008 (modules '((guix build utils)))
1011 ;; Delete ~13MB of bundled dependencies.
1012 (delete-file-recursively "dlib/external")
1013 (delete-file-recursively "docs/dlib/external")
1015 (build-system cmake-build-system)
1017 `(#:configure-flags '("-DBUILD_SHARED_LIBS=ON")
1019 (modify-phases %standard-phases
1020 (add-after 'unpack 'disable-asserts
1022 ;; config.h recommends explicitly enabling or disabling asserts
1023 ;; when building as a shared library. By default neither is set.
1024 (substitute* "dlib/config.h"
1025 (("^//#define DLIB_DISABLE_ASSERTS") "#define DLIB_DISABLE_ASSERTS"))
1027 (add-after 'disable-asserts 'disable-failing-tests
1029 ;; One test times out on MIPS, so we need to disable it.
1030 ;; Others are flaky on some platforms.
1031 (let* ((system ,(or (%current-target-system)
1033 (disabled-tests (cond
1034 ((string-prefix? "mips64" system)
1035 '("object_detector" ; timeout
1037 ((string-prefix? "armhf" system)
1038 '("learning_to_track"))
1039 ((string-prefix? "i686" system)
1044 (substitute* "dlib/test/makefile"
1045 (((string-append "SRC \\+= " test "\\.cpp")) "")))
1050 ;; No test target, so we build and run the unit tests here.
1051 (let ((test-dir (string-append "../dlib-" ,version "/dlib/test")))
1052 (with-directory-excursion test-dir
1053 (invoke "make" "-j" (number->string (parallel-job-count)))
1054 (invoke "./dtest" "--runall"))
1061 `(("giflib" ,giflib)
1063 ("libjpeg" ,libjpeg-turbo)
1066 ("openblas" ,openblas)
1069 "Toolkit for making machine learning and data analysis applications in C++")
1071 "Dlib is a modern C++ toolkit containing machine learning algorithms and
1072 tools. It is used in both industry and academia in a wide range of domains
1073 including robotics, embedded devices, mobile phones, and large high performance
1074 computing environments.")
1075 (home-page "http://dlib.net")
1076 (license license:boost1.0)))
;; Guix package for scikit-learn, built from the GitHub source checkout.
;; NOTE(review): this view of the file is fragmentary (several original
;; lines are elided between the numbered lines below), so some fields of
;; the definition (version, arguments header, native-inputs head) are not
;; visible here.
1078 (define-public python-scikit-learn
1080 (name "python-scikit-learn")
1086 (url "https://github.com/scikit-learn/scikit-learn")
1088 (file-name (git-file-name name version))
1091 "0wcngyfm2fl3vgyi2aq6j5fvky5185xjzgip64968wqj1hmir5nv"))))
1092 (build-system python-build-system)
1095 (modify-phases %standard-phases
;; Parallelize the Cython/C-extension build via scikit-learn's own
;; SKLEARN_BUILD_PARALLEL environment knob.
1096 (add-before 'build 'configure
1098 (setenv "SKLEARN_BUILD_PARALLEL"
1099 (number->string (parallel-job-count)))))
;; Build extensions in-place so the test suite can import them.
1100 (add-after 'build 'build-ext
1101 (lambda _ (invoke "python" "setup.py" "build_ext" "--inplace"
1102 "-j" (number->string (parallel-job-count)))))
1104 (lambda* (#:key tests? #:allow-other-keys)
1106 ;; Restrict OpenBLAS threads to prevent segfaults while testing!
1107 (setenv "OPENBLAS_NUM_THREADS" "1")
1109 ;; Some tests require write access to $HOME.
1110 (setenv "HOME" "/tmp")
1112 ;; Step out of the source directory to avoid interference;
1113 ;; we want to run the installed code with extensions etc.
1114 (with-directory-excursion "/tmp"
1115 (invoke "pytest" "-vv" "--pyargs" "sklearn"
1117 "-n" (number->string (parallel-job-count))
1118 ;; This test tries to access the internet.
1119 "-k" "not test_load_boston_alternative"))))))))
1120 (inputs (list openblas))
1125 python-pytest-xdist))
1127 (list python-numpy python-threadpoolctl python-scipy python-joblib))
1128 (home-page "https://scikit-learn.org/")
1129 (synopsis "Machine Learning in Python")
1131 "Scikit-learn provides simple and efficient tools for data mining and
1133 (license license:bsd-3)))
;; Guix package for threadpoolctl, fetched from PyPI.
;; NOTE(review): intermediate lines (version, source head, arguments head)
;; are elided in this view of the file.
1135 (define-public python-threadpoolctl
1137 (name "python-threadpoolctl")
1142 (uri (pypi-uri "threadpoolctl" version))
1145 "0szsxcm2fbxrn83iynn42bnvrdh7mfsmkhfn8pdn7swblfb7rifx"))))
1146 (build-system python-build-system)
1149 (modify-phases %standard-phases
1151 (lambda* (#:key tests? inputs outputs #:allow-other-keys)
;; Put the installed package on the Python search path before the
;; (elided) test invocation so tests run against the installed copy.
1153 (add-installed-pythonpath inputs outputs)
1157 (list python-pytest))
1158 (home-page "https://github.com/joblib/threadpoolctl")
1159 (synopsis "Python helpers for common threading libraries")
1160 (description "Thread-pool Controls provides Python helpers to limit the
1161 number of threads used in the threadpool-backed of common native libraries used
1162 for scientific computing and data science (e.g. BLAS and OpenMP).")
1163 (license license:bsd-3)))
;; Guix package for PyNNDescent (approximate nearest-neighbor search),
;; fetched from PyPI.  NOTE(review): some lines are elided in this view.
1165 (define-public python-pynndescent
1167 (name "python-pynndescent")
1172 (uri (pypi-uri "pynndescent" version))
1174 (base32 "0p3jsdcprjfzz7qf5674dsqfpvdn6p4wgqikg7b6ki5abf433yv1"))))
1175 (build-system python-build-system)
1178 (modify-phases %standard-phases
1180 (lambda* (#:key inputs outputs tests? #:allow-other-keys)
;; Run the test suite from the installed package (--pyargs), skipping
;; a test that relies on a metric removed from SciPy 1.8.
1182 (invoke "python" "-m" "pytest" "--pyargs" "pynndescent"
1183 ;; wminkowski no longer exists in scipy 1.8.0 (see:
1184 ;; https://github.com/lmcinnes/pynndescent/issues/177)
1185 "-k" "not test_weighted_minkowski")))))))
1186 (native-inputs (list python-pytest))
1193 (home-page "https://github.com/lmcinnes/pynndescent")
1194 (synopsis "Nearest neighbor descent for approximate nearest neighbors")
1196 "PyNNDescent provides a Python implementation of Nearest Neighbor Descent
1197 for k-neighbor-graph construction and approximate nearest neighbor search.")
1198 (license license:bsd-2)))
;; Guix package for openTSNE, built from Git because the PyPI sdist lacks
;; the test suite.  NOTE(review): several lines are elided in this view.
1200 (define-public python-opentsne
1202 (name "python-opentsne")
1206 (method git-fetch) ; no tests in PyPI release
1208 (url "https://github.com/pavlin-policar/openTSNE")
1209 (commit (string-append "v" version))))
1210 (file-name (git-file-name name version))
1212 (base32 "124nid27lfq1ipfjd2gkynqcmb4khisjb4r05jv42ckfkk4dbsxs"))))
1213 (build-system python-build-system)
1216 (modify-phases %standard-phases
1217 ;; Benchmarks require the 'macosko2015' data files.
1218 (add-after 'unpack 'delete-benchmark
1220 (delete-file-recursively "benchmarks")))
;; Rename the failing test so pytest no longer collects it.
1221 (add-after 'unpack 'skip-test
1222 (lambda _ ;; TODO: figure out why this test fails.
1223 (substitute* "tests/test_correctness.py"
1224 (("def test_iris\\(self\\)") "def _test_iris(self)"))))
1225 ;; Numba needs a writable dir to cache functions.
1226 (add-before 'check 'set-numba-cache-dir
1228 (setenv "NUMBA_CACHE_DIR" "/tmp"))))))
1230 (list python-cython))
1234 (list python-numpy python-pynndescent python-scikit-learn
1236 (home-page "https://github.com/pavlin-policar/openTSNE")
1237 (synopsis "Extensible, parallel implementations of t-SNE")
1239 "This is a modular Python implementation of t-Distributed Stochastic
1240 Neighbor Embedding (t-SNE), a popular dimensionality-reduction algorithm for
1241 visualizing high-dimensional data sets.")
1242 (license license:bsd-3)))
;; Guix package for scikit-rebate (PyPI name "skrebate").  Tests are
;; disabled because none ship on PyPI and the repository has no tags to
;; fetch a matching source checkout from.
1244 (define-public python-scikit-rebate
1246 (name "python-scikit-rebate")
1250 (uri (pypi-uri "skrebate" version))
1253 "0n55ghvnv7rxqa5agq6a4892ad0ghha165b0g4ghwr9gqm6ss3dj"))))
1254 (build-system python-build-system)
1255 (arguments '(#:tests? #f)) ;no tests on PyPI and no tags in repo
1257 (list python-numpy python-scipy python-scikit-learn python-joblib))
1258 (home-page "https://epistasislab.github.io/scikit-rebate/")
1259 (synopsis "Relief-based feature selection algorithms for Python")
1260 (description "Scikit-rebate is a scikit-learn-compatible Python
1261 implementation of ReBATE, a suite of Relief-based feature selection algorithms
1262 for Machine Learning. These algorithms excel at identifying features that are
1263 predictive of the outcome in supervised learning problems, and are especially
1264 good at identifying feature interactions that are normally overlooked by
1265 standard feature selection algorithms.")
1266 (license license:expat)))
;; Guix package for cmaes, built from Git because the PyPI release has no
;; tests.  NOTE(review): some lines are elided in this view of the file.
;; Fix: the description contained the duplicated word "provides provides".
1268 (define-public python-cmaes
1270 (name "python-cmaes")
1274 (method git-fetch) ;no tests in PyPI
1276 (url "https://github.com/CyberAgent/cmaes")
1277 (commit (string-append "v" version))))
1279 (base32 "1jyckaifir528dz6m95nvky8hvqmz5gz6dlp65baahhbca0danzb"))
1280 (file-name (git-file-name name version))))
1281 (build-system python-build-system)
1283 (list python-setuptools ;build fails without this
1286 (list python-numpy))
1287 (home-page "https://github.com/CyberAgent/cmaes")
1288 (synopsis "CMA-ES implementation for Python")
1289 (description "This package provides an implementation of the
1290 Covariance Matrix Adaptation Evolution Strategy (CMA-ES) for Python.")
1291 (license license:expat)))
;; Guix package for Autograd, pinned to a specific Git commit and versioned
;; with git-version since upstream made no release at the time.
;; NOTE(review): several lines are elided in this view of the file.
1293 (define-public python-autograd
1294 (let* ((commit "442205dfefe407beffb33550846434baa90c4de7")
1296 (version (git-version "0.0.0" revision commit)))
1298 (name "python-autograd")
1299 (home-page "https://github.com/HIPS/autograd")
1307 "189sv2xb0mwnjawa9z7mrgdglc1miaq93pnck26r28fi1jdwg0z4"))
1308 (file-name (git-file-name name version))))
1310 (build-system python-build-system)
1312 (list python-nose python-pytest))
1314 (list python-future python-numpy))
;; Custom check phase (head elided) that invokes py.test directly.
1316 `(#:phases (modify-phases %standard-phases
1319 (invoke "py.test" "-v"))))))
1320 (synopsis "Efficiently computes derivatives of NumPy code")
1321 (description "Autograd can automatically differentiate native Python and
1322 NumPy code. It can handle a large subset of Python's features, including loops,
1323 ifs, recursion and closures, and it can even take derivatives of derivatives
1324 of derivatives. It supports reverse-mode differentiation
1325 (a.k.a. backpropagation), which means it can efficiently take gradients of
1326 scalar-valued functions with respect to array-valued arguments, as well as
1327 forward-mode differentiation, and the two can be composed arbitrarily. The
1328 main intended application of Autograd is gradient-based optimization.")
1329 (license license:expat))))
;; Guix package for LightGBM, built from Git with cmake-build-system.
;; NOTE(review): several lines (version, arguments head, check phase head)
;; are elided in this view of the file.
1331 (define-public lightgbm
1338 (url "https://github.com/Microsoft/LightGBM")
1339 (commit (string-append "v" version))))
1342 "0jlvyn7k81dzrh9ij3zw576wbgiwmmr26rzpdxjn1dbpc3njpvzi"))
1343 (file-name (git-file-name name version))))
1345 (list python-pytest python-nose))
1349 (list python-numpy python-scipy))
1354 (modify-phases %standard-phases
;; Run the C-API tests from the unpacked source tree, which lives next
;; to the CMake build directory.
1357 (with-directory-excursion "../source"
1358 (invoke "pytest" "tests/c_api_test/test_.py")))))))
1359 (build-system cmake-build-system)
1360 (home-page "https://github.com/Microsoft/LightGBM")
1361 (synopsis "Gradient boosting framework based on decision tree algorithms")
1362 (description "LightGBM is a gradient boosting framework that uses tree
1363 based learning algorithms. It is designed to be distributed and efficient with
1364 the following advantages:
1367 @item Faster training speed and higher efficiency
1368 @item Lower memory usage
1369 @item Better accuracy
1370 @item Parallel and GPU learning supported (not enabled in this package)
1371 @item Capable of handling large-scale data
1373 (license license:expat)))
;; Guix package for Vowpal Wabbit, built from Git with gnu-build-system.
;; NOTE(review): several lines are elided in this view of the file.
1375 (define-public vowpal-wabbit
1376 ;; Language bindings not included.
1378 (name "vowpal-wabbit")
1383 (url "https://github.com/JohnLangford/vowpal_wabbit")
1387 "04bwzk6ifgnz3fmzid8b7avxf9n5pnx9xcjm61nkjng1vv0bpj8x")
1388 (file-name (git-file-name name version))))
;; Point the configure script at the Boost input explicitly.
1393 (list (string-append "--with-boost="
1394 (assoc-ref %build-inputs "boost")))
1396 (modify-phases %standard-phases
1397 (add-after 'unpack 'make-files-writable
1399 (for-each make-file-writable (find-files "." ".*")) #t))
;; Upstream's install target does not ship all headers; copy every
;; header under vowpalwabbit/ into the output include directory.
1400 (add-after 'install 'install-more-headers
1401 (lambda* (#:key outputs #:allow-other-keys)
1404 (install-file file (string-append
1405 (assoc-ref outputs "out")
1406 "/include/vowpalwabbit")))
1407 (find-files "vowpalwabbit" "\\.h$"))
1409 (build-system gnu-build-system)
1410 (home-page "https://github.com/JohnLangford/vowpal_wabbit")
1411 (synopsis "Fast machine learning library for online learning")
1412 (description "Vowpal Wabbit is a machine learning system with techniques
1413 such as online, hashing, allreduce, reductions, learning2search, active, and
1414 interactive learning.")
1415 (license license:bsd-3)))
;; Guix package for Hyperopt, fetched from PyPI.  The check phase skips
;; tests whose dependencies (pyspark, lightgbm, scikit-learn combinations)
;; are not packaged as inputs here.
;; NOTE(review): some lines are elided in this view of the file.
1417 (define-public python-hyperopt
1419 (name "python-hyperopt")
1424 (uri (pypi-uri "hyperopt" version))
1426 (base32 "1k4ma8ci0bxghw7g4ms944zak1pi83yv2d6bxd7fcslm1zalfq5w"))))
1427 (build-system python-build-system)
1430 (modify-phases %standard-phases
1432 (lambda* (#:key inputs outputs tests? #:allow-other-keys)
;; Test against the installed package.
1434 (add-installed-pythonpath inputs outputs)
1435 (invoke "python" "-m" "pytest" "--ignore"
1436 ;; Needs python-pyspark.
1437 "hyperopt/tests/test_spark.py"
1438 ;; Needs both python-scikit-learn and python-lightgbm.
1439 "--ignore" "hyperopt/tests/test_atpe_basic.py"
1440 ;; The tests below need python-lightgbm.
1441 "-k" (string-append "not test_branin"
1442 " and not test_distractor"
1443 " and not test_q1lognormal"
1444 " and not test_quadratic1"
1445 " and not test_twoarms"))))))))
1447 (list python-cloudpickle
1461 (home-page "https://hyperopt.github.io/hyperopt/")
1462 (synopsis "Library for hyperparameter optimization")
1463 (description "Hyperopt is a Python library for serial and parallel
1464 optimization over awkward search spaces, which may include real-valued,
1465 discrete, and conditional dimensions.")
1466 (license license:bsd-3)))
1468 ;; There have been no proper releases yet.
;; Guix package for the Kaldi speech-recognition toolkit, pinned to a Git
;; commit.  The build happens inside the "src" subdirectory and uses
;; Kaldi's non-standard hand-written configure script.
;; NOTE(review): many lines are elided in this view of the file.
1469 (define-public kaldi
1470 (let ((commit "dd107fd594ac58af962031c1689abfdc10f84452")
1472 (openfst openfst-1.7.3)) ;; Temporary bypass for upstream issues
1475 (version (git-version "0" revision commit))
1479 (url "https://github.com/kaldi-asr/kaldi")
1481 (file-name (git-file-name name version))
1484 "0iqbzgn7gzmgwvjfzifpbwwidxx887qmlgmsjkg7b1yzyfv00l21"))))
1485 (build-system gnu-build-system)
1487 `(#:test-target "test"
1489 (modify-phases %standard-phases
1490 (add-after 'unpack 'chdir
1491 (lambda _ (chdir "src") #t))
1493 (lambda* (#:key build system inputs outputs #:allow-other-keys)
;; SSE flags are only valid on x86; strip them elsewhere.
1494 (when (not (or (string-prefix? "x86_64" system)
1495 (string-prefix? "i686" system)))
1496 (substitute* "makefiles/linux_openblas.mk"
1497 (("-msse -msse2") "")))
1498 (substitute* "makefiles/default_rules.mk"
1499 (("/bin/bash") (which "bash")))
1500 (substitute* "Makefile"
1501 (("ext_depend: check_portaudio")
;; Point the portaudio references at the Guix input instead of the
;; vendored tools/portaudio tree.
1503 (substitute* '("online/Makefile"
1504 "onlinebin/Makefile"
1505 "gst-plugin/Makefile")
1506 (("../../tools/portaudio/install")
1507 (assoc-ref inputs "portaudio")))
1508 (substitute* "matrix/Makefile" ;temporary test bypass
1509 (("matrix-lib-test sparse-matrix-test") ""))
1511 ;; This `configure' script doesn't support variables passed as
1512 ;; arguments, nor does it support "prefix".
1513 (let ((out (assoc-ref outputs "out"))
1514 (openblas (assoc-ref inputs "openblas"))
1515 (openfst (assoc-ref inputs "openfst")))
1516 (substitute* "configure"
1517 (("check_for_slow_expf;") "")
1518 ;; This affects the RPATH and also serves as the installation
1520 (("KALDILIBDIR=`pwd`/lib")
1521 (string-append "KALDILIBDIR=" out "/lib")))
1522 (mkdir-p out) ; must exist
1523 (setenv "CONFIG_SHELL" (which "bash"))
1524 (setenv "OPENFST_VER" ,(package-version openfst))
1525 (invoke "./configure"
1528 (string-append "--openblas-root=" openblas)
1529 (string-append "--fst-root=" openfst)))))
;; The default build does not cover the online tools or the GStreamer
;; plugin; build them explicitly.
1530 (add-after 'build 'build-ext-and-gstreamer-plugin
1532 (invoke "make" "-C" "online" "depend")
1533 (invoke "make" "-C" "online")
1534 (invoke "make" "-C" "onlinebin" "depend")
1535 (invoke "make" "-C" "onlinebin")
1536 (invoke "make" "-C" "gst-plugin" "depend")
1537 (invoke "make" "-C" "gst-plugin")
1539 ;; TODO: also install the executables.
1541 (lambda* (#:key outputs #:allow-other-keys)
1542 (let* ((out (assoc-ref outputs "out"))
1543 (inc (string-append out "/include"))
1544 (lib (string-append out "/lib")))
1546 ;; The build phase installed symlinks to the actual
1547 ;; libraries. Install the actual targets.
1548 (for-each (lambda (file)
1549 (let ((target (readlink file)))
1551 (install-file target lib)))
1552 (find-files lib "\\.so"))
;; Install headers preserving their directory layout.
1554 (for-each (lambda (file)
1555 (let ((target-dir (string-append inc "/" (dirname file))))
1556 (install-file file target-dir)))
1557 (find-files "." "\\.h"))
1558 (install-file "gst-plugin/libgstonlinegmmdecodefaster.so"
1559 (string-append lib "/gstreamer-1.0"))
1572 (list `(,glib "bin") ; glib-genmarshal
1577 (home-page "https://kaldi-asr.org/")
1578 (synopsis "Speech recognition toolkit")
1579 (description "Kaldi is an extensible toolkit for speech recognition
1581 (license license:asl2.0))))
;; Guix package for the Kaldi nnet2-online GStreamer plugin, pinned to a
;; Git commit.  Builds against the separately packaged kaldi (libraries)
;; and its source tree (headers, via the "kaldi-src" native input).
;; NOTE(review): some lines are elided in this view of the file.
1583 (define-public gst-kaldi-nnet2-online
1584 (let ((commit "cb227ef43b66a9835c14eb0ad39e08ee03c210ad")
1587 (name "gst-kaldi-nnet2-online")
1588 (version (git-version "0" revision commit))
1592 (url "https://github.com/alumae/gst-kaldi-nnet2-online")
1594 (file-name (git-file-name name version))
1597 "1i6ffwiavxx07ri0lxix6s8q0r31x7i4xxvhys5jxkixf5q34w8g"))))
1598 (build-system gnu-build-system)
1600 `(#:tests? #f ; there are none
;; Tell the Makefile where Kaldi's sources and installed libraries
;; live, and that Kaldi was built as shared libraries.
1602 (list (string-append "SHELL="
1603 (assoc-ref %build-inputs "bash") "/bin/bash")
1604 (string-append "KALDI_ROOT="
1605 (assoc-ref %build-inputs "kaldi-src"))
1606 (string-append "KALDILIBDIR="
1607 (assoc-ref %build-inputs "kaldi") "/lib")
1608 "KALDI_FLAVOR=dynamic")
1610 (modify-phases %standard-phases
1611 (add-after 'unpack 'chdir
1612 (lambda _ (chdir "src") #t))
1614 (lambda* (#:key inputs #:allow-other-keys)
;; Provide GLib and GStreamer headers via CPLUS_INCLUDE_PATH since
;; the Makefile does not use pkg-config for them here.
1615 (let ((glib (assoc-ref inputs "glib")))
1616 (setenv "CXXFLAGS" "-fPIC")
1617 (setenv "CPLUS_INCLUDE_PATH"
1618 (string-append glib "/include/glib-2.0:"
1619 glib "/lib/glib-2.0/include:"
1620 (assoc-ref inputs "gstreamer")
1621 "/include/gstreamer-1.0")))
1622 (substitute* "Makefile"
1623 (("include \\$\\(KALDI_ROOT\\)/src/kaldi.mk") "")
1624 (("\\$\\(error Cannot find") "#"))
1626 (add-before 'build 'build-depend
1627 (lambda* (#:key make-flags #:allow-other-keys)
1628 (apply invoke "make" "depend" make-flags)))
1630 (lambda* (#:key outputs #:allow-other-keys)
1631 (let* ((out (assoc-ref outputs "out"))
1632 (lib (string-append out "/lib/gstreamer-1.0")))
1633 (install-file "libgstkaldinnet2onlinedecoder.so" lib)
1636 (list glib gstreamer jansson openfst kaldi))
1639 ("glib:bin" ,glib "bin") ; glib-genmarshal
1640 ("kaldi-src" ,(package-source kaldi))
1641 ("pkg-config" ,pkg-config)))
1642 (home-page "https://kaldi-asr.org/")
1643 (synopsis "Gstreamer plugin for decoding speech")
1644 (description "This package provides a GStreamer plugin that wraps
1645 Kaldi's @code{SingleUtteranceNnet2Decoder}. It requires iVector-adapted DNN
1646 acoustic models. The iVectors are adapted to the current audio stream
1648 (license license:asl2.0))))
;; Guix package for the Kaldi GStreamer server, a Python application
;; installed by byte-compiling its sources and generating wrapper scripts
;; (no setup.py), hence the custom phases on gnu-build-system.
;; NOTE(review): several lines are elided in this view of the file.
1650 (define-public kaldi-gstreamer-server
1651 ;; This is the tip of the py3 branch
1652 (let ((commit "f68cab490be7eb0da2af1475fbc16655f50a60cb")
1655 (name "kaldi-gstreamer-server")
1656 (version (git-version "0" revision commit))
1660 (url "https://github.com/alumae/kaldi-gstreamer-server")
1662 (file-name (git-file-name name version))
1665 "17lh1368vkg8ngrcbn2phvigzlmalrqg6djx2gg61qq1a0nj87dm"))))
1666 (build-system gnu-build-system)
1668 `(#:tests? #f ; there are no tests that can be run automatically
1669 #:modules ((guix build utils)
1670 (guix build gnu-build-system)
1673 (modify-phases %standard-phases
1676 (lambda* (#:key outputs #:allow-other-keys)
1677 ;; Disable hash randomization to ensure the generated .pycs
1678 ;; are reproducible.
1679 (setenv "PYTHONHASHSEED" "0")
1680 (with-directory-excursion "kaldigstserver"
1681 ;; See https://github.com/alumae/kaldi-gstreamer-server/issues/232
1682 (substitute* "master_server.py"
1683 (("\\.replace\\('\\\\.*") ")"))
1685 ;; This is a Python 2 file
1686 (delete-file "decoder_test.py")
1687 (delete-file "test-buffer.py")
;; Byte-compile every remaining Python file (invocation partially
;; elided here; "-f" forces rebuild).
1689 (for-each (lambda (file)
1693 "-f" ; force rebuild
1695 (find-files "." "\\.py$")))
1698 (lambda* (#:key inputs outputs #:allow-other-keys)
1699 (let* ((out (assoc-ref outputs "out"))
1700 (bin (string-append out "/bin"))
1701 (share (string-append out "/share/kaldi-gstreamer-server/")))
1702 ;; Install Python files
1703 (with-directory-excursion "kaldigstserver"
1704 (for-each (cut install-file <> share)
1705 (find-files "." ".*")))
1707 ;; Install sample configuration files
1708 (for-each (cut install-file <> share)
1709 (find-files "." "\\.yaml"))
1711 ;; Install executables
1713 (let* ((server (string-append bin "/kaldi-gst-server"))
1714 (client (string-append bin "/kaldi-gst-client"))
1715 (worker (string-append bin "/kaldi-gst-worker"))
1716 (PYTHONPATH (getenv "GUIX_PYTHONPATH"))
1717 (GST_PLUGIN_PATH (string-append
1718 (assoc-ref inputs "gst-kaldi-nnet2-online")
1719 "/lib/gstreamer-1.0:${GST_PLUGIN_PATH}"))
;; Each wrapper is a small shell script exporting the Python
;; and GStreamer plugin paths before exec'ing the real script.
1720 (wrap (lambda (wrapper what)
1721 (with-output-to-file wrapper
1725 export GUIX_PYTHONPATH=~a
1726 export GST_PLUGIN_PATH=~a
1727 exec ~a ~a/~a \"$@\"~%"
1728 (which "bash") PYTHONPATH GST_PLUGIN_PATH
1729 (which "python") share what)))
1730 (chmod wrapper #o555))))
1732 (list server client worker)
1733 (list "master_server.py"
1738 `(("gst-kaldi-nnet2-online" ,gst-kaldi-nnet2-online)
1739 ("python" ,python-wrapper)
1740 ("python-pygobject" ,python-pygobject)
1741 ("python-pyyaml" ,python-pyyaml)
1742 ("python-tornado" ,python-tornado-6)))
1743 (home-page "https://github.com/alumae/kaldi-gstreamer-server")
1744 (synopsis "Real-time full-duplex speech recognition server")
1745 (description "This is a real-time full-duplex speech recognition server,
1746 based on the Kaldi toolkit and the GStreamer framework and implemented in
1748 (license license:bsd-2))))
1750 ;; Note that Tensorflow includes a "third_party" directory, which seems to not
1751 ;; only contain modified subsets of upstream library source code, but also
1752 ;; adapter headers provided by Google (such as the fft.h header, which is not
1753 ;; part of the upstream project code). The Tensorflow code includes headers
1754 ;; from the "third_party" directory. It does not look like we can replace
1755 ;; these headers with unmodified upstream files, so we keep them.
1756 (define-public tensorflow
1764 (url "https://github.com/tensorflow/tensorflow")
1765 (commit (string-append "v" version))))
1766 (file-name (string-append "tensorflow-" version "-checkout"))
1769 "0a9kwha395g3wgxfwln5j8vn9nkspmd75xldrlqdq540w996g8xa"))
1771 (search-patches "tensorflow-c-api-fix.patch"))))
1772 (build-system cmake-build-system)
1774 `(#:tests? #f ; no "check" target
1775 #:build-type "Release"
1777 (let ((protobuf (assoc-ref %build-inputs "protobuf"))
1778 (protobuf:native (assoc-ref %build-inputs "protobuf:native"))
1779 (jsoncpp (assoc-ref %build-inputs "jsoncpp"))
1780 (snappy (assoc-ref %build-inputs "snappy"))
1781 (sqlite (assoc-ref %build-inputs "sqlite")))
1783 ;; Use protobuf from Guix
1784 (string-append "-Dprotobuf_STATIC_LIBRARIES="
1785 protobuf "/lib/libprotobuf.so")
1786 (string-append "-DPROTOBUF_PROTOC_EXECUTABLE="
1787 protobuf:native "/bin/protoc")
1789 ;; Use snappy from Guix
1790 (string-append "-Dsnappy_STATIC_LIBRARIES="
1791 snappy "/lib/libsnappy.so")
1792 ;; Yes, this is not actually the include directory but a prefix...
1793 (string-append "-Dsnappy_INCLUDE_DIR=" snappy)
1795 ;; Use jsoncpp from Guix
1796 (string-append "-Djsoncpp_STATIC_LIBRARIES="
1797 jsoncpp "/lib/libjsoncpp.so")
1798 ;; Yes, this is not actually the include directory but a prefix...
1799 (string-append "-Djsoncpp_INCLUDE_DIR=" jsoncpp)
1801 ;; Use sqlite from Guix
1802 (string-append "-Dsqlite_STATIC_LIBRARIES="
1803 sqlite "/lib/libsqlite.a")
1805 ;; Use system libraries wherever possible. Currently, this
1806 ;; only affects zlib.
1807 "-Dsystemlib_ALL=ON"
1808 "-Dtensorflow_ENABLE_POSITION_INDEPENDENT_CODE=ON"
1809 "-Dtensorflow_BUILD_SHARED_LIB=ON"
1810 "-Dtensorflow_OPTIMIZE_FOR_NATIVE_ARCH=OFF"
1811 "-Dtensorflow_ENABLE_SSL_SUPPORT=OFF"
1812 "-Dtensorflow_BUILD_CONTRIB_KERNELS=OFF"))
1815 #:modules ((ice-9 ftw)
1817 (guix build cmake-build-system)
1818 ((guix build python-build-system)
1819 #:select (python-version)))
1820 #:imported-modules (,@%cmake-build-system-modules
1821 (guix build python-build-system))
1823 (modify-phases %standard-phases
1824 (add-after 'unpack 'set-source-file-times-to-1980
1825 ;; At the end of the tf_python_build_pip_package target, a ZIP
1826 ;; archive should be generated via bdist_wheel, but it fails with
1827 ;; "ZIP does not support timestamps before 1980". Luckily,
1828 ;; SOURCE_DATE_EPOCH is respected, which we set to some time in
1830 (lambda _ (setenv "SOURCE_DATE_EPOCH" "315532800") #t))
1831 (add-after 'unpack 'python3.9-compatibility
1833 ;; See https://github.com/tensorflow/tensorflow/issues/20517#issuecomment-406373913
1834 (substitute* '("tensorflow/python/eager/pywrap_tfe_src.cc"
1835 "tensorflow/python/lib/core/ndarray_tensor.cc"
1836 "tensorflow/python/lib/core/py_func.cc")
1837 (("PyUnicode_AsUTF8") "(char *)PyUnicode_AsUTF8"))
1838 (substitute* "tensorflow/c/eager/c_api.h"
1839 (("unsigned char async")
1840 "unsigned char is_async"))
1842 ;; Remove dependency on tensorboard, a complicated but probably
1843 ;; optional package.
1844 (substitute* "tensorflow/tools/pip_package/setup.py"
1845 ((".*'tensorboard >.*") ""))
1847 ;; Fix the build with python-3.8, taken from rejected upstream patch:
1848 ;; https://github.com/tensorflow/tensorflow/issues/34197
1849 (substitute* (find-files "tensorflow/python" ".*\\.cc$")
1850 (("(nullptr,)(\\ +/. tp_print)" _ _ tp_print)
1851 (string-append "NULL, " tp_print)))
1853 ;; Fix the build with numpy >= 1.19.
1854 ;; Suggested in https://github.com/tensorflow/tensorflow/issues/41086#issuecomment-656833081
1855 (substitute* "tensorflow/python/lib/core/bfloat16.cc"
1856 (("void BinaryUFunc\\(char\\*\\* args, npy_intp\\* dimensions, npy_intp\\* steps,")
1857 "void BinaryUFunc(char** args, npy_intp const* dimensions, npy_intp const* steps,")
1858 (("void CompareUFunc\\(char\\*\\* args, npy_intp\\* dimensions, npy_intp\\* steps,")
1859 "void CompareUFunc(char** args, npy_intp const* dimensions, npy_intp const* steps,"))))
1860 (add-after 'python3.9-compatibility 'chdir
1861 (lambda _ (chdir "tensorflow/contrib/cmake")))
1862 (add-after 'chdir 'disable-downloads
1863 (lambda* (#:key inputs #:allow-other-keys)
1864 (substitute* (find-files "external" "\\.cmake$")
1865 (("GIT_REPOSITORY.*") "")
1868 "DOWNLOAD_COMMAND \"\"\nPREFIX "))
1870 ;; Use packages from Guix
1871 (let ((grpc (assoc-ref inputs "grpc")))
1872 (substitute* "CMakeLists.txt"
1874 (("include\\(sqlite\\)") "")
1875 (("\\$\\{sqlite_STATIC_LIBRARIES\\}")
1876 (search-input-file inputs "/lib/libsqlite3.so"))
1877 (("sqlite_copy_headers_to_destination") "")
1880 (("include\\(png\\)") "")
1881 (("\\$\\{png_STATIC_LIBRARIES\\}")
1882 (search-input-file inputs "/lib/libpng16.so"))
1883 (("png_copy_headers_to_destination") "")
1886 (("include\\(jpeg\\)") "")
1887 (("\\$\\{jpeg_STATIC_LIBRARIES\\}")
1888 (search-input-file inputs "/lib/libjpeg.so"))
1889 (("jpeg_copy_headers_to_destination") "")
1892 (("include\\(gif\\)") "")
1893 (("\\$\\{gif_STATIC_LIBRARIES\\}")
1894 (search-input-file inputs "/lib/libgif.so"))
1895 (("gif_copy_headers_to_destination") "")
1898 (("include\\(lmdb\\)") "")
1899 (("\\$\\{lmdb_STATIC_LIBRARIES\\}")
1900 (search-input-file inputs "/lib/liblmdb.so"))
1901 (("lmdb_copy_headers_to_destination") "")
1904 (("include\\(protobuf\\)") "")
1905 (("protobuf_copy_headers_to_destination") "")
1906 (("^ +protobuf") "")
1909 (("include\\(grpc\\)")
1910 "find_package(grpc REQUIRED NAMES gRPC)")
1911 (("list\\(APPEND tensorflow_EXTERNAL_DEPENDENCIES grpc\\)") "")
1914 (("include\\(eigen\\)")
1915 (string-append "find_package(eigen REQUIRED NAMES Eigen3)
1916 set(eigen_INCLUDE_DIRS ${CMAKE_CURRENT_BINARY_DIR}/external/eigen_archive "
1917 (assoc-ref inputs "eigen") "/include/eigen3)"))
1921 (("include\\(snappy\\)")
1922 "add_definitions(-DTF_USE_SNAPPY)")
1923 (("list\\(APPEND tensorflow_EXTERNAL_DEPENDENCIES snappy\\)") "")
1926 (("include\\(jsoncpp\\)") "")
1927 (("^ +jsoncpp") ""))
1929 (substitute* "tf_core_framework.cmake"
1931 (("\\$\\{GRPC_BUILD\\}/grpc_cpp_plugin")
1932 (which "grpc_cpp_plugin"))
1933 ;; Link with gRPC libraries
1934 (("add_library\\(tf_protos_cc.*" m)
1936 (format #f "\ntarget_link_libraries(tf_protos_cc PRIVATE \
1937 ~a/lib/libgrpc++_unsecure.a \
1938 ~a/lib/libgrpc_unsecure.a \
1939 ~a/lib/libaddress_sorting.a \
1944 (assoc-ref inputs "c-ares"))))))
1945 (substitute* "tf_tools.cmake"
1946 (("add_dependencies\\(\\$\\{proto_text.*") ""))
1947 ;; Remove dependency on bundled grpc
1948 (substitute* "tf_core_distributed_runtime.cmake"
1949 (("tf_core_cpu grpc") "tf_core_cpu"))
1951 ;; This directory is a dependency of many targets.
1952 (mkdir-p "protobuf")))
1953 (add-after 'configure 'unpack-third-party-sources
1954 (lambda* (#:key inputs outputs #:allow-other-keys)
1955 ;; This is needed to configure bundled packages properly.
1956 (setenv "CONFIG_SHELL" (which "bash"))
1959 (let* ((what (assoc-ref inputs (string-append name "-src")))
1960 (name* (string-map (lambda (c)
1963 (where (string-append "../build/" name* "/src/" name*)))
1965 ((string-suffix? ".zip" what)
1967 (with-directory-excursion where
1968 (invoke "unzip" what)))
1969 ((string-suffix? ".tar.gz" what)
1971 (invoke "tar" "xf" what
1972 "-C" where "--strip-components=1"))
1974 (let ((parent (dirname where)))
1976 (with-directory-excursion parent
1977 (when (file-exists? name*)
1978 (delete-file-recursively name*))
1979 (copy-recursively what name*)
1980 (map make-file-writable
1981 (find-files name* ".*"))))))))
1991 (rename-file "../build/cub/src/cub/cub-1.8.0/"
1992 "../build/cub/src/cub/cub/")
1995 (string-append "-Wl,-rpath="
1996 (assoc-ref outputs "out") "/lib"))))
1997 (add-after 'unpack 'fix-python-build
1998 (lambda* (#:key inputs outputs #:allow-other-keys)
1999 (mkdir-p "protobuf-src")
2000 (invoke "tar" "xf" (assoc-ref inputs "protobuf:src")
2001 "-C" "protobuf-src" "--strip-components=1")
2002 (mkdir-p "eigen-src")
2003 (copy-recursively (assoc-ref inputs "eigen:src") "eigen-src")
2005 (substitute* "tensorflow/contrib/cmake/tf_python.cmake"
2006 ;; Take protobuf source files from our source package.
2007 (("\\$\\{CMAKE_CURRENT_BINARY_DIR\\}/protobuf/src/protobuf/src/google")
2008 (string-append (getcwd) "/protobuf-src/src/google")))
2010 (substitute* '("tensorflow/contrib/cmake/tf_shared_lib.cmake"
2011 "tensorflow/contrib/cmake/tf_python.cmake")
2012 ;; Take Eigen source files from our source package.
2013 (("\\$\\{CMAKE_CURRENT_BINARY_DIR\\}/eigen/src/eigen/")
2014 (string-append (getcwd) "/eigen-src/"))
2015 ;; Take Eigen headers from our own package.
2016 (("\\$\\{CMAKE_CURRENT_BINARY_DIR\\}/external/eigen_archive")
2017 (search-input-directory inputs "/include/eigen3")))
2019 ;; Correct the RUNPATH of ops libraries generated for Python.
2020 ;; TODO: this doesn't work :(
2021 ;; /gnu/store/...-tensorflow-1.9.0/lib/python3.7/site-packages/tensorflow/contrib/seq2seq/python/ops/lib_beam_search_ops.so:
2022 ;; warning: RUNPATH contains bogus entries: ("/tmp/guix-build-tensorflow-1.9.0.drv-0/source/tensorflow/contrib/build")
2023 ;; /gnu/store/...-tensorflow-1.9.0/lib/python3.7/site-packages/tensorflow/contrib/seq2seq/python/ops/lib_beam_search_ops.so:
2024 ;; error: depends on 'libpywrap_tensorflow_internal.so', which
2025 ;; cannot be found in RUNPATH ...
2026 (substitute* "tensorflow/contrib/cmake/tf_cc_ops.cmake"
2027 (("set_target_properties.*")
2028 (string-append "set_target_properties(${_AT_TARGET} PROPERTIES \
2029 COMPILE_FLAGS ${target_compile_flags} \
2030 INSTALL_RPATH_USE_LINK_PATH TRUE \
2031 INSTALL_RPATH " (assoc-ref outputs "out") "/lib)\n")))))
2032 (add-after 'unpack 'patch-cmake-file-to-install-c-headers
2034 (substitute* "tensorflow/contrib/cmake/tf_c.cmake"
2035 (("if\\(tensorflow_BUILD_PYTHON_BINDINGS" m)
2037 "install(DIRECTORY ${tensorflow_source_dir}/tensorflow/c/ \
2038 DESTINATION include/tensorflow/c FILES_MATCHING PATTERN \"*.h\")\n" m)))))
2039 (add-after 'build 'build-c-bindings
2040 (lambda* (#:key outputs parallel-build? #:allow-other-keys)
2041 (invoke "make" "-j" (if parallel-build?
2042 (number->string (parallel-job-count))
2045 (add-after 'install 'build-pip-package
2046 (lambda* (#:key outputs parallel-build? #:allow-other-keys)
2047 (invoke "make" "-j" (if parallel-build?
2048 (number->string (parallel-job-count))
2050 "tf_python_build_pip_package")))
2051 (add-after 'build-pip-package 'install-python
2052 (lambda* (#:key inputs outputs #:allow-other-keys)
2053 (let ((out (assoc-ref outputs "out"))
2054 (wheel (car (find-files "../build/tf_python/dist/" "\\.whl$")))
2055 (python-version (python-version
2056 (assoc-ref inputs "python"))))
2057 (invoke "python" "-m" "pip" "install" wheel
2058 (string-append "--prefix=" out))
2060 ;; XXX: broken RUNPATH, see fix-python-build phase.
2063 out "/lib/python" python-version
2064 "/site-packages/tensorflow/contrib/"
2065 "seq2seq/python/ops/lib_beam_search_ops.so"))))))))
2067 `(("pkg-config" ,pkg-config)
2068 ("protobuf:native" ,protobuf-3.6) ; protoc
2069 ("protobuf:src" ,(package-source protobuf-3.6))
2070 ("eigen:src" ,(package-source eigen-for-tensorflow))
2071 ;; install_pip_packages.sh wants setuptools 39.1.0 specifically.
2072 ("python-setuptools" ,python-setuptools-for-tensorflow)
2074 ;; The commit hashes and URLs for third-party source code are taken
2075 ;; from "tensorflow/workspace.bzl".
2077 ,(let ((commit "ee7aa02")
2082 (url "https://boringssl.googlesource.com/boringssl")
2084 (file-name (string-append "boringssl-0-" revision
2085 (string-take commit 7)
2089 "1jf693q0nw0adsic6cgmbdx6g7wr4rj4vxa8j1hpn792fqhd8wgw")))))
2091 ,(let ((version "1.8.0"))
2094 (uri (string-append "https://mirror.bazel.build/github.com/NVlabs/"
2095 "cub/archive/" version ".zip"))
2096 (file-name (string-append "cub-" version ".zip"))
2099 "1hsqikqridb90dkxkjr2918dcry6pfh46ccnwrzawl56aamhdykb")))))
2100 ("double-conversion-src"
2101 ,(let ((commit "5664746")
2106 (url "https://github.com/google/double-conversion")
2109 (git-file-name "double-conversion"
2110 (string-append "0-" revision "."
2111 (string-take commit 7))))
2114 "1h5lppqqxcvdg5jq42i5msgwx20ryij3apvmndflngrgdpc04gn1")))))
2116 ,(let ((commit "816a4ae622e964763ca0862d9dbd19324a1eaf45"))
2120 "https://mirror.bazel.build/github.com/google/farmhash/archive/"
2122 (file-name (string-append "farmhash-0-" (string-take commit 7)
2126 "185b2xdxl4d4cnsnv6abg8s22gxvx8673jq2yaq85bz4cdy58q35")))))
2127 ;; The license notice on the home page at
2128 ;; http://www.kurims.kyoto-u.ac.jp/~ooura/fft.html says:
2129 ;; Copyright Takuya OOURA, 1996-2001
2131 ;; You may use, copy, modify and distribute this code for any purpose
2132 ;; (include commercial use) and without fee. Please refer to this
2133 ;; package when you modify this code.
2135 ;; We take the identical tarball from the Bazel mirror, because the URL
2136 ;; at the home page is not versioned and might change.
2140 (uri "https://mirror.bazel.build/www.kurims.kyoto-u.ac.jp/~ooura/fft.tgz")
2141 (file-name "fft2d.tar.gz")
2144 "15jjkfvhqvl2c0753d2di8hz0pyzn598g74wqy79awdrf1y67fsj"))))
2146 ,(let ((commit "be5edafc2e1a455768e260ccd68ae7317b6690ee")
2151 (url "https://github.com/google/highwayhash")
2153 (file-name (string-append "highwayhash-0-" revision
2154 (string-take commit 7)
2158 "154jwf98cyy54hldr94pgjn85zynly3abpnc1avmb8a18lzwjyb6")))))
2160 ,(let ((version "0559ce013feac8db639ee1bf776aca0325d28777")
2164 (uri (string-append "https://mirror.bazel.build/"
2165 "github.com/google/nsync/archive/"
2167 (file-name (string-append "nsync-0." revision
2168 "-" (string-take version 7)
2172 "0qdkyqym34x739mmzv97ah5r7ph462v5xkxqxvidmcfqbi64b132")))))
2174 ,(let ((commit "e7efc48")
2179 (url "https://github.com/google/re2")
2181 (file-name (string-append "re2-0-" revision
2182 (string-take commit 7)
2186 "161g9841rjfsy5pn52fcis0s9hdr7rxvb06pad38j5rppfihvign")))))
2187 ("googletest" ,googletest)
2191 `(("python-absl-py" ,python-absl-py)
2192 ("python-astor" ,python-astor)
2193 ("python-gast" ,python-gast)
2194 ("python-grpcio" ,python-grpcio)
2195 ("python-numpy" ,python-numpy)
2196 ("python-protobuf" ,python-protobuf-3.6)
2197 ("python-six" ,python-six)
2198 ("python-termcolo" ,python-termcolor)
2199 ("python-wheel" ,python-wheel)))
2201 `(("c-ares" ,c-ares)
2202 ("eigen" ,eigen-for-tensorflow)
2203 ("gemmlowp" ,gemmlowp-for-tensorflow)
2205 ("libjpeg" ,libjpeg-turbo)
2208 ("grpc" ,grpc-1.16.1 "static")
2209 ("grpc:bin" ,grpc-1.16.1)
2210 ("jsoncpp" ,jsoncpp-for-tensorflow)
2213 ("protobuf" ,protobuf-3.6)
2214 ("python" ,python-wrapper)
2216 (home-page "https://tensorflow.org")
2217 (synopsis "Machine learning framework")
2219 "TensorFlow is a flexible platform for building and training machine
2220 learning models. It provides a library for high performance numerical
2221 computation and includes high level Python APIs, including both a sequential
2222 API for beginners that allows users to build models quickly by plugging
2223 together building blocks and a subclassing API with an imperative style for
2224 advanced research.")
2225 (license license:asl2.0)))
;; TensorFlow Lite, built with CMake rather than Bazel.  Bundled third-party
;; sources (farmhash, fft2d, neon2sse, gemmlowp, ruy) are unpacked into /tmp
;; and pointed to with -D*_SOURCE_DIR flags so nothing is fetched at build
;; time; abseil-cpp, eigen and flatbuffers come from regular Guix inputs.
2227 (define-public tensorflow-lite
2229 (name "tensorflow-lite")
2235 (url "https://github.com/tensorflow/tensorflow")
2236 (commit (string-append "v" version))))
2237 (file-name (git-file-name name version))
2240 "1jdw2i1rq06zqd6aabh7bbm0avsg4pygnfmd7gviv0blhih9054l"))))
2241 (build-system cmake-build-system)
2243 `(#:tests? #false ; no "check" target
2244 #:build-type "Release"
2247 "-DTFLITE_ENABLE_GPU=OFF"
2248 "-DTFLITE_ENABLE_RUY=OFF"
2250 ;; TODO: The build system attempts to build xnnpack from source. We
2251 ;; would like to use our xnnpack package here, but this requires more
2253 "-DTFLITE_ENABLE_XNNPACK=OFF"
2255 ;; Pretend we've already fetched abseil. We won't actually build it
2256 ;; but use the existing package.
2257 "-Dabseil-cpp_POPULATED=TRUE"
2259 ;; Don't fetch the sources. We have already built flatbuffers.
2260 "-Dflatbuffers_POPULATED=TRUE"
2262 "-DFFT2D_SOURCE_DIR=/tmp/fft2d"
2263 "-Dneon2sse_SOURCE_DIR=/tmp/neon2sse"
2264 "-Dneon2sse_BINARY_DIR=/tmp/neon2sse-bin"
2265 "-DFARMHASH_SOURCE_DIR=/tmp/farmhash"
2266 "-Dgemmlowp_SOURCE_DIR=/tmp/gemmlowp"
2267 (string-append "-DRUY_SOURCE_DIR="
2268 (assoc-ref %build-inputs "ruy-src")))
2270 (modify-phases %standard-phases
2271 (add-after 'unpack 'chdir
2272 (lambda _ (chdir "tensorflow/lite")))
2273 (add-after 'chdir 'copy-sources
2274 (lambda* (#:key inputs #:allow-other-keys)
2275 ;; Use external cmake finders instead of these stubs that won't
2276 ;; find anything but the bundled sources.
2277 (delete-file "tools/cmake/modules/Findabsl.cmake")
2278 (delete-file "tools/cmake/modules/Findeigen.cmake")
2280 (substitute* "CMakeLists.txt"
2281 (("find_package\\(eigen REQUIRED")
2282 "find_package(eigen REQUIRED NAMES Eigen3"))
2283 (substitute* "tools/cmake/modules/Findflatbuffers.cmake"
2284 (("get_target_property.*")
2285 (format #false "set(FLATBUFFERS_INCLUDE_DIRS ~a/include)\n"
2286 (assoc-ref inputs "flatbuffers"))))
2288 ;; Don't fetch source code; we already have everything we need.
2289 (substitute* '("tools/cmake/modules/fft2d.cmake"
2290 "tools/cmake/modules/ruy.cmake"
2291 "tools/cmake/modules/farmhash.cmake"
2292 "tools/cmake/modules/neon2sse.cmake"
2293 "tools/cmake/modules/gemmlowp.cmake")
2294 (("OverridableFetchContent_Populate.*") ""))
2296 (mkdir-p "/tmp/farmhash")
2297 (with-directory-excursion "/tmp/farmhash"
2298 (invoke "tar" "--strip-components=1"
2299 "-xf" (assoc-ref inputs "farmhash-src")))
2301 (mkdir-p "/tmp/fft2d")
2302 (with-directory-excursion "/tmp/fft2d"
2303 (invoke "tar" "--strip-components=1"
2304 "-xf" (assoc-ref inputs "fft2d-src")))
2306 (copy-recursively (assoc-ref inputs "neon2sse-src")
2308 (copy-recursively (assoc-ref inputs "gemmlowp-src")
;; Build the C shared library (tensorflow/lite/c) rather than the full
;; library; the install phase below copies libtensorflowlite_c.so and the
;; public headers manually since there is no install target.
2310 (add-after 'copy-sources 'prepare-shared-library-build
2311 (lambda _ (chdir "c")))
2313 (lambda* (#:key outputs #:allow-other-keys)
2314 (let* ((out (assoc-ref outputs "out"))
2315 (lib (string-append out "/lib"))
2316 (headers (string-append out "/include/tensorflow/lite")))
2317 (install-file "../build/libtensorflowlite_c.so" lib)
2318 (with-directory-excursion ".."
2321 (let ((target-dir (string-append headers "/" (dirname file))))
2322 (install-file file target-dir)))
2323 (find-files "." "\\.h$")))))))))
2325 `(("abseil-cpp" ,abseil-cpp-20200923.3)
2326 ("eigen" ,eigen-for-tensorflow-lite)
2327 ("flatbuffers" ,flatbuffers)
2328 ("python" ,python)))
2330 `(("pkg-config" ,pkg-config)
2332 ;; The commit hash is taken from
2333 ;; "tensorflow/lite/tools/cmake/modules/gemmlowp.cmake".
2334 ,(let ((commit "fda83bdc38b118cc6b56753bd540caa49e570745"))
2338 (url "https://github.com/google/gemmlowp")
2340 (file-name (git-file-name "gemmlowp" (string-take commit 8)))
2343 "1sbp8kmr2azwlvfbzryy1frxi99jhsh1nc93bdbxdf8zdgpv0kxl")))))
2345 ,(let ((commit "a1652fd5253afbf3e39357b012974f93511f6108"))
2349 (url "https://github.com/intel/ARM_NEON_2_x86_SSE")
2351 (file-name (git-file-name "neon2sse" (string-take commit 8)))
2354 "1q8gkxag9wlnwdwad2pclsrkwzrdjy94hyrkayrsvxyj7szb5y8i")))))
2356 ,(let ((commit "816a4ae622e964763ca0862d9dbd19324a1eaf45"))
2360 "https://mirror.bazel.build/github.com/google/farmhash/archive/"
2362 (file-name (git-file-name "farmhash" (string-take commit 8)))
2365 "185b2xdxl4d4cnsnv6abg8s22gxvx8673jq2yaq85bz4cdy58q35")))))
2369 (uri (string-append "https://storage.googleapis.com/"
2370 "mirror.tensorflow.org/"
2371 "www.kurims.kyoto-u.ac.jp/~ooura/fft2d.tgz"))
2372 (file-name "fft2d.tar.gz")
2375 "1jfflzi74fag9z4qmgwvp90aif4dpbr1657izmxlgvf4hy8fk9xd"))))
2377 ,(let ((commit "9c56af3fce210a8a103eda19bd6f47c08a9e3d90"))
2381 (url "https://github.com/google/ruy")
2383 (recursive? #true)))
2384 (file-name (git-file-name "ruy" (string-take commit 8)))
2387 "1cfd5gk6kaj8kbl3h98gx1ap8czd59y6p8qq8nr28fklpyzf5cis")))))))
2388 (home-page "https://tensorflow.org")
2389 (synopsis "Machine learning framework")
2391 "TensorFlow is a flexible platform for building and training machine
2392 learning models. This package provides the \"lite\" variant for mobile
2394 (license license:asl2.0)))
;; DMLC-Core: shared C++ support library used by DMLC projects such as
;; xgboost (which depends on it via a system-dmlc-core patch below).
;; -DGOOGLE_TEST=ON enables the bundled Google Test based check phase.
2396 (define-public dmlc-core
2404 (url "https://github.com/dmlc/dmlc-core")
2405 (commit (string-append "v" version))))
2406 (file-name (git-file-name name version))
2408 (base32 "1x4ad1jhn84fywlk031fmv1kxyiscclmrqn9hhj8gz0mh7z9vcrh"))))
2409 (build-system cmake-build-system)
2412 (list "-DGOOGLE_TEST=ON")))
2414 `(("googletest" ,googletest)
2415 ("python" ,python-wrapper)))
2416 (home-page "https://github.com/dmlc/dmlc-core")
2417 (synopsis "Common bricks library for machine learning")
2419 "DMLC-Core is the backbone library to support all DMLC projects,
2420 offers the bricks to build efficient and scalable distributed machine
2421 learning libraries.")
2422 (license license:asl2.0)))
;; XGBoost C++ core (gradient-boosted trees), built with CMake against the
;; system dmlc-core (see the search-patches entry).  The Python bindings are
;; packaged separately as python-xgboost from this same source.
2424 (define-public xgboost
2432 (url "https://github.com/dmlc/xgboost")
2433 (commit (string-append "v" version))))
2434 (file-name (git-file-name name version))
2435 (patches (search-patches "xgboost-use-system-dmlc-core.patch"))
2437 (base32 "0qx04y7cz8z7qv6bk9q7d7ba9b7xzj53l83l2x9ykdwhzacc3dn0"))))
2438 (build-system cmake-build-system)
2440 `(#:configure-flags (list "-DGOOGLE_TEST=ON")))
2442 `(("googletest" ,googletest)
2443 ("python" ,python-wrapper)))
2446 (home-page "https://xgboost.ai/")
2447 (synopsis "Gradient boosting (GBDT, GBRT or GBM) library")
2449 "XGBoost is an optimized distributed gradient boosting library designed
2450 to be highly efficient, flexible and portable. It implements machine learning
2451 algorithms under the Gradient Boosting framework. XGBoost provides a parallel
2452 tree boosting (also known as GBDT, GBM) that solve many data science problems
2453 in a fast and accurate way.")
2454 (license license:asl2.0)))
;; Python bindings for xgboost, built from the same source as the C++
;; package.  The build skips recompiling libxgboost.so and instead symlinks
;; the shared library from the 'xgboost' input; a VERSION file expected by
;; the Python package is written at install time.
2456 (define-public python-xgboost
2459 (name "python-xgboost")
2460 (source (package-source xgboost))
2461 (build-system python-build-system)
2464 (modify-phases %standard-phases
2465 (add-after 'unpack 'preparations
2467 ;; Move python-package content to parent directory to silence
2468 ;; some warnings about files not being found if we chdir.
2469 (rename-file "python-package/xgboost" "xgboost")
2470 (rename-file "python-package/README.rst" "README.rst")
2471 (rename-file "python-package/setup.cfg" "setup.cfg")
2472 (rename-file "python-package/setup.py" "setup.py")
2473 ;; Skip rebuilding libxgboost.so.
2474 (substitute* "setup.py"
2475 (("ext_modules=\\[CMakeExtension\\('libxgboost'\\)\\],") "")
2476 (("'install_lib': InstallLib,") ""))))
2477 (add-after 'install 'install-version-and-libxgboost
2478 (lambda* (#:key inputs outputs #:allow-other-keys)
2479 (let* ((out (assoc-ref outputs "out"))
2480 (pylib (string-append out "/lib/python"
2481 ,(version-major+minor
2482 (package-version python))
2484 (xgbdir (string-append pylib "/xgboost"))
2485 (version-file (string-append xgbdir "/VERSION"))
2486 (libxgboost (string-append (assoc-ref inputs "xgboost")
2487 "/lib/libxgboost.so")))
2488 (with-output-to-file version-file
2490 (display ,(package-version xgboost))))
2491 (mkdir-p (string-append xgbdir "/lib"))
2492 (symlink libxgboost (string-append xgbdir "/lib"
2493 "/libxgboost.so")))))
2495 ;; Python-specific tests are located in tests/python.
2496 (lambda* (#:key inputs outputs tests? #:allow-other-keys)
2498 (add-installed-pythonpath inputs outputs)
2499 (invoke "pytest" "tests/python"
2500 ;; FIXME: CLI tests fail with PermissionError.
2501 "--ignore" "tests/python/test_cli.py" "-k"
2503 "not test_cli_regression_demo"
2504 ;; The tests below open a network connection.
2505 " and not test_model_compatibility"
2506 " and not test_get_group"
2507 " and not test_cv_no_shuffle"
2509 " and not test_training"
2510 ;; "'['./runexp.sh']' returned non-zero exit status 1"
2511 " and not test_cli_binary_classification"))))))))
2513 (list python-pandas python-pytest python-scikit-learn))
2517 (list python-numpy python-scipy))
2518 (synopsis "Python interface for the XGBoost library")))
;; Interpretable ML helper library from PyPI; shared IO/interface code used
;; by model-explanation tools (see description).
2520 (define-public python-iml
2527 (uri (pypi-uri "iml" version))
2530 "1k8szlpm19rcwcxdny9qdm3gmaqq8akb4xlvrzyz8c2d679aak6l"))))
2531 (build-system python-build-system)
2533 (list python-ipython python-numpy python-pandas python-scipy))
2536 (home-page "https://github.com/interpretable-ml/iml")
2537 (synopsis "Interpretable Machine Learning (iML) package")
2538 (description "Interpretable ML (iML) is a set of data type objects,
2539 visualizations, and interfaces that can be used by any method designed to
2540 explain the predictions of machine learning models (or really the output of
2541 any function). It currently contains the interface and IO code from the Shap
2542 project, and it will potentially also do the same for the Lime project.")
2543 (license license:expat)))
;; Pretrained-model reference implementations for Keras.  Tests are disabled
;; to break the dependency cycle with python-keras (see comment below).
2545 (define-public python-keras-applications
2547 (name "python-keras-applications")
2552 (uri (pypi-uri "Keras_Applications" version))
2555 "1rcz31ca4axa6kzhjx4lwqxbg4wvlljkj8qj9a7p9sfd5fhzjyam"))))
2556 (build-system python-build-system)
2557 ;; The tests require Keras, but this package is needed to build Keras.
2558 (arguments '(#:tests? #f))
2560 (list python-h5py python-numpy))
2562 (list python-pytest python-pytest-cov python-pytest-pep8
2563 python-pytest-xdist))
2564 (home-page "https://github.com/keras-team/keras-applications")
2565 (synopsis "Reference implementations of popular deep learning models")
2567 "This package provides reference implementations of popular deep learning
2568 models for use with the Keras deep learning framework.")
2569 (license license:expat)))
;; Data preprocessing/augmentation module split out of Keras; a build-time
;; dependency of python-keras below.
2571 (define-public python-keras-preprocessing
2573 (name "python-keras-preprocessing")
2578 (uri (pypi-uri "Keras_Preprocessing" version))
2581 "1r98nm4k1svsqjyaqkfk23i31bl1kcfcyp7094yyj3c43phfp3as"))))
2582 (build-system python-build-system)
2584 (list python-numpy python-six))
2592 (home-page "https://github.com/keras-team/keras-preprocessing/")
2593 (synopsis "Data preprocessing and augmentation for deep learning models")
2595 "Keras Preprocessing is the data preprocessing and data augmentation
2596 module of the Keras deep learning library. It provides utilities for working
2597 with image data, text data, and sequence data.")
2598 (license license:expat)))
;; Keras with only the TensorFlow backend: the Theano and CNTK backend files
;; (and their tests) are removed since those backends are unavailable here.
;; The check phase deletes network-dependent and known-broken tests before
;; running pytest in parallel.
2600 (define-public python-keras
2602 (name "python-keras")
2607 (uri (pypi-uri "Keras" version))
2608 (patches (search-patches "python-keras-integration-test.patch"))
2611 "1j8bsqzh49vjdxy6l1k4iwax5vpjzniynyd041xjavdzvfii1dlh"))))
2612 (build-system python-build-system)
2615 (modify-phases %standard-phases
2616 (add-after 'unpack 'remove-tests-for-unavailable-features
2618 (delete-file "keras/backend/theano_backend.py")
2619 (delete-file "keras/backend/cntk_backend.py")
2620 (delete-file "tests/keras/backend/backend_test.py")
2621 ;; FIXME: This doesn't work because Tensorflow is missing the
2622 ;; coder ops library.
2623 (delete-file "tests/keras/test_callbacks.py")))
2625 (lambda* (#:key tests? #:allow-other-keys)
2627 ;; These tests attempt to download data files from the internet.
2628 (delete-file "tests/integration_tests/test_datasets.py")
2629 (delete-file "tests/integration_tests/imagenet_utils_test.py")
2630 ;; Backport https://github.com/keras-team/keras/pull/12479.
2631 (substitute* "tests/keras/engine/test_topology.py"
2632 (("np.ones\\(\\(3, 2\\)\\)")
2634 (invoke "python" "-m" "pytest" "tests"
2636 ;; FIXME: python-build-system lacks PARALLEL-TESTS?
2637 "-n" (number->string (parallel-job-count))
2640 ;; The following test fails only in the build
2641 ;; container; skip it.
2643 ;; The following test was found flaky and removed in
2645 "and not test_stateful_metrics"))))))))
2648 python-keras-applications
2649 python-keras-preprocessing
2662 python-pytest-timeout
2666 (home-page "https://github.com/keras-team/keras")
2667 (synopsis "High-level deep learning framework")
2668 (description "Keras is a high-level neural networks API, written in Python
2669 and capable of running on top of TensorFlow. It was developed with a focus on
2670 enabling fast experimentation. Use Keras if you need a deep learning library
2673 @item Allows for easy and fast prototyping (through user friendliness,
2674 modularity, and extensibility).
2675 @item Supports both convolutional networks and recurrent networks, as well as
2676 combinations of the two.
2677 @item Runs seamlessly on CPU and GPU.
2679 (license license:expat)))
;; Gloo collective-communications library, built from an untagged commit
;; (hence the "0.0.0" base version plus git-version).  Tests are built via
;; -DBUILD_TEST=1 and compiled with "make gloo_test" in the check phase.
2682 (let ((version "0.0.0") ; no proper version tag
2683 (commit "c22a5cfba94edf8ea4f53a174d38aa0c629d070f")
2687 (version (git-version version revision commit))
2692 (url "https://github.com/facebookincubator/gloo")
2694 (file-name (git-file-name name version))
2697 "1crmqgybzkgkpbmcx16912gsl5qsj49swa0ikx6mhqgph0chrh11"))))
2698 (build-system cmake-build-system)
2704 `(#:configure-flags '("-DBUILD_TEST=1")
2706 (modify-phases %standard-phases
2708 (lambda* (#:key tests? #:allow-other-keys)
2710 (invoke "make" "gloo_test")))))))
2711 (synopsis "Collective communications library")
2713 "Gloo is a collective communications library. It comes with a
2714 number of collective algorithms useful for machine learning applications.
2715 These include a barrier, broadcast, and allreduce.")
2716 (home-page "https://github.com/facebookincubator/gloo")
2717 (license license:bsd-3))))
;; UMAP dimension-reduction library, fetched from git because the PyPI
;; release lacks tests.  Numba's JIT cache and HOME are redirected to /tmp
;; so the check phase can write in the build container.
2719 (define-public python-umap-learn
2721 (name "python-umap-learn")
2725 (method git-fetch) ;no tests in pypi release
2727 (url "https://github.com/lmcinnes/umap")
2729 (file-name (git-file-name name version))
2732 "1315jkb0h1b579y9m59632f0nnpksilm01nxx46in0rq8zna8vsb"))))
2733 (build-system python-build-system)
2737 #~(modify-phases %standard-phases
2738 ;; Numba needs a writable dir to cache functions.
2739 (add-before 'check 'set-numba-cache-dir
2741 (setenv "NUMBA_CACHE_DIR" "/tmp")))
2743 (lambda* (#:key tests? #:allow-other-keys)
2745 (setenv "HOME" "/tmp")
2746 (invoke "pytest" "-vv" "umap"
2747 ;; This test can fail because trust may only be
2748 ;; 0.9679405204460967 >= 0.97
2749 "-k" "not test_densmap_trustworthiness_on_iris_supervised")))))))
2750 (native-inputs (list python-pytest))
2758 (home-page "https://github.com/lmcinnes/umap")
2759 (synopsis "Uniform Manifold Approximation and Projection")
2760 (description "Uniform Manifold Approximation and Projection is a dimension
2761 reduction technique that can be used for visualization similarly to t-SNE, but
2762 also for general non-linear dimension reduction.")
2763 (license license:bsd-3)))
;; NNPACK convnet-layer acceleration library, built from an untagged commit;
;; a patch makes it use system libraries instead of bundled ones.  Tests are
;; disabled (see the XXX note below about unsupported-hardware failures).
2765 (define-public nnpack
2766 (let ((version "0.0")
2767 (commit "c07e3a0400713d546e0dea2d5466dd22ea389c73")
2771 (version (git-version version revision commit))
2772 (home-page "https://github.com/Maratyszcza/NNPACK")
2775 (uri (git-reference (url home-page) (commit commit)))
2776 (file-name (git-file-name name version))
2779 "0s0kk3a35w3yzf0q447p72350sbsh4qhg6vm3y2djbj4xpg7jc8v"))
2780 (patches (search-patches "nnpack-system-libraries.patch"))))
2781 (build-system cmake-build-system)
2782 ;; XXX: The test suite runs but it's very expensive, and on x86_64 CPUs
2783 ;; that lack the right ISA extensions, tests fail with:
2785 ;; Expected equality of these values:
2786 ;; nnp_status_success
2791 ;; where 51 is 'nnp_status_unsupported_hardware'.
2792 (arguments '(#:tests? #f))
2793 (synopsis "Acceleration package for neural network computations")
2795 "NNPACK is an acceleration package for neural network computations.
2796 NNPACK aims to provide high-performance implementations of convnet layers for
2799 NNPACK is not intended to be directly used by machine learning researchers;
2800 instead it provides low-level performance primitives leveraged in leading deep
2801 learning frameworks, such as PyTorch, Caffe2, MXNet, tiny-dnn, Caffe, Torch,
2811 (list python python-peachpy python-six))
2812 (license license:bsd-2))))
;; XNNPACK inference-operator library (fork of QNNPACK), built as a shared
;; library against system libraries.  Tests and benchmarks are disabled due
;; to the CMake target clash described in the FIXME below.
2814 (define-public xnnpack
2815 ;; There's currently no tag on this repo.
2816 (let ((version "0.0")
2817 (commit "ae108ef49aa5623b896fc93d4298c49d1750d9ba")
2821 (version (git-version version revision commit))
2822 (home-page "https://github.com/google/XNNPACK") ;fork of QNNPACK
2825 (uri (git-reference (url home-page) (commit commit)))
2826 (file-name (git-file-name name version))
2829 "0q68q2jxiiiblx45q4337k13ppgh5vqjwrwznchcnpb8hawjj3zl"))
2830 (patches (search-patches "xnnpack-system-libraries.patch"))))
2831 (build-system cmake-build-system)
2833 '(#:configure-flags '("-DXNNPACK_USE_SYSTEM_LIBS=YES"
2834 "-DBUILD_SHARED_LIBS=ON"
2835 "-DXNNPACK_LIBRARY_TYPE=shared"
2836 "-DXNNPACK_BUILD_TESTS=FALSE" ;FIXME: see below
2837 "-DXNNPACK_BUILD_BENCHMARKS=FALSE")
2839 ;; FIXME: Building tests leads to a CMake error:
2841 ;; ADD_LIBRARY cannot create target "all_microkernels" because
2842 ;; another target with the same name already exists.
2852 (synopsis "Optimized floating-point neural network inference operators")
2854 "XNNPACK is a highly optimized library of floating-point neural network
2855 inference operators for ARM, WebAssembly, and x86 platforms. XNNPACK is not
2856 intended for direct use by deep learning practitioners and researchers;
2857 instead it provides low-level performance primitives for accelerating
2858 high-level machine learning frameworks, such as TensorFlow Lite,
2859 TensorFlow.js, PyTorch, and MediaPipe.")
2860 (license license:bsd-3))))
2862 ;; Please also update python-torchvision when updating this package.
;; CPU-only PyTorch built with USE_SYSTEM_LIBS: most bundled third_party
;; directories are deleted in a source snippet, and post-install phases
;; strip test executables and the deprecated caffe2/onnx converter scripts.
2863 (define-public python-pytorch
2865 (name "python-pytorch")
2870 (url "https://github.com/pytorch/pytorch")
2871 (commit (string-append "v" version))
2873 (file-name (git-file-name name version))
2876 "0pdqi91qzgyx947zv4pw2fdj9vpqvdhfzw1ydjd4mpqm8g5njgnz"))
2877 (patches (search-patches "python-pytorch-system-libraries.patch"
2878 "python-pytorch-runpath.patch"))
2879 (modules '((guix build utils)))
2882 ;; XXX: Let's be clear: this package is a bundling fest. We
2883 ;; delete as much as we can, but there's still a lot left.
2884 (for-each (lambda (directory)
2885 (delete-file-recursively
2886 (string-append "third_party/" directory)))
2887 '("benchmark" "cpuinfo" "eigen"
2889 ;; FIXME: QNNPACK (of which XNNPACK is a fork)
2891 ;; "FP16" "FXdiv" "gemmlowp" "psimd"
2893 "gloo" "googletest" "ios-cmake" "NNPACK"
2894 "onnx" "protobuf" "pthreadpool"
2895 "pybind11" "python-enum" "python-peachpy"
2896 "python-six" "tbb" "XNNPACK" "zstd"))))))
2897 (build-system python-build-system)
2899 '(#:phases (modify-phases %standard-phases
2900 (add-before 'build 'use-system-libraries
2901 (lambda* (#:key outputs #:allow-other-keys)
2902 ;; Tell 'setup.py' to let 'CMakeLists.txt' know that we
2903 ;; want to use "system libraries" instead of the bundled
2905 (setenv "USE_SYSTEM_LIBS" "1")
2907 (substitute* "cmake/Dependencies.cmake"
2908 (("if\\(USE_SYSTEM_BIND11\\)")
2911 ;; XXX: Disable that for simplicity for now.
2912 (setenv "USE_FBGEMM" "0")))
2913 (add-before 'build 'make-things-writable
2915 ;; The 'build_caffe2' function in
2916 ;; 'tools/build_pytorch_libs.py', called from the
2917 ;; top-level 'setup.py', needs write access to this
2919 (for-each make-file-writable
2920 (find-files "caffe2/proto" "."
2921 #:directories? #t))))
2923 (lambda* (#:key inputs outputs tests? #:allow-other-keys)
2924 ;; Run the test suite following the instructions in
2925 ;; 'CONTRIBUTING.md'. XXX: Unfortunately this doesn't
2926 ;; work, unless you set GUIX_PYTHONPATH presumably.
2928 (add-installed-pythonpath inputs outputs)
2929 (invoke "python" "test/run_test.py"))))
2930 (add-after 'install 'remove-test-executables
2931 (lambda* (#:key inputs outputs #:allow-other-keys)
2932 ;; Remove test executables, but keep other executables
2933 ;; such as 'torch_shm_manager' and .so files such as
2934 ;; 'libtorch_global_deps.so'.
2935 (let ((python-site (site-packages inputs outputs)))
2936 (for-each delete-file
2937 (find-files python-site
2938 "(^test_cpp_rpc|_test)$")))))
2939 (add-after 'install 'remove-caffe2-onnx-scripts
2940 (lambda* (#:key outputs #:allow-other-keys)
2941 (let* ((out (assoc-ref outputs "out"))
2942 (bin (string-append out "/bin")))
2943 ;; Remove 'convert-caffe2-to-onnx' and
2944 ;; 'convert-onnx-to-caffe2': they seem to be
2945 ;; deprecated and they cause a failure of the
2946 ;; 'sanity-check' phase:
2948 ;; ImportError: cannot import name 'metanet_pb2' from partially initialized module 'caffe2.proto' (most likely due to a circular import)
2949 (for-each delete-file
2950 (find-files bin "^convert-.*caffe2"))
2952 (substitute* (find-files out "^entry_points\\.txt$")
2953 (("^convert-.*" all)
2954 (string-append "# " all "\n")))))))
2956 ;; XXX: Tests attempt to download data such as
2957 ;; <https://raw.githubusercontent.com/pytorch/test-infra/master/stats/slow-tests.json>.
2958 ;; We're also missing some Python modules, such as expecttest.
2980 (list python-astunparse
2985 python-typing-extensions
2989 onnx ;propagated for its Python modules
2992 (home-page "https://pytorch.org/")
2993 (synopsis "Python library for tensor computation and deep neural networks")
2995 "PyTorch is a Python package that provides two high-level features:
2998 @item tensor computation (like NumPy) with strong GPU acceleration;
2999 @item deep neural networks (DNNs) built on a tape-based autograd system.
3002 You can reuse Python packages such as NumPy, SciPy, and Cython to extend
3003 PyTorch when needed.
3005 Note: currently this package does not provide GPU support.")
3006 (license license:bsd-3)))
;; Variant of python-pytorch pinned to the version expected by the r-torch
;; package (see liblantern below); inherits everything but the source, which
;; repeats the same third_party unbundling snippet for the older checkout.
3008 (define-public python-pytorch-for-r-torch
3010 (inherit python-pytorch)
3011 (name "python-pytorch")
3016 (url "https://github.com/pytorch/pytorch")
3017 (commit (string-append "v" version))
3019 (file-name (git-file-name name version))
3022 "1zbk7y74r0ycsfa7x59jnhwhs1gj5rs3n89p15y0212iszgbljq8"))
3023 (patches (search-patches "python-pytorch-system-libraries.patch"
3024 "python-pytorch-runpath.patch"))
3025 (modules '((guix build utils)))
3028 ;; XXX: Let's be clear: this package is a bundling fest. We
3029 ;; delete as much as we can, but there's still a lot left.
3030 (for-each (lambda (directory)
3031 (delete-file-recursively
3032 (string-append "third_party/" directory)))
3033 '("benchmark" "cpuinfo" "eigen"
3035 ;; FIXME: QNNPACK (of which XNNPACK is a fork)
3037 ;; "FP16" "FXdiv" "gemmlowp" "psimd"
3039 "gloo" "googletest" "ios-cmake" "NNPACK"
3040 "onnx" "protobuf" "pthreadpool"
3041 "pybind11" "python-enum" "python-peachpy"
3042 "python-six" "tbb" "XNNPACK" "zstd"))))))))
3044 ;; Keep this in sync with python-pytorch
;; Torchvision: datasets, model architectures and image transforms for
;; PyTorch.  Tests are disabled (expensive test suite; see #:tests? note).
;; Fix: the synopsis string began with a stray space, which violates the
;; Guix packaging guidelines checked by 'guix lint' (no leading whitespace
;; in synopses).
3045 (define-public python-torchvision
3047 (name "python-torchvision")
3052 (url "https://github.com/pytorch/vision")
3053 (commit (string-append "v" version))
3055 (file-name (git-file-name name version))
3058 "19f6s3ffwkdvjjbvib18c8n7vhysg58smxzq3rvii1c0z4g3b0cw"))))
3059 (build-system python-build-system)
3061 `(#:tests? #false ;the test suite is expensive and there is no easy way
3064 (modify-phases %standard-phases
3066 (lambda* (#:key tests? #:allow-other-keys)
3068 (invoke "pytest" "-vv")))))))
3074 python-typing-extensions
3080 (list which python-pytest))
3081 (home-page "https://pytorch.org/vision/stable/index.html")
3082 (synopsis "Datasets, transforms and models specific to computer vision")
3084 "The torchvision package consists of popular datasets, model architectures,
3085 and common image transformations for computer vision.")
3086 (license license:bsd-3)))
;; Pure-Python reader for Torch7's binary serialization format; no tests
;; are shipped, hence #:tests? #false.
3088 (define-public python-torchfile
3090 (name "python-torchfile")
3094 (uri (pypi-uri "torchfile" version))
3097 "0vhklj6krl9r0kdynb4kcpwp8y1ihl2zw96byallay3k9c9zwgd5"))))
3098 (build-system python-build-system)
3099 (arguments '(#:tests? #false)) ;there are no tests
3101 (list python-numpy))
3102 (home-page "https://github.com/bshillingford/python-torchfile")
3103 (synopsis "Torch7 binary serialized file parser")
3104 (description "This package enables you to deserialize Lua torch-serialized objects from
3106 (license license:bsd-3)))
;; hmmlearn: Hidden Markov Models with a scikit-learn style API.  The
;; pre-generated Cython C file is deleted so it gets regenerated from
;; source; tests are run against the installed copy under out/lib.
3108 (define-public python-hmmlearn
3110 (name "python-hmmlearn")
3115 (uri (pypi-uri "hmmlearn" version))
3118 "1my0j3rzp17438idr32ssh0j969a98yjblx5igx5kgiiigr9qa1a"))
;; NOTE(review): the doubled parentheses around the module name are
;; accepted by Guile's use-modules but the conventional form is
;; (use-modules (guix build utils)) — confirm intent.
3121 (use-modules ((guix build utils)))
3122 (delete-file "lib/hmmlearn/_hmmc.c")))))
3123 (build-system python-build-system)
3126 (modify-phases %standard-phases
3128 (lambda* (#:key inputs outputs tests? #:allow-other-keys)
3130 (add-installed-pythonpath inputs outputs)
3131 (with-directory-excursion (string-append (assoc-ref outputs "out") "/lib")
3132 (invoke "python" "-m" "pytest"))))))))
3134 (list python-cython python-numpy python-scikit-learn python-scipy
3135 python-setuptools-scm))
3137 (list python-pytest))
3138 (home-page "https://github.com/hmmlearn/hmmlearn")
3139 (synopsis "Hidden Markov Models with scikit-learn like API")
3141 "Hmmlearn is a set of algorithms for unsupervised learning and inference
3142 of Hidden Markov Models.")
3143 (license license:bsd-3)))
3145 ;; Keep this in sync with the r-torch package.
;; liblantern: the C wrapper around libtorch used by the R 'torch' package.
;; Instead of letting CMake download prebuilt binaries, the build points
;; LIBRARY_PATH and the include paths at the python-pytorch-for-r-torch
;; input's site-packages torch tree and installs the resulting
;; liblantern.so and headers manually.
3146 (define-public liblantern
3154 (url "https://github.com/mlverse/torch")
3155 (commit (string-append "v" version))))
3156 (file-name (git-file-name name version))
3158 (base32 "1q57jsqzv9b70svr6whf04yd08np3x15qq1zm06fbl0sqf0kfn7p"))))
3159 (build-system cmake-build-system)
3162 #:tests? #false ;no test target
3164 (let ((python-version (version-major+minor (package-version python))))
3165 #~(modify-phases %standard-phases
3166 (add-after 'unpack 'chdir
3167 (lambda _ (chdir "lantern")))
3168 (add-after 'chdir 'do-not-download-binaries
3169 (lambda* (#:key inputs #:allow-other-keys)
3170 (substitute* "CMakeLists.txt"
3171 (("find_package\\(Torch.*") "set(TORCH_CXX_FLAGS \"-ltorch\")\n")
3172 (("retrieve_lib\\(.*") ""))
3173 (let ((site-packages (string-append "/lib/python"
3176 (setenv "LIBRARY_PATH"
3178 (search-input-directory
3179 inputs (string-append site-packages "/torch/lib"))
3180 ":" (or (getenv "LIBRARY_PATH") "")))
3181 (setenv "CPLUS_INCLUDE_PATH"
3183 (search-input-directory
3184 inputs (string-append
3185 site-packages "/torch/include/torch/csrc/api/include/"))
3187 (search-input-directory
3188 inputs (string-append site-packages "/torch/include/"))
3190 (or (getenv "CPLUS_INCLUDE_PATH") "")))
3191 (setenv "C_INCLUDE_PATH"
3193 (search-input-directory
3194 inputs (string-append site-packages "/torch/include/"))
3196 (or (getenv "C_INCLUDE_PATH") ""))))))
3200 "../build/liblantern.so"
3201 (string-append #$output "/lib"))
3203 "../lantern/include"
3204 (string-append #$output "/include"))))))))
3205 (inputs (list python-pytorch-for-r-torch))
3206 (home-page "https://github.com/mlverse/torch/")
3207 (synopsis "C API to libtorch")
3209 "Lantern provides a C API to the libtorch machine learning library.")
3210 (license license:expat)))
;; Linear assignment problem solver (Cython extension).  The build pins the
;; NumPy CPU baseline to SSE2, and tests are copied to /tmp because they
;; cannot run from the source tree.
;; Fix: dropped the trailing period from the synopsis, which violates the
;; Guix packaging guidelines checked by 'guix lint' (synopses must not end
;; with a period).
3212 (define-public python-lap
3218 (uri (pypi-uri "lap" version))
3221 "0fqfxpq4jg9h4wxjw540gjmvfg1ccc1nssk7i9njg7qfdybxknn4"))))
3222 (build-system python-build-system)
3225 (modify-phases %standard-phases
3227 (lambda* (#:key inputs #:allow-other-keys)
3228 (invoke "python" "setup.py" "build"
3229 "--cpu-baseline=sse2")))
3231 (lambda* (#:key tests? #:allow-other-keys)
3233 ;; The tests must be run from elsewhere.
3234 (mkdir-p "/tmp/test")
3235 (copy-recursively "lap/tests" "/tmp/test")
3236 (with-directory-excursion "/tmp/test"
3237 (invoke "pytest" "-vv"))))))))
3242 (list python-cython python-pytest))
3243 (home-page "https://github.com/gatagat/lap")
3244 (synopsis "Linear Assignment Problem solver (LAPJV/LAPMOD)")
3245 (description "Lap is a linear assignment problem solver using Jonker-Volgenant
3246 algorithm for dense (LAPJV) or sparse (LAPMOD) matrices.")
3247 (license license:bsd-2)))
;; Visdom: live visualization server for Torch and NumPy data, straight
;; from PyPI with no custom phases.
3249 (define-public python-visdom
3251 (name "python-visdom")
3255 (uri (pypi-uri "visdom" version))
3258 "09kiczx2i5asqsv214fz7sx8wlyldgbqvxwrd0alhjn24cvx4fn7"))))
3259 (build-system python-build-system)
3261 (list python-jsonpatch
3270 python-websocket-client))
3271 (home-page "https://github.com/fossasia/visdom")
3272 (synopsis "Visualizations of live, rich data for Torch and Numpy")
3274 "This package provides a tool for visualizing live, rich data for Torch
3276 (license license:asl2.0)))
;; Backend-dispatch API used by Pyro; tests are disabled because they would
;; require python-pyro-ppl, which depends on this package.
;; Fix: dropped the trailing period from the synopsis, which violates the
;; Guix packaging guidelines checked by 'guix lint' (synopses must not end
;; with a period).
3278 (define-public python-pyro-api
3280 (name "python-pyro-api")
3284 (uri (pypi-uri "pyro-api" version))
3287 "086r2h6x9i5d9ayl1x65lx6p84rlydzsn8xingxc588ab3ch1fd1"))))
3288 (build-system python-build-system)
3289 (arguments '(#:tests? #false)) ;requires pyro
3295 python-sphinx-rtd-theme))
3296 (home-page "https://github.com/pyro-ppl/pyro-api")
3297 (synopsis "Generic API for dispatch to Pyro backends")
3298 (description "This package provides a generic API for dispatch to Pyro backends.")
3299 (license license:asl2.0)))
;; Pyro probabilistic programming library, fetched from git for its tests.
;; The check phase deletes tests needing non-free MKL/CUDA features and a
;; couple of known-unreliable files, then runs the unit-stage tests only.
3301 (define-public python-pyro-ppl
3303 (name "python-pyro-ppl")
3305 ;; The sources on pypi don't include tests.
3310 (url "https://github.com/pyro-ppl/pyro")
3312 (file-name (git-file-name name version))
3314 (base32 "0ns20mr8qgjshzbplrfzaz1xhb9ldbgvrj2rzlsxvns2bi1ddyl5"))))
3315 (build-system python-build-system)
3318 (modify-phases %standard-phases
3320 (lambda* (#:key tests? #:allow-other-keys)
3321 ;; This tests features that are only implemented when non-free
3322 ;; software is available (Intel MKL or CUDA).
3323 (for-each delete-file
3324 (list "tests/distributions/test_spanning_tree.py"
3325 "tests/infer/mcmc/test_mcmc_api.py"))
3327 ;; Four test_gamma_elbo tests fail with bad values for unknown
3329 (delete-file "tests/distributions/test_rejector.py")
3330 ;; This test fails sometimes.
3331 (delete-file "tests/optim/test_optim.py")
3332 (invoke "pytest" "-vv" "--stage=unit"))))))
3363 python-sphinx-rtd-theme
3368 (home-page "https://pyro.ai")
3369 (synopsis "Python library for probabilistic modeling and inference")
3371 "This package provides a Python library for probabilistic modeling and
3373 (license license:asl2.0)))