% Default to the notebook output style
% Inherit from the specified cell style.
\documentclass[11pt]{article}
\usepackage[T1]{fontenc}
% Nicer default font (+ math font) than Computer Modern for most use cases
\usepackage{mathpazo}
% Basic figure setup, for now with no caption control since it's done
% automatically by Pandoc (which extracts ![](path) syntax from Markdown).
\usepackage{graphicx}
% We will generate all images so they have a width \maxwidth. This means
% that they will get their normal width if they fit onto the page, but
% are scaled down if they would overflow the margins.
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth
\else\Gin@nat@width\fi}
\makeatother
\let\Oldincludegraphics\includegraphics
% Set max figure width to be 80% of text width, for now hardcoded.
\renewcommand{\includegraphics}[1]{\Oldincludegraphics[width=.8\maxwidth]{#1}}
% Ensure that by default, figures have no caption (until we provide a
% proper Figure object with a Caption API and a way to capture that
% in the conversion process - todo).
\usepackage{caption}
\DeclareCaptionLabelFormat{nolabel}{}
\captionsetup{labelformat=nolabel}
\usepackage{adjustbox} % Used to constrain images to a maximum size
\usepackage{xcolor} % Allow colors to be defined
\usepackage{enumerate} % Needed for markdown enumerations to work
\usepackage{geometry} % Used to adjust the document margins
\usepackage{amsmath} % Equations
\usepackage{amssymb} % Equations
\usepackage{textcomp} % defines textquotesingle
% Hack from http://tex.stackexchange.com/a/47451/13684:
\AtBeginDocument{%
\def\PYZsq{\textquotesingle}% Upright quotes in Pygmentized code
}
\usepackage{upquote} % Upright quotes for verbatim code
\usepackage{eurosym} % defines \euro
\usepackage[mathletters]{ucs} % Extended unicode (utf-8) support
\usepackage[utf8x]{inputenc} % Allow utf-8 characters in the tex document
\usepackage{fancyvrb} % verbatim replacement that allows latex
\usepackage{grffile} % extends the file name processing of package graphics
% to support a larger range
% The hyperref package gives us a pdf with properly built
% internal navigation ('pdf bookmarks' for the table of contents,
% internal cross-reference links, web links for URLs, etc.)
\usepackage{hyperref}
\usepackage{longtable} % longtable support required by pandoc >1.10
\usepackage{booktabs} % table support for pandoc > 1.12.2
\usepackage[inline]{enumitem} % IRkernel/repr support (it uses the enumerate* environment)
\usepackage[normalem]{ulem} % ulem is needed to support strikethroughs (\sout)
% normalem makes italics be italics, not underlines
% Colors for the hyperref package
\definecolor{urlcolor}{rgb}{0,.145,.698}
\definecolor{linkcolor}{rgb}{.71,0.21,0.01}
\definecolor{citecolor}{rgb}{.12,.54,.11}
% ANSI colors
\definecolor{ansi-black}{HTML}{3E424D}
\definecolor{ansi-black-intense}{HTML}{282C36}
\definecolor{ansi-red}{HTML}{E75C58}
\definecolor{ansi-red-intense}{HTML}{B22B31}
\definecolor{ansi-green}{HTML}{00A250}
\definecolor{ansi-green-intense}{HTML}{007427}
\definecolor{ansi-yellow}{HTML}{DDB62B}
\definecolor{ansi-yellow-intense}{HTML}{B27D12}
\definecolor{ansi-blue}{HTML}{208FFB}
\definecolor{ansi-blue-intense}{HTML}{0065CA}
\definecolor{ansi-magenta}{HTML}{D160C4}
\definecolor{ansi-magenta-intense}{HTML}{A03196}
\definecolor{ansi-cyan}{HTML}{60C6C8}
\definecolor{ansi-cyan-intense}{HTML}{258F8F}
\definecolor{ansi-white}{HTML}{C5C1B4}
\definecolor{ansi-white-intense}{HTML}{A1A6B2}
% commands and environments needed by pandoc snippets
% extracted from the output of `pandoc -s`
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\newenvironment{Shaded}{}{}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.56,0.13,0.00}{{#1}}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textit{{#1}}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{{#1}}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.02,0.16,0.49}{{#1}}}
\newcommand{\RegionMarkerTok}[1]{{#1}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\NormalTok}[1]{{#1}}
% Additional commands for more recent versions of Pandoc
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.53,0.00,0.00}{{#1}}}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.73,0.40,0.53}{{#1}}}
\newcommand{\ImportTok}[1]{{#1}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.73,0.13,0.13}{\textit{{#1}}}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.10,0.09,0.49}{{#1}}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.40,0.40,0.40}{{#1}}}
\newcommand{\BuiltInTok}[1]{{#1}}
\newcommand{\ExtensionTok}[1]{{#1}}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.74,0.48,0.00}{{#1}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.49,0.56,0.16}{{#1}}}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
% Define a nice break command that doesn't care if a line doesn't already
% exist.
\def\br{\hspace*{\fill} \\* }
% MathJax compatibility definitions
\def\gt{>}
\def\lt{<}
% Document parameters
\title{estimator\_models}
% Pygments definitions
\makeatletter
\def\PY@reset{\let\PY@it=\relax \let\PY@bf=\relax%
\let\PY@ul=\relax \let\PY@tc=\relax%
\let\PY@bc=\relax \let\PY@ff=\relax}
\def\PY@tok#1{\csname PY@tok@#1\endcsname}
\def\PY@toks#1+{\ifx\relax#1\empty\else%
\PY@tok{#1}\expandafter\PY@toks\fi}
\def\PY@do#1{\PY@bc{\PY@tc{\PY@ul{%
\PY@it{\PY@bf{\PY@ff{#1}}}}}}}
\def\PY#1#2{\PY@reset\PY@toks#1+\relax+\PY@do{#2}}
\expandafter\def\csname PY@tok@w\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}}
\expandafter\def\csname PY@tok@c\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.74,0.48,0.00}{##1}}}
\expandafter\def\csname PY@tok@k\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.69,0.00,0.25}{##1}}}
\expandafter\def\csname PY@tok@o\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@ow\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\expandafter\def\csname PY@tok@nb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@nf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@nc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@nn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@ne\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.82,0.25,0.23}{##1}}}
\expandafter\def\csname PY@tok@nv\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@no\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@nl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.63,0.00}{##1}}}
\expandafter\def\csname PY@tok@ni\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.60,0.60,0.60}{##1}}}
\expandafter\def\csname PY@tok@na\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.49,0.56,0.16}{##1}}}
\expandafter\def\csname PY@tok@nt\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@nd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\expandafter\def\csname PY@tok@s\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sd\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@si\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}}
\expandafter\def\csname PY@tok@se\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.13}{##1}}}
\expandafter\def\csname PY@tok@sr\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}}
\expandafter\def\csname PY@tok@ss\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@sx\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@m\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@gh\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@gu\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@gd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@gi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}}
\expandafter\def\csname PY@tok@gr\endcsname{\def\PY@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@ge\endcsname{\let\PY@it=\textit}
\expandafter\def\csname PY@tok@gs\endcsname{\let\PY@bf=\textbf}
\expandafter\def\csname PY@tok@gp\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@go\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
\expandafter\def\csname PY@tok@gt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}}
\expandafter\def\csname PY@tok@err\endcsname{\def\PY@bc##1{\setlength{\fboxsep}{0pt}\fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{\strut ##1}}}
\expandafter\def\csname PY@tok@kc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kd\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kr\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@bp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@fm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@vc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@vg\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@vi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@vm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@sa\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@dl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@s2\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@s1\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@mb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@il\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mo\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@ch\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cm\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cpf\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@c1\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cs\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\def\PYZbs{\char`\\}
\def\PYZus{\char`\_}
\def\PYZob{\char`\{}
\def\PYZcb{\char`\}}
\def\PYZca{\char`\^}
\def\PYZam{\char`\&}
\def\PYZlt{\char`\<}
\def\PYZgt{\char`\>}
\def\PYZsh{\char`\#}
\def\PYZpc{\char`\%}
\def\PYZdl{\char`\$}
\def\PYZhy{\char`\-}
\def\PYZsq{\char`\'}
\def\PYZdq{\char`\"}
\def\PYZti{\char`\~}
% for compatibility with earlier versions
\def\PYZat{@}
\def\PYZlb{[}
\def\PYZrb{]}
\makeatother
% Exact colors from NB
\definecolor{incolor}{rgb}{0.0, 0.0, 0.5}
\definecolor{outcolor}{rgb}{0.545, 0.0, 0.0}
% Prevent overflowing lines due to hard-to-break entities
\sloppy
% Setup hyperref package
\hypersetup{
breaklinks=true, % so long urls are correctly broken across lines
colorlinks=true,
urlcolor=urlcolor,
linkcolor=linkcolor,
citecolor=citecolor,
}
% Slightly bigger margins than the latex defaults
\geometry{verbose,tmargin=1in,bmargin=1in,lmargin=1in,rmargin=1in}
\begin{document}
\maketitle
\section{Deep learning with tf.data and
tf.estimators}\label{deep-learning-with-tf.data-and-tf.estimators}
Since the recent 2018 Google I/O I have been meaning to do a tutorial on
tensorflow's new data pipelines and on the estimator class that was
introduced a couple of versions ago. Google is pushing tensorflow to be
an easy-to-use framework without a steep learning curve, at least for
achieving elementary results, and both libraries that we are going to
showcase in this notebook strive for that. Tf.data replaces the
old-fashioned feed\_dict approach within the tf.Session and streamlines
the data input flow. Tf.estimator acts as a wrapper for any deep
learning model that has tensorflow under the hood: it takes care of
training, evaluation and prediction with convenience functions on top of
your model. We are also going to try one of the out-of-the-box
classifiers that Google has developed, DNNClassifier, and test how well
it performs.
In order to examine these libraries we are going to tackle the Kaggle
Titanic problem: based on features such as the ticket fare, the
passenger's age, the ticket class and so on, we will try to predict
whether the passenger survived. Most of the good solutions on Kaggle
achieve around 75-85\% accuracy on this problem with extensive feature
engineering. We are not going to bother with feature engineering here,
since our purpose is not to break the Kaggle record. Let's take a look
at the dataset:
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}3}]:} \PY{c+c1}{\PYZsh{} Necessary imports}
\PY{k+kn}{import} \PY{n+nn}{tensorflow} \PY{k}{as} \PY{n+nn}{tf}
\PY{k+kn}{import} \PY{n+nn}{pandas} \PY{k}{as} \PY{n+nn}{pd}
\PY{k+kn}{import} \PY{n+nn}{numpy} \PY{k}{as} \PY{n+nn}{np}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}4}]:} \PY{c+c1}{\PYZsh{} Load the dataset into memory and show the first 5 records}
\PY{n}{data} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{read\PYZus{}csv}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{/Users/Blackbak/giannis\PYZus{}home/python\PYZus{}folder/titanic\PYZus{}dataset.csv}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{data}\PY{o}{.}\PY{n}{head}\PY{p}{(}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}4}]:} survived pclass name sex \textbackslash{}
0 1 1 Allen, Miss. Elisabeth Walton female
1 1 1 Allison, Master. Hudson Trevor male
2 0 1 Allison, Miss. Helen Loraine female
3 0 1 Allison, Mr. Hudson Joshua Creighton male
4 0 1 Allison, Mrs. Hudson J C (Bessie Waldo Daniels) female
age sibsp parch ticket fare
0 29.0000 0 0 24160 211.3375
1 0.9167 1 2 113781 151.5500
2 2.0000 1 2 113781 151.5500
3 30.0000 1 2 113781 151.5500
4 25.0000 1 2 113781 151.5500
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}5}]:} \PY{c+c1}{\PYZsh{} Split the dataset into train and test for us to evaluate the generalization of our models}
\PY{n}{train} \PY{o}{=} \PY{n}{data}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{p}{:}\PY{n+nb}{int}\PY{p}{(}\PY{n}{data}\PY{o}{.}\PY{n}{shape}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{o}{*}\PY{l+m+mf}{0.8}\PY{p}{)}\PY{p}{]}
\PY{n}{test} \PY{o}{=} \PY{n}{data}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{n+nb}{int}\PY{p}{(}\PY{n}{data}\PY{o}{.}\PY{n}{shape}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{o}{*}\PY{l+m+mf}{0.8}\PY{p}{)}\PY{p}{:}\PY{p}{]}
\end{Verbatim}
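A small caveat on this split: it is sequential, and if the csv happens
to be ordered (the head above suggests it is sorted by pclass) the test
set will not be representative of the whole dataset. A shuffled split is
a safer default; a minimal sketch with pandas:

\begin{Verbatim}
# Shuffle the rows before splitting so that train and test
# follow the same distribution (random_state for reproducibility).
data = data.sample(frac=1, random_state=42).reset_index(drop=True)
train = data.iloc[:int(data.shape[0]*0.8)]
test = data.iloc[int(data.shape[0]*0.8):]
\end{Verbatim}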
The first thing we need to do is specify our input pipeline. The
pipeline ties into the estimator class because we need to feed the
estimator data in a specific way: the estimator takes as an argument an
input function that returns the next data to be trained on or
evaluated. So what we basically need is a generator function that
outputs the next batch of data for batch training and testing, or the
next data point for online learning. If you are not familiar with
generators I would suggest watching
\href{https://www.youtube.com/watch?v=cKPlPJyQrt4}{this youtube video}
(actually I would suggest it to everyone regardless); a minimal
pure-python sketch also follows the loading examples below. Thankfully
Google has provided us with the necessary tools that make this task very
easy. But first things first: we need to specify the data to load.
Depending on how the data are stored, there are different functions for
loading them into the tf.data.Dataset class. Below are the most common
ones:
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{c+c1}{\PYZsh{} If the data are stored in the default format of tensorflow TFRecords}
\PY{n}{files} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{data}\PY{o}{.}\PY{n}{Dataset}\PY{o}{.}\PY{n}{list\PYZus{}files}\PY{p}{(}\PY{n}{file\PYZus{}pattern}\PY{p}{)}
\PY{n}{dataset} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{data}\PY{o}{.}\PY{n}{TFRecordDataset}\PY{p}{(}\PY{n}{files}\PY{p}{)}
\PY{c+c1}{\PYZsh{} If the data are stored in one or multiple csv files}
\PY{n}{dataset} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{contrib}\PY{o}{.}\PY{n}{data}\PY{o}{.}\PY{n}{make\PYZus{}csv\PYZus{}dataset}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{*.csv}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{c+c1}{\PYZsh{} path to the csv file/files}
\PY{n}{batch\PYZus{}size}\PY{o}{=}\PY{l+m+mi}{32}\PY{p}{,} \PY{c+c1}{\PYZsh{} have to specify batch size in this step}
\PY{n}{column\PYZus{}names}\PY{o}{=}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{features}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{that}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{are}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{useful}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]}\PY{p}{,}
\PY{n}{label\PYZus{}name}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{label\PYZus{}column}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{c+c1}{\PYZsh{} If the data are in memory already in a dictionary}
\PY{n}{dataset} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{data}\PY{o}{.}\PY{n}{Dataset}\PY{o}{.}\PY{n}{from\PYZus{}tensor\PYZus{}slices}\PY{p}{(}\PY{n}{data\PYZus{}dict}\PY{p}{)}
\end{Verbatim}
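As an aside, the generator pattern that the input function will mimic
is easy to see in plain python; a minimal sketch with illustrative
names, no tensorflow involved:

\begin{Verbatim}
# A toy batch generator: each next() call yields the next slice.
def batch_generator(data, batch_size):
    for i in range(0, len(data), batch_size):
        yield data[i:i + batch_size]

gen = batch_generator(list(range(10)), batch_size=4)
print(next(gen))  # [0, 1, 2, 3]
print(next(gen))  # [4, 5, 6, 7]
\end{Verbatim}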
Once the data are loaded into a tf.data.Dataset, our goal is to develop
the generator function. Before we make our data iterable we need to
specify some key parameters of how we consume them, such as the number
of epochs, the batch size, whether we shuffle after each epoch and
whether we want to manipulate the input. As shown in their
\href{https://www.youtube.com/watch?v=uIcqeP7MFH0\&t=270s}{presentation
at Google}, the usual data pipeline looks something like this:
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{dataset} \PY{o}{=} \PY{n}{dataset}\PY{o}{.}\PY{n}{shuffle}\PY{p}{(}\PY{l+m+mi}{1000}\PY{p}{)} \PY{c+c1}{\PYZsh{} 1000 is the shuffle buffer size where it samples from}
\PY{n}{dataset} \PY{o}{=} \PY{n}{dataset}\PY{o}{.}\PY{n}{repeat}\PY{p}{(}\PY{n}{num\PYZus{}epochs}\PY{p}{)}
\PY{c+c1}{\PYZsh{} if some pre\PYZhy{}processing is needed we can do map and filter functions with the help of lambda}
\PY{c+c1}{\PYZsh{} the downside is that it is somewhat complex}
\PY{n}{dataset} \PY{o}{=} \PY{n}{dataset}\PY{o}{.}\PY{n}{map}\PY{p}{(}\PY{k}{lambda} \PY{n}{x}\PY{p}{:} \PY{n}{tf}\PY{o}{.}\PY{n}{parse\PYZus{}single\PYZus{}example}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{features}\PY{p}{)}\PY{p}{)}
\PY{n}{dataset} \PY{o}{=} \PY{n}{dataset}\PY{o}{.}\PY{n}{batch}\PY{p}{(}\PY{n}{batch\PYZus{}size}\PY{p}{)}
\PY{c+c1}{\PYZsh{} here we make the data iterable and call the next batch}
\PY{n}{iterator} \PY{o}{=} \PY{n}{dataset}\PY{o}{.}\PY{n}{make\PYZus{}one\PYZus{}shot\PYZus{}iterator}\PY{p}{(}\PY{p}{)}
\PY{n}{next\PYZus{}data} \PY{o}{=} \PY{n}{iterator}\PY{o}{.}\PY{n}{get\PYZus{}next}\PY{p}{(}\PY{p}{)}
\end{Verbatim}
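Note that file\_pattern, features, num\_epochs and batch\_size in the
snippets above are placeholders for your own values. To make the chain
concrete, here is a self-contained sketch of the same pipeline on toy
in-memory data (the names and values are made up):

\begin{Verbatim}
import numpy as np
import tensorflow as tf

data_dict = {"x": np.arange(8, dtype=np.float32)}   # 8 toy examples
dataset = tf.data.Dataset.from_tensor_slices(data_dict)
dataset = dataset.shuffle(8)                        # buffer covers all data
dataset = dataset.repeat(2)                         # two epochs
dataset = dataset.map(lambda d: {"x": d["x"] * 2})  # toy pre-processing
dataset = dataset.batch(4)
iterator = dataset.make_one_shot_iterator()
next_data = iterator.get_next()

with tf.Session() as sess:
    print(sess.run(next_data))  # one batch, e.g. {'x': [6., 0., 14., 4.]}
\end{Verbatim}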
At this point all we need to do is wrap these operations in a function
that can be passed as an argument in the estimator declaration. We will
also need two functions, one for training and one for evaluating. We
could use a single function and pass the data as an argument, but this
way is a bit clearer (a sketch of the single-function variant follows
the two definitions below). We are going to use only numerical features
to keep the notebook short.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}6}]:} \PY{k}{def} \PY{n+nf}{train\PYZus{}input\PYZus{}fn}\PY{p}{(}\PY{p}{)}\PY{p}{:}
\PY{n}{dataset} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{data}\PY{o}{.}\PY{n}{Dataset}\PY{o}{.}\PY{n}{from\PYZus{}tensor\PYZus{}slices}\PY{p}{(}\PY{p}{(}\PY{n}{train}\PY{p}{[}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{pclass}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{fare}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{age}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{sibsp}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{parch}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]}\PY{p}{]}\PY{o}{.}\PY{n}{to\PYZus{}dict}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{list}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}\PY{p}{,}
\PY{n}{train}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{survived}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]}\PY{o}{.}\PY{n}{values}\PY{p}{)}\PY{p}{)}
\PY{n}{dataset} \PY{o}{=} \PY{n}{dataset}\PY{o}{.}\PY{n}{shuffle}\PY{p}{(}\PY{l+m+mi}{1000}\PY{p}{)}\PY{o}{.}\PY{n}{repeat}\PY{p}{(}\PY{p}{)}\PY{o}{.}\PY{n}{batch}\PY{p}{(}\PY{l+m+mi}{100}\PY{p}{)}
\PY{n}{iterator} \PY{o}{=} \PY{n}{dataset}\PY{o}{.}\PY{n}{make\PYZus{}one\PYZus{}shot\PYZus{}iterator}\PY{p}{(}\PY{p}{)}
\PY{n}{feat\PYZus{}next}\PY{p}{,} \PY{n}{label\PYZus{}next} \PY{o}{=} \PY{n}{iterator}\PY{o}{.}\PY{n}{get\PYZus{}next}\PY{p}{(}\PY{p}{)}
\PY{k}{return} \PY{n}{feat\PYZus{}next}\PY{p}{,} \PY{n}{label\PYZus{}next}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}7}]:} \PY{k}{def} \PY{n+nf}{eval\PYZus{}input\PYZus{}fn}\PY{p}{(}\PY{p}{)}\PY{p}{:}
\PY{n}{dataset} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{data}\PY{o}{.}\PY{n}{Dataset}\PY{o}{.}\PY{n}{from\PYZus{}tensor\PYZus{}slices}\PY{p}{(}\PY{p}{(}\PY{n}{test}\PY{p}{[}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{pclass}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{fare}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{age}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{sibsp}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{parch}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]}\PY{p}{]}\PY{o}{.}\PY{n}{to\PYZus{}dict}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{list}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}\PY{p}{,}
\PY{n}{test}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{survived}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]}\PY{o}{.}\PY{n}{values}\PY{p}{)}\PY{p}{)}
\PY{n}{dataset} \PY{o}{=} \PY{n}{dataset}\PY{o}{.}\PY{n}{shuffle}\PY{p}{(}\PY{l+m+mi}{1000}\PY{p}{)}\PY{o}{.}\PY{n}{repeat}\PY{p}{(}\PY{p}{)}\PY{o}{.}\PY{n}{batch}\PY{p}{(}\PY{l+m+mi}{100}\PY{p}{)}
\PY{n}{iterator} \PY{o}{=} \PY{n}{dataset}\PY{o}{.}\PY{n}{make\PYZus{}one\PYZus{}shot\PYZus{}iterator}\PY{p}{(}\PY{p}{)}
\PY{n}{feat\PYZus{}next}\PY{p}{,} \PY{n}{label\PYZus{}next} \PY{o}{=} \PY{n}{iterator}\PY{o}{.}\PY{n}{get\PYZus{}next}\PY{p}{(}\PY{p}{)}
\PY{k}{return} \PY{n}{feat\PYZus{}next}\PY{p}{,} \PY{n}{label\PYZus{}next}
\end{Verbatim}
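For completeness, the single-function variant mentioned above could look
like the following sketch, where a closure captures the dataframe
(feat\_name is the feature list defined in the next cell):

\begin{Verbatim}
def make_input_fn(df, feat_name, label_name="survived", batch_size=100):
    # Returns an input_fn closed over the given dataframe.
    def input_fn():
        dataset = tf.data.Dataset.from_tensor_slices(
            (df[feat_name].to_dict("list"), df[label_name].values))
        dataset = dataset.shuffle(1000).repeat().batch(batch_size)
        return dataset.make_one_shot_iterator().get_next()
    return input_fn

train_input_fn = make_input_fn(train, feat_name)
eval_input_fn = make_input_fn(test, feat_name)
\end{Verbatim}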
Next we need to define our features for the estimator to understand its
input. This step makes sure that values get connected to the
corresponding input and that different types of input get handled
accordingly, e.g. categorical entries get translated to one-hot
encodings. More on feature columns at the
\href{https://www.tensorflow.org/get_started/feature_columns}{tensorflow
docs}.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}8}]:} \PY{n}{feat\PYZus{}name} \PY{o}{=} \PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{pclass}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{fare}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{age}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{sibsp}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{parch}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]}
\PY{n}{my\PYZus{}feature\PYZus{}columns} \PY{o}{=} \PY{p}{[}\PY{p}{]}
\PY{k}{for} \PY{n}{name} \PY{o+ow}{in} \PY{n}{feat\PYZus{}name}\PY{p}{:}
\PY{n}{my\PYZus{}feature\PYZus{}columns}\PY{o}{.}\PY{n}{append}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{feature\PYZus{}column}\PY{o}{.}\PY{n}{numeric\PYZus{}column}\PY{p}{(}\PY{n}{key}\PY{o}{=}\PY{n}{name}\PY{p}{)}\PY{p}{)}
\end{Verbatim}
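All of the chosen features are numeric, so numeric\_column suffices
here. Had we wanted to include the categorical sex column as well, a
hedged sketch would be:

\begin{Verbatim}
# One-hot encode the 'sex' column (not used in this notebook).
sex_col = tf.feature_column.categorical_column_with_vocabulary_list(
    key="sex", vocabulary_list=["female", "male"])
my_feature_columns.append(tf.feature_column.indicator_column(sex_col))
\end{Verbatim}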
Now we are ready to define the estimator class. Google has developed a
handful of predefined estimators to make our life a bit easier. We are
going to showcase the DNNClassifier model, which is what the name
suggests: a feed-forward classifier.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}9}]:} \PY{n}{estimator} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{estimator}\PY{o}{.}\PY{n}{DNNClassifier}\PY{p}{(}\PY{n}{feature\PYZus{}columns}\PY{o}{=}\PY{n}{my\PYZus{}feature\PYZus{}columns}\PY{p}{,}
\PY{n}{hidden\PYZus{}units}\PY{o}{=}\PY{p}{[}\PY{l+m+mi}{1000}\PY{p}{,}\PY{l+m+mi}{1000}\PY{p}{]}\PY{p}{,}
\PY{n}{dropout}\PY{o}{=}\PY{l+m+mf}{0.5}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
INFO:tensorflow:Using default config.
WARNING:tensorflow:Using temporary folder as model directory: /var/folders/n8/wbjbrw4n6wv8v5kbx4zg70wm0000gn/T/tmppsx1900f
INFO:tensorflow:Using config: \{'\_model\_dir': '/var/folders/n8/wbjbrw4n6wv8v5kbx4zg70wm0000gn/T/tmppsx1900f', '\_tf\_random\_seed': None, '\_save\_summary\_steps': 100, '\_save\_checkpoints\_steps': None, '\_save\_checkpoints\_secs': 600, '\_session\_config': None, '\_keep\_checkpoint\_max': 5, '\_keep\_checkpoint\_every\_n\_hours': 10000, '\_log\_step\_count\_steps': 100, '\_train\_distribute': None, '\_service': None, '\_cluster\_spec': <tensorflow.python.training.server\_lib.ClusterSpec object at 0x10a230630>, '\_task\_type': 'worker', '\_task\_id': 0, '\_global\_id\_in\_cluster': 0, '\_master': '', '\_evaluation\_master': '', '\_is\_chief': True, '\_num\_ps\_replicas': 0, '\_num\_worker\_replicas': 1\}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}10}]:} \PY{n}{estimator}\PY{o}{.}\PY{n}{train}\PY{p}{(}\PY{n}{input\PYZus{}fn}\PY{o}{=}\PY{n}{train\PYZus{}input\PYZus{}fn}\PY{p}{,} \PY{n}{steps}\PY{o}{=}\PY{l+m+mi}{3000}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
INFO:tensorflow:Calling model\_fn.
INFO:tensorflow:Done calling model\_fn.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Running local\_init\_op.
INFO:tensorflow:Done running local\_init\_op.
INFO:tensorflow:Saving checkpoints for 1 into /var/folders/n8/wbjbrw4n6wv8v5kbx4zg70wm0000gn/T/tmppsx1900f/model.ckpt.
INFO:tensorflow:loss = 194.63202, step = 1
INFO:tensorflow:global\_step/sec: 50.324
INFO:tensorflow:loss = 67.78077, step = 101 (1.988 sec)
INFO:tensorflow:global\_step/sec: 56.2207
INFO:tensorflow:loss = 62.972076, step = 201 (1.779 sec)
INFO:tensorflow:global\_step/sec: 56.7792
INFO:tensorflow:loss = 63.67186, step = 301 (1.762 sec)
INFO:tensorflow:global\_step/sec: 57.0776
INFO:tensorflow:loss = 64.02928, step = 401 (1.752 sec)
INFO:tensorflow:global\_step/sec: 56.2241
INFO:tensorflow:loss = 63.789528, step = 501 (1.779 sec)
INFO:tensorflow:global\_step/sec: 55.9744
INFO:tensorflow:loss = 64.28005, step = 601 (1.787 sec)
INFO:tensorflow:global\_step/sec: 56.4468
INFO:tensorflow:loss = 62.797832, step = 701 (1.771 sec)
INFO:tensorflow:global\_step/sec: 54.4926
INFO:tensorflow:loss = 64.018936, step = 801 (1.835 sec)
INFO:tensorflow:global\_step/sec: 38.8435
INFO:tensorflow:loss = 64.99931, step = 901 (2.575 sec)
INFO:tensorflow:global\_step/sec: 48.1407
INFO:tensorflow:loss = 59.514896, step = 1001 (2.077 sec)
INFO:tensorflow:global\_step/sec: 43.6324
INFO:tensorflow:loss = 65.64246, step = 1101 (2.291 sec)
INFO:tensorflow:global\_step/sec: 39.8222
INFO:tensorflow:loss = 57.61671, step = 1201 (2.515 sec)
INFO:tensorflow:global\_step/sec: 36.2843
INFO:tensorflow:loss = 57.705513, step = 1301 (2.752 sec)
INFO:tensorflow:global\_step/sec: 44.9542
INFO:tensorflow:loss = 56.1115, step = 1401 (2.224 sec)
INFO:tensorflow:global\_step/sec: 48.6494
INFO:tensorflow:loss = 64.49755, step = 1501 (2.059 sec)
INFO:tensorflow:global\_step/sec: 43.2238
INFO:tensorflow:loss = 64.67319, step = 1601 (2.311 sec)
INFO:tensorflow:global\_step/sec: 48.0495
INFO:tensorflow:loss = 61.380512, step = 1701 (2.081 sec)
INFO:tensorflow:global\_step/sec: 50.441
INFO:tensorflow:loss = 64.450134, step = 1801 (1.983 sec)
INFO:tensorflow:global\_step/sec: 45.3973
INFO:tensorflow:loss = 56.074, step = 1901 (2.203 sec)
INFO:tensorflow:global\_step/sec: 52.7706
INFO:tensorflow:loss = 63.082817, step = 2001 (1.895 sec)
INFO:tensorflow:global\_step/sec: 36.8167
INFO:tensorflow:loss = 60.534943, step = 2101 (2.717 sec)
INFO:tensorflow:global\_step/sec: 38.4023
INFO:tensorflow:loss = 58.265648, step = 2201 (2.605 sec)
INFO:tensorflow:global\_step/sec: 46.6467
INFO:tensorflow:loss = 68.471176, step = 2301 (2.143 sec)
INFO:tensorflow:global\_step/sec: 51.628
INFO:tensorflow:loss = 53.29716, step = 2401 (1.937 sec)
INFO:tensorflow:global\_step/sec: 48.3244
INFO:tensorflow:loss = 63.285633, step = 2501 (2.070 sec)
INFO:tensorflow:global\_step/sec: 50.95
INFO:tensorflow:loss = 58.008884, step = 2601 (1.962 sec)
INFO:tensorflow:global\_step/sec: 54.0401
INFO:tensorflow:loss = 64.027405, step = 2701 (1.850 sec)
INFO:tensorflow:global\_step/sec: 49.6456
INFO:tensorflow:loss = 66.76814, step = 2801 (2.014 sec)
INFO:tensorflow:global\_step/sec: 49.8796
INFO:tensorflow:loss = 63.259502, step = 2901 (2.004 sec)
INFO:tensorflow:Saving checkpoints for 3000 into /var/folders/n8/wbjbrw4n6wv8v5kbx4zg70wm0000gn/T/tmppsx1900f/model.ckpt.
INFO:tensorflow:Loss for final step: 64.38674.
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}10}]:} <tensorflow.python.estimator.canned.dnn.DNNClassifier at 0x10a230358>
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}11}]:} \PY{n}{estimator}\PY{o}{.}\PY{n}{evaluate}\PY{p}{(}\PY{n}{input\PYZus{}fn}\PY{o}{=}\PY{n}{eval\PYZus{}input\PYZus{}fn}\PY{p}{,} \PY{n}{steps}\PY{o}{=}\PY{l+m+mi}{200}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
INFO:tensorflow:Calling model\_fn.
WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to "careful\_interpolation" instead.
WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to "careful\_interpolation" instead.
INFO:tensorflow:Done calling model\_fn.
INFO:tensorflow:Starting evaluation at 2018-06-10-14:39:11
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Restoring parameters from /var/folders/n8/wbjbrw4n6wv8v5kbx4zg70wm0000gn/T/tmppsx1900f/model.ckpt-3000
INFO:tensorflow:Running local\_init\_op.
INFO:tensorflow:Done running local\_init\_op.
INFO:tensorflow:Evaluation [20/200]
INFO:tensorflow:Evaluation [40/200]
INFO:tensorflow:Evaluation [60/200]
INFO:tensorflow:Evaluation [80/200]
INFO:tensorflow:Evaluation [100/200]
INFO:tensorflow:Evaluation [120/200]
INFO:tensorflow:Evaluation [140/200]
INFO:tensorflow:Evaluation [160/200]
INFO:tensorflow:Evaluation [180/200]
INFO:tensorflow:Evaluation [200/200]
INFO:tensorflow:Finished evaluation at 2018-06-10-14:39:14
INFO:tensorflow:Saving dict for global step 3000: accuracy = 0.78255, accuracy\_baseline = 0.79015, auc = 0.5815983, auc\_precision\_recall = 0.28891575, average\_loss = 0.55284804, global\_step = 3000, label/mean = 0.20985, loss = 55.284805, precision = 0.4500657, prediction/mean = 0.32641548, recall = 0.16321182
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}11}]:} \{'accuracy': 0.78255,
'accuracy\_baseline': 0.79015,
'auc': 0.5815983,
'auc\_precision\_recall': 0.28891575,
'average\_loss': 0.55284804,
'label/mean': 0.20985,
'loss': 55.284805,
'precision': 0.4500657,
'prediction/mean': 0.32641548,
'recall': 0.16321182,
'global\_step': 3000\}
\end{Verbatim}
These models are nice out-of-the-box solutions for rapid prototyping or
for people who are not that familiar with machine learning. In most
cases, though, we will need to specify our own model to solve the
specific task at hand, e.g. for image classification we will need
convolutional layers with pooling. In this example we are going to
formulate another feed-forward network, but we will use batch
normalization on the layers. The model is defined as a function that
produces different outcomes based on the mode it is in; each estimator
has 3 modes: training, evaluating and predicting
(tf.estimator.ModeKeys.TRAIN/EVAL/PREDICT). In the model function
my\_model\_fn we are going to go through the different steps that we
must specify in order to comply with the estimator interface.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}12}]:} \PY{c+c1}{\PYZsh{} This is the layer that we are going to use as the hidden layer}
\PY{k}{def} \PY{n+nf}{dnn\PYZus{}layer}\PY{p}{(}\PY{n}{inputs}\PY{p}{,} \PY{n}{unit\PYZus{}num}\PY{p}{,} \PY{n}{activation}\PY{p}{,} \PY{n}{d\PYZus{}rate}\PY{p}{,} \PY{n}{mode}\PY{p}{)}\PY{p}{:}
\PY{n}{bn} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{layers}\PY{o}{.}\PY{n}{batch\PYZus{}normalization}\PY{p}{(}\PY{n}{inputs}\PY{o}{=}\PY{n}{inputs}\PY{p}{)}
\PY{n}{nn} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{layers}\PY{o}{.}\PY{n}{dense}\PY{p}{(}\PY{n}{inputs}\PY{o}{=}\PY{n}{bn}\PY{p}{,} \PY{n}{units}\PY{o}{=}\PY{n}{unit\PYZus{}num}\PY{p}{,} \PY{n}{activation}\PY{o}{=}\PY{n}{activation}\PY{p}{)}
\PY{n}{dn} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{layers}\PY{o}{.}\PY{n}{dropout}\PY{p}{(}\PY{n}{nn}\PY{p}{,} \PY{n}{rate}\PY{o}{=}\PY{n}{d\PYZus{}rate}\PY{p}{,} \PY{n}{training}\PY{o}{=}\PY{n}{mode} \PY{o}{==} \PY{n}{tf}\PY{o}{.}\PY{n}{estimator}\PY{o}{.}\PY{n}{ModeKeys}\PY{o}{.}\PY{n}{TRAIN}\PY{p}{)}
\PY{k}{return} \PY{n}{dn}
\end{Verbatim}
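One caveat on this layer: like the dropout layer,
tf.layers.batch\_normalization also accepts a training flag, and in
training mode its moving-average updates have to be attached to the
train op through the UPDATE\_OPS collection. The definitions in this
notebook omit both, which still runs but leaves the normalization
statistics at their initial values. A sketch of the two fixes:

\begin{Verbatim}
# In dnn_layer: tell batch norm whether we are training.
bn = tf.layers.batch_normalization(
    inputs=inputs, training=mode == tf.estimator.ModeKeys.TRAIN)

# In the TRAIN branch of the model function: run the moving-average
# updates together with the optimizer step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(
        loss=loss, global_step=tf.train.get_global_step())
\end{Verbatim}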
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}16}]:} \PY{k}{def} \PY{n+nf}{my\PYZus{}model\PYZus{}fn}\PY{p}{(}\PY{n}{features}\PY{p}{,} \PY{n}{labels}\PY{p}{,} \PY{n}{mode}\PY{p}{,} \PY{n}{params}\PY{p}{)}\PY{p}{:}
\PY{c+c1}{\PYZsh{} The first step of the model function is always to connect the input to the feature definitions}
\PY{n}{net} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{feature\PYZus{}column}\PY{o}{.}\PY{n}{input\PYZus{}layer}\PY{p}{(}\PY{n}{features}\PY{p}{,} \PY{n}{params}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{feature\PYZus{}columns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Define the computation graph for forward pass}
\PY{k}{for} \PY{n}{hid\PYZus{}num} \PY{o+ow}{in} \PY{n}{params}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{hidden\PYZus{}units}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]}\PY{p}{:}
\PY{n}{net} \PY{o}{=} \PY{n}{dnn\PYZus{}layer}\PY{p}{(}\PY{n}{inputs}\PY{o}{=}\PY{n}{net}\PY{p}{,} \PY{n}{unit\PYZus{}num}\PY{o}{=}\PY{n}{hid\PYZus{}num}\PY{p}{,} \PY{n}{activation}\PY{o}{=}\PY{n}{tf}\PY{o}{.}\PY{n}{nn}\PY{o}{.}\PY{n}{leaky\PYZus{}relu}\PY{p}{,} \PY{n}{d\PYZus{}rate}\PY{o}{=}\PY{l+m+mf}{0.5}\PY{p}{,} \PY{n}{mode}\PY{o}{=}\PY{n}{mode}\PY{p}{)}
\PY{n}{logits} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{layers}\PY{o}{.}\PY{n}{dense}\PY{p}{(}\PY{n}{inputs}\PY{o}{=}\PY{n}{net}\PY{p}{,} \PY{n}{units}\PY{o}{=}\PY{n}{params}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{n\PYZus{}classes}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Prediction part}
\PY{n}{predictions} \PY{o}{=} \PY{p}{\PYZob{}}
\PY{c+c1}{\PYZsh{} Generate predictions (for PREDICT and EVAL mode)}
\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{classes}\PY{l+s+s2}{\PYZdq{}}\PY{p}{:} \PY{n}{tf}\PY{o}{.}\PY{n}{argmax}\PY{p}{(}\PY{n+nb}{input}\PY{o}{=}\PY{n}{logits}\PY{p}{,} \PY{n}{axis}\PY{o}{=}\PY{l+m+mi}{1}\PY{p}{)}\PY{p}{,}
\PY{c+c1}{\PYZsh{} Add `softmax\PYZus{}tensor` to the graph. It is used for PREDICT and by the}
\PY{c+c1}{\PYZsh{} `logging\PYZus{}hook`.}
\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{probabilities}\PY{l+s+s2}{\PYZdq{}}\PY{p}{:} \PY{n}{tf}\PY{o}{.}\PY{n}{nn}\PY{o}{.}\PY{n}{softmax}\PY{p}{(}\PY{n}{logits}\PY{p}{,} \PY{n}{name}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{softmax\PYZus{}tensor}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{p}{\PYZcb{}}
\PY{k}{if} \PY{n}{mode} \PY{o}{==} \PY{n}{tf}\PY{o}{.}\PY{n}{estimator}\PY{o}{.}\PY{n}{ModeKeys}\PY{o}{.}\PY{n}{PREDICT}\PY{p}{:}
\PY{k}{return} \PY{n}{tf}\PY{o}{.}\PY{n}{estimator}\PY{o}{.}\PY{n}{EstimatorSpec}\PY{p}{(}\PY{n}{mode}\PY{o}{=}\PY{n}{mode}\PY{p}{,} \PY{n}{predictions}\PY{o}{=}\PY{n}{predictions}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Calculate Loss (for both TRAIN and EVAL modes)}
\PY{n}{loss} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{losses}\PY{o}{.}\PY{n}{sparse\PYZus{}softmax\PYZus{}cross\PYZus{}entropy}\PY{p}{(}\PY{n}{labels}\PY{o}{=}\PY{n}{labels}\PY{p}{,} \PY{n}{logits}\PY{o}{=}\PY{n}{logits}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Configure the Training Op (for TRAIN mode)}
\PY{k}{if} \PY{n}{mode} \PY{o}{==} \PY{n}{tf}\PY{o}{.}\PY{n}{estimator}\PY{o}{.}\PY{n}{ModeKeys}\PY{o}{.}\PY{n}{TRAIN}\PY{p}{:}
\PY{n}{optimizer} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{train}\PY{o}{.}\PY{n}{AdamOptimizer}\PY{p}{(}\PY{n}{learning\PYZus{}rate}\PY{o}{=}\PY{l+m+mf}{0.01}\PY{p}{)}
\PY{n}{train\PYZus{}op} \PY{o}{=} \PY{n}{optimizer}\PY{o}{.}\PY{n}{minimize}\PY{p}{(}
\PY{n}{loss}\PY{o}{=}\PY{n}{loss}\PY{p}{,}
\PY{n}{global\PYZus{}step}\PY{o}{=}\PY{n}{tf}\PY{o}{.}\PY{n}{train}\PY{o}{.}\PY{n}{get\PYZus{}global\PYZus{}step}\PY{p}{(}\PY{p}{)}\PY{p}{)}
\PY{k}{return} \PY{n}{tf}\PY{o}{.}\PY{n}{estimator}\PY{o}{.}\PY{n}{EstimatorSpec}\PY{p}{(}\PY{n}{mode}\PY{o}{=}\PY{n}{mode}\PY{p}{,} \PY{n}{loss}\PY{o}{=}\PY{n}{loss}\PY{p}{,} \PY{n}{train\PYZus{}op}\PY{o}{=}\PY{n}{train\PYZus{}op}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Add evaluation metrics (for EVAL mode)}
\PY{k}{if} \PY{n}{mode} \PY{o}{==} \PY{n}{tf}\PY{o}{.}\PY{n}{estimator}\PY{o}{.}\PY{n}{ModeKeys}\PY{o}{.}\PY{n}{EVAL}\PY{p}{:}
\PY{n}{eval\PYZus{}metric\PYZus{}ops} \PY{o}{=} \PY{p}{\PYZob{}}
\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{accuracy}\PY{l+s+s2}{\PYZdq{}}\PY{p}{:} \PY{n}{tf}\PY{o}{.}\PY{n}{metrics}\PY{o}{.}\PY{n}{accuracy}\PY{p}{(}\PY{n}{labels}\PY{o}{=}\PY{n}{labels}\PY{p}{,} \PY{n}{predictions}\PY{o}{=}\PY{n}{predictions}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{classes}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]}\PY{p}{)}\PY{p}{,}
\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{auc}\PY{l+s+s2}{\PYZdq{}}\PY{p}{:} \PY{n}{tf}\PY{o}{.}\PY{n}{metrics}\PY{o}{.}\PY{n}{auc}\PY{p}{(}\PY{n}{labels}\PY{o}{=}\PY{n}{labels}\PY{p}{,} \PY{n}{predictions}\PY{o}{=}\PY{n}{predictions}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{classes}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]}\PY{p}{)}\PY{p}{\PYZcb{}}
\PY{k}{return} \PY{n}{tf}\PY{o}{.}\PY{n}{estimator}\PY{o}{.}\PY{n}{EstimatorSpec}\PY{p}{(}\PY{n}{mode}\PY{o}{=}\PY{n}{mode}\PY{p}{,} \PY{n}{loss}\PY{o}{=}\PY{n}{loss}\PY{p}{,} \PY{n}{eval\PYZus{}metric\PYZus{}ops}\PY{o}{=}\PY{n}{eval\PYZus{}metric\PYZus{}ops}\PY{p}{)}
\end{Verbatim}
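One more detail worth flagging: tf.metrics.auc is normally fed a score
rather than a hard class label, so the auc entry above is better
computed from the positive-class probability; an assumed variant:

\begin{Verbatim}
# Score the positive class instead of the argmax prediction.
"auc": tf.metrics.auc(labels=labels,
                      predictions=predictions["probabilities"][:, 1])
\end{Verbatim}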
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}17}]:} \PY{n}{classifier} \PY{o}{=} \PY{n}{tf}\PY{o}{.}\PY{n}{estimator}\PY{o}{.}\PY{n}{Estimator}\PY{p}{(}
\PY{n}{model\PYZus{}fn}\PY{o}{=}\PY{n}{my\PYZus{}model\PYZus{}fn}\PY{p}{,}
\PY{n}{params}\PY{o}{=}\PY{p}{\PYZob{}}
\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{feature\PYZus{}columns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{:} \PY{n}{my\PYZus{}feature\PYZus{}columns}\PY{p}{,}
\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{hidden\PYZus{}units}\PY{l+s+s1}{\PYZsq{}}\PY{p}{:} \PY{p}{[}\PY{l+m+mi}{1000}\PY{p}{,} \PY{l+m+mi}{1000}\PY{p}{]}\PY{p}{,}
\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{n\PYZus{}classes}\PY{l+s+s1}{\PYZsq{}}\PY{p}{:} \PY{l+m+mi}{2}
\PY{p}{\PYZcb{}}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
INFO:tensorflow:Using default config.
WARNING:tensorflow:Using temporary folder as model directory: /var/folders/n8/wbjbrw4n6wv8v5kbx4zg70wm0000gn/T/tmp1tui31ju
INFO:tensorflow:Using config: \{'\_model\_dir': '/var/folders/n8/wbjbrw4n6wv8v5kbx4zg70wm0000gn/T/tmp1tui31ju', '\_tf\_random\_seed': None, '\_save\_summary\_steps': 100, '\_save\_checkpoints\_steps': None, '\_save\_checkpoints\_secs': 600, '\_session\_config': None, '\_keep\_checkpoint\_max': 5, '\_keep\_checkpoint\_every\_n\_hours': 10000, '\_log\_step\_count\_steps': 100, '\_train\_distribute': None, '\_service': None, '\_cluster\_spec': <tensorflow.python.training.server\_lib.ClusterSpec object at 0x1235e7908>, '\_task\_type': 'worker', '\_task\_id': 0, '\_global\_id\_in\_cluster': 0, '\_master': '', '\_evaluation\_master': '', '\_is\_chief': True, '\_num\_ps\_replicas': 0, '\_num\_worker\_replicas': 1\}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}20}]:} \PY{n}{classifier}\PY{o}{.}\PY{n}{train}\PY{p}{(}\PY{n}{input\PYZus{}fn}\PY{o}{=}\PY{n}{train\PYZus{}input\PYZus{}fn}\PY{p}{,} \PY{n}{steps}\PY{o}{=}\PY{l+m+mi}{3000}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
INFO:tensorflow:Calling model\_fn.
INFO:tensorflow:Done calling model\_fn.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Restoring parameters from /var/folders/n8/wbjbrw4n6wv8v5kbx4zg70wm0000gn/T/tmp1tui31ju/model.ckpt-3000
INFO:tensorflow:Running local\_init\_op.
INFO:tensorflow:Done running local\_init\_op.
INFO:tensorflow:Saving checkpoints for 3001 into /var/folders/n8/wbjbrw4n6wv8v5kbx4zg70wm0000gn/T/tmp1tui31ju/model.ckpt.
INFO:tensorflow:loss = 0.6218874, step = 3001
INFO:tensorflow:global\_step/sec: 41.134
INFO:tensorflow:loss = 0.6308135, step = 3101 (2.433 sec)
INFO:tensorflow:global\_step/sec: 42.4112
INFO:tensorflow:loss = 0.59997797, step = 3201 (2.357 sec)
INFO:tensorflow:global\_step/sec: 43.8712
INFO:tensorflow:loss = 0.55855, step = 3301 (2.279 sec)
INFO:tensorflow:global\_step/sec: 48.3987
INFO:tensorflow:loss = 0.68547094, step = 3401 (2.066 sec)
INFO:tensorflow:global\_step/sec: 45.9739
INFO:tensorflow:loss = 0.58312017, step = 3501 (2.177 sec)
INFO:tensorflow:global\_step/sec: 36.8226
INFO:tensorflow:loss = 0.56016314, step = 3601 (2.715 sec)
INFO:tensorflow:global\_step/sec: 32.9154
INFO:tensorflow:loss = 0.5868153, step = 3701 (3.037 sec)
INFO:tensorflow:global\_step/sec: 48.8304
INFO:tensorflow:loss = 0.6282436, step = 3801 (2.049 sec)
INFO:tensorflow:global\_step/sec: 45.4257
INFO:tensorflow:loss = 0.6056875, step = 3901 (2.200 sec)
INFO:tensorflow:global\_step/sec: 49.1671
INFO:tensorflow:loss = 0.58671457, step = 4001 (2.034 sec)
INFO:tensorflow:global\_step/sec: 42.7573
INFO:tensorflow:loss = 0.6089299, step = 4101 (2.339 sec)
INFO:tensorflow:global\_step/sec: 37.8459
INFO:tensorflow:loss = 0.6326588, step = 4201 (2.642 sec)
INFO:tensorflow:global\_step/sec: 43.9588
INFO:tensorflow:loss = 0.6027881, step = 4301 (2.275 sec)
INFO:tensorflow:global\_step/sec: 48.5635
INFO:tensorflow:loss = 0.54152703, step = 4401 (2.059 sec)
INFO:tensorflow:global\_step/sec: 50.0463
INFO:tensorflow:loss = 0.66083395, step = 4501 (1.998 sec)
INFO:tensorflow:global\_step/sec: 49.4703
INFO:tensorflow:loss = 0.62198687, step = 4601 (2.021 sec)
INFO:tensorflow:global\_step/sec: 50.6755
INFO:tensorflow:loss = 0.60302573, step = 4701 (1.973 sec)
INFO:tensorflow:global\_step/sec: 50.6348
INFO:tensorflow:loss = 0.57599527, step = 4801 (1.975 sec)
INFO:tensorflow:global\_step/sec: 50.0629
INFO:tensorflow:loss = 0.59555376, step = 4901 (1.997 sec)
INFO:tensorflow:global\_step/sec: 38.1622
INFO:tensorflow:loss = 0.49892017, step = 5001 (2.620 sec)
INFO:tensorflow:global\_step/sec: 47.4043
INFO:tensorflow:loss = 0.6800725, step = 5101 (2.110 sec)
INFO:tensorflow:global\_step/sec: 49.1188
INFO:tensorflow:loss = 0.7758391, step = 5201 (2.036 sec)
INFO:tensorflow:global\_step/sec: 49.4261
INFO:tensorflow:loss = 0.5849468, step = 5301 (2.023 sec)
INFO:tensorflow:global\_step/sec: 49.6492
INFO:tensorflow:loss = 0.67116183, step = 5401 (2.014 sec)
INFO:tensorflow:global\_step/sec: 47.4597
INFO:tensorflow:loss = 0.6721305, step = 5501 (2.108 sec)
INFO:tensorflow:global\_step/sec: 42.7385
INFO:tensorflow:loss = 0.58171046, step = 5601 (2.342 sec)
INFO:tensorflow:global\_step/sec: 33.6575
INFO:tensorflow:loss = 0.5937742, step = 5701 (2.969 sec)
INFO:tensorflow:global\_step/sec: 34.6596
INFO:tensorflow:loss = 0.6021945, step = 5801 (2.884 sec)
INFO:tensorflow:global\_step/sec: 36.8168
INFO:tensorflow:loss = 0.5518951, step = 5901 (2.717 sec)
INFO:tensorflow:Saving checkpoints for 6000 into /var/folders/n8/wbjbrw4n6wv8v5kbx4zg70wm0000gn/T/tmp1tui31ju/model.ckpt.
INFO:tensorflow:Loss for final step: 0.6435217.
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}20}]:} <tensorflow.python.estimator.estimator.Estimator at 0x1235e78d0>
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}21}]:} \PY{n}{classifier}\PY{o}{.}\PY{n}{evaluate}\PY{p}{(}\PY{n}{input\PYZus{}fn}\PY{o}{=}\PY{n}{eval\PYZus{}input\PYZus{}fn}\PY{p}{,} \PY{n}{steps}\PY{o}{=}\PY{l+m+mi}{200}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
INFO:tensorflow:Calling model\_fn.
INFO:tensorflow:Done calling model\_fn.
INFO:tensorflow:Starting evaluation at 2018-06-10-14:44:56
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Restoring parameters from /var/folders/n8/wbjbrw4n6wv8v5kbx4zg70wm0000gn/T/tmp1tui31ju/model.ckpt-6000
INFO:tensorflow:Running local\_init\_op.
INFO:tensorflow:Done running local\_init\_op.
INFO:tensorflow:Evaluation [20/200]
INFO:tensorflow:Evaluation [40/200]
INFO:tensorflow:Evaluation [60/200]
INFO:tensorflow:Evaluation [80/200]
INFO:tensorflow:Evaluation [100/200]
INFO:tensorflow:Evaluation [120/200]
INFO:tensorflow:Evaluation [140/200]
INFO:tensorflow:Evaluation [160/200]
INFO:tensorflow:Evaluation [180/200]
INFO:tensorflow:Evaluation [200/200]
INFO:tensorflow:Finished evaluation at 2018-06-10-14:44:58
INFO:tensorflow:Saving dict for global step 6000: accuracy = 0.79, auc = 0.6002502, global\_step = 6000, loss = 0.54535925
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}21}]:} \{'accuracy': 0.79, 'auc': 0.6002502, 'loss': 0.54535925, 'global\_step': 6000\}
\end{Verbatim}
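The one mode we have not exercised is PREDICT. Here is a hedged sketch
of getting per-passenger predictions out of the trained classifier,
assuming a label-free, non-repeating input function over the test
features:

\begin{Verbatim}
def predict_input_fn():
    dataset = tf.data.Dataset.from_tensor_slices(
        test[["pclass", "fare", "age", "sibsp", "parch"]].to_dict("list"))
    return dataset.batch(100).make_one_shot_iterator().get_next()

# predict() yields the dictionaries built in my_model_fn.
for pred in classifier.predict(input_fn=predict_input_fn):
    print(pred["classes"], pred["probabilities"])
    break  # show just the first passenger
\end{Verbatim}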
We achieved a test-set accuracy of 79\%, which sits within the 75-85\%
range that good Kaggle solutions reach, without any feature engineering;
note, however, that it only matches the accuracy\_baseline reported in
the earlier evaluation (always predicting the majority class), so
feature engineering would clearly pay off here. To conclude this guide,
we have seen an example of the new input pipelines that Google
introduced recently and how they tie into the estimator models. For more
extensive examples and tutorials I strongly advise visiting the latest
version of the \href{https://www.tensorflow.org/get_started/}{docs}.
% Add a bibliography block to the postdoc
\end{document}