Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
keras-team
GitHub Repository: keras-team/keras-io
Path: blob/master/scripts/examples_master.py
7753 views
1
# Master table of contents for the keras.io "Code examples" section.
# Structure: a root node with "path"/"title"/"toc", whose "children" are the
# top-level categories (vision, nlp, ...); each category's "children" are the
# individual examples. Per-example keys:
#   path        -- example slug (directory/notebook name)
#   title       -- display title on keras.io
#   subcategory -- grouping header within the category page
#   highlight   -- optional; featured on the landing page when True
#   keras_3     -- optional; example runs on Keras 3 when True
EXAMPLES_MASTER = {
    "path": "examples/",
    "title": "Code examples",
    "toc": False,
    "children": [
        {
            "path": "vision/",
            "title": "Computer Vision",
            "toc": True,
            "children": [
                # Image classification
                {
                    "path": "image_classification_from_scratch",
                    "title": "Image classification from scratch",
                    "subcategory": "Image classification",
                    "highlight": True,
                    "keras_3": True,
                },
                {
                    "path": "mnist_convnet",
                    "title": "Simple MNIST convnet",
                    "subcategory": "Image classification",
                    "highlight": True,
                    "keras_3": True,
                },
                {
                    "path": "image_classification_efficientnet_fine_tuning",
                    "title": "Image classification via fine-tuning with EfficientNet",
                    "subcategory": "Image classification",
                    "highlight": True,
                    "keras_3": True,
                },
                {
                    "path": "image_classification_with_vision_transformer",
                    "title": "Image classification with Vision Transformer",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "attention_mil_classification",
                    "title": "Classification using Attention-based Deep Multiple Instance Learning",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "mlp_image_classification",
                    "title": "Image classification with modern MLP models",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "mobilevit",
                    "title": "A mobile-friendly Transformer-based model for image classification",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "xray_classification_with_tpus",
                    "title": "Pneumonia Classification on TPU",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "cct",
                    "title": "Compact Convolutional Transformers",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "convmixer",
                    "title": "Image classification with ConvMixer",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "eanet",
                    "title": "Image classification with EANet (External Attention Transformer)",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "involution",
                    "title": "Involutional neural networks",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "perceiver_image_classification",
                    "title": "Image classification with Perceiver",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "reptile",
                    "title": "Few-Shot learning with Reptile",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "semisupervised_simclr",
                    "title": "Semi-supervised image classification using contrastive pretraining with SimCLR",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "swin_transformers",
                    "title": "Image classification with Swin Transformers",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "vit_small_ds",
                    "title": "Train a Vision Transformer on small datasets",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "shiftvit",
                    "title": "A Vision Transformer without Attention",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "image_classification_using_global_context_vision_transformer",
                    "title": "Image Classification using Global Context Vision Transformer",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "temporal_latent_bottleneck",
                    "title": "When Recurrence meets Transformers",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                {
                    "path": "forwardforward",
                    "title": "Using the Forward-Forward Algorithm for Image Classification",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                # Image segmentation
                {
                    "path": "oxford_pets_image_segmentation",
                    "title": "Image segmentation with a U-Net-like architecture",
                    "subcategory": "Image segmentation",
                    "highlight": True,
                    "keras_3": True,
                },
                {
                    "path": "deeplabv3_plus",
                    "title": "Multiclass semantic segmentation using DeepLabV3+",
                    "subcategory": "Image segmentation",
                    "keras_3": True,
                },
                {
                    "path": "basnet_segmentation",
                    "title": "Highly accurate boundaries segmentation using BASNet",
                    "subcategory": "Image segmentation",
                    "keras_3": True,
                },
                {
                    "path": "fully_convolutional_network",
                    "title": "Image Segmentation using Composable Fully-Convolutional Networks",
                    "subcategory": "Image segmentation",
                    "keras_3": True,
                },
                # Object Detection
                {
                    "path": "retinanet",
                    "title": "Object Detection with RetinaNet",
                    "subcategory": "Object detection",
                },
                {
                    "path": "keypoint_detection",
                    "title": "Keypoint Detection with Transfer Learning",
                    "subcategory": "Object detection",
                    "keras_3": True,
                },
                {
                    "path": "object_detection_using_vision_transformer",
                    "title": "Object detection with Vision Transformers",
                    "subcategory": "Object detection",
                    "keras_3": True,
                },
                # 3D
                {
                    "path": "brain_tumor_segmentation",
                    "title": "3D Multimodal Brain Tumor Segmentation",
                    "subcategory": "3D",
                    "keras_3": True,
                },
                {
                    "path": "3D_image_classification",
                    "title": "3D image classification from CT scans",
                    "subcategory": "3D",
                    "keras_3": True,
                },
                {
                    "path": "depth_estimation",
                    "title": "Monocular depth estimation",
                    "subcategory": "3D",
                    "keras_3": True,
                },
                {
                    "path": "nerf",
                    "title": "3D volumetric rendering with NeRF",
                    "subcategory": "3D",
                    "keras_3": True,
                    "highlight": True,
                },
                {
                    "path": "pointnet_segmentation",
                    "title": "Point cloud segmentation with PointNet",
                    "subcategory": "3D",
                    "keras_3": True,
                },
                {
                    "path": "pointnet",
                    "title": "Point cloud classification",
                    "subcategory": "3D",
                    "keras_3": True,
                },
                # OCR
                {
                    "path": "captcha_ocr",
                    "title": "OCR model for reading Captchas",
                    "subcategory": "OCR",
                    "keras_3": True,
                },
                {
                    "path": "handwriting_recognition",
                    "title": "Handwriting recognition",
                    "subcategory": "OCR",
                    "keras_3": True,
                },
                # Image enhancement
                {
                    "path": "autoencoder",
                    "title": "Convolutional autoencoder for image denoising",
                    "subcategory": "Image enhancement",
                    "keras_3": True,
                },
                {
                    "path": "mirnet",
                    "title": "Low-light image enhancement using MIRNet",
                    "subcategory": "Image enhancement",
                    "keras_3": True,
                },
                {
                    "path": "super_resolution_sub_pixel",
                    "title": "Image Super-Resolution using an Efficient Sub-Pixel CNN",
                    "subcategory": "Image enhancement",
                    "keras_3": True,
                },
                {
                    "path": "edsr",
                    "title": "Enhanced Deep Residual Networks for single-image super-resolution",
                    "subcategory": "Image enhancement",
                    "keras_3": True,
                },
                {
                    "path": "zero_dce",
                    "title": "Zero-DCE for low-light image enhancement",
                    "subcategory": "Image enhancement",
                    "keras_3": True,
                },
                # Data augmentation
                {
                    "path": "cutmix",
                    "title": "CutMix data augmentation for image classification",
                    "subcategory": "Data augmentation",
                    "keras_3": True,
                },
                {
                    "path": "mixup",
                    "title": "MixUp augmentation for image classification",
                    "subcategory": "Data augmentation",
                    "keras_3": True,
                },
                {
                    "path": "randaugment",
                    "title": "RandAugment for Image Classification for Improved Robustness",
                    "subcategory": "Data augmentation",
                    "keras_3": True,
                },
                # Image & Text
                {
                    "path": "image_captioning",
                    "title": "Image captioning",
                    "subcategory": "Image & Text",
                    "highlight": True,
                    "keras_3": True,
                },
                {
                    "path": "nl_image_search",
                    "title": "Natural language image search with a Dual Encoder",
                    "subcategory": "Image & Text",
                },
                # Vision models interpretability
                {
                    "path": "visualizing_what_convnets_learn",
                    "title": "Visualizing what convnets learn",
                    "subcategory": "Vision models interpretability",
                    "keras_3": True,
                },
                {
                    "path": "integrated_gradients",
                    "title": "Model interpretability with Integrated Gradients",
                    "subcategory": "Vision models interpretability",
                    "keras_3": True,
                },
                {
                    "path": "probing_vits",
                    "title": "Investigating Vision Transformer representations",
                    "subcategory": "Vision models interpretability",
                    "keras_3": True,
                },
                {
                    "path": "grad_cam",
                    "title": "Grad-CAM class activation visualization",
                    "subcategory": "Vision models interpretability",
                    "keras_3": True,
                },
                # Image similarity search
                {
                    "path": "near_dup_search",
                    "title": "Near-duplicate image search",
                    "subcategory": "Image similarity search",
                },
                {
                    "path": "semantic_image_clustering",
                    "title": "Semantic Image Clustering",
                    "subcategory": "Image similarity search",
                    "keras_3": True,
                },
                {
                    "path": "siamese_contrastive",
                    "title": "Image similarity estimation using a Siamese Network with a contrastive loss",
                    "subcategory": "Image similarity search",
                    "keras_3": True,
                },
                {
                    "path": "siamese_network",
                    "title": "Image similarity estimation using a Siamese Network with a triplet loss",
                    "subcategory": "Image similarity search",
                    "keras_3": True,
                },
                {
                    "path": "metric_learning",
                    "title": "Metric learning for image similarity search",
                    "subcategory": "Image similarity search",
                    "keras_3": True,
                },
                {
                    "path": "metric_learning_tf_similarity",
                    "title": "Metric learning for image similarity search using TensorFlow Similarity",
                    "subcategory": "Image similarity search",
                },
                {
                    "path": "nnclr",
                    "title": "Self-supervised contrastive learning with NNCLR",
                    "subcategory": "Image similarity search",
                    "keras_3": True,
                },
                {
                    "path": "simsiam",
                    "title": "Self-supervised contrastive learning with SimSiam",
                    "subcategory": "Image similarity search",
                    "keras_3": True,
                },
                # Video
                {
                    "path": "video_classification",
                    "title": "Video Classification with a CNN-RNN Architecture",
                    "subcategory": "Video",
                    "keras_3": True,
                },
                {
                    "path": "conv_lstm",
                    "title": "Next-Frame Video Prediction with Convolutional LSTMs",
                    "subcategory": "Video",
                    "keras_3": True,
                },
                {
                    "path": "video_transformers",
                    "title": "Video Classification with Transformers",
                    "subcategory": "Video",
                    "keras_3": True,
                },
                {
                    "path": "vivit",
                    "title": "Video Vision Transformer",
                    "subcategory": "Video",
                    "keras_3": True,
                },
                {
                    "path": "bit",
                    "title": "Image Classification using BigTransfer (BiT)",
                    "subcategory": "Image classification",
                    "keras_3": True,
                },
                # Performance recipes
                {
                    "path": "gradient_centralization",
                    "title": "Gradient Centralization for Better Training Performance",
                    "subcategory": "Performance recipes",
                    "keras_3": True,
                },
                {
                    "path": "token_learner",
                    "title": "Learning to tokenize in Vision Transformers",
                    "subcategory": "Performance recipes",
                    "keras_3": True,
                },
                {
                    "path": "knowledge_distillation",
                    "title": "Knowledge Distillation",
                    "subcategory": "Performance recipes",
                    "keras_3": True,
                },
                {
                    "path": "fixres",
                    "title": "FixRes: Fixing train-test resolution discrepancy",
                    "subcategory": "Performance recipes",
                    "keras_3": True,
                },
                {
                    "path": "cait",
                    "title": "Class Attention Image Transformers with LayerScale",
                    "subcategory": "Performance recipes",
                    "keras_3": True,
                },
                {
                    "path": "patch_convnet",
                    "title": "Augmenting convnets with aggregated attention",
                    "subcategory": "Performance recipes",
                    "keras_3": True,
                },
                {
                    "path": "learnable_resizer",
                    "title": "Learning to Resize",
                    "subcategory": "Performance recipes",
                    "keras_3": True,
                },
            ],
        },
        {
            "path": "nlp/",
            "title": "Natural Language Processing",
            "toc": True,
            "children": [
                # Text classification
                {
                    "path": "text_classification_from_scratch",
                    "title": "Text classification from scratch",
                    "subcategory": "Text classification",
                    "highlight": True,
                    "keras_3": True,
                },
                {
                    "path": "active_learning_review_classification",
                    "title": "Review Classification using Active Learning",
                    "subcategory": "Text classification",
                    "keras_3": True,
                },
                {
                    "path": "fnet_classification_with_keras_hub",
                    "title": "Text Classification using FNet",
                    "subcategory": "Text classification",
                    "keras_3": True,
                },
                {
                    "path": "multi_label_classification",
                    "title": "Large-scale multi-label text classification",
                    "subcategory": "Text classification",
                    "keras_3": True,
                },
                {
                    "path": "text_classification_with_transformer",
                    "title": "Text classification with Transformer",
                    "subcategory": "Text classification",
                    "keras_3": True,
                },
                {
                    "path": "text_classification_with_switch_transformer",
                    "title": "Text classification with Switch Transformer",
                    "subcategory": "Text classification",
                    "keras_3": True,
                },
                {
                    "path": "tweet-classification-using-tfdf",
                    "title": "Text classification using Decision Forests and pretrained embeddings",
                    "subcategory": "Text classification",
                },
                {
                    "path": "pretrained_word_embeddings",
                    "title": "Using pre-trained word embeddings",
                    "subcategory": "Text classification",
                    "keras_3": True,
                },
                {
                    "path": "bidirectional_lstm_imdb",
                    "title": "Bidirectional LSTM on IMDB",
                    "subcategory": "Text classification",
                    "keras_3": True,
                },
                {
                    "path": "data_parallel_training_with_keras_hub",
                    "title": "Data Parallel Training with KerasHub and tf.distribute",
                    "subcategory": "Text classification",
                    "keras_3": True,
                },
                # Machine translation
                {
                    "path": "neural_machine_translation_with_keras_hub",
                    "title": "English-to-Spanish translation with KerasHub",
                    "subcategory": "Machine translation",
                    "keras_3": True,
                },
                {
                    "path": "neural_machine_translation_with_transformer",
                    "title": "English-to-Spanish translation with a sequence-to-sequence Transformer",
                    "subcategory": "Machine translation",
                    "highlight": True,
                    "keras_3": True,
                },
                {
                    "path": "lstm_seq2seq",
                    "title": "Character-level recurrent sequence-to-sequence model",
                    "subcategory": "Machine translation",
                    "keras_3": True,
                },
                # Entailment prediction
                {
                    "path": "multimodal_entailment",
                    "title": "Multimodal entailment",
                    "subcategory": "Entailment prediction",
                    "keras_3": True,
                },
                # Named entity recognition
                {
                    "path": "ner_transformers",
                    "title": "Named Entity Recognition using Transformers",
                    "subcategory": "Named entity recognition",
                    "keras_3": True,
                },
                # Sequence-to-sequence
                {
                    "path": "text_extraction_with_bert",
                    "title": "Text Extraction with BERT",
                    "subcategory": "Sequence-to-sequence",
                },
                {
                    "path": "addition_rnn",
                    "title": "Sequence to sequence learning for performing number addition",
                    "subcategory": "Sequence-to-sequence",
                    "keras_3": True,
                },
                # Text similarity search
                {
                    "path": "semantic_similarity_with_keras_hub",
                    "title": "Semantic Similarity with KerasHub",
                    "subcategory": "Text similarity search",
                    "keras_3": True,
                },
                {
                    "path": "semantic_similarity_with_bert",
                    "title": "Semantic Similarity with BERT",
                    "subcategory": "Text similarity search",
                    "keras_3": True,
                },
                {
                    "path": "sentence_embeddings_with_sbert",
                    "title": "Sentence embeddings using Siamese RoBERTa-networks",
                    "subcategory": "Text similarity search",
                    "keras_3": True,
                },
                # Language modeling
                {
                    "path": "masked_language_modeling",
                    "title": "End-to-end Masked Language Modeling with BERT",
                    "subcategory": "Language modeling",
                    "keras_3": True,
                },
                {
                    "path": "abstractive_summarization_with_bart",
                    "title": "Abstractive Text Summarization with BART",
                    "subcategory": "Language modeling",
                    "keras_3": True,
                },
                # Parameter efficient fine-tuning.
                {
                    "path": "parameter_efficient_finetuning_of_gpt2_with_lora",
                    "title": "Parameter-efficient fine-tuning of GPT-2 with LoRA",
                    "subcategory": "Parameter efficient fine-tuning",
                    "keras_3": True,
                },
                # Remainder is autogenerated
            ],
        },
        {
            "path": "structured_data/",
            "title": "Structured Data",
            "toc": True,
            "children": [
                {
                    "path": "structured_data_classification_with_feature_space",
                    "title": "Structured data classification with FeatureSpace",
                    "subcategory": "Structured data classification",
                    "highlight": True,
                    "keras_3": True,
                },
                {
                    "path": "feature_space_advanced",
                    "title": "FeatureSpace advanced use cases",
                    "subcategory": "Structured data classification",
                    "highlight": True,
                    "keras_3": True,
                },
                {
                    "path": "imbalanced_classification",
                    "title": "Imbalanced classification: credit card fraud detection",
                    "subcategory": "Structured data classification",
                    "highlight": True,
                    "keras_3": True,
                },
                {
                    "path": "structured_data_classification_from_scratch",
                    "title": "Structured data classification from scratch",
                    "subcategory": "Structured data classification",
                    "keras_3": True,
                },
                {
                    "path": "wide_deep_cross_networks",
                    "title": "Structured data learning with Wide, Deep, and Cross networks",
                    "subcategory": "Structured data classification",
                    "keras_3": True,
                },
                {
                    "path": "customer_lifetime_value",
                    "title": "Deep Learning for Customer Lifetime Value",
                    "subcategory": "Structured data regression",
                    "keras_3": True,
                },
                {
                    "path": "classification_with_grn_and_vsn",
                    "title": "Classification with Gated Residual and Variable Selection Networks",
                    "subcategory": "Structured data classification",
                    "keras_3": True,
                },
                {
                    "path": "classification_with_tfdf",
                    "title": "Classification with TensorFlow Decision Forests",
                    "subcategory": "Structured data classification",
                },
                {
                    "path": "deep_neural_decision_forests",
                    "title": "Classification with Neural Decision Forests",
                    "subcategory": "Structured data classification",
                    "keras_3": True,
                },
                {
                    "path": "tabtransformer",
                    "title": "Structured data learning with TabTransformer",
                    "subcategory": "Structured data classification",
                    "keras_3": True,
                },
                {
                    "path": "class_with_grn_and_vsn_with_hyperparameters_tuning",
                    "title": "Classification with Gated Residual and Variable Selection Networks with HyperParameters tuning",
                    "subcategory": "Structured data classification",
                    "keras_3": True,
                },
                # Recommendation
                {
                    "path": "collaborative_filtering_movielens",
                    "title": "Collaborative Filtering for Movie Recommendations",
                    "subcategory": "Recommendation",
                    "keras_3": True,
                },
                {
                    "path": "movielens_recommendations_transformers",
                    "title": "A Transformer-based recommendation system",
                    "subcategory": "Recommendation",
                    "keras_3": True,
                },
            ],
        },
        {
            "path": "timeseries/",
            "title": "Timeseries",
            "toc": True,
            "children": [
                # Timeseries classification
                {
                    "path": "timeseries_classification_from_scratch",
                    "title": "Timeseries classification from scratch",
                    "subcategory": "Timeseries classification",
                    "highlight": True,
                    "keras_3": True,
                },
                {
                    "path": "timeseries_classification_transformer",
                    "title": "Timeseries classification with a Transformer model",
                    "subcategory": "Timeseries classification",
                    "keras_3": True,
                },
                {
                    "path": "eeg_signal_classification",
                    "title": "Electroencephalogram Signal Classification for action identification",
                    "subcategory": "Timeseries classification",
                    "keras_3": True,
                },
                {
                    "path": "event_classification_for_payment_card_fraud_detection",
                    "title": "Event classification for payment card fraud detection",
                    "subcategory": "Timeseries classification",
                    "keras_3": True,
                },
                {
                    "path": "eeg_bci_ssvepformer",
                    "title": "Electroencephalogram Signal Classification for Brain-Computer Interface",
                    "subcategory": "Timeseries classification",
                    "keras_3": True,
                },
                # Anomaly detection
                {
                    "path": "timeseries_anomaly_detection",
                    "title": "Timeseries anomaly detection using an Autoencoder",
                    "subcategory": "Anomaly detection",
                    "keras_3": True,
                },
                # Timeseries forecasting
                {
                    "path": "timeseries_traffic_forecasting",
                    "title": "Traffic forecasting using graph neural networks and LSTM",
                    "subcategory": "Timeseries forecasting",
                    "keras_3": True,
                },
                {
                    "path": "timeseries_weather_forecasting",
                    "title": "Timeseries forecasting for weather prediction",
                    "subcategory": "Timeseries forecasting",
                    "keras_3": True,
                },
            ],
        },
        {
            "path": "generative/",
            "title": "Generative Deep Learning",
            "toc": True,
            "children": [
                # Image generation
                {
                    "path": "ddim",
                    "title": "Denoising Diffusion Implicit Models",
                    "subcategory": "Image generation",
                    "highlight": True,
                    "keras_3": True,
                },
                {
                    "path": "random_walks_with_stable_diffusion_3",
                    "title": "A walk through latent space with Stable Diffusion 3",
                    "subcategory": "Image generation",
                    "highlight": True,
                    "keras_3": True,
                },
                {
                    "path": "dreambooth",
                    "title": "DreamBooth",
                    "subcategory": "Image generation",
                },
                {
                    "path": "ddpm",
                    "title": "Denoising Diffusion Probabilistic Models",
                    "subcategory": "Image generation",
                },
                {
                    "path": "fine_tune_via_textual_inversion",
                    "title": "Teach StableDiffusion new concepts via Textual Inversion",
                    "subcategory": "Image generation",
                },
                {
                    "path": "finetune_stable_diffusion",
                    "title": "Fine-tuning Stable Diffusion",
                    "subcategory": "Image generation",
                },
                {
                    "path": "vae",
                    "title": "Variational AutoEncoder",
                    "subcategory": "Image generation",
                    "keras_3": True,
                },
                {
                    "path": "dcgan_overriding_train_step",
                    "title": "GAN overriding Model.train_step",
                    "subcategory": "Image generation",
                    "keras_3": True,
                },
                {
                    "path": "wgan_gp",
                    "title": "WGAN-GP overriding Model.train_step",
                    "subcategory": "Image generation",
                    "keras_3": True,
                },
                {
                    "path": "conditional_gan",
                    "title": "Conditional GAN",
                    "subcategory": "Image generation",
                    "keras_3": True,
                },
                {
                    "path": "cyclegan",
                    "title": "CycleGAN",
                    "subcategory": "Image generation",
                    "keras_3": True,
                },
                {
                    "path": "gan_ada",
                    "title": "Data-efficient GANs with Adaptive Discriminator Augmentation",
                    "subcategory": "Image generation",
                    "keras_3": True,
                },
                {
                    "path": "deep_dream",
                    "title": "Deep Dream",
                    "subcategory": "Image generation",
                    "keras_3": True,
                },
                {
                    "path": "gaugan",
                    "title": "GauGAN for conditional image generation",
                    "subcategory": "Image generation",
                    "keras_3": True,
                },
                {
                    "path": "pixelcnn",
                    "title": "PixelCNN",
                    "subcategory": "Image generation",
                    "keras_3": True,
                },
                {
                    "path": "stylegan",
                    "title": "Face image generation with StyleGAN",
                    "subcategory": "Image generation",
                },
                {
                    "path": "vq_vae",
                    "title": "Vector-Quantized Variational Autoencoders",
                    "subcategory": "Image generation",
                },
                {
                    "path": "random_walks_with_stable_diffusion",
                    "title": "A walk through latent space with Stable Diffusion",
                    "subcategory": "Image generation",
                    "keras_3": True,
                },
                # Style transfer
                {
                    "path": "neural_style_transfer",
                    "title": "Neural style transfer",
                    "subcategory": "Style transfer",
                    "keras_3": True,
                },
                {
                    "path": "adain",
                    "title": "Neural Style Transfer with AdaIN",
                    "subcategory": "Style transfer",
                },
                # Text generation
                {
                    "path": "gpt2_text_generation_with_keras_hub",
                    "title": "GPT2 Text Generation with KerasHub",
                    "subcategory": "Text generation",
                    "highlight": True,
                    "keras_3": True,
                },
                {
                    "path": "text_generation_gpt",
                    "title": "GPT text generation from scratch with KerasHub",
                    "subcategory": "Text generation",
                    "keras_3": True,
                },
                {
                    "path": "text_generation_with_miniature_gpt",
                    "title": "Text generation with a miniature GPT",
                    "subcategory": "Text generation",
                    "keras_3": True,
                },
                {
                    "path": "lstm_character_level_text_generation",
                    "title": "Character-level text generation with LSTM",
                    "subcategory": "Text generation",
                    "keras_3": True,
                },
                {
                    "path": "text_generation_fnet",
                    "title": "Text Generation using FNet",
                    "subcategory": "Text generation",
                },
                # Audio / midi generation
                {
                    "path": "midi_generation_with_transformer",
                    "title": "Music Generation with Transformer Models",
                    "subcategory": "Audio generation",
                    "keras_3": True,
                },
                # Graph generation
                {
                    "path": "molecule_generation",
                    "title": "Drug Molecule Generation with VAE",
                    "subcategory": "Graph generation",
                    "keras_3": True,
                },
                {
                    "path": "wgan-graphs",
                    "title": "WGAN-GP with R-GCN for the generation of small molecular graphs",
                    "subcategory": "Graph generation",
                },
            ],
        },
        {
            "path": "audio/",
            "title": "Audio Data",
            "toc": True,
            "children": [
                {
                    "path": "vocal_track_separation",
                    "title": "Vocal Track Separation with Encoder-Decoder Architecture",
                    "subcategory": "Vocal track separation",
                    "keras_3": True,
                },
                {
                    "path": "transformer_asr",
                    "title": "Automatic Speech Recognition with Transformer",
                    "subcategory": "Speech recognition",
                    "keras_3": True,
                },
                {
                    "path": "stft",
                    "title": "Audio Classification with the STFTSpectrogram layer",
                    "subcategory": "Audio classification",
                    "keras_3": True,
                },
                # Rest will be autogenerated
            ],
        },
        {
            "path": "rl/",
            "title": "Reinforcement Learning",
            "toc": True,
            "children": [
                {
                    "path": "actor_critic_cartpole",
                    "title": "Actor Critic Method",
                    "subcategory": "RL algorithms",
                    "keras_3": True,
                },
                {
                    "path": "ppo_cartpole",
                    "title": "Proximal Policy Optimization",
                    "subcategory": "RL algorithms",
                    "keras_3": True,
                },
                {
                    "path": "deep_q_network_breakout",
                    "title": "Deep Q-Learning for Atari Breakout",
                    "subcategory": "RL algorithms",
                    "keras_3": True,
                },
                {
                    "path": "ddpg_pendulum",
                    "title": "Deep Deterministic Policy Gradient (DDPG)",
                    "subcategory": "RL algorithms",
                    "keras_3": True,
                },
                # Rest will be autogenerated
            ],
        },
        {
            "path": "graph/",
            "title": "Graph Data",
            "toc": True,
            "children": [
                # Will be autogenerated
            ],
        },
        {
            "path": "keras_recipes/",
            "title": "Quick Keras Recipes",
            "toc": True,
            "children": [
                # Keras usage tips
                {
                    "path": "parameter_efficient_finetuning_of_gemma_with_lora_and_qlora",
                    "title": "Parameter-efficient fine-tuning of Gemma with LoRA and QLoRA",
                    "subcategory": "Keras usage tips",
                    "keras_3": True,
                },
                {
                    "path": "float8_training_and_inference_with_transformer",
                    "title": "Float8 training and inference with a simple Transformer model",
                    "subcategory": "Keras usage tips",
                    "keras_3": True,
                },
                {
                    "path": "debugging_tips",
                    "title": "Keras debugging tips",
                    "subcategory": "Keras usage tips",
                    "keras_3": True,
                },
                {
                    "path": "subclassing_conv_layers",
                    "title": "Customizing the convolution operation of a Conv2D layer",
                    "subcategory": "Keras usage tips",
                    "keras_3": True,
                },
                {
                    "path": "trainer_pattern",
                    "title": "Trainer pattern",
                    "subcategory": "Keras usage tips",
                    "keras_3": True,
                },
                {
                    "path": "endpoint_layer_pattern",
                    "title": "Endpoint layer pattern",
                    "subcategory": "Keras usage tips",
                    "keras_3": True,
                },
                {
                    "path": "reproducibility_recipes",
                    "title": "Reproducibility in Keras Models",
                    "subcategory": "Keras usage tips",
                    "keras_3": True,
                },
                {
                    "path": "tensorflow_numpy_models",
                    "title": "Writing Keras Models With TensorFlow NumPy",
                    "subcategory": "Keras usage tips",
                    "keras_3": True,
                },
                {
                    "path": "antirectifier",
                    "title": "Simple custom layer example: Antirectifier",
                    "subcategory": "Keras usage tips",
                    "keras_3": True,
                },
                {
                    "path": "packaging_keras_models_for_wide_distribution",
                    "title": "Packaging Keras models for wide distribution using Functional Subclassing",
                    "subcategory": "Keras usage tips",
                    "keras_3": True,
                },
                {
                    "path": "approximating_non_function_mappings",
                    "title": "Approximating non-Function Mappings with Mixture Density Networks",
                    "subcategory": "Keras usage tips",
                    "keras_3": True,
                },
                # Serving
                {
                    "path": "tf_serving",
                    "title": "Serving TensorFlow models with TFServing",
                    "subcategory": "Serving",
                    "keras_3": True,
                },
                # ML best practices
                {
                    "path": "sample_size_estimate",
                    "title": "Estimating required sample size for model training",
                    "subcategory": "ML best practices",
                    "keras_3": True,
                },
                {
                    "path": "memory_efficient_embeddings",
                    "title": "Memory-efficient embeddings for recommendation systems",
                    "subcategory": "ML best practices",
                    "keras_3": True,
                },
                {
                    "path": "creating_tfrecords",
                    "title": "Creating TFRecords",
                    "subcategory": "ML best practices",
                    "keras_3": True,
                },
                # Rest will be autogenerated
            ],
        },
    ],
}