###
Jupyter Backend

For interactive testing:

$ source smc-env
$ coffee
coffee> j = require('./smc-project/jupyter/jupyter')
coffee> k = j.kernel(name:'python3', path:'x.ipynb')
coffee> k.execute_code(all:true, cb:((x) -> console.log(JSON.stringify(x))), code:'2+3')
###

{EventEmitter} = require('events')
async = require('async')
kernelspecs = require('kernelspecs')
fs = require('fs')
misc = require('smc-util/misc')
{defaults, required} = misc
{key_value_store} = require('smc-util/key-value-store')
misc_node = require('smc-util-node/misc_node')
{blob_store} = require('./jupyter-blobs')
node_cleanup = require('node-cleanup')
util = require('smc-webapp/jupyter/util')
iframe = require('smc-webapp/jupyter/iframe')
{remove_redundant_reps} = require('smc-webapp/jupyter/import-from-ipynb')
nbconvert = require('./nbconvert')

###
We set a few extra user-specific options for the environment in which
Sage-based Jupyter kernels run; these are more multi-user friendly.
###
SAGE_JUPYTER_ENV = misc.merge misc.copy(process.env),
    "PYTHONUSERBASE"   : "#{process.env.HOME}/.local"
    "PYTHON_EGG_CACHE" : "#{process.env.HOME}/.sage/.python-eggs"
    "R_MAKEVARS_USER"  : "#{process.env.HOME}/.sage/R/Makevars.user"

exports.jupyter_backend = (syncdb, client) ->
    dbg = client.dbg("jupyter_backend")
    dbg()
    {JupyterActions} = require('smc-webapp/jupyter/project-actions')
    {JupyterStore}   = require('smc-webapp/jupyter/store')
    smc_react        = require('smc-webapp/smc-react')

    project_id = client.client_id()

    # This path is the file we will watch for changes and save to, which is in the
    # original official ipynb format:
    path = misc.original_path(syncdb._path)

    redux_name = smc_react.redux_name(project_id, path)
    actions    = new JupyterActions(redux_name, smc_react.redux)
    store      = new JupyterStore(redux_name, smc_react.redux)

    actions._init(project_id, path, syncdb, store, client)

    syncdb.once 'init', (err) ->
        dbg("syncdb init complete -- #{err}")

# for interactive testing
class Client
    dbg: (f) ->
        return (m...) -> console.log("Client.#{f}: ", m...)

exports.kernel = (opts) ->
    opts = defaults opts,
        name    : required    # name of the kernel as a string
        client  : undefined
        verbose : true
        path    : required    # filename of the ipynb corresponding to this kernel (does not have to actually exist)
        actions : undefined   # optional redux actions object
    if not opts.client?
        opts.client = new Client()
    return new Kernel(opts.name, (if opts.verbose then opts.client?.dbg), opts.path, opts.actions)

###
Jupyter Kernel interface.

The kernel does *NOT* start up until either spawn is explicitly called, or
code execution is explicitly requested. This makes it possible to
call process_output without spawning an actual kernel.
###
_jupyter_kernels = {}

# Ensure all kernels get properly closed when this process exits.
node_cleanup =>
    for id, kernel of _jupyter_kernels
        kernel.close()

logger = undefined
class Kernel extends EventEmitter
    constructor: (@name, @_dbg, @_path, @_actions) ->
        @store = key_value_store()
        {head, tail} = misc.path_split(@_path)
        @_directory = head
        @_filename  = tail
        @_set_state('off')
        @_identity = misc.uuid()
        @_start_time = new Date() - 0
        _jupyter_kernels[@_path] = @
        dbg = @dbg('constructor')
        dbg()
        logger = @dbg
        process.on('exit', @close)
        @setMaxListeners(100)

    _set_state: (state) =>
        # state = 'off' --> 'spawning' --> 'starting' --> 'running' --> 'closed'
        @_state = state
        @emit('state', @_state)
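
    # Since Kernel extends EventEmitter, callers can watch the lifecycle via the
    # 'state' event, e.g. (a sketch, using the interactive setup from the comment
    # at the top of this file):
    #
    #   k = j.kernel(name: 'python3', path: 'x.ipynb')
    #   k.on('state', (state) -> console.log("kernel state is now '#{state}'"))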

    spawn: (cb) =>
        dbg = @dbg('spawn')
        if @_state == 'closed'
            cb?('closed')
            return
        if @_state in ['running', 'starting']
            cb?()
            return
        if @_state == 'spawning'
            @_spawn_cbs.push(cb)
            return
        @_spawn_cbs = [cb]
        @_set_state('spawning')
        dbg('spawning kernel...')
        success = (kernel) =>
            dbg("spawned kernel; now creating comm channels...")
            kernel.spawn.on 'error', (err) =>
                dbg("kernel spawn error", err)
                @emit("spawn_error", err)

            @_kernel = kernel
            @_channels = require('enchannel-zmq-backend').createChannels(@_identity, @_kernel.config)

            @_channels.shell.subscribe((mesg) => @emit('shell', mesg))

            @_channels.stdin.subscribe((mesg) => @emit('stdin', mesg))

            @_channels.iopub.subscribe (mesg) =>
                if mesg.content?.execution_state?
                    @emit('execution_state', mesg.content?.execution_state)
                @emit('iopub', mesg)

            @once 'iopub', (m) =>
                # The first iopub message from the kernel means it has started running.
                dbg("iopub: #{misc.to_json(m)}")
                # We still wait a few ms, since otherwise -- especially in testing --
                # the kernel will bizarrely just ignore the first input.
                # TODO: I think this is a **massive bug** in Jupyter (or spawnteract or ZMQ)...
                f = =>
                    @_set_state('running')
                    for cb in @_spawn_cbs
                        cb?()
                setTimeout(f, 100)

            kernel.spawn.on('close', @close)

            @_set_state('starting')  # so we can send code execution to the kernel, etc.

            # Very ugly! In practice, with testing, I've found that some kernels simply
            # don't start immediately, and drop early messages. The only reliable way to
            # get things going properly is to just keep trying something (we use the
            # kernel_info command) until it works. Only then do we declare the kernel
            # ready for code execution, etc. Probably the Jupyter devs never noticed this
            # race condition bug in ZMQ/Jupyter kernels... or maybe the Python server has
            # a sort of accidental workaround.
            misc.retry_until_success
                start_delay : 500
                max_delay   : 5000
                factor      : 1.4
                max_time    : 45000
                f           : (cb) =>
                    @kernel_info(cb: =>)
                    cb(@_state == 'starting')

        fail = (err) =>
            @_set_state('off')
            err = "#{err}"
            for cb in @_spawn_cbs
                cb?(err)
        opts = {detached: true, stdio: 'ignore'}
        if @name.indexOf('sage') != -1
            # special environment for sage-based kernels
            opts.env = SAGE_JUPYTER_ENV
        if @_directory != ''
            opts.cwd = @_directory
        require('spawnteract').launch(@name, opts).then(success, fail)
        return

    signal: (signal) =>
        dbg = @dbg("signal")
        pid = @_kernel?.spawn?.pid
        dbg("pid=#{pid}, signal=#{signal}")
        if pid
            try
                @_clear_execute_code_queue()
                process.kill(-pid, signal)   # negative to kill the process group
            catch err
                dbg("error: #{err}")

    close: =>
        @dbg("close")()
        if @_state == 'closed'
            return
        @store.close(); delete @store
        @_set_state('closed')
        if _jupyter_kernels[@_path]?._identity == @_identity
            delete _jupyter_kernels[@_path]
        @removeAllListeners()
        process.removeListener('exit', @close)
        if @_kernel?
            @_kernel.spawn?.removeAllListeners()
            @signal('SIGKILL')   # kill the process group
            try
                fs.unlink(@_kernel.connectionFile)
            catch err
                # ignore
            delete @_kernel
            delete @_channels
        if @_execute_code_queue?
            for opts in @_execute_code_queue
                opts.cb?('closed')
            delete @_execute_code_queue
        delete @_kernel_info

        if @_kernel_info_cbs?
            for cb in @_kernel_info_cbs
                cb('closed')
            delete @_kernel_info_cbs

    dbg: (f) =>
        if not @_dbg?
            return ->
        else
            return @_dbg("jupyter.Kernel('#{@name}',path='#{@_path}').#{f}")

    _low_level_dbg: =>
        # for low-level debugging only...
        f = (channel) =>
            @_channels[channel].subscribe (mesg) => console.log(channel, mesg)
        for channel in ['shell', 'iopub', 'control', 'stdin']
            f(channel)

    _ensure_running: (cb) =>
        if @_state == 'closed'
            cb("closed")
            return
        if @_state != 'running'
            @spawn(cb)
        else
            cb()
        return

    execute_code: (opts) =>
        opts = defaults opts,
            code          : required
            id            : undefined  # optional tag to be used by cancel_execute
            all           : false      # if all=true, cb(undefined, [all output messages]); used mainly for testing.
            stdin         : undefined  # if given, support stdin prompting; this function will be called
                                       # as `stdin(options, cb)`, and must then do cb(undefined, 'user input')
                                       # Here, e.g., options = {password: false, prompt: ''}.
            halt_on_error : true       # clear the execution queue if the shell returns status:'error', e.g., on a traceback
            cb            : undefined  # if all=false, this is called **repeatedly**: cb(undefined, output message)
        if @_state == 'closed'
            opts.cb?("closed")
            return
        @_execute_code_queue ?= []
        @_execute_code_queue.push(opts)
        if @_execute_code_queue.length == 1
            @_process_execute_code_queue()
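
    # For example, stdin prompting can be exercised from the interactive `coffee`
    # session shown at the top of this file (a sketch; the options passed to the
    # stdin function depend on the kernel's prompt):
    #
    #   k.execute_code
    #       code  : 'print(input("x? "))'
    #       stdin : (options, cb) -> cb(undefined, 'some input')
    #       cb    : (err, mesg) -> console.log(err, JSON.stringify(mesg))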

    cancel_execute: (opts) =>
        opts = defaults opts,
            id : required
        if @_state == 'closed'
            return
        dbg = @dbg("cancel_execute(id='#{opts.id}')")
        if not @_execute_code_queue? or @_execute_code_queue.length == 0
            dbg("nothing to do")
            return
        if @_execute_code_queue.length > 1
            dbg("mutate @_execute_code_queue removing everything with the given id")
            for i in [@_execute_code_queue.length - 1 .. 1]
                o = @_execute_code_queue[i]
                if o.id == opts.id
                    dbg("removing entry #{i} from queue")
                    @_execute_code_queue.splice(i, 1)
                    o.cb?("cancelled")
        # If the currently running computation involves this id, send an
        # interrupt signal (that's the best we can do).
        if @_execute_code_queue[0].id == opts.id
            dbg("interrupting running computation")
            @signal("SIGINT")
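
    # For example, tagging an execution with an id makes it cancellable later
    # (a sketch; 'my-id' is an arbitrary tag):
    #
    #   k.execute_code(id: 'my-id', code: 'import time; time.sleep(60)', cb: ->)
    #   k.cancel_execute(id: 'my-id')   # interrupts it, since it is currently running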

    _process_execute_code_queue: =>
        dbg = @dbg("_process_execute_code_queue")
        dbg("state='#{@_state}'")
        if @_state == 'closed'
            dbg("closed")
            return
        if not @_execute_code_queue?
            dbg("no queue")
            return
        n = @_execute_code_queue.length
        if n == 0
            dbg("queue is empty")
            return
        dbg("queue has #{n} items; ensure kernel running")
        @_ensure_running (err) =>
            if err
                dbg("error running kernel -- #{err}")
                for opts in @_execute_code_queue
                    opts.cb?(err)
                @_execute_code_queue = []
            else
                dbg("now executing oldest item in queue")
                @_execute_code(@_execute_code_queue[0])
        return

    _clear_execute_code_queue: =>
        # Ensure that no future queued-up evaluation occurs; the currently running
        # computation will still complete, and new executions may be requested later.
        if @_state == 'closed'
            return
        if not @_execute_code_queue?
            return
        mesg = {done: true}
        for opts in @_execute_code_queue.slice(1)
            if opts.all
                opts.cb?(undefined, [mesg])
            else
                opts.cb?(undefined, mesg)
        @_execute_code_queue = []

    _execute_code: (opts) =>
        opts = defaults opts,
            code          : required
            id            : undefined  # optional tag that can be used as input to cancel_execute.
            all           : false      # if all=true, cb(undefined, [all output messages]);
                                       # used mainly for testing.
            stdin         : undefined
            halt_on_error : true       # clear the execution queue if there is an error.
            cb            : required   # if all=false, this is called **repeatedly**: cb(undefined, output message)
        dbg = @dbg("_execute_code('#{misc.trunc(opts.code, 15)}')")
        dbg("code='#{opts.code}', all=#{opts.all}")
        if @_state == 'closed'
            opts.cb?("closed")
            return

        message =
            header:
                msg_id   : "execute_#{misc.uuid()}"
                username : ''
                session  : ''
                msg_type : 'execute_request'
                version  : '5.0'
            content:
                code             : opts.code
                silent           : false
                store_history    : true   # so execution_count gets updated.
                user_expressions : {}
                allow_stdin      : opts.stdin?

        # set up handling of the results
        if opts.all
            all_mesgs = []

        f = g = h = shell_done = iopub_done = undefined

        push_mesg = (mesg) =>
            # TODO: mesg isn't a normal javascript object; it's **silently** immutable,
            # which is pretty annoying for our use. For now, we just copy it, which is a waste.
            msg_type = mesg.header?.msg_type
            mesg = misc.copy_with(mesg, ['metadata', 'content', 'buffers', 'done'])
            mesg = misc.deep_copy(mesg)
            mesg.msg_type = msg_type
            if opts.all
                all_mesgs.push(mesg)
            else
                opts.cb?(undefined, mesg)

        if opts.stdin?
            g = (mesg) =>
                dbg("got STDIN message -- #{JSON.stringify(mesg)}")
                if mesg.parent_header.msg_id != message.header.msg_id
                    return

                opts.stdin mesg.content, (err, response) =>
                    if err
                        response = "ERROR -- #{err}"
                    m =
                        header:
                            msg_id   : message.header.msg_id
                            username : ''
                            session  : ''
                            msg_type : 'input_reply'
                            version  : '5.0'
                        content:
                            value: response
                    @_channels.stdin.next(m)

            @on('stdin', g)

        h = (mesg) =>
            if mesg.parent_header.msg_id != message.header.msg_id
                return
            dbg("got SHELL message -- #{JSON.stringify(mesg)}")
            if mesg.content?.status == 'error'
                if opts.halt_on_error
                    @_clear_execute_code_queue()
                # Just bail; the actual error would have been reported on the iopub channel, hopefully.
                finish?()
            else
                push_mesg(mesg)
                shell_done = true
                if iopub_done and shell_done
                    finish?()

        @on('shell', h)

        f = (mesg) =>
            if mesg.parent_header.msg_id != message.header.msg_id
                return
            dbg("got IOPUB message -- #{JSON.stringify(mesg)}")

            # Check this before giving opts.cb the chance to mutate the message.
            iopub_done = mesg.content?.execution_state == 'idle'

            push_mesg(mesg)

            if iopub_done and shell_done
                finish?()

        @on('iopub', f)

        finish = () =>
            if f?
                @removeListener('iopub', f)
            if g?
                @removeListener('stdin', g)
            if h?
                @removeListener('shell', h)
            @_execute_code_queue.shift()    # finished
            @_process_execute_code_queue()  # start next exec
            push_mesg({done: true})
            if opts.all
                opts.cb?(undefined, all_mesgs)
            delete opts.cb  # avoid memory leaks
            finish = undefined

        dbg("send the message")
        @_channels.shell.next(message)

    process_output: (content) =>
        if @_state == 'closed'
            return
        dbg = @dbg("process_output")
        dbg(JSON.stringify(content))
        if not content.data?
            # TODO: for now -- later we may remove large stdout, stderr, etc...
            dbg("no data, so nothing to do")
            return

        remove_redundant_reps(content.data)

        for type in util.JUPYTER_MIMETYPES
            if content.data[type]?
                if type.split('/')[0] == 'image' or type == 'application/pdf'
                    content.data[type] = blob_store.save(content.data[type], type)
                else if type == 'text/html' and iframe.is_likely_iframe(content.data[type])
                    # Likely an iframe, so we treat it as such. This is very important, e.g.,
                    # because of Sage's JMOL-based 3d graphics. These are huge, so we have to
                    # parse them out and serve them from the backend.
                    # {iframe: sha1 of srcdoc}
                    content.data['iframe'] = iframe.process(content.data[type], blob_store)
                    delete content.data[type]

    # Returns a reference to the blob store.
    get_blob_store: =>
        return blob_store

    # Returns information about all available kernels.
    get_kernel_data: (cb) =>   # cb(err, kernel_data) -- see below.
        get_kernel_data(cb)

    call: (opts) =>
        opts = defaults opts,
            msg_type : required
            content  : {}
            cb       : required
        @_ensure_running (err) =>
            if err
                opts.cb(err)
            else
                @_call(opts)

    _call: (opts) =>
        message =
            header:
                msg_id   : misc.uuid()
                username : ''
                session  : ''
                msg_type : opts.msg_type
                version  : '5.0'
            content: opts.content

        # Set up handling of the result: wait for the shell reply to this
        # message, then call opts.cb exactly once with its content.
        f = (mesg) =>
            if mesg.parent_header.msg_id == message.header.msg_id
                @removeListener('shell', f)
                mesg = misc.deep_copy(mesg.content)
                if misc.len(mesg.metadata) == 0
                    delete mesg.metadata
                opts.cb(undefined, mesg)
        @on('shell', f)
        @_channels.shell.next(message)

    complete: (opts) =>
        opts = defaults opts,
            code       : required
            cursor_pos : required
            cb         : required
        dbg = @dbg("complete")
        dbg("code='#{opts.code}', cursor_pos='#{opts.cursor_pos}'")
        @call
            msg_type : 'complete_request'
            content:
                code       : opts.code
                cursor_pos : opts.cursor_pos
            cb : opts.cb

    introspect: (opts) =>
        opts = defaults opts,
            code         : required
            cursor_pos   : required
            detail_level : required
            cb           : required
        dbg = @dbg("introspect")
        dbg("code='#{opts.code}', cursor_pos='#{opts.cursor_pos}', detail_level=#{opts.detail_level}")
        @call
            msg_type : 'inspect_request'
            content:
                code         : opts.code
                cursor_pos   : opts.cursor_pos
                detail_level : opts.detail_level
            cb : opts.cb
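
    # For example (a sketch; the reply shape follows the Jupyter messaging spec,
    # so completions arrive in mesg.matches):
    #
    #   k.complete(code: 'imp', cursor_pos: 3, cb: (err, mesg) -> console.log(err, mesg?.matches))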

    kernel_info: (opts) =>
        opts = defaults opts,
            cb : required
        if @_kernel_info?
            opts.cb(undefined, @_kernel_info)
            return
        if @_kernel_info_cbs?
            @_kernel_info_cbs.push(opts.cb)
            return
        @_kernel_info_cbs = [opts.cb]
        @call
            msg_type : 'kernel_info_request'
            cb       : (err, info) =>
                if not err
                    info.nodejs_version = process.version
                    info.start_time = @_actions?.store.get('start_time')
                    @_kernel_info = info
                for cb in @_kernel_info_cbs
                    cb(err, info)
                delete @_kernel_info_cbs

    more_output: (opts) =>
        opts = defaults opts,
            id : undefined
            cb : required
        if not opts.id?
            opts.cb("must specify id")
            return
        if not @_actions?
            opts.cb("must have redux actions")
            return
        opts.cb(undefined, @_actions?.store.get_more_output(opts.id) ? [])

    nbconvert: (opts) =>
        opts = defaults opts,
            args    : required
            timeout : 30   # seconds
            cb      : required
        if @_nbconvert_lock
            opts.cb("lock")
            return
        if not misc.is_array(opts.args)
            opts.cb("args must be an array")
            return
        @_nbconvert_lock = true
        args = misc.copy(opts.args)
        args.push(@_filename)
        nbconvert.nbconvert
            args      : args
            timeout   : opts.timeout
            directory : @_directory
            cb        : (err) =>
                delete @_nbconvert_lock
                opts.cb(err)
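
    # For example, exporting this notebook to HTML (a sketch; any nbconvert
    # command-line args work, and the notebook filename is appended automatically):
    #
    #   k.nbconvert(args: ['--to', 'html'], cb: (err) -> console.log(err ? 'done'))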

    load_attachment: (opts) =>
        opts = defaults opts,
            path : required
            cb   : required
        dbg = @dbg("load_attachment")
        dbg("path='#{opts.path}'")
        if opts.path[0] != '/'
            opts.path = process.env.HOME + '/' + opts.path
        sha1 = undefined
        misc.retry_until_success
            f : (cb) =>
                blob_store.readFile opts.path, 'base64', (err, _sha1) =>
                    sha1 = _sha1
                    cb(err)
            max_time : 30000
            cb : (err) =>
                fs.unlink(opts.path)
                opts.cb(err, sha1)

    process_attachment: (base64, mime) =>
        return blob_store.save(base64, mime)

    http_server: (opts) =>
        opts = defaults opts,
            segments : required
            query    : required
            cb       : required

        dbg = @dbg("http_server")
        dbg(opts.segments.join('/'))
        switch opts.segments[0]

            when 'signal'
                @signal(opts.segments[1])
                opts.cb(undefined, {})

            when 'kernel_info'
                @kernel_info(cb: opts.cb)

            when 'more_output'
                @more_output
                    id : opts.query.id
                    cb : opts.cb

            when 'complete'
                code = opts.query.code
                if not code
                    opts.cb('must specify code to complete')
                    return
                if opts.query.cursor_pos?
                    # parseInt returns NaN on bad input (it does not throw), so guard with isNaN.
                    cursor_pos = parseInt(opts.query.cursor_pos)
                    if isNaN(cursor_pos)
                        cursor_pos = code.length
                else
                    cursor_pos = code.length
                @complete
                    code       : opts.query.code
                    cursor_pos : cursor_pos
                    cb         : opts.cb

            when 'introspect'
                code = opts.query.code
                if not code?
                    opts.cb('must specify code to introspect')
                    return
                if opts.query.cursor_pos?
                    cursor_pos = parseInt(opts.query.cursor_pos)
                    if isNaN(cursor_pos)
                        cursor_pos = code.length
                else
                    cursor_pos = code.length
                if opts.query.level?
                    level = parseInt(opts.query.level)
                    if isNaN(level) or level < 0 or level > 1
                        level = 0
                else
                    level = 0
                @introspect
                    code         : opts.query.code
                    cursor_pos   : cursor_pos
                    detail_level : level
                    cb           : opts.cb

            when 'store'
                try
                    if opts.query.key?
                        key = JSON.parse(opts.query.key)
                    else
                        key = undefined
                    if opts.query.value?
                        value = JSON.parse(opts.query.value)
                    else
                        value = undefined
                catch err
                    opts.cb(err)
                    return
                if value == undefined
                    # no value given at all -- this is a get
                    opts.cb(undefined, @store.get(key))
                else if value == null
                    # JSON null means delete the key
                    @store.delete(key)
                    opts.cb()
                else
                    @store.set(key, value)
                    opts.cb()

            else
                opts.cb("no route '#{opts.segments.join('/')}'")
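
    # With the routes installed by jupyter_router below, HTTP requests like these
    # end up in the http_server method above (a sketch of actual usage):
    #
    #   GET /.smc/jupyter/kernels/kernel_info?path=x.ipynb
    #   GET /.smc/jupyter/kernels/complete?path=x.ipynb&code=imp&cursor_pos=3
    #   GET /.smc/jupyter/kernels/signal/SIGINT?path=x.ipynb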

_kernel_data = undefined

exports.get_kernel_data = get_kernel_data = (cb) ->
    if _kernel_data?
        cb(undefined, _kernel_data)
        return

    fail = (err) =>
        cb(err ? 'fail')

    success = (kernelspecs) =>
        _kernel_data = {kernelspecs: kernelspecs}
        v = []
        for kernel, value of _kernel_data.kernelspecs
            v.push
                name         : kernel
                display_name : value.spec.display_name
                language     : value.spec.language
        v.sort(misc.field_cmp('display_name'))
        _kernel_data.jupyter_kernels = v
        _kernel_data.jupyter_kernels_json = JSON.stringify(_kernel_data.jupyter_kernels)
        # Cache, but only for a few seconds, so many requests near each other are fast.
        setTimeout((-> _kernel_data = undefined), 5000)
        cb(undefined, _kernel_data)

    # Now do it -- this takes only a few ms.
    kernelspecs.findAll().then(success, fail)
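
# The resulting kernel_data object has roughly this shape (a sketch; the exact
# fields of each spec come from the kernelspecs package):
#
#   kernelspecs          : {python3: {resources_dir: '...', spec: {display_name: 'Python 3', language: 'python', ...}}, ...}
#   jupyter_kernels      : [{name: 'python3', display_name: 'Python 3', language: 'python'}, ...]
#   jupyter_kernels_json : JSON.stringify(jupyter_kernels)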

jupyter_kernel_info_handler = (base, router) ->

    router.get base + 'kernels.json', (req, res) ->
        get_kernel_data (err, kernel_data) ->
            if err
                res.send(err)   # TODO: set some error code
            else
                res.send(kernel_data.jupyter_kernels_json)

    router.get base + 'kernelspecs/*', (req, res) ->
        get_kernel_data (err, kernel_data) ->
            if err
                res.send(err)   # TODO: set some error code
            else
                path = req.path.slice((base + 'kernelspecs/').length).trim()
                if path.length == 0
                    res.send(kernel_data.jupyter_kernels_json)
                    return
                segments = path.split('/')
                name = segments[0]
                kernel = kernel_data.kernelspecs[name]
                if not kernel?
                    res.send("no such kernel '#{name}'")   # TODO: error code?
                    return
                # kernelspecs incorrectly calls it resources_dir instead of resource_dir.
                # See https://github.com/nteract/kernelspecs/issues/25
                resource_dir = kernel.resource_dir ? kernel.resources_dir
                path = require('path').join(resource_dir, segments.slice(1).join('/'))
                path = require('path').resolve(path)
                if not misc.startswith(path, resource_dir)
                    # Don't let the user use .. or something to get any file on the server...!
                    # (This really can't happen due to url rules already; just being super paranoid.)
                    res.send("suspicious path '#{path}'")
                else
                    fs.exists path, (exists) ->
                        if not exists
                            res.send("no such path '#{path}'")
                        else
                            res.sendFile(path)
    return router

jupyter_kernel_http_server = (base, router) ->

    router.get base + 'kernels/*', (req, res) ->
        path = req.path.slice((base + 'kernels/').length).trim()
        if path.length == 0
            # No specific route -- send info about all known kernels.
            get_kernel_data (err, kernel_data) ->
                if err
                    res.send(err)   # TODO: set some error code
                else
                    res.send(kernel_data.jupyter_kernels_json)
            return
        segments = path.split('/')
        path = req.query.path
        kernel = _jupyter_kernels[path]
        if not kernel?
            res.send(JSON.stringify({error: "no kernel with path '#{path}'"}))
            return
        kernel.http_server
            segments : segments
            query    : req.query
            cb       : (err, resp) ->
                if err
                    res.send(JSON.stringify({error: err}))
                else
                    res.send(JSON.stringify(resp ? {}))

    return router

exports.jupyter_router = (express) ->
    base = '/.smc/jupyter/'

    # Install handling for the blob store
    router = blob_store.express_router(base, express)

    # Handler for Jupyter kernel info
    router = jupyter_kernel_info_handler(base, router)

    # Handler for http messages for **specific kernels**
    router = jupyter_kernel_http_server(base, router)

    return router
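
# For example, an express app elsewhere in the project could mount this with
# something like the following (a sketch; the actual server wiring differs):
#
#   express = require('express')
#   app     = express()
#   app.use(exports.jupyter_router(express))
#   app.listen(8080)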