Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place. Commercial Alternative to JupyterHub.
Path: blob/master/src/packages/jupyter/redux/project-actions.ts
Views: 923
/*1* This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.2* License: MS-RSL – see LICENSE.md for details3*/45/*6project-actions: additional actions that are only available in the7backend/project, which "manages" everything.89This code should not *explicitly* require anything that is only10available in the project or requires node to run, so that we can11fully unit test it via mocking of components.1213NOTE: this is also now the actions used by remote compute servers as well.14*/1516import { get_kernel_data } from "@cocalc/jupyter/kernel/kernel-data";17import * as immutable from "immutable";18import json_stable from "json-stable-stringify";19import { debounce } from "lodash";20import { JupyterActions as JupyterActions0 } from "@cocalc/jupyter/redux/actions";21import { callback2, once } from "@cocalc/util/async-utils";22import * as misc from "@cocalc/util/misc";23import { OutputHandler } from "@cocalc/jupyter/execute/output-handler";24import { RunAllLoop } from "./run-all-loop";25import nbconvertChange from "./handle-nbconvert-change";26import type { ClientFs } from "@cocalc/sync/client/types";27import { kernel as createJupyterKernel } from "@cocalc/jupyter/kernel";28import {29decodeUUIDtoNum,30isEncodedNumUUID,31} from "@cocalc/util/compute/manager";32import { handleApiRequest } from "@cocalc/jupyter/kernel/websocket-api";33import { callback } from "awaiting";34import { get_blob_store } from "@cocalc/jupyter/blobs";35import { removeJupyterRedux } from "@cocalc/jupyter/kernel";3637// see https://github.com/sagemathinc/cocalc/issues/806038const MAX_OUTPUT_SAVE_DELAY = 30000;3940type BackendState = "init" | "ready" | "spawning" | "starting" | "running";4142export class JupyterActions extends JupyterActions0 {43private _backend_state: BackendState = "init";44private _initialize_manager_already_done: any;45private _kernel_state: any;46private _manager_run_cell_queue: any;47private _running_cells: { [id: string]: string };48private 
_throttled_ensure_positions_are_unique: any;49private run_all_loop?: RunAllLoop;50private clear_kernel_error?: any;51private running_manager_run_cell_process_queue: boolean = false;52private last_ipynb_save: number = 0;53protected _client: ClientFs; // this has filesystem access, etc.5455public run_cell(56id: string,57save: boolean = true,58no_halt: boolean = false,59): void {60if (this.store.get("read_only")) return;61const cell = this.store.getIn(["cells", id]);62if (cell == null) {63// it is trivial to run a cell that does not exist -- nothing needs to be done.64return;65}66const cell_type = cell.get("cell_type", "code");67if (cell_type == "code") {68// when the backend is running code, just don't worry about69// trying to parse things like "foo?" out. We can't do70// it without CodeMirror, and it isn't worth it for that71// application.72this.run_code_cell(id, save, no_halt);73}74if (save) {75this.save_asap();76}77}7879private set_backend_state(backend_state: BackendState): void {80this.dbg("set_backend_state")(backend_state);8182/*83The backend states, which are put in the syncdb so clients84can display this:8586- 'init' -- the backend is checking the file on disk, etc.87- 'ready' -- the backend is setup and ready to use; kernel isn't running though88- 'starting' -- the kernel itself is actived and currently starting up (e.g., Sage is starting up)89- 'running' -- the kernel is running and ready to evaluate code909192'init' --> 'ready' --> 'spawning' --> 'starting' --> 'running'93/|\ |94|-----------------------------------------|9596Going from ready to starting happens first when a code execution is requested.97*/9899// Check just in case Typescript doesn't catch something:100if (101["init", "ready", "spawning", "starting", "running"].indexOf(102backend_state,103) === -1104) {105throw Error(`invalid backend state '${backend_state}'`);106}107if (backend_state == "init" && this._backend_state != "init") {108// Do NOT allow changing the state to init from any 
other state.109throw Error(110`illegal state change '${this._backend_state}' --> '${backend_state}'`,111);112}113this._backend_state = backend_state;114115if (this.isCellRunner()) {116const stored_backend_state = this.syncdb117.get_one({ type: "settings" })118?.get("backend_state");119120if (stored_backend_state != backend_state) {121this._set({122type: "settings",123backend_state,124last_backend_state: Date.now(),125});126this.save_asap();127}128129// The following is to clear kernel_error if things are working only.130if (backend_state == "running") {131// clear kernel error if kernel successfully starts and stays132// in running state for a while.133this.clear_kernel_error = setTimeout(() => {134this._set({135type: "settings",136kernel_error: "",137});138}, 3000);139} else {140// change to a different state; cancel attempt to clear kernel error141if (this.clear_kernel_error) {142clearTimeout(this.clear_kernel_error);143delete this.clear_kernel_error;144}145}146}147}148149set_kernel_state = (state: any, save = false) => {150if (!this.isCellRunner()) return;151this._kernel_state = state;152this._set({ type: "settings", kernel_state: state }, save);153};154155// Called exactly once when the manager first starts up after the store is initialized.156// Here we ensure everything is in a consistent state so that we can react157// to changes later.158async initialize_manager() {159if (this._initialize_manager_already_done) {160return;161}162const dbg = this.dbg("initialize_manager");163dbg();164this._initialize_manager_already_done = true;165166this.sync_exec_state = debounce(this.sync_exec_state, 2000);167this._throttled_ensure_positions_are_unique = debounce(168this.ensure_positions_are_unique,1695000,170);171// Listen for changes...172this.syncdb.on("change", this._backend_syncdb_change.bind(this));173174this.setState({175// used by the kernel_info function of this.jupyter_kernel176start_time: this._client.server_time().valueOf(),177});178179// clear nbconvert start 
on init, since no nbconvert can be running yet180this.syncdb.delete({ type: "nbconvert" });181182// Initialize info about available kernels, which is used e.g., for183// saving to ipynb format.184this.init_kernel_info();185186// We try once to load from disk. If it fails, then187// a record with type:'fatal'188// is created in the database; if it succeeds, that record is deleted.189// Try again only when the file changes.190await this._first_load();191192// Listen for model state changes...193if (this.syncdb.ipywidgets_state == null) {194throw Error("syncdb's ipywidgets_state must be defined!");195}196this.syncdb.ipywidgets_state.on(197"change",198this.handle_ipywidgets_state_change.bind(this),199);200201this.syncdb.on("cursor_activity", this.checkForComputeServerStateChange);202203// initialize the websocket api204this.initWebsocketApi();205}206207private async _first_load() {208const dbg = this.dbg("_first_load");209dbg("doing load");210if (this.is_closed()) {211throw Error("actions must not be closed");212}213try {214await this.loadFromDiskIfNewer();215} catch (err) {216dbg(`load failed -- ${err}; wait for file change and try again`);217const path = this.store.get("path");218const watcher = this._client.watch_file({ path });219await once(watcher, "change");220dbg("file changed");221watcher.close();222await this._first_load();223return;224}225dbg("loading worked");226this._init_after_first_load();227}228229private _init_after_first_load() {230const dbg = this.dbg("_init_after_first_load");231232dbg("initializing");233this.ensure_backend_kernel_setup(); // this may change the syncdb.234235this.init_file_watcher();236237this._state = "ready";238this.ensure_there_is_a_cell();239}240241_backend_syncdb_change = (changes: any) => {242if (this.is_closed()) {243return;244}245const dbg = this.dbg("_backend_syncdb_change");246if (changes != null) {247changes.forEach((key) => {248switch (key.get("type")) {249case "settings":250dbg("settings change");251var record = 
this.syncdb.get_one(key);252if (record != null) {253// ensure kernel is properly configured254this.ensure_backend_kernel_setup();255// only the backend should change kernel and backend state;256// however, our security model allows otherwise (e.g., via TimeTravel).257if (258record.get("kernel_state") !== this._kernel_state &&259this._kernel_state != null260) {261this.set_kernel_state(this._kernel_state, true);262}263if (record.get("backend_state") !== this._backend_state) {264this.set_backend_state(this._backend_state);265}266267if (record.get("run_all_loop_s")) {268if (this.run_all_loop == null) {269this.run_all_loop = new RunAllLoop(270this,271record.get("run_all_loop_s"),272);273} else {274// ensure interval is correct275this.run_all_loop.set_interval(record.get("run_all_loop_s"));276}277} else if (278!record.get("run_all_loop_s") &&279this.run_all_loop != null280) {281// stop it.282this.run_all_loop.close();283delete this.run_all_loop;284}285}286break;287}288});289}290291this.ensure_there_is_a_cell();292this._throttled_ensure_positions_are_unique();293this.sync_exec_state();294};295296// ensure_backend_kernel_setup ensures that we have a connection297// to the proper type of kernel.298// If running is true, starts the kernel and waits until running.299ensure_backend_kernel_setup = () => {300const dbg = this.dbg("ensure_backend_kernel_setup");301if (this.isDeleted()) {302dbg("file is deleted");303return;304}305306const kernel = this.store.get("kernel");307308let current: string | undefined = undefined;309if (this.jupyter_kernel != null) {310current = this.jupyter_kernel.name;311if (current == kernel && this.jupyter_kernel.get_state() != "closed") {312dbg("everything is properly setup and working");313return;314}315}316317dbg(`kernel='${kernel}', current='${current}'`);318if (319this.jupyter_kernel != null &&320this.jupyter_kernel.get_state() != "closed"321) {322if (current != kernel) {323dbg("kernel changed -- kill running kernel to trigger 
switch");324this.jupyter_kernel.close();325return;326} else {327dbg("nothing to do");328return;329}330}331332dbg("make a new kernel");333334// No kernel wrapper object setup at all. Make one.335this.jupyter_kernel = createJupyterKernel({336name: kernel,337path: this.store.get("path"),338actions: this,339});340341if (this.syncdb.ipywidgets_state == null) {342throw Error("syncdb's ipywidgets_state must be defined!");343}344this.syncdb.ipywidgets_state.clear();345346if (this.jupyter_kernel == null) {347// to satisfy typescript.348throw Error("jupyter_kernel must be defined");349}350351// save so gets reported to frontend, and surfaced to user:352// https://github.com/sagemathinc/cocalc/issues/4847353this.jupyter_kernel.on("kernel_error", (error) => {354this.set_kernel_error(error);355});356357// Since we just made a new kernel, clearly no cells are running on the backend.358this._running_cells = {};359this.clear_all_cell_run_state();360361this.restartKernelOnClose = () => {362// When the kernel closes, make sure a new kernel gets setup.363if (this.store == null || this._state !== "ready") {364// This event can also happen when this actions is being closed,365// in which case obviously we shouldn't make a new kernel.366return;367}368dbg("kernel closed -- make new one.");369this.ensure_backend_kernel_setup();370};371372this.jupyter_kernel.once("closed", this.restartKernelOnClose);373374// Track backend state changes other than closing, so they375// are visible to user etc.376// TODO: Maybe all these need to move to ephemeral table?377// There's a good argument that recording these is useful though, so when378// looking at time travel or debugging, you know what was going on.379this.jupyter_kernel.on("state", (state) => {380dbg("jupyter_kernel state --> ", state);381switch (state) {382case "off":383case "closed":384// things went wrong.385this._running_cells = 
{};386this.clear_all_cell_run_state();387this.set_backend_state("ready");388this.jupyter_kernel?.close();389this.running_manager_run_cell_process_queue = false;390delete this.jupyter_kernel;391return;392case "spawning":393case "starting":394this.set_connection_file(); // yes, fall through395case "running":396this.set_backend_state(state);397}398});399400this.jupyter_kernel.on("execution_state", this.set_kernel_state);401402this.handle_all_cell_attachments();403this.set_backend_state("ready");404};405406set_connection_file = () => {407const connection_file = this.jupyter_kernel?.get_connection_file() ?? "";408this._set({409type: "settings",410connection_file,411});412};413414init_kernel_info = async () => {415let kernels0 = this.store.get("kernels");416if (kernels0 != null) {417return;418}419const dbg = this.dbg("init_kernel_info");420dbg("getting");421let kernels;422try {423kernels = await get_kernel_data();424dbg("success");425} catch (err) {426dbg(`FAILED to get kernel info: ${err}`);427// TODO: what to do?? 
Saving will be broken...428return;429}430this.setState({431kernels: immutable.fromJS(kernels),432});433};434435async ensure_backend_kernel_is_running() {436const dbg = this.dbg("ensure_backend_kernel_is_running");437if (this._backend_state == "ready") {438dbg("in state 'ready', so kick it into gear");439await this.set_backend_kernel_info();440dbg("done getting kernel info");441}442const is_running = (s): boolean => {443if (this._state === "closed") return true;444const t = s.get_one({ type: "settings" });445if (t == null) {446dbg("no settings");447return false;448} else {449const state = t.get("backend_state");450dbg(`state = ${state}`);451return state == "running";452}453};454await this.syncdb.wait(is_running, 60);455}456457// onCellChange is called after a cell change has been458// incorporated into the store after the syncdb change event.459// - If we are responsible for running cells, then it ensures460// that cell gets computed.461// - We also handle attachments for markdown cells.462protected onCellChange(id: string, new_cell: any, old_cell: any) {463const dbg = this.dbg(`onCellChange(id='${id}')`);464dbg();465// this logging could be expensive due to toJS, so only uncomment466// if really needed467// dbg("new_cell=", new_cell?.toJS(), "old_cell", old_cell?.toJS());468469if (470new_cell?.get("state") === "start" &&471old_cell?.get("state") !== "start" &&472this.isCellRunner()473) {474this.manager_run_cell_enqueue(id);475// attachments below only happen for markdown cells, which don't get run,476// we can return here:477return;478}479480const attachments = new_cell?.get("attachments");481if (attachments != null && attachments !== old_cell?.get("attachments")) {482this.handle_cell_attachments(new_cell);483}484}485486protected __syncdb_change_post_hook(doInit: boolean) {487if (doInit) {488if (this.isCellRunner()) {489// Since just opening the actions in the project, definitely the kernel490// isn't running so set this fact in the shared database. 
It will make491// things always be in the right initial state.492this.syncdb.set({493type: "settings",494backend_state: "init",495kernel_state: "idle",496kernel_usage: { memory: 0, cpu: 0 },497});498this.syncdb.commit();499}500501// Also initialize the execution manager, which runs cells that have been502// requested to run.503this.initialize_manager();504}505if (this.store.get("kernel")) {506this.manager_run_cell_process_queue();507}508}509510// Ensure that the cells listed as running *are* exactly the511// ones actually running or queued up to run.512sync_exec_state = () => {513// sync_exec_state is debounced, so it is *expected* to get called514// after actions have been closed.515if (this.store == null || this._state !== "ready") {516// not initialized, so we better not517// mess with cell state (that is somebody else's responsibility).518return;519}520// we are not the cell runner521if (!this.isCellRunner()) {522return;523}524525const dbg = this.dbg("sync_exec_state");526let change = false;527const cells = this.store.get("cells");528// First verify that all actual cells that are said to be running529// (according to the store) are in fact running.530if (cells != null) {531cells.forEach((cell, id) => {532const state = cell.get("state");533if (534state != null &&535state != "done" &&536state != "start" && // regarding "start", see https://github.com/sagemathinc/cocalc/issues/5467537!this._running_cells?.[id]538) {539dbg(`set cell ${id} with state "${state}" to done`);540this._set({ type: "cell", id, state: "done" }, false);541change = true;542}543});544}545if (this._running_cells != null) {546const cells = this.store.get("cells");547// Next verify that every cell actually running is still in the document548// and listed as running. 
TimeTravel, deleting cells, etc., can549// certainly lead to this being necessary.550for (const id in this._running_cells) {551const state = cells.getIn([id, "state"]);552if (state == null || state === "done") {553// cell no longer exists or isn't in a running state554dbg(`tell kernel to not run ${id}`);555this._cancel_run(id);556}557}558}559if (change) {560return this._sync();561}562};563564_cancel_run = (id: any) => {565const dbg = this.dbg(`_cancel_run ${id}`);566// All these checks are so we only cancel if it is actually running567// with the current kernel...568if (this._running_cells == null || this.jupyter_kernel == null) return;569const identity = this._running_cells[id];570if (identity == null) return;571if (this.jupyter_kernel.identity == identity) {572dbg("canceling");573this.jupyter_kernel.cancel_execute(id);574} else {575dbg("not canceling since wrong identity");576}577};578579// Note that there is a request to run a given cell.580// You must call manager_run_cell_process_queue for them to actually start running.581protected manager_run_cell_enqueue(id: string) {582if (this._running_cells?.[id]) {583return;584}585if (this._manager_run_cell_queue == null) {586this._manager_run_cell_queue = {};587}588this._manager_run_cell_queue[id] = true;589}590591// properly start running -- in order -- the cells that have been requested to run592protected async manager_run_cell_process_queue() {593if (this.running_manager_run_cell_process_queue) {594return;595}596this.running_manager_run_cell_process_queue = true;597try {598const dbg = this.dbg("manager_run_cell_process_queue");599const queue = this._manager_run_cell_queue;600if (queue == null) {601//dbg("queue is null");602return;603}604delete this._manager_run_cell_queue;605const v: any[] = [];606for (const id in queue) {607if (!this._running_cells?.[id]) {608v.push(this.store.getIn(["cells", id]));609}610}611612if (v.length == 0) {613dbg("no non-running cells");614return; // nothing to do615}616617v.sort((a, b) 
=>618misc.cmp(619a != null ? a.get("start") : undefined,620b != null ? b.get("start") : undefined,621),622);623624dbg(625`found ${v.length} non-running cell that should be running, so ensuring kernel is running...`,626);627this.ensure_backend_kernel_setup();628try {629await this.ensure_backend_kernel_is_running();630if (this._state == "closed") return;631} catch (err) {632// if this fails, give up on evaluation.633return;634}635636dbg(637`kernel is now running; requesting that each ${v.length} cell gets executed`,638);639for (const cell of v) {640if (cell != null) {641this.manager_run_cell(cell.get("id"));642}643}644645if (this._manager_run_cell_queue != null) {646// run it again to process additional entries.647setTimeout(this.manager_run_cell_process_queue, 1);648}649} finally {650this.running_manager_run_cell_process_queue = false;651}652}653654// returns new output handler for this cell.655protected _output_handler(cell: any) {656const dbg = this.dbg(`handler(id='${cell.id}')`);657if (658this.jupyter_kernel == null ||659this.jupyter_kernel.get_state() == "closed"660) {661throw Error("jupyter kernel must exist and not be closed");662}663this.reset_more_output(cell.id);664665const handler = new OutputHandler({666cell,667max_output_length: this.store.get("max_output_length"),668report_started_ms: 250,669dbg,670});671672dbg("setting up jupyter_kernel.once('closed', ...) 
handler");673const handleKernelClose = () => {674dbg("output handler -- closing due to jupyter kernel closed");675handler.close();676};677this.jupyter_kernel.once("closed", handleKernelClose);678// remove the "closed" handler we just defined above once679// we are done waiting for output from this cell.680// The output handler removes all listeners whenever it is681// finished, so we don't have to remove this listener for done.682handler.once("done", () =>683this.jupyter_kernel?.removeListener("closed", handleKernelClose),684);685686handler.on("more_output", (mesg, mesg_length) => {687this.set_more_output(cell.id, mesg, mesg_length);688});689690handler.on("process", (mesg) => {691// Do not enable -- mesg often very large!692// dbg("handler.on('process')", mesg);693if (694this.jupyter_kernel == null ||695this.jupyter_kernel.get_state() == "closed"696) {697return;698}699this.jupyter_kernel.process_output(mesg);700// dbg("handler -- after processing ", mesg);701});702703return handler;704}705706manager_run_cell = (id: string) => {707const dbg = this.dbg(`manager_run_cell(id='${id}')`);708dbg(JSON.stringify(misc.keys(this._running_cells)));709710if (this._running_cells == null) {711this._running_cells = {};712}713714if (this._running_cells[id]) {715dbg("cell already queued to run in kernel");716return;717}718719// It's important to set this._running_cells[id] to be true so that720// sync_exec_state doesn't declare this cell done. The kernel identity721// will get set properly below in case it changes.722this._running_cells[id] = this.jupyter_kernel?.identity ?? 
"none";723724const orig_cell = this.store.get("cells").get(id);725if (orig_cell == null) {726// nothing to do -- cell deleted727return;728}729730let input: string | undefined = orig_cell.get("input", "");731if (input == null) {732input = "";733} else {734input = input.trim();735}736737const halt_on_error: boolean = !orig_cell.get("no_halt", false);738739if (this.jupyter_kernel == null) {740throw Error("bug -- this is guaranteed by the above");741}742this._running_cells[id] = this.jupyter_kernel.identity;743744const cell: any = {745id,746type: "cell",747kernel: this.store.get("kernel"),748};749750dbg(`using max_output_length=${this.store.get("max_output_length")}`);751const handler = this._output_handler(cell);752753// exponentiallyThrottledSaved calls this.syncdb?.save, but754// it throttles the calls, and does so using exponential backoff755// up to MAX_OUTPUT_SAVE_DELAY milliseconds. Basically every756// time exponentiallyThrottledSaved is called it increases the757// interval used for throttling by multiplying saveThrottleMs by 1.3758// until saveThrottleMs gets to MAX_OUTPUT_SAVE_DELAY. 
There is no759// need at all to do a trailing call, since other code handles that.760let saveThrottleMs = 1;761let lastCall = 0;762const exponentiallyThrottledSaved = () => {763const now = Date.now();764if (now - lastCall < saveThrottleMs) {765return;766}767lastCall = now;768saveThrottleMs = Math.min(1.3 * saveThrottleMs, MAX_OUTPUT_SAVE_DELAY);769this.syncdb?.save();770};771772handler.on("change", (save) => {773if (!this.store.getIn(["cells", id])) {774// The cell was deleted, but we just got some output775// NOTE: client shouldn't allow deleting running or queued776// cells, but we still want to do something useful/sensible.777// We put cell back where it was with same input.778cell.input = orig_cell.get("input");779cell.pos = orig_cell.get("pos");780}781this.syncdb.set(cell);782// This is potentially very verbose -- don't due it unless783// doing low level debugging:784//dbg(`change (save=${save}): cell='${JSON.stringify(cell)}'`);785if (save) {786exponentiallyThrottledSaved();787}788});789790handler.once("done", () => {791dbg("handler is done");792this.store.removeListener("cell_change", cell_change);793exec.close();794if (this._running_cells != null) {795delete this._running_cells[id];796}797this.syncdb?.save();798setTimeout(() => this.syncdb?.save(), 100);799});800801if (this.jupyter_kernel == null) {802handler.error("Unable to start Jupyter");803return;804}805806const get_password = (): string => {807if (this.jupyter_kernel == null) {808dbg("get_password", id, "no kernel");809return "";810}811const password = this.jupyter_kernel.store.get(id);812dbg("get_password", id, password);813this.jupyter_kernel.store.delete(id);814return password;815};816817// This is used only for stdin right now.818const cell_change = (cell_id, new_cell) => {819if (id === cell_id) {820dbg("cell_change");821handler.cell_changed(new_cell, get_password);822}823};824this.store.on("cell_change", cell_change);825826const exec = this.jupyter_kernel.execute_code({827code: 
input,828id,829stdin: handler.stdin,830halt_on_error,831});832833exec.on("output", (mesg) => {834// uncomment only for specific low level debugging -- see https://github.com/sagemathinc/cocalc/issues/7022835// dbg(`got mesg='${JSON.stringify(mesg)}'`); // !!!☡ ☡ ☡ -- EXTREME DANGER ☡ ☡ ☡ !!!!836837if (mesg == null) {838// can't possibly happen, of course.839const err = "empty mesg";840dbg(`got error='${err}'`);841handler.error(err);842return;843}844if (mesg.done) {845// done is a special internal cocalc message.846handler.done();847return;848}849if (mesg.content?.transient?.display_id != null) {850// See https://github.com/sagemathinc/cocalc/issues/2132851// We find any other outputs in the document with852// the same transient.display_id, and set their output to853// this mesg's output.854this.handleTransientUpdate(mesg);855if (mesg.msg_type == "update_display_data") {856// don't also create a new output857return;858}859}860861if (mesg.msg_type === "clear_output") {862handler.clear(mesg.content.wait);863return;864}865866if (mesg.content.comm_id != null) {867// ignore any comm/widget related messages868return;869}870871if (mesg.content.execution_state === "idle") {872this.store.removeListener("cell_change", cell_change);873return;874}875if (mesg.content.execution_state === "busy") {876handler.start();877}878if (mesg.content.payload != null) {879if (mesg.content.payload.length > 0) {880// payload shell message:881// Despite https://ipython.org/ipython-doc/3/development/messaging.html#payloads saying882// ""Payloads are considered deprecated, though their replacement is not yet implemented."883// we fully have to implement them, since they are used to implement (crazy, IMHO)884// things like %load in the python2 kernel!885mesg.content.payload.map((p) => handler.payload(p));886return;887}888} else {889// Normal iopub output message890handler.message(mesg.content);891return;892}893});894895exec.on("error", (err) => {896dbg(`got 
error='${err}'`);897handler.error(err);898});899};900901reset_more_output = (id: any) => {902if (id == null) {903delete this.store._more_output;904}905if (906(this.store._more_output != null907? this.store._more_output[id]908: undefined) != null909) {910return delete this.store._more_output[id];911}912};913914set_more_output = (id: any, mesg: any, length: any): void => {915if (this.store._more_output == null) {916this.store._more_output = {};917}918const output =919this.store._more_output[id] != null920? this.store._more_output[id]921: (this.store._more_output[id] = {922length: 0,923messages: [],924lengths: [],925discarded: 0,926truncated: 0,927});928929output.length += length;930output.lengths.push(length);931output.messages.push(mesg);932933const goal_length = 10 * this.store.get("max_output_length");934while (output.length > goal_length) {935let need: any;936let did_truncate = false;937938// check if there is a text field, which we can truncate939let len =940output.messages[0].text != null941? output.messages[0].text.length942: undefined;943if (len != null) {944need = output.length - goal_length + 50;945if (len > need) {946// Instead of throwing this message away, let's truncate its text part. After947// doing this, the message is at least need shorter than it was before.948output.messages[0].text = misc.trunc(949output.messages[0].text,950len - need,951);952did_truncate = true;953}954}955956// check if there is a text/plain field, which we can thus also safely truncate957if (!did_truncate && output.messages[0].data != null) {958for (const field in output.messages[0].data) {959if (field === "text/plain") {960const val = output.messages[0].data[field];961len = val.length;962if (len != null) {963need = output.length - goal_length + 50;964if (len > need) {965// Instead of throwing this message away, let's truncate its text part. 
After966// doing this, the message is at least need shorter than it was before.967output.messages[0].data[field] = misc.trunc(val, len - need);968did_truncate = true;969}970}971}972}973}974975if (did_truncate) {976const new_len = JSON.stringify(output.messages[0]).length;977output.length -= output.lengths[0] - new_len; // how much we saved978output.lengths[0] = new_len;979output.truncated += 1;980break;981}982983const n = output.lengths.shift();984output.messages.shift();985output.length -= n;986output.discarded += 1;987}988};989990private init_file_watcher() {991const dbg = this.dbg("file_watcher");992dbg();993this._file_watcher = this._client.watch_file({994path: this.store.get("path"),995debounce: 1000,996});997998this._file_watcher.on("change", async () => {999if (!this.isCellRunner()) {1000return;1001}1002dbg("change");1003try {1004await this.loadFromDiskIfNewer();1005} catch (err) {1006dbg("failed to load on change", err);1007}1008});1009}10101011/*1012* Unfortunately, though I spent two hours on this approach... it just doesn't work,1013* since, e.g., if the sync file doesn't already exist, it can't be created,1014* which breaks everything. 
So disabling for now and re-opening the issue.

_sync_file_mode: =>
    dbg = @dbg("_sync_file_mode"); dbg()
    # Make the mode of the syncdb file the same as the mode of the .ipynb file.
    # This is used for read-only status.
    ipynb_file = @store.get('path')
    locals =
        ipynb_file_ro  : undefined
        syncdb_file_ro : undefined
    syncdb_file = @syncdb.get_path()
    async.parallel([
        (cb) ->
            fs.access ipynb_file, fs.constants.W_OK, (err) ->
                # Also store in @_ipynb_file_ro to prevent starting kernel in this case.
                @_ipynb_file_ro = locals.ipynb_file_ro = !!err
                cb()
        (cb) ->
            fs.access syncdb_file, fs.constants.W_OK, (err) ->
                locals.syncdb_file_ro = !!err
                cb()
    ], ->
        if locals.ipynb_file_ro == locals.syncdb_file_ro
            return
        dbg("mode change")
        async.parallel([
            (cb) ->
                fs.stat ipynb_file, (err, stats) ->
                    locals.ipynb_stats = stats
                    cb(err)
            (cb) ->
                # error if syncdb_file doesn't exist, which is GOOD, since
                # in that case we do not want to chmod which would create
                # that file as empty and blank it.
                fs.stat(syncdb_file, cb)
        ], (err) ->
            if not err
                dbg("changing syncb mode to match ipynb mode")
                fs.chmod(syncdb_file, locals.ipynb_stats.mode)
            else
                dbg("error stating ipynb", err)
        )
    )
*/

  // Load file from disk if it is newer than
  // the last we saved to disk.
  private loadFromDiskIfNewer = async () => {
    const dbg = this.dbg("loadFromDiskIfNewer");
    // Get mtime of last .ipynb file that we explicitly saved.

    // TODO: breaking the syncdb typescript data hiding.  The
    // right fix will be to move
    // this info to a new ephemeral state table.
    const last_ipynb_save = await this.get_last_ipynb_save();
    dbg(`syncdb last_ipynb_save=${last_ipynb_save}`);
    let file_changed;
    if (last_ipynb_save == 0) {
      // we MUST load from file the first time, of course.
      file_changed = true;
      dbg("file changed because FIRST TIME");
    } else {
      const path = this.store.get("path");
      let stats;
      try {
        stats = await callback2(this._client.path_stat, { path });
        dbg(`stats.mtime = ${stats.mtime}`);
      } catch (err) {
        // This err just means the file doesn't exist.
        // We set the 'last load' to now in this case, since
        // the frontend clients need to know that we
        // have already scanned the disk.
        this.set_last_load();
        return;
      }
      const mtime = stats.mtime.getTime();
      file_changed = mtime > last_ipynb_save;
      dbg({ mtime, last_ipynb_save });
    }
    if (file_changed) {
      dbg(".ipynb disk file changed ==> loading state from disk");
      try {
        await this.load_ipynb_file();
      } catch (err) {
        // best effort: log and keep current syncdb state rather than crash the manager
        dbg("failed to load on change", err);
      }
    } else {
      dbg("disk file NOT changed: NOT loading");
    }
  };

  // Record "now" as the last time we loaded state from the .ipynb file on disk.
  // if alsoSetLoad is true, we also set the "last_ipynb_save" time.
  set_last_load = (alsoSetLoad: boolean = false) => {
    const last_load = new Date().getTime();
    this.syncdb.set({
      type: "file",
      last_load,
    });
    if (alsoSetLoad) {
      // yes, load v save is inconsistent!
      this.syncdb.set({ type: "settings", last_ipynb_save: last_load });
    }
    this.syncdb.commit();
  };

  /* Determine timestamp of aux .ipynb file, and record it here,
     so we know that we do not have to load exactly that file
     back from disk. */
  private set_last_ipynb_save = async () => {
    let stats;
    try {
      stats = await callback2(this._client.path_stat, {
        path: this.store.get("path"),
      });
    } catch (err) {
      // no-op -- nothing to do.
      this.dbg("set_last_ipynb_save")(`WARNING -- issue in path_stat ${err}`);
      return;
    }

    // This is ugly (i.e., how we get access), but I need to get this done.
    // This is the RIGHT place to save the info though.
    // TODO: move this state info to new ephemeral table.
    try {
      const last_ipynb_save = stats.mtime.getTime();
      // cached in memory as well, so get_last_ipynb_save works even if the
      // syncdb settings record is not yet updated
      this.last_ipynb_save = last_ipynb_save;
      this._set({
        type: "settings",
        last_ipynb_save,
      });
      this.dbg("stats.mtime.getTime()")(
        `set_last_ipynb_save = ${last_ipynb_save}`,
      );
    } catch (err) {
      this.dbg("set_last_ipynb_save")(
        `WARNING -- issue in set_last_ipynb_save ${err}`,
      );
      return;
    }
  };

  // Timestamp of the last .ipynb save: the max of what is recorded in the
  // syncdb settings record and the in-memory cache set by set_last_ipynb_save.
  private get_last_ipynb_save = async () => {
    const x =
      this.syncdb.get_one({ type: "settings" })?.get("last_ipynb_save") ?? 0;
    return Math.max(x, this.last_ipynb_save);
  };

  load_ipynb_file = async () => {
    /*
    Read the ipynb file from disk.  Fully use the ipynb file to
    set the syncdb's state.  We do this when opening a new file, or when
    the file changes on disk (e.g., a git checkout or something).
    */
    const dbg = this.dbg(`load_ipynb_file`);
    dbg("reading file");
    const path = this.store.get("path");
    let content: string;
    try {
      content = await callback2(this._client.path_read, {
        path,
        maxsize_MB: 50,
      });
    } catch (err) {
      // possibly file doesn't exist -- set notebook to empty.
      const exists = await callback2(this._client.path_exists, {
        path,
      });
      if (!exists) {
        content = "";
      } else {
        // It would be better to have a button to push instead of
        // suggesting running a command in the terminal, but
        // adding that took 1 second.  Better than both would be
        // making it possible to edit huge files :-).
        const error = `Error reading ipynb file '${path}': ${err.toString()}. Fix this to continue. You can delete all output by typing cc-jupyter-no-output [filename].ipynb in a terminal.`;
        this.syncdb.set({ type: "fatal", error });
        throw Error(error);
      }
    }
    if (content.length === 0) {
      // Blank file, e.g., when creating in CoCalc.
      // This is good, works, etc. -- just clear state, including error.
      this.syncdb.delete();
      this.set_last_load(true);
      return;
    }

    // File is nontrivial -- parse and load.
    let parsed_content;
    try {
      parsed_content = JSON.parse(content);
    } catch (err) {
      const error = `Error parsing the ipynb file '${path}': ${err}. You must fix the ipynb file somehow before continuing.`;
      dbg(error);
      this.syncdb.set({ type: "fatal", error });
      throw Error(error);
    }
    // parsing worked, so clear any previous fatal error before loading state
    this.syncdb.delete({ type: "fatal" });
    await this.set_to_ipynb(parsed_content);
    this.set_last_load(true);
  };

  // Serialize current notebook state to the .ipynb file on disk.  Only the
  // cell runner (project or designated compute server) actually writes.
  // Throws if there is no kernel / kernel info, or if the write fails.
  save_ipynb_file = async () => {
    const dbg = this.dbg("save_ipynb_file");
    if (!this.isCellRunner()) {
      dbg("not cell runner, so NOT saving ipynb file to disk");
      return;
    }
    dbg("saving to file");

    // Check first if file was deleted, in which case instead of saving to disk,
    // we should terminate and clean up everything.
    if (this.isDeleted()) {
      dbg("ipynb file is deleted, so NOT saving to disk and closing");
      this.close({ noSave: true });
      return;
    }

    if (this.jupyter_kernel == null) {
      // The kernel is needed to get access to the blob store, which
      // may be needed to save to disk.
      this.ensure_backend_kernel_setup();
      if (this.jupyter_kernel == null) {
        // still not null?  This would happen if no kernel is set at all,
        // in which case it's OK that saving isn't possible.
        throw Error("no kernel so cannot save");
      }
    }
    if (this.store.get("kernels") == null) {
      await this.init_kernel_info();
      if (this.store.get("kernels") == null) {
        // This should never happen, but maybe could in case of a very
        // messed up compute environment where the kernelspecs can't be listed.
        throw Error(
          "kernel info not known and can't be determined, so can't save",
        );
      }
    }
    dbg("going to try to save: getting ipynb object...");
    const blob_store = this.jupyter_kernel.get_blob_store();
    let ipynb = this.store.get_ipynb(blob_store);
    if (this.store.get("kernel")) {
      // if a kernel is set, check that it was sufficiently known that
      // we can fill in data about it -- see https://github.com/sagemathinc/cocalc/issues/7286
      if (ipynb?.metadata?.kernelspec?.name == null) {
        dbg("kernelspec not known -- try loading kernels again");
        await this.fetch_jupyter_kernels();
        // and again grab the ipynb
        ipynb = this.store.get_ipynb(blob_store);
        if (ipynb?.metadata?.kernelspec?.name == null) {
          dbg("kernelspec STILL not known: metadata will be incomplete");
        }
      }
    }
    dbg("got ipynb object");
    // We use json_stable (and indent 1) to be more diff friendly to user,
    // and more consistent with official Jupyter.
    const data = json_stable(ipynb, { space: 1 });
    if (data == null) {
      dbg("failed -- ipynb not defined yet");
      throw Error("ipynb not defined yet; can't save");
    }
    dbg("converted ipynb to stable JSON string", data?.length);
    //dbg(`got string version '${data}'`)
    try {
      dbg("writing to disk...");
      await callback2(this._client.write_file, {
        path: this.store.get("path"),
        data,
      });
      dbg("succeeded at saving");
      // record the on-disk mtime so loadFromDiskIfNewer doesn't reload our own save
      await this.set_last_ipynb_save();
    } catch (err) {
      const e = `error writing file: ${err}`;
      dbg(e);
      throw Error(e);
    }
  };

  // If the notebook has no cells at all, create a single empty one,
  // so the document is never completely blank.  Only the cell runner does this.
  ensure_there_is_a_cell = () => {
    if (this._state !== "ready") {
      return;
    }
    const cells = this.store.get("cells");
    if (cells == null || (cells.size === 0 && this.isCellRunner())) {
      this._set({
        type: "cell",
        id: this.new_id(),
        pos: 0,
        input: "",
      });
      // We are obviously contributing content to this (empty!) notebook.
      return this.set_trust_notebook(true);
    }
  };

  private handle_all_cell_attachments() {
    // Check if any cell attachments need to be loaded.
    const cells = this.store.get("cells");
    cells?.forEach((cell) => {
      this.handle_cell_attachments(cell);
    });
  }

  // Load any attachments of the given cell that are in the "load" state from
  // disk (via the kernel's blob store), recording progress/errors in the cell.
  private handle_cell_attachments(cell) {
    if (this.jupyter_kernel == null) {
      // can't do anything
      return;
    }
    const dbg = this.dbg(`handle_cell_attachments(id=${cell.get("id")})`);
    dbg();

    const attachments = cell.get("attachments");
    if (attachments == null) return; // nothing to do
    attachments.forEach(async (x, name) => {
      if (x == null) return;
      if (x.get("type") === "load") {
        if (this.jupyter_kernel == null) return; // try later
        // need to load from disk
        this.set_cell_attachment(cell.get("id"), name, {
          type: "loading",
          value: null,
        });
        let sha1: string;
        try {
          sha1 = await this.jupyter_kernel.load_attachment(x.get("value"));
        } catch (err) {
          this.set_cell_attachment(cell.get("id"), name, {
            type: "error",
            value: `${err}`,
          });
          return;
        }
        // success -- attachment is now referenced by its sha1 in the blob store
        this.set_cell_attachment(cell.get("id"), name, {
          type: "sha1",
          value: sha1,
        });
      }
    });
  }

  // handle_ipywidgets_state_change is called when the project ipywidgets_state
  // object changes, e.g., in response to a user moving a slider in the browser.
  // It crafts a comm message that is sent to the running Jupyter kernel telling
  // it about this change by calling send_comm_message_to_kernel.
  private handle_ipywidgets_state_change(keys): void {
    if (this.is_closed()) {
      return;
    }
    const dbg = this.dbg("handle_ipywidgets_state_change");
    dbg(keys);
    if (this.jupyter_kernel == null) {
      dbg("no kernel, so ignoring changes to ipywidgets");
      return;
    }
    if (this.syncdb.ipywidgets_state == null) {
      throw Error("syncdb's ipywidgets_state must be defined!");
    }
    for (const key of keys) {
      // each key is a JSON-encoded array whose 2nd and 3rd entries are the
      // widget model id and the kind of change ("value", "buffers", "state")
      const [, model_id, type] = JSON.parse(key);
      dbg({ key, model_id, type });
      let data: any;
      if (type === "value") {
        const state = this.syncdb.ipywidgets_state.get_model_value(model_id);
        // Saving the buffers on change is critical since otherwise this breaks:
        // https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20List.html#file-upload
        // Note that stupidly the buffer (e.g., image upload) gets sent to the kernel twice.
        // But it does work robustly, and the kernel and nodejs server processes next to each
        // other so this isn't so bad.
        const { buffer_paths, buffers } =
          this.syncdb.ipywidgets_state.getKnownBuffers(model_id);
        data = { method: "update", state, buffer_paths };
        this.jupyter_kernel.send_comm_message_to_kernel({
          msg_id: misc.uuid(),
          target_name: "jupyter.widget",
          comm_id: model_id,
          data,
          buffers,
        });
      } else if (type === "buffers") {
        // TODO: we MIGHT need to implement this... but MAYBE NOT.  An example where this seems like it might be
        // required is by the file upload widget, but actually that just uses the value type above, since
        // we explicitly fill in the widgets there; also there is an explicit comm upload message that
        // the widget sends out that updates the buffer, and in send_comm_message_to_kernel in jupyter/kernel/kernel.ts
        // when processing that message, we save those buffers and make sure they are set in the
        // value case above (otherwise they would get removed).
        // https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20List.html#file-upload
        // which creates a buffer from the content of the file, then sends it to the backend,
        // which sees a change and has to write that buffer to the kernel (here) so that
        // the running python process can actually do something with the file contents (e.g.,
        // process data, save file to disk, etc).
        // We need to be careful though to not send buffers to the kernel that the kernel sent us,
        // since that would be a waste.
      } else if (type === "state") {
        // TODO: currently ignoring this, since it seems chatty and pointless,
        // and could lead to race conditions probably with multiple users, etc.
        // It happens right when the widget is created.
        /*
        const state = this.syncdb.ipywidgets_state.getModelSerializedState(model_id);
        data = { method: "update", state };
        this.jupyter_kernel.send_comm_message_to_kernel(
          misc.uuid(),
          model_id,
          data
        );
        */
      } else {
        throw Error(`invalid synctable state -- unknown type '${type}'`);
      }
    }
  }

  public async process_comm_message_from_kernel(mesg: any): Promise<void> {
    const dbg = this.dbg("process_comm_message_from_kernel");
    // serializing the full message could cause enormous load on the server, since
    // the mesg may contain large buffers.  Only do for low level debugging!
    // dbg(mesg); // EXTREME DANGER!
    // This should be safe:
    dbg(JSON.stringify(mesg.header));
    if (this.syncdb.ipywidgets_state == null) {
      throw Error("syncdb's ipywidgets_state must be defined!");
    }
    await this.syncdb.ipywidgets_state.process_comm_message_from_kernel(mesg);
  }

  // Delegate an output message to ipywidgets_state; returns whatever
  // capture_output_message returns (true when the message was captured).
  public capture_output_message(mesg: any): boolean {
    if (this.syncdb.ipywidgets_state == null) {
      throw Error("syncdb's ipywidgets_state must be defined!");
    }
    return this.syncdb.ipywidgets_state.capture_output_message(mesg);
  }

  // Project-side cleanup: stop the run-all loop and tear down the
  // jupyter redux state for this path.
  public close_project_only() {
    const dbg = this.dbg("close_project_only");
    dbg();
    if (this.run_all_loop) {
      this.run_all_loop.close();
      delete this.run_all_loop;
    }
    // this stops the kernel and cleans everything up
    // so no resources are wasted and next time starting
    // is clean
    // (fire-and-forget on purpose; failures are only logged)
    (async () => {
      try {
        await removeJupyterRedux(this.store.get("path"), this.project_id);
      } catch (err) {
        dbg("WARNING -- issue removing jupyter redux", err);
      }
    })();
  }

  // Send a signal to the running kernel (default SIGINT = interrupt).
  // not actually async...
  public async signal(signal = "SIGINT"): Promise<void> {
    this.jupyter_kernel?.signal(signal);
  }

  // Delegate nbconvert record changes in the syncdb to the nbconvert handler module.
  public handle_nbconvert_change(oldVal, newVal): void {
    nbconvertChange(this, oldVal?.toJS(), newVal?.toJS());
  }

  // Decide whether THIS process (project or compute server) is the one
  // responsible for evaluating code in this notebook.
  protected isCellRunner = (): boolean => {
    if (this.is_closed()) {
      // it's closed, so obviously not the cell runner.
      return false;
    }
    const dbg = this.dbg("isCellRunner");
    let id;
    try {
      id = this.getComputeServerId();
    } catch (_) {
      // normal since debounced,
      // and anyways if anything like syncdb that getComputeServerId
      // depends on doesn't work, then we are clearly
      // not the cell runner
      return false;
    }
    dbg("id = ", id);
    if (id == 0 && this.is_project) {
      dbg("yes we are the cell runner (the project)");
      // when no remote compute servers are configured, the project is
      // responsible for evaluating code.
      return true;
    }
    if (this.is_compute_server) {
      // a remote compute server is supposed to be responsible. Are we it?
      try {
        const myId = decodeUUIDtoNum(this.syncdb.client_id());
        const isRunner = myId == id;
        dbg(isRunner ? "Yes, we are cell runner" : "NOT cell runner");
        return isRunner;
      } catch (err) {
        dbg(err);
      }
    }
    dbg("NO we are not the cell runner");
    return false;
  };

  private lastComputeServerId = 0;
  // If the compute server responsible for this notebook changed since we
  // last checked, halt and clear all cell run state so the new runner starts clean.
  // Only reacts to clients whose id is an encoded numeric UUID
  // (NOTE(review): presumably those are exactly the compute-server clients).
  private checkForComputeServerStateChange = (client_id) => {
    if (this.is_closed()) {
      return;
    }
    if (!isEncodedNumUUID(client_id)) {
      return;
    }
    const id = this.getComputeServerId();
    if (id != this.lastComputeServerId) {
      // reset all run state
      this.halt();
      this.clear_all_cell_run_state();
    }
    this.lastComputeServerId = id;
  };

  /*
  WebSocket API

  1. Handles api requests from the user via the generic websocket message channel
  provided by the syncdb.

  2. In case a remote compute server connects and registers to handle api messages,
  then those are proxied to the remote server, handled there, and proxied back.
  */

  private initWebsocketApi = () => {
    if (this.is_project) {
      // only the project receives these messages from clients.
      this.syncdb.on("message", this.handleMessageFromClient);
    } else if (this.is_compute_server) {
      // compute servers receive messages from the project,
      // proxying an api request from a client.
      this.syncdb.on("message", this.handleMessageFromProject);
    }
  };

  private remoteApiHandler: null | {
    spark: any; // the spark channel connection between project and compute server
    id: number; // this is a sequential id used for request/response pairing
    // when we get a response from the compute server, one of these callbacks gets called:
    responseCallbacks: { [id: number]: (err: any, response: any) => void };
  } = null;

  private handleMessageFromClient = async ({ data, spark }) => {
    // This is called in the project to handle api requests.
    // It either handles them directly, or if there is a remote
    // compute server, it forwards them to the remote compute server,
    // then proxies the response back to the client.

    const dbg = this.dbg("handleMessageFromClient");
    dbg();
    // WARNING: potentially very verbose
    dbg(data);
    switch (data.event) {
      case "register-to-handle-api": {
        if (this.remoteApiHandler?.spark?.id == spark.id) {
          dbg(
            "register-to-handle-api -- it's the current one so nothing to do",
          );
          return;
        }
        if (this.remoteApiHandler?.spark != null) {
          dbg("register-to-handle-api -- remove existing handler");
          this.remoteApiHandler.spark.removeAllListeners();
          this.remoteApiHandler.spark.end();
          this.remoteApiHandler = null;
        }
        // a compute server client is volunteering to handle all api requests until they disconnect
        this.remoteApiHandler = { spark, id: 0, responseCallbacks: {} };
        dbg("register-to-handle-api -- spark.id = ", spark.id);
        spark.on("end", () => {
          dbg(
            "register-to-handle-api -- spark ended, spark.id = ",
            spark.id,
            " and this.remoteApiHandler?.spark.id=",
            this.remoteApiHandler?.spark.id,
          );
          // only clear the handler if it is still THIS spark (a newer
          // registration may have already replaced it)
          if (this.remoteApiHandler?.spark.id == spark.id) {
            this.remoteApiHandler = null;
          }
        });
        return;
      }

      case "api-request": {
        // browser client made an api request.  This will get handled
        // either locally or via a remote compute server, depending on
        // whether this.remoteApiHandler is set (via the
        // register-to-handle-api event above).
        const response = await this.handleApiRequest(data);
        spark.write({
          event: "message",
          data: { event: "api-response", response, id: data.id },
        });
        return;
      }

      case "api-response": {
        // handling api request that we proxied to a remote compute server.
        // We are handling the response from the remote compute server.
        if (this.remoteApiHandler == null) {
          dbg("WARNING: api-response event but there is no remote api handler");
          // api-response event can't be handled because no remote api handler is registered
          // This should only happen if the requesting spark just disconnected, so there's no way to
          // respond anyways.
          return;
        }
        const cb = this.remoteApiHandler.responseCallbacks[data.id];
        if (cb != null) {
          delete this.remoteApiHandler.responseCallbacks[data.id];
          cb(undefined, data);
        } else {
          dbg("WARNING: api-response event for unknown id", data.id);
        }
        return;
      }

      case "save-blob-to-project": {
        if (!this.is_project) {
          throw Error(
            "message save-blob-to-project should only be sent to the project",
          );
        }
        // A compute server sent the project a blob to store
        // in the local blob store.
        const blobStore = await get_blob_store();
        blobStore.save(data.data, data.type, data.ipynb);
        return;
      }

      default: {
        // unknown event so send back error
        spark.write({
          event: "message",
          data: {
            event: "error",
            message: `unknown event ${data.event}`,
            id: data.id,
          },
        });
      }
    }
  };

  // this should only be called on a compute server.
  public saveBlobToProject = (data: string, type: string, ipynb?: string) => {
    if (!this.is_compute_server) {
      throw Error(
        "saveBlobToProject should only be called on a compute server",
      );
    }
    const dbg = this.dbg("saveBlobToProject");
    if (this.is_closed()) {
      dbg("called AFTER closed");
      return;
    }
    // This is called on a compute server whenever something is
    // written to its local blob store.  TODO: We do not wait for
    // confirmation that blob was sent yet though.
    dbg();
    this.syncdb.sendMessageToProject({
      event: "save-blob-to-project",
      data,
      type,
      ipynb,
    });
  };

  private handleMessageFromProject = async (data) => {
    const dbg = this.dbg("handleMessageFromProject");
    if (this.is_closed()) {
      dbg("called AFTER closed");
      return;
    }
    // This is called on the remote compute server to handle api requests.
    dbg();
    // output could be very BIG:
    // dbg(data);
    if (data.event == "api-request") {
      const response = await this.handleApiRequest(data.request);
      try {
        await this.syncdb.sendMessageToProject({
          event: "api-response",
          id: data.id,
          response,
        });
      } catch (err) {
        // this happens when the websocket is disconnected
        dbg(`WARNING -- issue responding to message ${err}`);
      }
      return;
    }
  };

  // Handle an api request either locally (in the project) or by proxying it
  // to a registered remote compute server.  Errors are returned as
  // { event: "error", message } rather than thrown.
  private handleApiRequest = async (data) => {
    if (this.remoteApiHandler != null) {
      return await this.handleApiRequestViaRemoteApiHandler(data);
    }
    const dbg = this.dbg("handleApiRequest");
    const { path, endpoint, query } = data;
    dbg("handling request in project", path);
    try {
      return await handleApiRequest(path, endpoint, query);
    } catch (err) {
      dbg("error -- ", err.message);
      return { event: "error", message: err.message };
    }
  };

  private handleApiRequestViaRemoteApiHandler = async (data) => {
    const dbg = this.dbg("handleApiRequestViaRemoteApiHandler");
    dbg(data?.path);
    try {
      if (!this.is_project) {
        throw Error("BUG -- remote api requests only make sense in a project");
      }
      if (this.remoteApiHandler == null) {
        throw Error("BUG -- remote api handler not registered");
      }
      // Send a message to the remote asking it to handle this api request,
      // which calls the function handleMessageFromProject from above in that remote process.
      const { id, spark, responseCallbacks } = this.remoteApiHandler;
      spark.write({
        event: "message",
        data: { event: "api-request", request: data, id },
      });
      // the callback registered here is invoked by the api-response case of
      // handleMessageFromClient when the remote answers with the same id
      const waitForResponse = (cb) => {
        responseCallbacks[id] = cb;
      };
      this.remoteApiHandler.id += 1; // increment sequential protocol message tracker id
      return (await callback(waitForResponse)).response;
    } catch (err) {
      dbg("error -- ", err.message);
      return { event: "error", message: err.message };
    }
  };

  // Handle transient cell messages: replace any existing transient outputs
  // anywhere in the document that carry the same display_id.
  // NOTE(review): returns false when mesg has no display_id and undefined
  // otherwise -- confirm no caller depends on the return value.
  handleTransientUpdate = (mesg) => {
    const display_id = mesg.content?.transient?.display_id;
    if (!display_id) {
      return false;
    }

    let matched = false;
    // are there any transient outputs in the entire document that
    // have this display_id? search to find them.
    // TODO: we could use a clever data structure to make
    // this faster and less likely to have bugs.
    const cells = this.syncdb.get({ type: "cell" });
    for (let cell of cells) {
      let output = cell.get("output");
      if (output != null) {
        for (const [n, val] of output) {
          if (val.getIn(["transient", "display_id"]) == display_id) {
            // found a match -- replace it
            output = output.set(n, immutable.fromJS(mesg.content));
            this.syncdb.set({ type: "cell", id: cell.get("id"), output });
            matched = true;
          }
        }
      }
    }
    if (matched) {
      this.syncdb.commit();
    }
  };
  // End Websocket API
}