CoCalc provides the best real-time collaborative environment for Jupyter Notebooks, LaTeX documents, and SageMath, scalable from individual users to large groups and classes!
Path: blob/master/Core/Debugger/WebSocket/GPUBufferSubscriber.cpp
Views: 1401
// Copyright (c) 2018- PPSSPP Project.12// This program is free software: you can redistribute it and/or modify3// it under the terms of the GNU General Public License as published by4// the Free Software Foundation, version 2.0 or later versions.56// This program is distributed in the hope that it will be useful,7// but WITHOUT ANY WARRANTY; without even the implied warranty of8// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the9// GNU General Public License 2.0 for more details.1011// A copy of the GPL 2.0 should have been included with the program.12// If not, see http://www.gnu.org/licenses/1314// Official git repository and contact information can be found at15// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.1617#include <algorithm>18#ifndef USING_QT_UI19#include <png.h>20#include <zlib.h>21#endif22#include "Common/Data/Encoding/Base64.h"23#include "Common/StringUtils.h"24#include "Core/Debugger/WebSocket/GPUBufferSubscriber.h"25#include "Core/Debugger/WebSocket/WebSocketUtils.h"26#include "Core/MIPS/MIPSDebugInterface.h"27#include "Core/Screenshot.h"28#include "GPU/Debugger/Stepping.h"2930DebuggerSubscriber *WebSocketGPUBufferInit(DebuggerEventHandlerMap &map) {31// No need to bind or alloc state, these are all global.32map["gpu.buffer.screenshot"] = &WebSocketGPUBufferScreenshot;33map["gpu.buffer.renderColor"] = &WebSocketGPUBufferRenderColor;34map["gpu.buffer.renderDepth"] = &WebSocketGPUBufferRenderDepth;35map["gpu.buffer.renderStencil"] = &WebSocketGPUBufferRenderStencil;36map["gpu.buffer.texture"] = &WebSocketGPUBufferTexture;37map["gpu.buffer.clut"] = &WebSocketGPUBufferClut;3839return nullptr;40}4142// Note: Calls req.Respond(). 
Other data can be added afterward.43static bool StreamBufferToDataURI(DebuggerRequest &req, const GPUDebugBuffer &buf, bool isFramebuffer, bool includeAlpha, int stackWidth) {44#ifdef USING_QT_UI45req.Fail("Not supported on Qt yet, pull requests accepted");46return false;47#else48u8 *flipbuffer = nullptr;49u32 w = (u32)-1;50u32 h = (u32)-1;51const u8 *buffer = ConvertBufferToScreenshot(buf, includeAlpha, flipbuffer, w, h);52if (!buffer) {53req.Fail("Internal error converting buffer for PNG encode");54return false;55}5657if (stackWidth > 0) {58u32 totalPixels = w * h;59w = stackWidth;60while ((totalPixels % w) != 0)61--w;62h = totalPixels / w;63}6465png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr);66if (!png_ptr) {67req.Fail("Internal error setting up PNG encoder (png_ptr)");68return false;69}70png_infop info_ptr = png_create_info_struct(png_ptr);71if (!info_ptr) {72png_destroy_write_struct(&png_ptr, nullptr);73req.Fail("Internal error setting up PNG encoder (info_ptr)");74return false;75}7677// Speed. Wireless N should give 35 KB/ms. 
For most devices, zlib/filters will cost more.78png_set_compression_strategy(png_ptr, Z_RLE);79png_set_compression_level(png_ptr, 1);80png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_NONE);8182auto &json = req.Respond();83json.writeInt("width", w);84json.writeInt("height", h);85if (isFramebuffer) {86json.writeBool("isFramebuffer", isFramebuffer);87}8889// Start a value...90json.writeRaw("uri", "");91req.Flush();92// Now we'll write it directly to the stream.93req.ws->AddFragment(false, "\"data:image/png;base64,");9495struct Context {96DebuggerRequest *req;97uint8_t buf[3];98size_t bufSize;99};100Context ctx = { &req, {}, 0 };101102auto write = [](png_structp png_ptr, png_bytep data, png_size_t length) {103auto ctx = (Context *)png_get_io_ptr(png_ptr);104auto &req = *ctx->req;105106// If we buffered some bytes, fill to 3 bytes for a clean base64 encode.107// This way we don't have padding.108while (length > 0 && ctx->bufSize > 0 && ctx->bufSize != 3) {109ctx->buf[ctx->bufSize++] = data[0];110data++;111length--;112}113114if (ctx->bufSize == 3) {115req.ws->AddFragment(false, Base64Encode(ctx->buf, ctx->bufSize));116ctx->bufSize = 0;117}118_assert_(ctx->bufSize == 0 || length == 0);119120// Save bytes that would result in padding for next time.121size_t toBuffer = length % 3;122for (size_t i = 0; i < toBuffer; ++i) {123ctx->buf[i] = data[length - toBuffer + i];124ctx->bufSize++;125}126127if (length > toBuffer) {128req.ws->AddFragment(false, Base64Encode(data, length - toBuffer));129}130};131auto flush = [](png_structp png_ptr) {132// Nothing, just here to prevent stdio flush.133};134135png_bytep *row_pointers = new png_bytep[h];136u32 stride = includeAlpha ? w * 4 : w * 3;137for (u32 i = 0; i < h; ++i) {138row_pointers[i] = (u8 *)buffer + stride * i;139}140141png_set_write_fn(png_ptr, &ctx, write, flush);142int colorType = includeAlpha ? 
PNG_COLOR_TYPE_RGBA : PNG_COLOR_TYPE_RGB;143png_set_IHDR(png_ptr, info_ptr, w, h, 8, colorType, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);144png_set_rows(png_ptr, info_ptr, row_pointers);145png_write_png(png_ptr, info_ptr, PNG_TRANSFORM_IDENTITY, nullptr);146147png_destroy_write_struct(&png_ptr, &info_ptr);148delete [] row_pointers;149delete [] flipbuffer;150151if (ctx.bufSize > 0) {152req.ws->AddFragment(false, Base64Encode(ctx.buf, ctx.bufSize));153ctx.bufSize = 0;154}155156// End the string.157req.ws->AddFragment(false, "\"");158return true;159#endif160}161162static std::string DescribeFormat(GPUDebugBufferFormat fmt) {163switch (fmt) {164case GPU_DBG_FORMAT_565: return "B5G6R5_UNORM_PACK16";165case GPU_DBG_FORMAT_5551: return "A1B5G5R5_UNORM_PACK16";166case GPU_DBG_FORMAT_4444: return "A4B4G4R4_UNORM_PACK16";167case GPU_DBG_FORMAT_8888: return "R8G8B8A8_UNORM";168169case GPU_DBG_FORMAT_565_REV: return "R5G6B5_UNORM_PACK16";170case GPU_DBG_FORMAT_5551_REV: return "R5G5B5A1_UNORM_PACK16";171case GPU_DBG_FORMAT_4444_REV: return "R4G4B4A4_UNORM_PACK16";172173case GPU_DBG_FORMAT_5551_BGRA: return "A1R5G5B5_UNORM_PACK16";174case GPU_DBG_FORMAT_4444_BGRA: return "A4R4G4B4_UNORM_PACK16";175case GPU_DBG_FORMAT_8888_BGRA: return "B8G8R8A8_UNORM";176177case GPU_DBG_FORMAT_FLOAT: return "D32F";178case GPU_DBG_FORMAT_16BIT: return "D16";179case GPU_DBG_FORMAT_8BIT: return "S8";180case GPU_DBG_FORMAT_24BIT_8X: return "D24_X8";181case GPU_DBG_FORMAT_24X_8BIT: return "X24_S8";182183case GPU_DBG_FORMAT_FLOAT_DIV_256: return "D32F_DIV_256";184case GPU_DBG_FORMAT_24BIT_8X_DIV_256: return "D32F_X8_DIV_256";185186case GPU_DBG_FORMAT_888_RGB: return "R8G8B8_UNORM";187188case GPU_DBG_FORMAT_INVALID:189case GPU_DBG_FORMAT_BRSWAP_FLAG:190default:191return "UNDEFINED";192}193}194195// Note: Calls req.Respond(). 
Other data can be added afterward.196static bool StreamBufferToBase64(DebuggerRequest &req, const GPUDebugBuffer &buf, bool isFramebuffer) {197size_t length = buf.GetStride() * buf.GetHeight();198199auto &json = req.Respond();200json.writeInt("width", buf.GetStride());201json.writeInt("height", buf.GetHeight());202json.writeBool("flipped", buf.GetFlipped());203json.writeString("format", DescribeFormat(buf.GetFormat()));204if (isFramebuffer) {205json.writeBool("isFramebuffer", isFramebuffer);206}207208// Start a value without any actual data yet...209json.writeRaw("base64", "");210req.Flush();211212// Now we'll write it directly to the stream.213req.ws->AddFragment(false, "\"");214// 65535 is an "even" number of base64 characters.215static const size_t CHUNK_SIZE = 65535;216for (size_t i = 0; i < length; i += CHUNK_SIZE) {217size_t left = std::min(length - i, CHUNK_SIZE);218req.ws->AddFragment(false, Base64Encode(buf.GetData() + i, left));219}220req.ws->AddFragment(false, "\"");221222return true;223}224225static void GenericStreamBuffer(DebuggerRequest &req, std::function<bool(const GPUDebugBuffer *&, bool *isFramebuffer)> func) {226if (!currentDebugMIPS->isAlive()) {227return req.Fail("CPU not started");228}229if (coreState != CORE_STEPPING && !GPUStepping::IsStepping()) {230return req.Fail("Neither CPU or GPU is stepping");231}232233bool includeAlpha = false;234if (!req.ParamBool("alpha", &includeAlpha, DebuggerParamType::OPTIONAL))235return;236u32 stackWidth = 0;237if (!req.ParamU32("stackWidth", &stackWidth, false, DebuggerParamType::OPTIONAL))238return;239std::string type = "uri";240if (!req.ParamString("type", &type, DebuggerParamType::OPTIONAL))241return;242if (type != "uri" && type != "base64")243return req.Fail("Parameter 'type' must be either 'uri' or 'base64'");244245const GPUDebugBuffer *buf = nullptr;246bool isFramebuffer = false;247if (!func(buf, &isFramebuffer)) {248return req.Fail("Could not download output");249}250_assert_(buf != nullptr);251252if 
(type == "base64") {253StreamBufferToBase64(req, *buf, isFramebuffer);254} else if (type == "uri") {255StreamBufferToDataURI(req, *buf, isFramebuffer, includeAlpha, stackWidth);256} else {257_assert_(false);258}259}260261// Retrieve a screenshot (gpu.buffer.screenshot)262//263// Parameters:264// - type: either 'uri' or 'base64'.265// - alpha: boolean to include the alpha channel for 'uri' type (not normally useful for screenshots.)266//267// Response (same event name) for 'uri' type:268// - width: numeric width of screenshot.269// - height: numeric height of screenshot.270// - uri: data: URI of PNG image for display.271//272// Response (same event name) for 'base64' type:273// - width: numeric width of screenshot (also stride, in pixels, of binary data.)274// - height: numeric height of screenshot.275// - flipped: boolean to indicate whether buffer is vertically flipped.276// - format: string indicating format, such as 'R8G8B8A8_UNORM' or 'B8G8R8A8_UNORM'.277// - base64: base64 encode of binary data.278void WebSocketGPUBufferScreenshot(DebuggerRequest &req) {279GenericStreamBuffer(req, [](const GPUDebugBuffer *&buf, bool *isFramebuffer) {280*isFramebuffer = false;281return GPUStepping::GPU_GetOutputFramebuffer(buf);282});283}284285// Retrieve current color render buffer (gpu.buffer.renderColor)286//287// Parameters:288// - type: either 'uri' or 'base64'.289// - alpha: boolean to include the alpha channel for 'uri' type.290//291// Response (same event name) for 'uri' type:292// - width: numeric width of render buffer (may include stride.)293// - height: numeric height of render buffer.294// - uri: data: URI of PNG image for display.295//296// Response (same event name) for 'base64' type:297// - width: numeric width of render buffer (also stride, in pixels, of binary data.)298// - height: numeric height of render buffer.299// - flipped: boolean to indicate whether buffer is vertically flipped.300// - format: string indicating format, such as 'R8G8B8A8_UNORM' or 
'B8G8R8A8_UNORM'.301// - base64: base64 encode of binary data.302void WebSocketGPUBufferRenderColor(DebuggerRequest &req) {303GenericStreamBuffer(req, [](const GPUDebugBuffer *&buf, bool *isFramebuffer) {304*isFramebuffer = false;305return GPUStepping::GPU_GetCurrentFramebuffer(buf, GPU_DBG_FRAMEBUF_RENDER);306});307}308309// Retrieve current depth render buffer (gpu.buffer.renderDepth)310//311// Parameters:312// - type: either 'uri' or 'base64'.313// - alpha: true to use alpha to encode depth, otherwise red for 'uri' type.314//315// Response (same event name) for 'uri' type:316// - width: numeric width of render buffer (may include stride.)317// - height: numeric height of render buffer.318// - uri: data: URI of PNG image for display.319//320// Response (same event name) for 'base64' type:321// - width: numeric width of render buffer (also stride, in pixels, of binary data.)322// - height: numeric height of render buffer.323// - flipped: boolean to indicate whether buffer is vertically flipped.324// - format: string indicating format, such as 'D16', 'D24_X8' or 'D32F'.325// - base64: base64 encode of binary data.326void WebSocketGPUBufferRenderDepth(DebuggerRequest &req) {327GenericStreamBuffer(req, [](const GPUDebugBuffer *&buf, bool *isFramebuffer) {328*isFramebuffer = false;329return GPUStepping::GPU_GetCurrentDepthbuffer(buf);330});331}332333// Retrieve current stencil render buffer (gpu.buffer.renderStencil)334//335// Parameters:336// - type: either 'uri' or 'base64'.337// - alpha: true to use alpha to encode stencil, otherwise red for 'uri' type.338//339// Response (same event name) for 'uri' type:340// - width: numeric width of render buffer (may include stride.)341// - height: numeric height of render buffer.342// - uri: data: URI of PNG image for display.343//344// Response (same event name) for 'base64' type:345// - width: numeric width of render buffer (also stride, in pixels, of binary data.)346// - height: numeric height of render buffer.347// - 
flipped: boolean to indicate whether buffer is vertically flipped.348// - format: string indicating format, such as 'X24_S8' or 'S8'.349// - base64: base64 encode of binary data.350void WebSocketGPUBufferRenderStencil(DebuggerRequest &req) {351GenericStreamBuffer(req, [](const GPUDebugBuffer *&buf, bool *isFramebuffer) {352*isFramebuffer = false;353return GPUStepping::GPU_GetCurrentStencilbuffer(buf);354});355}356357// Retrieve current texture (gpu.buffer.texture)358//359// Parameters:360// - type: either 'uri' or 'base64'.361// - alpha: boolean to include the alpha channel for 'uri' type.362// - level: texture mip level, default 0.363//364// Response (same event name) for 'uri' type:365// - width: numeric width of the texture (often wider than visual.)366// - height: numeric height of the texture (often wider than visual.)367// - isFramebuffer: optional, present and true if this came from a hardware framebuffer.368// - uri: data: URI of PNG image for display.369//370// Response (same event name) for 'base64' type:371// - width: numeric width and stride of the texture (often wider than visual.)372// - height: numeric height of the texture (often wider than visual.)373// - flipped: boolean to indicate whether buffer is vertically flipped.374// - format: string indicating format, such as 'R8G8B8A8_UNORM' or 'B8G8R8A8_UNORM'.375// - isFramebuffer: optional, present and true if this came from a hardware framebuffer.376// - base64: base64 encode of binary data.377void WebSocketGPUBufferTexture(DebuggerRequest &req) {378u32 level = 0;379if (!req.ParamU32("level", &level, false, DebuggerParamType::OPTIONAL))380return;381382GenericStreamBuffer(req, [level](const GPUDebugBuffer *&buf, bool *isFramebuffer) {383return GPUStepping::GPU_GetCurrentTexture(buf, level, isFramebuffer);384});385}386387// Retrieve current CLUT (gpu.buffer.clut)388//389// Parameters:390// - type: either 'uri' or 'base64'.391// - alpha: boolean to include the alpha channel for 'uri' type.392// - 
stackWidth: forced width for 'uri' type (increases height.)393//394// Response (same event name) for 'uri' type:395// - width: numeric width of CLUT.396// - height: numeric height of CLUT.397// - uri: data: URI of PNG image for display.398//399// Response (same event name) for 'base64' type:400// - width: number of pixels in CLUT.401// - height: always 1.402// - flipped: boolean to indicate whether buffer is vertically flipped.403// - format: string indicating format, such as 'R8G8B8A8_UNORM' or 'B8G8R8A8_UNORM'.404// - base64: base64 encode of binary data.405void WebSocketGPUBufferClut(DebuggerRequest &req) {406GenericStreamBuffer(req, [](const GPUDebugBuffer *&buf, bool *isFramebuffer) {407// TODO: Or maybe it could be?408*isFramebuffer = false;409return GPUStepping::GPU_GetCurrentClut(buf);410});411}412413414