Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(ext/canvas): OffscreenCanvas #23773

Draft
wants to merge 14 commits into
base: main
Choose a base branch
from
Prev Previous commit
Next Next commit
work
  • Loading branch information
crowlKats committed Jun 4, 2024
commit 40a75b6d68a30cecf87d264577174dd24da08810
11 changes: 2 additions & 9 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

84 changes: 84 additions & 0 deletions ext/canvas/02_canvas.js
Original file line number Diff line number Diff line change
Expand Up @@ -143,6 +143,8 @@ class OffscreenCanvas extends EventTarget {
);
}

this[_fillCanvasBitmapHook]?.();

let image = webidl.createBranded(ImageBitmap);
image[_bitmapData] = this[_canvasBitmap].data;
image[_width] = this[_width];
Expand All @@ -160,6 +162,9 @@ class OffscreenCanvas extends EventTarget {
"Argument 1",
);

this[_context][_fillCanvasBitmapHook]?.();


// TODO: If the value of this OffscreenCanvas object's [[Detached]] internal slot is set to true, then return a promise rejected with an "InvalidStateError" DOMException.

if (this[_canvasBitmap].data.length === 0) {
Expand Down Expand Up @@ -242,6 +247,7 @@ const _configuration = Symbol("[[configuration]]");
const _textureDescriptor = Symbol("[[textureDescriptor]]");
const _currentTexture = Symbol("[[currentTexture]]");
const _drawingBuffer = Symbol("[[drawingBuffer]]");
const _fillCanvasBitmapHook = Symbol("[[fillCanvasBitmapHook]]");
class GPUCanvasContext {
[_configuration];
/** @type {GPUTexture | undefined} */
Expand All @@ -258,6 +264,8 @@ class GPUCanvasContext {
}

configure(configuration) {
loadWebGPU();

webidl.assertBranded(this, GPUCanvasContextPrototype);
const prefix = "Failed to execute 'configure' on 'GPUCanvasContext'";
webidl.requiredArguments(arguments.length, 1, prefix);
Expand Down Expand Up @@ -307,6 +315,10 @@ class GPUCanvasContext {
return this[_currentTexture];
}

[_fillCanvasBitmapHook]() {
this[_canvas][_canvasBitmap].data = getCopyOfImageContent(this);
}

[SymbolFor("Deno.privateCustomInspect")](inspect, inspectOptions) {
return inspect(
createFilteredInspectProxy({
Expand Down Expand Up @@ -335,6 +347,8 @@ function getTextureDescriptorForCanvasAndConfiguration(canvas, configuration) {
function replaceDrawingBuffer(context) {
expireCurrentTexture(context);



// TODO
}

Expand All @@ -345,6 +359,76 @@ function expireCurrentTexture(context) {
}
}

/**
 * Reads back the current contents of a GPUCanvasContext's texture as a
 * tightly-packed Uint8Array (`unpadded * height` bytes).
 *
 * Must be `async`: it awaits `GPUBuffer.mapAsync` (the original declared a
 * plain function, making the `await` a SyntaxError).
 *
 * WebGPU requires `bytesPerRow` of a texture-to-buffer copy to be a multiple
 * of 256 (`COPY_BYTES_PER_ROW_ALIGNMENT`), so the copy is performed with
 * padded rows and the padding is stripped before returning.
 */
async function getCopyOfImageContent(context) {
  const texture = context[_currentTexture];
  const device = context[_configuration].device;
  const width = context[_canvas][_width];
  const height = context[_canvas][_height];
  const { padded, unpadded } = getRowPadding(width);

  const encoder = device.createCommandEncoder({
    label: "GPUCanvasCopyCommandEncoder",
  });
  // Readback buffer is sized for the *padded* rows the copy writes.
  const outputBuffer = device.createBuffer({
    label: "GPUCanvasCopyBuffer",
    size: padded * height,
    usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
  });

  encoder.copyTextureToBuffer(
    {
      texture,
    },
    {
      buffer: outputBuffer,
      // Must satisfy the 256-byte alignment requirement.
      bytesPerRow: padded,
    },
    {
      width,
      height,
    },
  );

  device.queue.submit([encoder.finish()]);

  // 1 === GPUMapMode.READ
  await outputBuffer.mapAsync(1);

  const paddedData = new Uint8Array(outputBuffer.getMappedRange());

  // Strip the per-row alignment padding into a tightly-packed copy.
  const data = new Uint8Array(unpadded * height);
  for (let row = 0; row < height; row++) {
    data.set(
      paddedData.subarray(row * padded, row * padded + unpadded),
      row * unpadded,
    );
  }

  // Release the mapping; the copy above owns its own storage.
  outputBuffer.unmap();

  return data;
}


/** Buffer-Texture copies must have [`bytes_per_row`] aligned to this number. */
export const COPY_BYTES_PER_ROW_ALIGNMENT = 256;

/** Number of bytes per pixel. */
export const BYTES_PER_PIXEL = 3;

/**
 * Computes the tightly-packed (`unpadded`) and alignment-padded (`padded`)
 * bytes-per-row for a texture of the given pixel width.
 *
 * WebGPU requires `GPUImageCopyBuffer.layout.bytesPerRow` to be a multiple of
 * `COPY_BYTES_PER_ROW_ALIGNMENT`, so `padded` is the unpadded row size rounded
 * up to the next multiple of that alignment.
 */
export function getRowPadding(width) {
  const unpadded = width * BYTES_PER_PIXEL;
  const padded = Math.ceil(unpadded / COPY_BYTES_PER_ROW_ALIGNMENT) *
    COPY_BYTES_PER_ROW_ALIGNMENT;

  return {
    unpadded,
    padded,
  };
}

// ENUM: OffscreenRenderingContextId
webidl.converters["OffscreenRenderingContextId"] = webidl.createEnumConverter(
"OffscreenRenderingContextId",
Expand Down
2 changes: 1 addition & 1 deletion ext/canvas/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,6 @@ path = "lib.rs"
[dependencies]
deno_core.workspace = true
deno_webgpu.workspace = true
image = { version = "0.24.7", default-features = false, features = ["png"] }
image = { version = "0.25.1", default-features = false, features = ["png"] }
serde = { workspace = true, features = ["derive"] }
tokio = { workspace = true, features = ["full"] }
13 changes: 7 additions & 6 deletions ext/canvas/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,11 @@ use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::ToJsBuffer;
use image::imageops::FilterType;
use image::ColorType;
use image::ExtendedColorType;
use image::GenericImageView;
use image::ImageEncoder;
use image::Pixel;
use image::RgbImage;
use image::RgbaImage;
use serde::Deserialize;
use serde::Serialize;
Expand Down Expand Up @@ -44,15 +45,14 @@ fn op_image_process(
#[buffer] buf: &[u8],
#[serde] args: ImageProcessArgs,
) -> Result<ToJsBuffer, AnyError> {
let view =
RgbaImage::from_vec(args.width, args.height, buf.to_vec()).unwrap();
let view = RgbImage::from_vec(args.width, args.height, buf.to_vec()).unwrap();

let surface = if !(args.width == args.surface_width
&& args.height == args.surface_height
&& args.input_x == 0
&& args.input_y == 0)
{
let mut surface = RgbaImage::new(args.surface_width, args.surface_height);
let mut surface = RgbImage::new(args.surface_width, args.surface_height);

image::imageops::overlay(&mut surface, &view, args.input_x, args.input_y);

Expand Down Expand Up @@ -118,7 +118,7 @@ struct DecodedPng {
#[op2]
#[serde]
fn op_image_decode_png(#[buffer] buf: &[u8]) -> Result<DecodedPng, AnyError> {
let reader = std::io::BufReader::new(buf);
let reader = std::io::BufReader::new(std::io::Cursor::new(buf));
let decoder = image::codecs::png::PngDecoder::new(reader)?;
let image = image::DynamicImage::from_decoder(decoder)?;
let (width, height) = image.dimensions();
Expand All @@ -139,8 +139,9 @@ fn op_image_encode_png(
) -> Result<Option<ToJsBuffer>, AnyError> {
let mut out = vec![];
let png = image::codecs::png::PngEncoder::new(&mut out);

if png
.write_image(buf, width, height, ColorType::Rgba8)
.write_image(buf, width, height, ExtendedColorType::Rgb8)
.is_err()
{
return Ok(None);
Expand Down
4 changes: 3 additions & 1 deletion ext/webgpu/01_webgpu.js
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,7 @@ import {
op_webgpu_request_device,
op_webgpu_write_buffer,
op_webgpu_write_texture,
op_webgpu_queue_on_submitted_work_done,
} from "ext:core/ops";
const {
ArrayBuffer,
Expand Down Expand Up @@ -1933,7 +1934,8 @@ class GPUQueue {

onSubmittedWorkDone() {
webidl.assertBranded(this, GPUQueuePrototype);
return PromiseResolve();

return op_webgpu_queue_on_submitted_work_done(this[_rid]);
}

/**
Expand Down
1 change: 1 addition & 0 deletions ext/webgpu/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -211,6 +211,7 @@ deno_core::extension!(
queue::op_webgpu_queue_submit,
queue::op_webgpu_write_buffer,
queue::op_webgpu_write_texture,
queue::op_webgpu_queue_on_submitted_work_done,
// shader
shader::op_webgpu_create_shader_module,
// surface
Expand Down
31 changes: 31 additions & 0 deletions ext/webgpu/queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,14 @@
use crate::command_encoder::WebGpuCommandBuffer;
use crate::Instance;
use deno_core::error::AnyError;
use deno_core::futures::channel::oneshot;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
use deno_core::ResourceId;
use serde::Deserialize;
use std::borrow::Cow;
use std::cell::RefCell;
use std::rc::Rc;

use super::error::WebGpuResult;
Expand Down Expand Up @@ -141,3 +143,32 @@ pub fn op_webgpu_write_texture(
&size
))
}

/// Async op backing `GPUQueue.onSubmittedWorkDone()`: resolves once all work
/// submitted to the given queue prior to this call has completed on the GPU.
///
/// Errors if the queue resource id is invalid, or if the completion callback
/// side of the channel is dropped without firing.
#[op2(async)]
#[serde]
pub async fn op_webgpu_queue_on_submitted_work_done(
  state: Rc<RefCell<OpState>>,
  #[smi] queue_rid: ResourceId,
) -> Result<(), AnyError> {
  // One-shot channel bridging wgpu's completion callback to this future.
  let (sender, receiver) = oneshot::channel::<()>();

  // Scope the OpState borrow so it is released before we await the receiver;
  // holding the borrow across the await would block other ops on this state.
  {
    let state_ = state.borrow();
    let instance = state_.borrow::<Instance>();

    let queue_resource = state_.resource_table.get::<WebGpuQueue>(queue_rid)?;
    let queue = queue_resource.1;

    // Callback invoked by wgpu once the queue's submitted work is done;
    // `sender` is moved into the closure and consumed on first fire.
    let closure = wgpu_core::device::queue::SubmittedWorkDoneClosure::from_rust(
      Box::new(|| {
        sender.send(()).unwrap();
      }),
    );

    gfx_select!(queue => instance.queue_on_submitted_work_done(queue, closure))?;
  }

  // Completes when the callback above sends; errors if it was dropped.
  receiver.await?;

  Ok(())
}
}