/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "CanvasContext.h"

#include <apex/window.h>
#include <fcntl.h>
#include <gui/TraceUtils.h>
#include <strings.h>
#include <sys/stat.h>
#include <ui/Fence.h>

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <functional>

#include "../Properties.h"
#include "AnimationContext.h"
#include "Frame.h"
#include "LayerUpdateQueue.h"
#include "Properties.h"
#include "RenderThread.h"
#include "hwui/Canvas.h"
#include "pipeline/skia/SkiaOpenGLPipeline.h"
#include "pipeline/skia/SkiaPipeline.h"
#include "pipeline/skia/SkiaVulkanPipeline.h"
#include "thread/CommonPool.h"
#include "utils/GLUtils.h"
#include "utils/TimeUtils.h"

#define LOG_FRAMETIME_MMA 0

#if LOG_FRAMETIME_MMA
static float sBenchMma = 0;
static int sFrameCount = 0;
static const float NANOS_PER_MILLIS_F = 1000000.0f;
#endif

namespace android {
namespace uirenderer {
namespace renderthread {

namespace {
class ScopedActiveContext {
public:
    ScopedActiveContext(CanvasContext* context) { sActiveContext = context; }

    ~ScopedActiveContext() { sActiveContext = nullptr; }

    static CanvasContext* getActiveContext() { return sActiveContext; }

private:
    static CanvasContext* sActiveContext;
};

CanvasContext* ScopedActiveContext::sActiveContext = nullptr;
} /* namespace */

CanvasContext* CanvasContext::create(RenderThread& thread, bool translucent,
                                     RenderNode* rootRenderNode, IContextFactory* contextFactory,
                                     int32_t uiThreadId, int32_t renderThreadId) {
    auto renderType = Properties::getRenderPipelineType();

    switch (renderType) {
        case RenderPipelineType::SkiaGL:
            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
                                     std::make_unique<skiapipeline::SkiaOpenGLPipeline>(thread),
                                     uiThreadId, renderThreadId);
        case RenderPipelineType::SkiaVulkan:
            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
                                     std::make_unique<skiapipeline::SkiaVulkanPipeline>(thread),
                                     uiThreadId, renderThreadId);
        default:
            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t)renderType);
            break;
    }
    return nullptr;
}

void CanvasContext::invokeFunctor(const RenderThread& thread, Functor* functor) {
    ATRACE_CALL();
    auto renderType = Properties::getRenderPipelineType();
    switch (renderType) {
        case RenderPipelineType::SkiaGL:
            skiapipeline::SkiaOpenGLPipeline::invokeFunctor(thread, functor);
            break;
        case RenderPipelineType::SkiaVulkan:
            skiapipeline::SkiaVulkanPipeline::invokeFunctor(thread, functor);
            break;
        default:
            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t)renderType);
            break;
    }
}

void CanvasContext::prepareToDraw(const RenderThread& thread, Bitmap* bitmap) {
    skiapipeline::SkiaPipeline::prepareToDraw(thread, bitmap);
}

CanvasContext::CanvasContext(RenderThread& thread, bool translucent, RenderNode* rootRenderNode,
                             IContextFactory* contextFactory,
                             std::unique_ptr<IRenderPipeline> renderPipeline, pid_t uiThreadId,
                             pid_t renderThreadId)
        : mRenderThread(thread)
        , mGenerationID(0)
        , mOpaque(!translucent)
        , mAnimationContext(contextFactory->createAnimationContext(mRenderThread.timeLord()))
        , mJankTracker(&thread.globalProfileData())
        , mProfiler(mJankTracker.frames(), thread.timeLord().frameIntervalNanos())
        , mContentDrawBounds(0, 0, 0, 0)
        , mRenderPipeline(std::move(renderPipeline))
        , mHintSessionWrapper(uiThreadId, renderThreadId) {
    mRenderThread.cacheManager().registerCanvasContext(this);
    rootRenderNode->makeRoot();
    mRenderNodes.emplace_back(rootRenderNode);
    mProfiler.setDensity(DeviceInfo::getDensity());
}

CanvasContext::~CanvasContext() {
    destroy();
    for (auto& node : mRenderNodes) {
        node->clearRoot();
    }
    mRenderNodes.clear();
    mRenderThread.cacheManager().unregisterCanvasContext(this);
}

void CanvasContext::addRenderNode(RenderNode* node, bool placeFront) {
    int pos = placeFront ? 0 : static_cast<int>(mRenderNodes.size());
    node->makeRoot();
    mRenderNodes.emplace(mRenderNodes.begin() + pos, node);
}

void CanvasContext::removeRenderNode(RenderNode* node) {
    node->clearRoot();
    mRenderNodes.erase(std::remove(mRenderNodes.begin(), mRenderNodes.end(), node),
                       mRenderNodes.end());
}

void CanvasContext::destroy() {
    stopDrawing();
    setHardwareBuffer(nullptr);
    setSurface(nullptr);
    setSurfaceControl(nullptr);
    freePrefetchedLayers();
    destroyHardwareResources();
    mAnimationContext->destroy();
    mRenderThread.cacheManager().onContextStopped(this);
}

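// Sizes the window's buffer queue: query the producer's minimum undequeued buffer count and
// request two more, so one buffer can be dequeued for rendering while another is queued for
// presentation.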
static void setBufferCount(ANativeWindow* window) {
    int query_value;
    int err = window->query(window, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &query_value);
    if (err != 0 || query_value < 0) {
        ALOGE("window->query failed: %s (%d) value=%d", strerror(-err), err, query_value);
        return;
    }
    auto min_undequeued_buffers = static_cast<uint32_t>(query_value);

    // We only need to set min_undequeued + 2 because the renderahead amount was already
    // factored into the query for min_undequeued
    int bufferCount = min_undequeued_buffers + 2;
    native_window_set_buffer_count(window, bufferCount);
}

void CanvasContext::setHardwareBuffer(AHardwareBuffer* buffer) {
    if (mHardwareBuffer) {
        AHardwareBuffer_release(mHardwareBuffer);
        mHardwareBuffer = nullptr;
    }

    if (buffer) {
        AHardwareBuffer_acquire(buffer);
        mHardwareBuffer = buffer;
    }
    mRenderPipeline->setHardwareBuffer(mHardwareBuffer);
}

void CanvasContext::setSurface(ANativeWindow* window, bool enableTimeout) {
    ATRACE_CALL();

    startHintSession();
    if (window) {
        mNativeSurface = std::make_unique<ReliableSurface>(window);
        mNativeSurface->init();
        if (enableTimeout) {
            // TODO: Fix error handling & re-shorten timeout
            ANativeWindow_setDequeueTimeout(window, 4000_ms);
        }
    } else {
        mNativeSurface = nullptr;
    }
    setupPipelineSurface();
}

void CanvasContext::setSurfaceControl(ASurfaceControl* surfaceControl) {
    if (surfaceControl == mSurfaceControl) return;

    auto funcs = mRenderThread.getASurfaceControlFunctions();

    if (surfaceControl == nullptr) {
        setASurfaceTransactionCallback(nullptr);
        setPrepareSurfaceControlForWebviewCallback(nullptr);
    }

    if (mSurfaceControl != nullptr) {
        funcs.unregisterListenerFunc(this, &onSurfaceStatsAvailable);
        funcs.releaseFunc(mSurfaceControl);
    }
    mSurfaceControl = surfaceControl;
    mSurfaceControlGenerationId++;
    mExpectSurfaceStats = surfaceControl != nullptr;
    if (mExpectSurfaceStats) {
        funcs.acquireFunc(mSurfaceControl);
        funcs.registerListenerFunc(surfaceControl, mSurfaceControlGenerationId, this,
                                   &onSurfaceStatsAvailable);
    }
}

void CanvasContext::setupPipelineSurface() {
    bool hasSurface = mRenderPipeline->setSurface(
            mNativeSurface ? mNativeSurface->getNativeWindow() : nullptr, mSwapBehavior);

    if (mNativeSurface && !mNativeSurface->didSetExtraBuffers()) {
        setBufferCount(mNativeSurface->getNativeWindow());
    }

    mFrameNumber = 0;

    if (mNativeSurface != nullptr && hasSurface) {
        mHaveNewSurface = true;
        mSwapHistory.clear();
        // Enable frame stats after the surface has been bound to the appropriate graphics API.
        // Order is important when new and old surfaces are the same, because old surface has
        // its frame stats disabled automatically.
        native_window_enable_frame_timestamps(mNativeSurface->getNativeWindow(), true);
        native_window_set_scaling_mode(mNativeSurface->getNativeWindow(),
                                       NATIVE_WINDOW_SCALING_MODE_FREEZE);
    } else {
        mRenderThread.removeFrameCallback(this);
        mGenerationID++;
    }
}

void CanvasContext::setSwapBehavior(SwapBehavior swapBehavior) {
    mSwapBehavior = swapBehavior;
}

bool CanvasContext::pauseSurface() {
    mGenerationID++;
    return mRenderThread.removeFrameCallback(this);
}

void CanvasContext::setStopped(bool stopped) {
    if (mStopped != stopped) {
        mStopped = stopped;
        if (mStopped) {
            mGenerationID++;
            mRenderThread.removeFrameCallback(this);
            mRenderPipeline->onStop();
            mRenderThread.cacheManager().onContextStopped(this);
        } else if (mIsDirty && hasOutputTarget()) {
            mRenderThread.postFrameCallback(this);
        }
    }
}

void CanvasContext::allocateBuffers() {
    if (mNativeSurface && Properties::isDrawingEnabled()) {
        ANativeWindow_tryAllocateBuffers(mNativeSurface->getNativeWindow());
    }
}

void CanvasContext::setLightAlpha(uint8_t ambientShadowAlpha, uint8_t spotShadowAlpha) {
    mLightInfo.ambientShadowAlpha = ambientShadowAlpha;
    mLightInfo.spotShadowAlpha = spotShadowAlpha;
}

void CanvasContext::setLightGeometry(const Vector3& lightCenter, float lightRadius) {
    mLightGeometry.center = lightCenter;
    mLightGeometry.radius = lightRadius;
}

void CanvasContext::setOpaque(bool opaque) {
    mOpaque = opaque;
}

float CanvasContext::setColorMode(ColorMode mode) {
    if (mode != mColorMode) {
        mColorMode = mode;
        mRenderPipeline->setSurfaceColorProperties(mode);
        setupPipelineSurface();
    }
    switch (mColorMode) {
        case ColorMode::Hdr:
            return Properties::maxHdrHeadroomOn8bit;
        case ColorMode::Hdr10:
            return 10.f;
        default:
            return 1.f;
    }
}

float CanvasContext::targetSdrHdrRatio() const {
    if (mColorMode == ColorMode::Hdr || mColorMode == ColorMode::Hdr10) {
        return mTargetSdrHdrRatio;
    } else {
        return 1.f;
    }
}

void CanvasContext::setTargetSdrHdrRatio(float ratio) {
    if (mTargetSdrHdrRatio == ratio) return;

    mTargetSdrHdrRatio = ratio;
    mRenderPipeline->setTargetSdrHdrRatio(ratio);
    // We don't actually have a new surface, but we need to behave as if we do. Specifically,
    // we need to ensure all buffers in the swapchain are fully re-rendered, since any partial
    // updates to them would result in mixed target white points, which looks bad and flickery.
    mHaveNewSurface = true;
}

bool CanvasContext::makeCurrent() {
    if (mStopped) return false;

    auto result = mRenderPipeline->makeCurrent();
    switch (result) {
        case MakeCurrentResult::AlreadyCurrent:
            return true;
        case MakeCurrentResult::Failed:
            mHaveNewSurface = true;
            setSurface(nullptr);
            return false;
        case MakeCurrentResult::Succeeded:
            mHaveNewSurface = true;
            return true;
        default:
            LOG_ALWAYS_FATAL("unexpected result %d from IRenderPipeline::makeCurrent",
                             (int32_t)result);
    }

    return true;
}

static bool wasSkipped(FrameInfo* info) {
    return info && ((*info)[FrameInfoIndex::Flags] & FrameInfoFlags::SkippedFrame);
}

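// Heuristic: with a full window of swap history, report the swap chain as stuffed only if every
// recorded swap saw a slow dequeue or queue time and consecutive swaps completed within a few
// frame intervals of each other (i.e. we never dropped a frame that would have drained the queue).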
bool CanvasContext::isSwapChainStuffed() {
    static const auto SLOW_THRESHOLD = 6_ms;

    if (mSwapHistory.size() != mSwapHistory.capacity()) {
        // We want at least 3 frames of history before attempting to
        // guess if the queue is stuffed
        return false;
    }
    nsecs_t frameInterval = mRenderThread.timeLord().frameIntervalNanos();
    auto& swapA = mSwapHistory[0];

    // Was there a happy queue & dequeue time? If so, don't
    // consider it stuffed
    if (swapA.dequeueDuration < SLOW_THRESHOLD && swapA.queueDuration < SLOW_THRESHOLD) {
        return false;
    }

    for (size_t i = 1; i < mSwapHistory.size(); i++) {
        auto& swapB = mSwapHistory[i];

        // If there's a multi-frameInterval gap we effectively already dropped a frame,
        // so consider the queue healthy.
        if (std::abs(swapA.swapCompletedTime - swapB.swapCompletedTime) > frameInterval * 3) {
            return false;
        }

        // Was there a happy queue & dequeue time? If so, don't
        // consider it stuffed
        if (swapB.dequeueDuration < SLOW_THRESHOLD && swapB.queueDuration < SLOW_THRESHOLD) {
            return false;
        }

        swapA = swapB;
    }

    // All signs point to a stuffed swap chain
    ATRACE_NAME("swap chain stuffed");
    return true;
}

void CanvasContext::prepareTree(TreeInfo& info, int64_t* uiFrameInfo, int64_t syncQueued,
                                RenderNode* target) {
    mRenderThread.removeFrameCallback(this);

    // If the previous frame was dropped we don't need to hold onto it, so
    // just keep using the previous frame's structure instead
    if (wasSkipped(mCurrentFrameInfo)) {
        // Use the oldest skipped frame in case we skip more than a single frame
        if (!mSkippedFrameInfo) {
            mSkippedFrameInfo.emplace();
            mSkippedFrameInfo->vsyncId =
                    mCurrentFrameInfo->get(FrameInfoIndex::FrameTimelineVsyncId);
            mSkippedFrameInfo->startTime = mCurrentFrameInfo->get(FrameInfoIndex::FrameStartTime);
        }
    } else {
        mCurrentFrameInfo = mJankTracker.startFrame();
        mSkippedFrameInfo.reset();
    }

    mCurrentFrameInfo->importUiThreadInfo(uiFrameInfo);
    mCurrentFrameInfo->set(FrameInfoIndex::SyncQueued) = syncQueued;
    mCurrentFrameInfo->markSyncStart();

    info.damageAccumulator = &mDamageAccumulator;
    info.layerUpdateQueue = &mLayerUpdateQueue;
    info.damageGenerationId = mDamageId++;
    info.out.canDrawThisFrame = true;

    mAnimationContext->startFrame(info.mode);
    for (const sp<RenderNode>& node : mRenderNodes) {
        // Only the primary target node will be drawn full - all other nodes would get drawn in
        // real time mode. In case of a window, the primary node is the window content and the
        // other node(s) are non client / filler nodes.
        info.mode = (node.get() == target ? TreeInfo::MODE_FULL : TreeInfo::MODE_RT_ONLY);
        node->prepareTree(info);
        GL_CHECKPOINT(MODERATE);
    }
    mAnimationContext->runRemainingAnimations(info);
    GL_CHECKPOINT(MODERATE);

    freePrefetchedLayers();
    GL_CHECKPOINT(MODERATE);

    mIsDirty = true;

    if (CC_UNLIKELY(!hasOutputTarget())) {
        mCurrentFrameInfo->addFlag(FrameInfoFlags::SkippedFrame);
        info.out.canDrawThisFrame = false;
        return;
    }

    if (CC_LIKELY(mSwapHistory.size() && !info.forceDrawFrame)) {
        nsecs_t latestVsync = mRenderThread.timeLord().latestVsync();
        SwapHistory& lastSwap = mSwapHistory.back();
        nsecs_t vsyncDelta = std::abs(lastSwap.vsyncTime - latestVsync);
        // The slight fudge-factor is to deal with cases where
        // the vsync was estimated due to being slow handling the signal.
        // See the logic in TimeLord#computeFrameTimeNanos or in
        // Choreographer.java for details on when this happens
        if (vsyncDelta < 2_ms) {
            // Already drew for this vsync pulse, UI draw request missed
            // the deadline for RT animations
            info.out.canDrawThisFrame = false;
        }
    } else {
        info.out.canDrawThisFrame = true;
    }

    // TODO: Do we need to abort out if the backdrop is added but not ready? Should that even
    // be an allowable combination?
    if (mRenderNodes.size() > 2 && !mRenderNodes[1]->isRenderable()) {
        info.out.canDrawThisFrame = false;
    }

    if (info.out.canDrawThisFrame) {
        int err = mNativeSurface->reserveNext();
        if (err != OK) {
            mCurrentFrameInfo->addFlag(FrameInfoFlags::SkippedFrame);
            info.out.canDrawThisFrame = false;
            ALOGW("reserveNext failed, error = %d (%s)", err, strerror(-err));
            if (err != TIMED_OUT) {
                // A timed out surface can still recover, but assume others are permanently dead.
                setSurface(nullptr);
                return;
            }
        }
    } else {
        mCurrentFrameInfo->addFlag(FrameInfoFlags::SkippedFrame);
    }

    bool postedFrameCallback = false;
    if (info.out.hasAnimations || !info.out.canDrawThisFrame) {
        if (CC_UNLIKELY(!Properties::enableRTAnimations)) {
            info.out.requiresUiRedraw = true;
        }
        if (!info.out.requiresUiRedraw) {
            // If animationsNeedsRedraw is set don't bother posting for an RT anim
            // as we will just end up fighting the UI thread.
            mRenderThread.postFrameCallback(this);
            postedFrameCallback = true;
        }
    }

    if (!postedFrameCallback &&
        info.out.animatedImageDelay != TreeInfo::Out::kNoAnimatedImageDelay) {
        // Subtract the time of one frame so it can be displayed on time.
        const nsecs_t kFrameTime = mRenderThread.timeLord().frameIntervalNanos();
        if (info.out.animatedImageDelay <= kFrameTime) {
            mRenderThread.postFrameCallback(this);
        } else {
            const auto delay = info.out.animatedImageDelay - kFrameTime;
            int genId = mGenerationID;
            mRenderThread.queue().postDelayed(delay, [this, genId]() {
                if (mGenerationID == genId) {
                    mRenderThread.postFrameCallback(this);
                }
            });
        }
    }
}

void CanvasContext::stopDrawing() {
    mRenderThread.removeFrameCallback(this);
    mAnimationContext->pauseAnimators();
    mGenerationID++;
}

void CanvasContext::notifyFramePending() {
    ATRACE_CALL();
    mRenderThread.pushBackFrameCallback(this);
    sendLoadResetHint();
}

Frame CanvasContext::getFrame() {
    if (mHardwareBuffer != nullptr) {
        return {mBufferParams.getLogicalWidth(), mBufferParams.getLogicalHeight(), 0};
    } else {
        return mRenderPipeline->getFrame();
    }
}

void CanvasContext::draw(bool solelyTextureViewUpdates) {
    if (auto grContext = getGrContext()) {
        if (grContext->abandoned()) {
            LOG_ALWAYS_FATAL("GrContext is abandoned/device lost at start of CanvasContext::draw");
            return;
        }
    }
    SkRect dirty;
    mDamageAccumulator.finish(&dirty);

    // reset syncDelayDuration each time we draw
    nsecs_t syncDelayDuration = mSyncDelayDuration;
    nsecs_t idleDuration = mIdleDuration;
    mSyncDelayDuration = 0;
    mIdleDuration = 0;

    if (!Properties::isDrawingEnabled() ||
        (dirty.isEmpty() && Properties::skipEmptyFrames && !surfaceRequiresRedraw())) {
        mCurrentFrameInfo->addFlag(FrameInfoFlags::SkippedFrame);
        if (auto grContext = getGrContext()) {
            // Submit to ensure that any texture uploads complete and Skia can
            // free its staging buffers.
            grContext->flushAndSubmit();
        }

        // Notify the callbacks, even if there's nothing to draw so they aren't waiting
        // indefinitely
        waitOnFences();
        for (auto& func : mFrameCommitCallbacks) {
            std::invoke(func, false /* didProduceBuffer */);
        }
        mFrameCommitCallbacks.clear();
        return;
    }

    ScopedActiveContext activeContext(this);
    mCurrentFrameInfo->set(FrameInfoIndex::FrameInterval) =
            mRenderThread.timeLord().frameIntervalNanos();

    mCurrentFrameInfo->markIssueDrawCommandsStart();

    Frame frame = getFrame();

    SkRect windowDirty = computeDirtyRect(frame, &dirty);

    ATRACE_FORMAT("Drawing " RECT_STRING, SK_RECT_ARGS(dirty));

    IRenderPipeline::DrawResult drawResult;
    {
        // FrameInfoVisualizer accesses the frame events, which cannot be mutated mid-draw
        // or it can lead to memory corruption.
        // This lock is overly broad, but it's the quickest fix since this mutex is otherwise
        // not visible to IRenderPipeline much less FrameInfoVisualizer. And since this is
        // the thread we're primarily concerned about being responsive, this being too broad
        // shouldn't pose a performance issue.
        std::scoped_lock lock(mFrameMetricsReporterMutex);
        drawResult = mRenderPipeline->draw(frame, windowDirty, dirty, mLightGeometry,
                                           &mLayerUpdateQueue, mContentDrawBounds, mOpaque,
                                           mLightInfo, mRenderNodes, &(profiler()), mBufferParams);
    }

    uint64_t frameCompleteNr = getFrameNumber();

    waitOnFences();

    if (mNativeSurface) {
        // TODO(b/165985262): measure performance impact
        const auto vsyncId = mCurrentFrameInfo->get(FrameInfoIndex::FrameTimelineVsyncId);
        if (vsyncId != UiFrameInfoBuilder::INVALID_VSYNC_ID) {
            const auto inputEventId =
                    static_cast<int32_t>(mCurrentFrameInfo->get(FrameInfoIndex::InputEventId));
            const ANativeWindowFrameTimelineInfo ftl = {
                    .frameNumber = frameCompleteNr,
                    .frameTimelineVsyncId = vsyncId,
                    .inputEventId = inputEventId,
                    .startTimeNanos = mCurrentFrameInfo->get(FrameInfoIndex::FrameStartTime),
                    .useForRefreshRateSelection = solelyTextureViewUpdates,
                    .skippedFrameVsyncId = mSkippedFrameInfo ? mSkippedFrameInfo->vsyncId
                                                             : UiFrameInfoBuilder::INVALID_VSYNC_ID,
                    .skippedFrameStartTimeNanos =
                            mSkippedFrameInfo ? mSkippedFrameInfo->startTime : 0,
            };
            native_window_set_frame_timeline_info(mNativeSurface->getNativeWindow(), ftl);
        }
    }

    bool requireSwap = false;
    bool didDraw = false;

    int error = OK;
    bool didSwap = mRenderPipeline->swapBuffers(frame, drawResult.success, windowDirty,
                                                mCurrentFrameInfo, &requireSwap);

    mCurrentFrameInfo->set(FrameInfoIndex::CommandSubmissionCompleted) = std::max(
            drawResult.commandSubmissionTime, mCurrentFrameInfo->get(FrameInfoIndex::SwapBuffers));

    mIsDirty = false;

    if (requireSwap) {
        didDraw = true;
        // Handle any swapchain errors
        error = mNativeSurface->getAndClearError();
        if (error == TIMED_OUT) {
            // Try again
            mRenderThread.postFrameCallback(this);
            // But since this frame didn't happen, we need to mark full damage in the swap
            // history
            didDraw = false;

        } else if (error != OK || !didSwap) {
            // Unknown error, abandon the surface
            setSurface(nullptr);
            didDraw = false;
        }

        SwapHistory& swap = mSwapHistory.next();
        if (didDraw) {
            swap.damage = windowDirty;
        } else {
            float max = static_cast<float>(INT_MAX);
            swap.damage = SkRect::MakeWH(max, max);
        }
        swap.swapCompletedTime = systemTime(SYSTEM_TIME_MONOTONIC);
        swap.vsyncTime = mRenderThread.timeLord().latestVsync();
        if (didDraw) {
            nsecs_t dequeueStart =
                    ANativeWindow_getLastDequeueStartTime(mNativeSurface->getNativeWindow());
            if (dequeueStart < mCurrentFrameInfo->get(FrameInfoIndex::SyncStart)) {
                // Ignoring dequeue duration as it happened prior to frame render start
                // and thus is not part of the frame.
                swap.dequeueDuration = 0;
            } else {
                swap.dequeueDuration =
                        ANativeWindow_getLastDequeueDuration(mNativeSurface->getNativeWindow());
            }
            swap.queueDuration =
                    ANativeWindow_getLastQueueDuration(mNativeSurface->getNativeWindow());
        } else {
            swap.dequeueDuration = 0;
            swap.queueDuration = 0;
        }
        mCurrentFrameInfo->set(FrameInfoIndex::DequeueBufferDuration) = swap.dequeueDuration;
        mCurrentFrameInfo->set(FrameInfoIndex::QueueBufferDuration) = swap.queueDuration;
        mHaveNewSurface = false;
        mFrameNumber = 0;
    } else {
        mCurrentFrameInfo->set(FrameInfoIndex::DequeueBufferDuration) = 0;
        mCurrentFrameInfo->set(FrameInfoIndex::QueueBufferDuration) = 0;
    }

    mCurrentFrameInfo->markSwapBuffersCompleted();

#if LOG_FRAMETIME_MMA
    float thisFrame = mCurrentFrameInfo->duration(FrameInfoIndex::IssueDrawCommandsStart,
                                                  FrameInfoIndex::FrameCompleted) /
                      NANOS_PER_MILLIS_F;
    if (sFrameCount) {
        sBenchMma = ((9 * sBenchMma) + thisFrame) / 10;
    } else {
        sBenchMma = thisFrame;
    }
    if (++sFrameCount == 10) {
        sFrameCount = 1;
        ALOGD("Average frame time: %.4f", sBenchMma);
    }
#endif

    if (didSwap) {
        for (auto& func : mFrameCommitCallbacks) {
            std::invoke(func, true /* didProduceBuffer */);
        }
        mFrameCommitCallbacks.clear();
    }

    if (requireSwap) {
        if (mExpectSurfaceStats) {
            reportMetricsWithPresentTime();
            {  // acquire lock
                std::lock_guard lock(mLast4FrameMetricsInfosMutex);
                FrameMetricsInfo& next = mLast4FrameMetricsInfos.next();
                next.frameInfo = mCurrentFrameInfo;
                next.frameNumber = frameCompleteNr;
                next.surfaceId = mSurfaceControlGenerationId;
            }  // release lock
        } else {
            mCurrentFrameInfo->markFrameCompleted();
            mCurrentFrameInfo->set(FrameInfoIndex::GpuCompleted)
                    = mCurrentFrameInfo->get(FrameInfoIndex::FrameCompleted);
            std::scoped_lock lock(mFrameMetricsReporterMutex);
            mJankTracker.finishFrame(*mCurrentFrameInfo, mFrameMetricsReporter, frameCompleteNr,
                                     mSurfaceControlGenerationId);
        }
    }

    int64_t intendedVsync = mCurrentFrameInfo->get(FrameInfoIndex::IntendedVsync);
    int64_t frameDeadline = mCurrentFrameInfo->get(FrameInfoIndex::FrameDeadline);
    int64_t dequeueBufferDuration = mCurrentFrameInfo->get(FrameInfoIndex::DequeueBufferDuration);

    mHintSessionWrapper.updateTargetWorkDuration(frameDeadline - intendedVsync);

    if (didDraw) {
        int64_t frameStartTime = mCurrentFrameInfo->get(FrameInfoIndex::FrameStartTime);
        int64_t frameDuration = systemTime(SYSTEM_TIME_MONOTONIC) - frameStartTime;
        int64_t actualDuration = frameDuration -
                                 (std::min(syncDelayDuration, mLastDequeueBufferDuration)) -
                                 dequeueBufferDuration - idleDuration;
        mHintSessionWrapper.reportActualWorkDuration(actualDuration);
    }

    mLastDequeueBufferDuration = dequeueBufferDuration;

    mRenderThread.cacheManager().onFrameCompleted();
    return;
}

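// Publishes frame metrics for the oldest of the last four tracked frames, once its display
// present time can be read back from the window's frame timestamps.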
void CanvasContext::reportMetricsWithPresentTime() {
    {  // acquire lock
        std::scoped_lock lock(mFrameMetricsReporterMutex);
        if (mFrameMetricsReporter == nullptr) {
            return;
        }
    }  // release lock
    if (mNativeSurface == nullptr) {
        return;
    }
    ATRACE_CALL();
    FrameInfo* forthBehind;
    int64_t frameNumber;
    int32_t surfaceControlId;

    {  // acquire lock
        std::scoped_lock lock(mLast4FrameMetricsInfosMutex);
        if (mLast4FrameMetricsInfos.size() != mLast4FrameMetricsInfos.capacity()) {
            // Not enough frames yet
            return;
        }
        auto frameMetricsInfo = mLast4FrameMetricsInfos.front();
        forthBehind = frameMetricsInfo.frameInfo;
        frameNumber = frameMetricsInfo.frameNumber;
        surfaceControlId = frameMetricsInfo.surfaceId;
    }  // release lock

    nsecs_t presentTime = 0;
    native_window_get_frame_timestamps(
            mNativeSurface->getNativeWindow(), frameNumber, nullptr /*outRequestedPresentTime*/,
            nullptr /*outAcquireTime*/, nullptr /*outLatchTime*/,
            nullptr /*outFirstRefreshStartTime*/, nullptr /*outLastRefreshStartTime*/,
            nullptr /*outGpuCompositionDoneTime*/, &presentTime, nullptr /*outDequeueReadyTime*/,
            nullptr /*outReleaseTime*/);

    forthBehind->set(FrameInfoIndex::DisplayPresentTime) = presentTime;
    {  // acquire lock
        std::scoped_lock lock(mFrameMetricsReporterMutex);
        if (mFrameMetricsReporter != nullptr) {
            mFrameMetricsReporter->reportFrameMetrics(forthBehind->data(), true /*hasPresentTime*/,
                                                      frameNumber, surfaceControlId);
        }
    }  // release lock
}

void CanvasContext::addFrameMetricsObserver(FrameMetricsObserver* observer) {
    std::scoped_lock lock(mFrameMetricsReporterMutex);
    if (mFrameMetricsReporter.get() == nullptr) {
        mFrameMetricsReporter.reset(new FrameMetricsReporter());
    }

    // We want to make sure we aren't reporting frames that have already been queued by the
    // BufferQueueProducer on the render thread but are still pending the callback to report
    // their frame metrics.
    uint64_t nextFrameNumber = getFrameNumber();
    observer->reportMetricsFrom(nextFrameNumber, mSurfaceControlGenerationId);
    mFrameMetricsReporter->addObserver(observer);
}

void CanvasContext::removeFrameMetricsObserver(FrameMetricsObserver* observer) {
    std::scoped_lock lock(mFrameMetricsReporterMutex);
    if (mFrameMetricsReporter.get() != nullptr) {
        mFrameMetricsReporter->removeObserver(observer);
        if (!mFrameMetricsReporter->hasObservers()) {
            mFrameMetricsReporter.reset(nullptr);
        }
    }
}

FrameInfo* CanvasContext::getFrameInfoFromLast4(uint64_t frameNumber, uint32_t surfaceControlId) {
    std::scoped_lock lock(mLast4FrameMetricsInfosMutex);
    for (size_t i = 0; i < mLast4FrameMetricsInfos.size(); i++) {
        if (mLast4FrameMetricsInfos[i].frameNumber == frameNumber &&
            mLast4FrameMetricsInfos[i].surfaceId == surfaceControlId) {
            return mLast4FrameMetricsInfos[i].frameInfo;
        }
    }

    return nullptr;
}

void CanvasContext::onSurfaceStatsAvailable(void* context, int32_t surfaceControlId,
                                            ASurfaceControlStats* stats) {
    auto* instance = static_cast<CanvasContext*>(context);

    const ASurfaceControlFunctions& functions =
            instance->mRenderThread.getASurfaceControlFunctions();

    nsecs_t gpuCompleteTime = functions.getAcquireTimeFunc(stats);
    if (gpuCompleteTime == Fence::SIGNAL_TIME_PENDING) {
        gpuCompleteTime = -1;
    }
    uint64_t frameNumber = functions.getFrameNumberFunc(stats);

    FrameInfo* frameInfo = instance->getFrameInfoFromLast4(frameNumber, surfaceControlId);

    if (frameInfo != nullptr) {
        std::scoped_lock lock(instance->mFrameMetricsReporterMutex);
        frameInfo->set(FrameInfoIndex::FrameCompleted) = std::max(
                gpuCompleteTime, frameInfo->get(FrameInfoIndex::SwapBuffersCompleted));
        frameInfo->set(FrameInfoIndex::GpuCompleted) = std::max(
                gpuCompleteTime, frameInfo->get(FrameInfoIndex::CommandSubmissionCompleted));
        instance->mJankTracker.finishFrame(*frameInfo, instance->mFrameMetricsReporter, frameNumber,
                                           surfaceControlId);
    }
}

// Called by choreographer to do an RT-driven animation
void CanvasContext::doFrame() {
    if (!mRenderPipeline->isSurfaceReady()) return;
    mIdleDuration =
            systemTime(SYSTEM_TIME_MONOTONIC) - mRenderThread.timeLord().computeFrameTimeNanos();
    prepareAndDraw(nullptr);
}

SkISize CanvasContext::getNextFrameSize() const {
    static constexpr SkISize defaultFrameSize = {INT32_MAX, INT32_MAX};
    if (mNativeSurface == nullptr) {
        return defaultFrameSize;
    }
    ANativeWindow* anw = mNativeSurface->getNativeWindow();

    SkISize size;
    size.fWidth = ANativeWindow_getWidth(anw);
    size.fHeight = ANativeWindow_getHeight(anw);
    mRenderThread.cacheManager().notifyNextFrameSize(size.fWidth, size.fHeight);
    return size;
}

const SkM44& CanvasContext::getPixelSnapMatrix() const {
    return mRenderPipeline->getPixelSnapMatrix();
}

void CanvasContext::prepareAndDraw(RenderNode* node) {
    ATRACE_CALL();

    nsecs_t vsync = mRenderThread.timeLord().computeFrameTimeNanos();
    int64_t vsyncId = mRenderThread.timeLord().lastVsyncId();
    int64_t frameDeadline = mRenderThread.timeLord().lastFrameDeadline();
    int64_t frameInterval = mRenderThread.timeLord().frameIntervalNanos();
    int64_t frameInfo[UI_THREAD_FRAME_INFO_SIZE];
    UiFrameInfoBuilder(frameInfo)
            .addFlag(FrameInfoFlags::RTAnimation)
            .setVsync(vsync, vsync, vsyncId, frameDeadline, frameInterval);

    TreeInfo info(TreeInfo::MODE_RT_ONLY, *this);
    prepareTree(info, frameInfo, systemTime(SYSTEM_TIME_MONOTONIC), node);
    if (info.out.canDrawThisFrame) {
        draw(info.out.solelyTextureViewUpdates);
    } else {
        // wait on fences so tasks don't overlap next frame
        waitOnFences();
    }
}

void CanvasContext::markLayerInUse(RenderNode* node) {
    if (mPrefetchedLayers.erase(node)) {
        node->decStrong(nullptr);
    }
}

void CanvasContext::freePrefetchedLayers() {
    if (mPrefetchedLayers.size()) {
        for (auto& node : mPrefetchedLayers) {
            ALOGW("Incorrectly called buildLayer on View: %s, destroying layer...",
                  node->getName());
            node->destroyLayers();
            node->decStrong(nullptr);
        }
        mPrefetchedLayers.clear();
    }
}

void CanvasContext::buildLayer(RenderNode* node) {
    ATRACE_CALL();
    if (!mRenderPipeline->isContextReady()) return;

    // buildLayer() will leave the tree in an unknown state, so we must stop drawing
    stopDrawing();

    TreeInfo info(TreeInfo::MODE_FULL, *this);
    info.damageAccumulator = &mDamageAccumulator;
    info.layerUpdateQueue = &mLayerUpdateQueue;
    info.runAnimations = false;
    node->prepareTree(info);
    SkRect ignore;
    mDamageAccumulator.finish(&ignore);
    // Tickle the GENERIC property on node to mark it as dirty for damaging
    // purposes when the frame is actually drawn
    node->setPropertyFieldsDirty(RenderNode::GENERIC);

    mRenderPipeline->renderLayers(mLightGeometry, &mLayerUpdateQueue, mOpaque, mLightInfo);

    node->incStrong(nullptr);
    mPrefetchedLayers.insert(node);
}

void CanvasContext::destroyHardwareResources() {
    stopDrawing();
    if (mRenderPipeline->isContextReady()) {
        freePrefetchedLayers();
        for (const sp<RenderNode>& node : mRenderNodes) {
            node->destroyHardwareResources();
        }
        mRenderPipeline->onDestroyHardwareResources();
    }
}

DeferredLayerUpdater* CanvasContext::createTextureLayer() {
    return mRenderPipeline->createTextureLayer();
}

void CanvasContext::dumpFrames(int fd) {
    mJankTracker.dumpStats(fd);
    mJankTracker.dumpFrames(fd);
}

void CanvasContext::resetFrameStats() {
    mJankTracker.reset();
}

void CanvasContext::setName(const std::string&& name) {
    mJankTracker.setDescription(JankTrackerType::Window, std::move(name));
}

void CanvasContext::waitOnFences() {
    if (mFrameFences.size()) {
        ATRACE_CALL();
        for (auto& fence : mFrameFences) {
            fence.get();
        }
        mFrameFences.clear();
    }
}

void CanvasContext::enqueueFrameWork(std::function<void()>&& func) {
    mFrameFences.push_back(CommonPool::async(std::move(func)));
}

uint64_t CanvasContext::getFrameNumber() {
    // mFrameNumber is reset to 0 when the surface changes or we swap buffers
    if (mFrameNumber == 0 && mNativeSurface.get()) {
        mFrameNumber = ANativeWindow_getNextFrameId(mNativeSurface->getNativeWindow());
    }
    return mFrameNumber;
}

bool CanvasContext::surfaceRequiresRedraw() {
    if (!mNativeSurface) return false;
    if (mHaveNewSurface) return true;

    ANativeWindow* anw = mNativeSurface->getNativeWindow();
    const int width = ANativeWindow_getWidth(anw);
    const int height = ANativeWindow_getHeight(anw);

    return width != mLastFrameWidth || height != mLastFrameHeight;
}

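// Computes the buffer region that must be repainted this frame: the current dirty area unioned
// with the damage from older frames still present in the buffer (based on its reported age).
// Returns the window-space dirty rect, i.e. what actually changed on screen this frame.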
SkRect CanvasContext::computeDirtyRect(const Frame& frame, SkRect* dirty) {
    if (frame.width() != mLastFrameWidth || frame.height() != mLastFrameHeight) {
        // can't rely on prior content of window if viewport size changes
        dirty->setEmpty();
        mLastFrameWidth = frame.width();
        mLastFrameHeight = frame.height();
    } else if (mHaveNewSurface || frame.bufferAge() == 0) {
        // New surface needs a full draw
        dirty->setEmpty();
    } else {
        if (!dirty->isEmpty() &&
            !dirty->intersect(SkRect::MakeIWH(frame.width(), frame.height()))) {
            ALOGW("Dirty " RECT_STRING " doesn't intersect with 0 0 %d %d ?", SK_RECT_ARGS(*dirty),
                  frame.width(), frame.height());
            dirty->setEmpty();
        }
        profiler().unionDirty(dirty);
    }

    if (dirty->isEmpty()) {
        dirty->setIWH(frame.width(), frame.height());
    }

    // At this point dirty is the area of the window to update. However,
    // the area of the frame we need to repaint is potentially different, so
    // stash the screen area for later
    SkRect windowDirty(*dirty);

    // If the buffer age is 0 we do a full-screen repaint (handled above)
    // If the buffer age is 1 the buffer contents are the same as they were
    // last frame so there's nothing to union() against
    // Therefore we only care about the > 1 case.
    if (frame.bufferAge() > 1) {
        if (frame.bufferAge() > (int)mSwapHistory.size()) {
            // We don't have enough history to handle this old of a buffer
            // Just do a full-draw
            dirty->setIWH(frame.width(), frame.height());
        } else {
            // At this point we haven't yet added the latest frame to the damage
            // history (that happens below), so we need to union in the damage from
            // the previous frames that are still visible in this buffer.
            for (int i = mSwapHistory.size() - 1;
                 i > ((int)mSwapHistory.size()) - frame.bufferAge(); i--) {
                dirty->join(mSwapHistory[i].damage);
            }
        }
    }

    return windowDirty;
}

CanvasContext* CanvasContext::getActiveContext() {
    return ScopedActiveContext::getActiveContext();
}

bool CanvasContext::mergeTransaction(ASurfaceTransaction* transaction, ASurfaceControl* control) {
    if (!mASurfaceTransactionCallback) return false;
    return std::invoke(mASurfaceTransactionCallback, reinterpret_cast<int64_t>(transaction),
                       reinterpret_cast<int64_t>(control), getFrameNumber());
}

void CanvasContext::prepareSurfaceControlForWebview() {
    if (mPrepareSurfaceControlForWebviewCallback) {
        std::invoke(mPrepareSurfaceControlForWebviewCallback);
    }
}

void CanvasContext::sendLoadResetHint() {
    mHintSessionWrapper.sendLoadResetHint();
}

void CanvasContext::sendLoadIncreaseHint() {
    mHintSessionWrapper.sendLoadIncreaseHint();
}

void CanvasContext::setSyncDelayDuration(nsecs_t duration) {
    mSyncDelayDuration = duration;
}

void CanvasContext::startHintSession() {
    mHintSessionWrapper.init();
}

bool CanvasContext::shouldDither() {
    CanvasContext* self = getActiveContext();
    if (!self) return false;
    return self->mColorMode != ColorMode::Default;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */