 README.md                                          |   2 +-
 src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp  |   7 ++
 src/video_core/control/channel_state.h             |   6 ++
 src/video_core/control/scheduler.cpp               | 194 ++++++++++++++++++---
 src/video_core/control/scheduler.h                 |  22 ++-
 src/video_core/engines/puller.cpp                  |  12 ++-
 src/video_core/gpu.cpp                             |   8 +
 src/video_core/gpu.h                               |   9 +-
 src/video_core/gpu_thread.cpp                      |  14 ++-
 src/video_core/gpu_thread.h                        |   9 +--
 src/video_core/texture_cache/image_info.cpp        |   1 -
 src/video_core/texture_cache/image_info.h          |   1 -
 src/video_core/texture_cache/texture_cache.h       |  15 ++-
 13 files changed, 258 insertions(+), 42 deletions(-)
diff --git a/README.md b/README.md
index b647f6931..0734f6955 100755
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 4129.
+This is the source code for early-access 4130.
 
 ## Legal Notice
 
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index e71c58aa9..f537fb203 100755
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -13,6 +13,7 @@
 #include "core/hle/service/nvdrv/nvdrv.h"
 #include "core/memory.h"
 #include "video_core/control/channel_state.h"
+#include "video_core/control/scheduler.h"
 #include "video_core/engines/puller.h"
 #include "video_core/gpu.h"
 #include "video_core/host1x/host1x.h"
@@ -33,6 +34,7 @@ nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
       syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()},
       channel_state{system.GPU().AllocateChannel()} {
     channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
+    channel_state->syncpoint_id = channel_syncpoint;
     sm_exception_breakpoint_int_report_event =
         events_interface.CreateEvent("GpuChannelSMExceptionBreakpointInt");
     sm_exception_breakpoint_pause_report_event =
@@ -157,6 +159,9 @@ NvResult nvhost_gpu::SetErrorNotifier(IoctlSetErrorNotifier& params) {
 
 NvResult nvhost_gpu::SetChannelPriority(IoctlChannelSetPriority& params) {
     channel_priority = params.priority;
+    if (channel_state->initialized) {
+        system.GPU().Scheduler().ChangePriority(channel_state->bind_id, channel_priority);
+    }
     LOG_DEBUG(Service_NVDRV, "(STUBBED) called, priority={:X}", channel_priority);
     return NvResult::Success;
 }
@@ -314,6 +319,7 @@ NvResult nvhost_gpu::GetWaitbase(IoctlGetWaitbase& params) {
 NvResult nvhost_gpu::ChannelSetTimeout(IoctlChannelSetTimeout& params) {
     LOG_INFO(Service_NVDRV, "called, timeout=0x{:X}", params.timeout);
 
+    channel_state->timeout = params.timeout;
     return NvResult::Success;
 }
 
@@ -321,6 +327,7 @@ NvResult nvhost_gpu::ChannelSetTimeslice(IoctlSetTimeslice& params) {
     LOG_INFO(Service_NVDRV, "called, timeslice=0x{:X}", params.timeslice);
 
     channel_timeslice = params.timeslice;
+    channel_state->timeslice = params.timeslice;
 
     return NvResult::Success;
 }
diff --git a/src/video_core/control/channel_state.h b/src/video_core/control/channel_state.h
index ceaa92647..9f9b4ff75 100755
--- a/src/video_core/control/channel_state.h
+++ b/src/video_core/control/channel_state.h
@@ -45,6 +45,12 @@ struct ChannelState {
     void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
 
     s32 bind_id = -1;
+    /// Scheduling info
+    u32 syncpoint_id = 0xFFFF;
+    u32 priority = 0;
+    u32 timeslice = 0;
+    u32 timeout = 0;
+
     /// 3D engine
     std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
     /// 2D engine
diff --git a/src/video_core/control/scheduler.cpp b/src/video_core/control/scheduler.cpp
index 31e3b1235..91d81500a 100755
--- a/src/video_core/control/scheduler.cpp
+++ b/src/video_core/control/scheduler.cpp
@@ -1,32 +1,204 @@
 // SPDX-FileCopyrightText: 2021 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-3.0-or-later
 
+#include <atomic>
+#include <deque>
+#include <map>
 #include <memory>
+#include <mutex>
+#include <unordered_map>
 
 #include "common/assert.h"
-#include "video_core/control/channel_state.h"
+#include "common/fiber.h"
 #include "video_core/control/scheduler.h"
+#include "video_core/dma_pusher.h"
 #include "video_core/gpu.h"
 
 namespace Tegra::Control {
-Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {}
+
+struct GPFifoContext {
+    bool is_active;
+    bool is_running;
+    std::shared_ptr<Common::Fiber> context;
+    std::deque<CommandList> pending_work;
+    std::mutex guard;
+    s32 bind_id;
+    std::shared_ptr<ChannelState> info;
+    size_t yield_count;
+    size_t scheduled_count;
+};
+
+struct Scheduler::SchedulerImpl {
+    // Fifos
+    std::map<u32, std::list<size_t>, std::greater<u32>> schedule_priority_queue;
+    std::unordered_map<s32, size_t> channel_gpfifo_ids;
+    std::deque<GPFifoContext> gpfifos;
+    std::deque<size_t> free_fifos;
+
+    // Scheduling
+    std::mutex scheduling_guard;
+    std::shared_ptr<Common::Fiber> master_control;
+    bool must_reschedule{};
+    GPFifoContext* current_fifo{};
+};
+
+Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {
+    impl = std::make_unique<SchedulerImpl>();
+}
 
 Scheduler::~Scheduler() = default;
 
+void Scheduler::Init() {
+    impl->master_control = Common::Fiber::ThreadToFiber();
+}
+
+void Scheduler::Resume() {
+    bool pending_work;
+    do {
+        pending_work = false;
+        {
+            std::unique_lock lk(impl->scheduling_guard);
+            impl->current_fifo = nullptr;
+            auto it = impl->schedule_priority_queue.begin();
+            while (it != impl->schedule_priority_queue.end()) {
+                pending_work = ScheduleLevel(it->second);
+                if (pending_work) {
+                    break;
+                }
+                it = std::next(it);
+            }
+            if (pending_work) {
+                impl->must_reschedule = false;
+            }
+        }
+        if (impl->current_fifo) {
+            impl->current_fifo->scheduled_count++;
+            Common::Fiber::YieldTo(impl->master_control, *impl->current_fifo->context);
+        }
+    } while (pending_work);
+}
+
+bool Scheduler::ScheduleLevel(std::list<size_t>& queue) {
+    bool found_anything = false;
+    size_t min_schedule_count = std::numeric_limits<size_t>::max();
+    for (auto id : queue) {
+        auto& fifo = impl->gpfifos[id];
+        std::scoped_lock lk2(fifo.guard);
+        if (!fifo.pending_work.empty() || fifo.is_running) {
+            if (fifo.scheduled_count > min_schedule_count) {
+                continue;
+            }
+            if (fifo.scheduled_count < fifo.yield_count) {
+                fifo.scheduled_count++;
+                continue;
+            }
+            min_schedule_count = fifo.scheduled_count;
+            impl->current_fifo = &fifo;
+            found_anything = true;
+        }
+    }
+    return found_anything;
+}
+
+void Scheduler::ChangePriority(s32 channel_id, u32 new_priority) {
+    std::unique_lock lk(impl->scheduling_guard);
+    auto fifo_it = impl->channel_gpfifo_ids.find(channel_id);
+    if (fifo_it == impl->channel_gpfifo_ids.end()) {
+        return;
+    }
+    const size_t fifo_id = fifo_it->second;
+    auto& fifo = impl->gpfifos[fifo_id];
+    const auto old_priority = fifo.info->priority;
+    fifo.info->priority = new_priority;
+    impl->schedule_priority_queue.try_emplace(new_priority);
+    impl->schedule_priority_queue[new_priority].push_back(fifo_id);
+    impl->schedule_priority_queue[old_priority].remove_if(
+        [fifo_id](size_t id) { return id == fifo_id; });
+}
+
+void Scheduler::Yield() {
+    ASSERT(impl->current_fifo != nullptr);
+    impl->current_fifo->yield_count = impl->current_fifo->scheduled_count + 1;
+    Common::Fiber::YieldTo(impl->current_fifo->context, *impl->master_control);
+    gpu.BindChannel(impl->current_fifo->bind_id);
+}
+
+void Scheduler::CheckStatus() {
+    {
+        std::unique_lock lk(impl->scheduling_guard);
+        if (!impl->must_reschedule) {
+            return;
+        }
+    }
+    Common::Fiber::YieldTo(impl->current_fifo->context, *impl->master_control);
+    gpu.BindChannel(impl->current_fifo->bind_id);
+}
+
 void Scheduler::Push(s32 channel, CommandList&& entries) {
-    std::unique_lock lk(scheduling_guard);
-    auto it = channels.find(channel);
-    ASSERT(it != channels.end());
-    auto channel_state = it->second;
-    gpu.BindChannel(channel_state->bind_id);
-    channel_state->dma_pusher->Push(std::move(entries));
-    channel_state->dma_pusher->DispatchCalls();
+    std::unique_lock lk(impl->scheduling_guard);
+    auto it = impl->channel_gpfifo_ids.find(channel);
+    ASSERT(it != impl->channel_gpfifo_ids.end());
+    auto gpfifo_id = it->second;
+    auto& fifo = impl->gpfifos[gpfifo_id];
+    {
+        std::scoped_lock lk2(fifo.guard);
+        fifo.pending_work.emplace_back(std::move(entries));
+    }
+    if (impl->current_fifo != nullptr && impl->current_fifo->info->priority < fifo.info->priority) {
+        impl->must_reschedule = true;
+    }
+}
+
+void Scheduler::ChannelLoop(size_t gpfifo_id, s32 channel_id) {
+    gpu.BindChannel(channel_id);
+    auto& fifo = impl->gpfifos[gpfifo_id];
+    while (true) {
+        auto* channel_state = fifo.info.get();
+        fifo.guard.lock();
+        while (!fifo.pending_work.empty()) {
+            fifo.is_running = true;
+            {
+                CommandList&& entries = std::move(fifo.pending_work.front());
+                channel_state->dma_pusher->Push(std::move(entries));
+                fifo.pending_work.pop_front();
+            }
+            fifo.guard.unlock();
+            channel_state->dma_pusher->DispatchCalls();
+            CheckStatus();
+            fifo.guard.lock();
+        }
+        fifo.is_running = false;
+        fifo.guard.unlock();
+        Common::Fiber::YieldTo(fifo.context, *impl->master_control);
+        gpu.BindChannel(channel_id);
+    }
 }
 
 void Scheduler::DeclareChannel(std::shared_ptr<ChannelState> new_channel) {
     s32 channel = new_channel->bind_id;
-    std::unique_lock lk(scheduling_guard);
-    channels.emplace(channel, new_channel);
+    std::unique_lock lk(impl->scheduling_guard);
+
+    size_t new_fifo_id;
+    if (!impl->free_fifos.empty()) {
+        new_fifo_id = impl->free_fifos.front();
+        impl->free_fifos.pop_front();
+    } else {
+        new_fifo_id = impl->gpfifos.size();
+        impl->gpfifos.emplace_back();
+    }
+    auto& new_fifo = impl->gpfifos[new_fifo_id];
+    impl->channel_gpfifo_ids[channel] = new_fifo_id;
+    new_fifo.is_active = true;
+    new_fifo.bind_id = channel;
+    new_fifo.pending_work.clear();
+    new_fifo.info = new_channel;
+    new_fifo.scheduled_count = 0;
+    new_fifo.yield_count = 0;
+    new_fifo.is_running = false;
+    impl->schedule_priority_queue.try_emplace(new_channel->priority);
+    impl->schedule_priority_queue[new_channel->priority].push_back(new_fifo_id);
+    std::function<void()> callback = std::bind(&Scheduler::ChannelLoop, this, new_fifo_id, channel);
+    new_fifo.context = std::make_shared<Common::Fiber>(std::move(callback));
 }
 
 } // namespace Tegra::Control
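
The scheduler above replaces immediate dispatch with cooperative, priority-aware multiplexing: each channel's GPFIFO gets its own Common::Fiber running ChannelLoop, Resume() hands the GPU thread to the highest-priority runnable fifo (ties broken toward the lowest scheduled_count, with yield_count letting a yielded fifo sit out one round), and Push() flags must_reschedule when work arrives for a channel with higher priority than the one running. A condensed sketch of the intended call flow, drawn from the code above purely as a reading aid (not part of the patch; construction of gpu, channel_state, and entries is elided):

    // Register a channel: allocates a GPFifoContext and its ChannelLoop fiber.
    Tegra::Control::Scheduler scheduler{gpu};
    scheduler.DeclareChannel(channel_state);

    // Any producer thread: stage a command list on the channel's fifo.
    scheduler.Push(channel_state->bind_id, std::move(entries));

    // GPU thread: adopt the thread as the master fiber once, then drain all
    // runnable fifos, highest priority level first, until nothing is pending.
    scheduler.Init();
    scheduler.Resume();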
diff --git a/src/video_core/control/scheduler.h b/src/video_core/control/scheduler.h
index eab52e536..faa888dde 100755
--- a/src/video_core/control/scheduler.h
+++ b/src/video_core/control/scheduler.h
@@ -3,10 +3,11 @@
 
 #pragma once
 
+#include <list>
 #include <memory>
-#include <mutex>
-#include <unordered_map>
 
+#include "common/common_types.h"
+#include "video_core/control/channel_state.h"
 #include "video_core/dma_pusher.h"
 
 namespace Tegra {
@@ -22,13 +23,26 @@
     explicit Scheduler(GPU& gpu_);
     ~Scheduler();
 
+    void Init();
+
+    void Resume();
+
+    void Yield();
+
     void Push(s32 channel, CommandList&& entries);
 
     void DeclareChannel(std::shared_ptr<ChannelState> new_channel);
 
+    void ChangePriority(s32 channel_id, u32 new_priority);
+
 private:
-    std::unordered_map<s32, std::shared_ptr<ChannelState>> channels;
-    std::mutex scheduling_guard;
+    void ChannelLoop(size_t gpfifo_id, s32 channel_id);
+    bool ScheduleLevel(std::list<size_t>& queue);
+    void CheckStatus();
+
+    struct SchedulerImpl;
+    std::unique_ptr<SchedulerImpl> impl;
+
     GPU& gpu;
 };
 
diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp
index 79d84d662..c39cada43 100755
--- a/src/video_core/engines/puller.cpp
+++ b/src/video_core/engines/puller.cpp
@@ -6,6 +6,7 @@
 #include "common/settings.h"
 #include "core/core.h"
 #include "video_core/control/channel_state.h"
+#include "video_core/control/scheduler.h"
 #include "video_core/dma_pusher.h"
 #include "video_core/engines/fermi_2d.h"
 #include "video_core/engines/kepler_compute.h"
@@ -14,6 +15,8 @@
 #include "video_core/engines/maxwell_dma.h"
 #include "video_core/engines/puller.h"
 #include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/syncpoint_manager.h"
 #include "video_core/memory_manager.h"
 #include "video_core/rasterizer_interface.h"
 
@@ -60,11 +63,14 @@ void Puller::ProcessBindMethod(const MethodCall& method_call) {
 }
 
 void Puller::ProcessFenceActionMethod() {
+    auto& syncpoint_manager = gpu.Host1x().GetSyncpointManager();
     switch (regs.fence_action.op) {
     case Puller::FenceOperation::Acquire:
-        // UNIMPLEMENTED_MSG("Channel Scheduling pending.");
-        // WaitFence(regs.fence_action.syncpoint_id, regs.fence_value);
-        rasterizer->ReleaseFences();
+        while (regs.fence_value >
+               syncpoint_manager.GetGuestSyncpointValue(regs.fence_action.syncpoint_id)) {
+            rasterizer->ReleaseFences();
+            gpu.Scheduler().Yield();
+        }
         break;
     case Puller::FenceOperation::Increment:
         rasterizer->SignalSyncPoint(regs.fence_action.syncpoint_id);
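
With channel scheduling in place, FenceOperation::Acquire is no longer a stub: the loop re-reads the guest syncpoint value on every pass and yields the channel's fiber, so another channel can take the GPU thread and perform the increment that satisfies the wait. The pattern, restated with comments (assumed semantics, condensed from the hunk above):

    // Cooperative fence wait: the channel blocks without holding the GPU thread.
    while (regs.fence_value >
           syncpoint_manager.GetGuestSyncpointValue(regs.fence_action.syncpoint_id)) {
        rasterizer->ReleaseFences(); // let pending host fences signal first
        gpu.Scheduler().Yield();     // run another channel; this one rebinds on return
    }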
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 59356015b..f45f797b3 100755
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -387,6 +387,14 @@ std::shared_ptr<Control::ChannelState> GPU::AllocateChannel() {
     return impl->AllocateChannel();
 }
 
+Tegra::Control::Scheduler& GPU::Scheduler() {
+    return *impl->scheduler;
+}
+
+const Tegra::Control::Scheduler& GPU::Scheduler() const {
+    return *impl->scheduler;
+}
+
 void GPU::InitChannel(Control::ChannelState& to_init) {
     impl->InitChannel(to_init);
 }
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index 25c75a109..154466f23 100755
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -124,7 +124,8 @@ class KeplerCompute;
 
 namespace Control {
 struct ChannelState;
-}
+class Scheduler;
+} // namespace Control
 
 namespace Host1x {
 class Host1x;
@@ -204,6 +205,12 @@ public:
     /// Returns a const reference to the shader notifier.
     [[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const;
 
+    /// Returns GPU Channel Scheduler.
+    [[nodiscard]] Tegra::Control::Scheduler& Scheduler();
+
+    /// Returns GPU Channel Scheduler.
+    [[nodiscard]] const Tegra::Control::Scheduler& Scheduler() const;
+
     [[nodiscard]] u64 GetTicks() const;
 
     [[nodiscard]] bool IsAsync() const;
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 0832234af..3aa59d034 100755
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -34,13 +34,15 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
 
     CommandDataContainer next;
 
+    scheduler.Init();
+
     while (!stop_token.stop_requested()) {
         state.queue.PopWait(next, stop_token);
         if (stop_token.stop_requested()) {
             break;
         }
-        if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) {
-            scheduler.Push(submit_list->channel, std::move(submit_list->entries));
+        if (std::holds_alternative<SubmitListCommand>(next.data)) {
+            scheduler.Resume();
         } else if (std::holds_alternative<GPUTickCommand>(next.data)) {
             system.GPU().TickWork();
         } else if (const auto* flush = std::get_if<FlushRegionCommand>(&next.data)) {
@@ -67,14 +69,16 @@ ThreadManager::~ThreadManager() = default;
 
 void ThreadManager::StartThread(VideoCore::RendererBase& renderer,
                                 Core::Frontend::GraphicsContext& context,
-                                Tegra::Control::Scheduler& scheduler) {
+                                Tegra::Control::Scheduler& scheduler_) {
     rasterizer = renderer.ReadRasterizer();
+    scheduler = &scheduler_;
     thread = std::jthread(RunThread, std::ref(system), std::ref(renderer), std::ref(context),
-                          std::ref(scheduler), std::ref(state));
+                          std::ref(scheduler_), std::ref(state));
 }
 
 void ThreadManager::SubmitList(s32 channel, Tegra::CommandList&& entries) {
-    PushCommand(SubmitListCommand(channel, std::move(entries)));
+    scheduler->Push(channel, std::move(entries));
+    PushCommand(SubmitListCommand());
 }
 
 void ThreadManager::FlushRegion(DAddr addr, u64 size) {
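
Submission is now split across the two sides of the command queue: the caller stages the command list directly on the scheduler under the fifo's own lock, and the SubmitListCommand sent through the queue degenerates into a doorbell that wakes the GPU thread, which drains work through Resume(). Both sides condensed (names as in the hunks above):

    // Producer (ThreadManager::SubmitList): stage work, then ring the doorbell.
    scheduler->Push(channel, std::move(entries)); // goes to the channel's fifo
    PushCommand(SubmitListCommand());             // carries no payload anymore

    // Consumer (RunThread): a doorbell just means "schedule something".
    if (std::holds_alternative<SubmitListCommand>(next.data)) {
        scheduler.Resume(); // run runnable fifos, highest priority first
    }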
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index 1a0de86b5..a4b01c42a 100755
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -36,13 +36,7 @@ class RendererBase;
 namespace VideoCommon::GPUThread {
 
 /// Command to signal to the GPU thread that a command list is ready for processing
-struct SubmitListCommand final {
-    explicit SubmitListCommand(s32 channel_, Tegra::CommandList&& entries_)
-        : channel{channel_}, entries{std::move(entries_)} {}
-
-    s32 channel;
-    Tegra::CommandList entries;
-};
+struct SubmitListCommand final {};
 
 /// Command to signal to the GPU thread to flush a region
 struct FlushRegionCommand final {
@@ -124,6 +118,7 @@ public:
 private:
     /// Pushes a command to be executed by the GPU thread
     u64 PushCommand(CommandData&& command_data, bool block = false);
+    Tegra::Control::Scheduler* scheduler;
 
     Core::System& system;
     const bool is_async;
diff --git a/src/video_core/texture_cache/image_info.cpp b/src/video_core/texture_cache/image_info.cpp
index 4cfaf7ce6..22eb7bd00 100755
--- a/src/video_core/texture_cache/image_info.cpp
+++ b/src/video_core/texture_cache/image_info.cpp
@@ -42,7 +42,6 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept {
         };
     }
     rescaleable = false;
-    is_sparse = config.is_sparse != 0;
     tile_width_spacing = config.tile_width_spacing;
     if (config.texture_type != TextureType::Texture2D &&
         config.texture_type != TextureType::Texture2DNoMipmap) {
diff --git a/src/video_core/texture_cache/image_info.h b/src/video_core/texture_cache/image_info.h
index 286457acc..0a50795b3 100755
--- a/src/video_core/texture_cache/image_info.h
+++ b/src/video_core/texture_cache/image_info.h
@@ -41,7 +41,6 @@ struct ImageInfo {
     bool downscaleable = false;
     bool forced_flushed = false;
     bool dma_downloaded = false;
-    bool is_sparse = false;
 };
 
 } // namespace VideoCommon
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 47ea0bd96..85046e708 100755
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -600,17 +600,17 @@ void TextureCache<P>::UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t siz
                      [&](ImageId id, Image&) { deleted_images.push_back(id); });
     for (const ImageId id : deleted_images) {
         Image& image = slot_images[id];
-        if (False(image.flags & ImageFlagBits::CpuModified)) {
-            image.flags |= ImageFlagBits::CpuModified;
-            if (True(image.flags & ImageFlagBits::Tracked)) {
-                UntrackImage(image, id);
-            }
+        if (True(image.flags & ImageFlagBits::CpuModified)) {
+            continue;
         }
-
+        image.flags |= ImageFlagBits::CpuModified;
         if (True(image.flags & ImageFlagBits::Remapped)) {
             continue;
         }
         image.flags |= ImageFlagBits::Remapped;
+        if (True(image.flags & ImageFlagBits::Tracked)) {
+            UntrackImage(image, id);
+        }
     }
 }
 
@@ -1469,8 +1469,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, DA
     const ImageId new_image_id = slot_images.insert(runtime, new_info, gpu_addr, cpu_addr);
     Image& new_image = slot_images[new_image_id];
 
-    if (!gpu_memory->IsContinuousRange(new_image.gpu_addr, new_image.guest_size_bytes) &&
-        new_info.is_sparse) {
+    if (!gpu_memory->IsContinuousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
         new_image.flags |= ImageFlagBits::Sparse;
     }
 
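
With is_sparse gone from ImageInfo, sparseness is now inferred purely from the address-space layout at join time rather than from the TIC entry (condensed from the hunk above):

    // Any image whose guest range is not physically continuous is treated as
    // sparse, regardless of what the texture descriptor claimed.
    if (!gpu_memory->IsContinuousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
        new_image.flags |= ImageFlagBits::Sparse;
    }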