Merge remote-tracking branch 'upstream/edge' into upstream-master
[clinton/Smoothieware.git] / src / modules / robot / Conveyor.cpp
CommitLineData
f80d18b9
L
1/*
2 This file is part of Smoothie (http://smoothieware.org/). The motion control part is heavily based on Grbl (https://github.com/simen/grbl) with additions from Sungeun K. Jeon (https://github.com/chamnit/grbl)
3 Smoothie is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
4 Smoothie is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
5 You should have received a copy of the GNU General Public License along with Smoothie. If not, see <http://www.gnu.org/licenses/>.
6*/
7
8using namespace std;
9#include <vector>
10#include "libs/nuts_bolts.h"
11#include "libs/RingBuffer.h"
12#include "../communication/utils/Gcode.h"
13#include "libs/Module.h"
14#include "libs/Kernel.h"
15#include "Timer.h" // mbed.h lib
16#include "wait_api.h" // mbed.h lib
17#include "Block.h"
18#include "Conveyor.h"
19#include "Planner.h"
55456577 20#include "mri.h"
61134a65
JM
21#include "checksumm.h"
22#include "Config.h"
23#include "libs/StreamOutputPool.h"
8d54c34c 24#include "ConfigValue.h"
f80d18b9 25
0b3e628f
MM
26#define planner_queue_size_checksum CHECKSUM("planner_queue_size")
27
8698e81a
MM
28/*
29 * The conveyor holds the queue of blocks, takes care of creating them, and starting the executing chain of blocks
30 *
31 * The Queue is implemented as a ringbuffer- with a twist
32 *
33 * Since delete() is not thread-safe, we must marshall deletable items out of ISR context
34 *
35 * To do this, we have implemented a *double* ringbuffer- two ringbuffers sharing the same ring, and one index pointer
36 *
37 * as in regular ringbuffers, HEAD always points to a clean, free block. We are free to prepare it as we see fit, at our leisure.
38 * When the block is fully prepared, we increment the head pointer, and from that point we must not touch it anymore.
39 *
40 * also, as in regular ringbuffers, we can 'use' the TAIL block, and increment tail pointer when we're finished with it
41 *
42 * Both of these are implemented here- see queue_head_block() (where head is pushed) and on_idle() (where tail is consumed)
43 *
44 * The double ring is implemented by adding a third index pointer that lives in between head and tail. We call it gc_pending which describes its function rather than its operation
45 *
46 * in ISR context, we use HEAD as the head pointer, and gc_pending as the tail pointer.
47 * As HEAD increments, ISR context can consume the new blocks which appear, and when we're finished with a block, we increment gc_pending to signal that they're finished, and ready to be cleaned
48 *
49 * in IDLE context, we use gc_pending as the head pointer, and TAIL as the tail pointer.
50 * When gc_pending != tail, we clean up the tail block (performing ISR-unsafe delete operations) and consume it (increment tail pointer), returning it to the pool of clean, unused blocks which HEAD is allowed to prepare for queueing
51 *
52 * Thus, our two ringbuffers exist sharing the one ring of blocks, and we safely marshall used blocks from ISR context to IDLE context for safe cleanup.
53 */
edac9072 54
f80d18b9 55Conveyor::Conveyor(){
c501670b 56 gc_pending = queue.tail_i;
2134bcf2 57 running = false;
b375ba1d 58 flush = false;
728477c4 59 halted= false;
702023f3
MM
60}
61
d149c730 62void Conveyor::on_module_loaded(){
702023f3 63 register_for_event(ON_IDLE);
01b69353 64 register_for_event(ON_MAIN_LOOP);
b375ba1d 65 register_for_event(ON_HALT);
0b3e628f
MM
66
67 on_config_reload(this);
702023f3
MM
68}
69
b375ba1d 70void Conveyor::on_halt(void* argument){
728477c4
JM
71 if(argument == nullptr) {
72 halted= true;
73 flush_queue();
74 }else{
75 halted= false;
76 }
b375ba1d
JM
77}
78
edac9072 79// Delete blocks here, because they can't be deleted in interrupt context ( see Block.cpp:release )
8698e81a 80// note that blocks get cleaned as they come off the tail, so head ALWAYS points to a cleaned block.
d149c730 81void Conveyor::on_idle(void* argument){
55456577 82 if (queue.tail_i != gc_pending)
c501670b 83 {
ce6ee091 84 if (queue.is_empty()) {
55456577 85 __debugbreak();
ce6ee091 86 }else{
55456577
MM
87 // Cleanly delete block
88 Block* block = queue.tail_ref();
9d005957 89// block->debug();
3ac0b99e 90 block->clear();
55456577
MM
91 queue.consume_tail();
92 }
702023f3 93 }
01b69353
MM
94}
95
8698e81a
MM
96/*
97 * In on_main_loop, we check whether the queue should be running, but isn't.
98 *
99 * The main trigger for this event is other pieces of code adding gcode to a block, but not pushing it. This occurs frequently with gcodes that must be executed at the correct point in the queue, but take zero time to execute.
100 * Smoothie will happily attach many of such gcodes onto a single block, to save room in the queue.
101 *
102 * Any gcode which can potentially take time to execute, or might like to halt the queue MUST push the head block, otherwise gcodes that arrive later may get executed at the same time, and gcode execution order strictness would be violated.
103 *
104 * If we get back to main loop context and the block has gcode but isn't pushed, then we can safely push it and start the queue.
105 *
106 *
107 * It's also theoretically possible that a race condition could occur where we pop the final block and stop the queue, while at the same time main loop is pushing head but thinks the queue is running and thus does not start it.
108 *
109 * In this case, we start the queue again when execution returns to main loop.
110 * No stuttering or other visible effects could be caused by this event, as the planner will have set the last block to decelerate to zero, and the new block to accelerate from zero.
111 *
112 */
113
01b69353
MM
114void Conveyor::on_main_loop(void*)
115{
116 if (running)
117 return;
118
119 if (queue.is_empty())
3facc890 120 {
3facc890
MM
121 if (queue.head_ref()->gcodes.size())
122 {
123 queue_head_block();
124 ensure_running();
125 }
126 }
36aca284
MM
127 else
128 // queue not empty
129 ensure_running();
f80d18b9
L
130}
131
0b3e628f
MM
132void Conveyor::on_config_reload(void* argument)
133{
134 queue.resize(THEKERNEL->config->value(planner_queue_size_checksum)->by_default(32)->as_number());
135}
136
e0ee24ed
MM
137void Conveyor::append_gcode(Gcode* gcode)
138{
c87f8e07 139 queue.head_ref()->append_gcode(gcode);
e0ee24ed
MM
140}
141
f80d18b9 142// Process a new block in the queue
2134bcf2
MM
143void Conveyor::on_block_end(void* block)
144{
55456577
MM
145 if (queue.is_empty())
146 __debugbreak();
0b3e628f 147
2134bcf2 148 gc_pending = queue.next(gc_pending);
f80d18b9 149
b375ba1d
JM
150 // mark entire queue for GC if flush flag is asserted
151 if (flush){
152 while (gc_pending != queue.head_i) {
153 gc_pending = queue.next(gc_pending);
154 }
155 }
156
f80d18b9 157 // Return if queue is empty
2134bcf2
MM
158 if (gc_pending == queue.head_i)
159 {
160 running = false;
f80d18b9
L
161 return;
162 }
702023f3 163
f80d18b9 164 // Get a new block
2134bcf2 165 Block* next = this->queue.item_ref(gc_pending);
f80d18b9 166
2134bcf2 167 next->begin();
f80d18b9
L
168}
169
edac9072 170// Wait for the queue to be empty
c501670b
MM
171void Conveyor::wait_for_empty_queue()
172{
728477c4 173 while (!queue.is_empty()) {
2134bcf2 174 ensure_running();
dda52007 175 THEKERNEL->call_event(ON_IDLE, this);
2134bcf2 176 }
17c68379
BG
177}
178
8698e81a
MM
179/*
180 * push the pre-prepared head block onto the queue
181 */
2134bcf2
MM
182void Conveyor::queue_head_block()
183{
728477c4
JM
184 // upstream caller will block on this until there is room in the queue
185 while (queue.is_full()) {
2134bcf2
MM
186 ensure_running();
187 THEKERNEL->call_event(ON_IDLE, this);
188 }
189
728477c4
JM
190 if(halted) {
191 // we do not want to stick more stuff on the queue if we are in halt state
192 // clear and release the block on the head
193 queue.head_ref()->clear();
194
195 }else{
196 queue.head_ref()->ready();
197 queue.produce_head();
198 }
2134bcf2
MM
199}
200
201void Conveyor::ensure_running()
202{
203 if (!running)
204 {
56a3ab89
MM
205 if (gc_pending == queue.head_i)
206 return;
207
2134bcf2 208 running = true;
56a3ab89 209 queue.item_ref(gc_pending)->begin();
2134bcf2
MM
210 }
211}
c501670b 212
728477c4
JM
213/*
214
215 In most cases this will not totally flush the queue, as when streaming
216 gcode there is one stalled waiting for space in the queue, in
217 queue_head_block() so after this flush, once main_loop runs again one more
218 gcode gets stuck in the queue, this is bad. Current work around is to call
219 this when the queue in not full and streaming has stopped
220
221*/
222
b375ba1d
JM
223void Conveyor::flush_queue()
224{
225 flush = true;
226 wait_for_empty_queue();
227 flush = false;
228}
229
a617ac35
MM
230// Debug function
231void Conveyor::dump_queue()
232{
233 for (unsigned int index = queue.tail_i, i = 0; true; index = queue.next(index), i++ )
234 {
235 THEKERNEL->streams->printf("block %03d > ", i);
236 queue.item_ref(index)->debug();
237
238 if (index == queue.head_i)
239 break;
240 }
241}
242
c501670b
MM
243// feels hacky, but apparently the way to do it
244#include "HeapRing.cpp"
245template class HeapRing<Block>;