2 This file is part of Smoothie (http://smoothieware.org/). The motion control part is heavily based on Grbl (https://github.com/simen/grbl) with additions from Sungeun K. Jeon (https://github.com/chamnit/grbl)
3 Smoothie is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
4 Smoothie is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
5 You should have received a copy of the GNU General Public License along with Smoothie. If not, see <http://www.gnu.org/licenses/>.
10 #include "libs/nuts_bolts.h"
11 #include "libs/RingBuffer.h"
12 #include "../communication/utils/Gcode.h"
13 #include "libs/Module.h"
14 #include "libs/Kernel.h"
15 #include "Timer.h" // mbed.h lib
16 #include "wait_api.h" // mbed.h lib
21 #include "checksumm.h"
23 #include "libs/StreamOutputPool.h"
24 #include "ConfigValue.h"
26 #define planner_queue_size_checksum CHECKSUM("planner_queue_size")
29 * The conveyor holds the queue of blocks, takes care of creating them, and starting the executing chain of blocks
31 * The Queue is implemented as a ringbuffer- with a twist
33 * Since delete() is not thread-safe, we must marshall deletable items out of ISR context
35 * To do this, we have implemented a *double* ringbuffer- two ringbuffers sharing the same ring, and one index pointer
37 * as in regular ringbuffers, HEAD always points to a clean, free block. We are free to prepare it as we see fit, at our leisure.
38 * When the block is fully prepared, we increment the head pointer, and from that point we must not touch it anymore.
40 * also, as in regular ringbuffers, we can 'use' the TAIL block, and increment tail pointer when we're finished with it
42 * Both of these are implemented here- see queue_head_block() (where head is pushed) and on_idle() (where tail is consumed)
44 * The double ring is implemented by adding a third index pointer that lives in between head and tail. We call it gc_pending which describes its function rather than its operation
46 * in ISR context, we use HEAD as the head pointer, and gc_pending as the tail pointer.
47 * As HEAD increments, ISR context can consume the new blocks which appear, and when we're finished with a block, we increment gc_pending to signal that they're finished, and ready to be cleaned
49 * in IDLE context, we use gc_pending as the head pointer, and TAIL as the tail pointer.
50 * When gc_pending != tail, we clean up the tail block (performing ISR-unsafe delete operations) and consume it (increment tail pointer), returning it to the pool of clean, unused blocks which HEAD is allowed to prepare for queueing
52 * Thus, our two ringbuffers exist sharing the one ring of blocks, and we safely marshall used blocks from ISR context to IDLE context for safe cleanup.
// NOTE(review): extraction artifact — the leading "56" is a stale source line
// number and the statement is wrapped across lines. This is a fragment of the
// Conveyor constructor; its signature line was lost in extraction — TODO
// confirm against the full source.
// Sets the garbage-collection index equal to the queue tail, i.e. the
// "pending cleanup" ring section starts out empty.
56 gc_pending
= queue
.tail_i
;
62 void Conveyor::on_module_loaded(){
63 register_for_event(ON_IDLE
);
64 register_for_event(ON_MAIN_LOOP
);
65 register_for_event(ON_HALT
);
67 on_config_reload(this);
// ON_HALT event handler.
// NOTE(review): this definition is incomplete — the lines after original
// line 71 (the body of the nullptr branch and the closing braces) were lost
// in extraction. The stale leading numbers and mid-statement wrapping are
// extraction artifacts.
70 void Conveyor::on_halt(void* argument
){
// A null argument appears to signal entering the halt state; what the branch
// does (presumably flushing/clearing the queue) is not visible here — TODO
// confirm against the full source.
71 if(argument
== nullptr) {
79 // Delete blocks here, because they can't be deleted in interrupt context ( see Block.cpp:release )
80 // note that blocks get cleaned as they come off the tail, so head ALWAYS points to a cleaned block.
// ON_IDLE event handler: the IDLE-context half of the double ringbuffer
// described in the header comment. It consumes blocks between TAIL and
// gc_pending, performing the ISR-unsafe cleanup.
// NOTE(review): incomplete — original lines 83, 85-86 and everything after
// line 88 (the actual cleanup/consume calls and closing braces) were lost in
// extraction. Stale leading numbers / wrapping are extraction artifacts.
81 void Conveyor::on_idle(void* argument
){
// Only act when there is at least one finished block awaiting cleanup
// (tail has not caught up with the ISR-side gc_pending marker).
82 if (queue
.tail_i
!= gc_pending
)
// If the ring reports empty while gc_pending != tail, the invariant is
// broken; the (missing) line 85 presumably handles that error case — TODO
// confirm against the full source.
84 if (queue
.is_empty()) {
87 // Cleanly delete block
// Take a reference to the tail block so its heap-allocated contents can be
// released outside interrupt context.
88 Block
* block
= queue
.tail_ref();
97 * In on_main_loop, we check whether the queue should be running, but isn't.
99 * The main trigger for this event is other pieces of code adding gcode to a block, but not pushing it. This occurs frequently with gcodes that must be executed at the correct point in the queue, but take zero time to execute.
100 * Smoothie will happily attach many of such gcodes onto a single block, to save room in the queue.
102 * Any gcode which can potentially take time to execute, or might like to halt the queue MUST push the head block, otherwise gcodes that arrive later may get executed at the same time, and gcode execution order strictness would be violated.
104 * If we get back to main loop context and the block has gcode but isn't pushed, then we can safely push it and start the queue.
107 * It's also theoretically possible that a race condition could occur where we pop the final block and stop the queue, while at the same time main loop is pushing head but thinks the queue is running and thus does not start it.
109 * In this case, we start the queue again when execution returns to main loop.
110 * No stuttering or other visible effects could be caused by this event, as the planner will have set the last block to decelerate to zero, and the new block to accelerate from zero.
// ON_MAIN_LOOP event handler: restarts the queue when gcodes were attached to
// the head block but never pushed (see the explanatory comment above).
// NOTE(review): incomplete — original lines 115-118, 120 and 122+ (the
// actions taken for each condition and the closing braces) were lost in
// extraction. Stale leading numbers / wrapping are extraction artifacts.
114 void Conveyor::on_main_loop(void*)
// Nothing to do unless the queue is currently empty...
119 if (queue
.is_empty())
// ...and the head block has gcodes attached awaiting a push.
121 if (queue
.head_ref()->gcodes
.size())
132 void Conveyor::on_config_reload(void* argument
)
134 queue
.resize(THEKERNEL
->config
->value(planner_queue_size_checksum
)->by_default(32)->as_number());
137 void Conveyor::append_gcode(Gcode
* gcode
)
139 queue
.head_ref()->append_gcode(gcode
);
142 // Process a new block in the queue
// ISR-context callback fired when the currently-executing block finishes.
// Advances gc_pending (the ISR-side tail of the double ringbuffer), so the
// finished block becomes eligible for cleanup in on_idle().
// NOTE(review): incomplete — original lines 144, 146-147, 149, 151, 154-156,
// 159-164 and 166-168 were lost in extraction, so the branch bodies and the
// final handoff to the next block are only partially visible. Stale leading
// numbers / wrapping are extraction artifacts.
143 void Conveyor::on_block_end(void* block
)
// Guard: the queue must not be empty when a block just ended; the (missing)
// line 146 presumably asserts/handles this — TODO confirm.
145 if (queue
.is_empty())
// Mark the just-finished block as ready for idle-context cleanup.
148 gc_pending
= queue
.next(gc_pending
);
150 // mark entire queue for GC if flush flag is asserted
// When flushing, walk gc_pending all the way up to head so every queued
// block gets garbage-collected.
152 while (gc_pending
!= queue
.head_i
) {
153 gc_pending
= queue
.next(gc_pending
);
157 // Return if queue is empty
// No more prepared blocks to execute: the (missing) lines presumably stop
// the queue and return — TODO confirm.
158 if (gc_pending
== queue
.head_i
)
// Otherwise fetch the next prepared block; the (missing) following lines
// presumably start its execution — TODO confirm.
165 Block
* next
= this->queue
.item_ref(gc_pending
);
170 // Wait for the queue to be empty
// Blocks (busy-waits) until every queued block has been executed and cleaned
// up, servicing ON_IDLE so garbage collection can make progress.
// NOTE(review): incomplete — original lines 172, 174, 176-177 were lost in
// extraction; line 174 inside the loop is missing (likely a call that keeps
// the queue running — TODO confirm against the full source). Stale leading
// numbers / wrapping are extraction artifacts.
171 void Conveyor::wait_for_empty_queue()
173 while (!queue
.is_empty()) {
// Give idle-context work (block cleanup) a chance to run while we wait.
175 THEKERNEL
->call_event(ON_IDLE
, this);
180 * push the pre-prepared head block onto the queue
// Publishes the fully-prepared head block: after produce_head() the ISR side
// owns it and we must not touch it anymore (see the header comment).
// NOTE(review): incomplete — original lines 183, 186, 188-190 and 194-195
// were lost in extraction; the halt-state check guarding the clear() branch
// and part of the wait loop body are not visible — TODO confirm against the
// full source. Stale leading numbers / wrapping are extraction artifacts.
182 void Conveyor::queue_head_block()
184 // upstream caller will block on this until there is room in the queue
185 while (queue
.is_full()) {
// Service ON_IDLE so tail blocks get cleaned and space frees up.
187 THEKERNEL
->call_event(ON_IDLE
, this);
191 // we do not want to stick more stuff on the queue if we are in halt state
192 // clear and release the block on the head
// Halt path: discard the prepared head block instead of publishing it.
193 queue
.head_ref()->clear();
// Normal path: mark the block ready and advance the head pointer, handing
// the block over to ISR context.
196 queue
.head_ref()->ready();
197 queue
.produce_head();
// Starts block execution if there is a prepared block waiting and the queue
// is not already running.
// NOTE(review): incomplete — original lines 202-204, 206-208 and 210 were
// lost in extraction; the "already running" guard and the state update
// around begin() are not visible — TODO confirm against the full source.
// Stale leading numbers / wrapping are extraction artifacts.
201 void Conveyor::ensure_running()
// Nothing to start: gc_pending has caught up with head, so no prepared
// blocks remain.
205 if (gc_pending
== queue
.head_i
)
// Kick off execution of the oldest un-executed block.
209 queue
.item_ref(gc_pending
)->begin();
215 In most cases this will not totally flush the queue, as when streaming
216 gcode there is one stalled waiting for space in the queue, in
217 queue_head_block() so after this flush, once main_loop runs again one more
218 gcode gets stuck in the queue, this is bad. Current work around is to call
219 this when the queue is not full and streaming has stopped
// Flush the queue: see the caveat comment above about one gcode potentially
// remaining stuck when streaming.
// NOTE(review): incomplete — original lines 224-225 and 227 were lost in
// extraction; presumably a flush flag is set around the wait — TODO confirm
// against the full source (cf. the flush handling in on_block_end). Stale
// leading numbers are extraction artifacts.
223 void Conveyor::flush_queue()
226 wait_for_empty_queue();
// Debug helper: prints every block currently in the ring, from tail through
// head inclusive, via each block's debug() method.
// NOTE(review): incomplete — original lines 232, 234, 237 and 239-241 were
// lost in extraction; the loop-exit statement inside the final `if` (the
// loop condition is literally `true`, so that branch must terminate it) is
// not visible — TODO confirm against the full source. Stale leading numbers
// / wrapping are extraction artifacts.
231 void Conveyor::dump_queue()
// Walk the ring starting at the tail; `i` is just a display counter.
233 for (unsigned int index
= queue
.tail_i
, i
= 0; true; index
= queue
.next(index
), i
++ )
235 THEKERNEL
->streams
->printf("block %03d > ", i
);
236 queue
.item_ref(index
)->debug();
// Stop after printing the head block (exit statement lost in extraction).
238 if (index
== queue
.head_i
)
243 // feels hacky, but apparently the way to do it
244 #include "HeapRing.cpp"
245 template class HeapRing
<Block
>;