cprover
Loading...
Searching...
No Matches
cycle_collection.cpp
Go to the documentation of this file.
1/*******************************************************************\
2
3Module: collection of cycles in graph of abstract events
4
5Author: Vincent Nimal
6
7Date: 2012
8
9\*******************************************************************/
10
13
14#include "event_graph.h"
15
16#include <util/message.h>
17
// filter_thin_air: erases from set_of_cycles every cycle whose events ALL
// appear in thin_air_events, i.e. cycles made up entirely of events that
// were previously classified as belonging to thin-air executions.
// NOTE(review): this listing is a Doxygen text dump -- each line carries a
// fused Doxygen line number, and the line naming the function
// (filter_thin_air) was lost in extraction; restore it from the original
// cycle_collection.cpp before compiling.
21 std::set<critical_cyclet> &set_of_cycles)
22{
23 for(std::set<critical_cyclet>::const_iterator it=set_of_cycles.begin();
24 it!=set_of_cycles.end(); )
25 {
// save the successor before a potential erase, so the loop iterator stays
// valid (std::set::erase only invalidates iterators to the erased element)
26 std::set<critical_cyclet>::const_iterator next=it;
27 ++next;
28 auto e_it=it->begin();
29 /* is there an event in the cycle not in thin-air events? */
30 for(; e_it!=it->end(); ++e_it)
31 if(thin_air_events.find(*e_it)==thin_air_events.end())
32 break;
33
// inner loop reached the end without breaking: every event of this cycle
// is a thin-air event, so the cycle is dropped
34 if(e_it==it->end())
35 set_of_cycles.erase(*it);
36
37 it=next;
38 }
39
40#ifdef DEBUG
41 for(std::set<event_idt>::const_iterator it=thin_air_events.begin();
42 it!=thin_air_events.end();
43 ++it)
44 egraph.message.debug()<<egraph[*it]<<";";
45
// NOTE(review): Doxygen line 46 is missing here (numbers jump 45 -> 47);
// presumably the messaget::eom flush of the debug stream -- confirm against
// the original source.
47#endif
48}
49
// collect_cycles: enumeration of critical cycles (per the index text below,
// "Tarjan 1972 adapted and modified for events"). Clears all marks, selects
// the exploration order mandated by the memory model (po order for
// TSO/PSO/RMO, po U rfe order for Power), then runs backtrack() from every
// event in that order, and finally removes thin-air cycles when requested.
// NOTE(review): Doxygen text dump -- each line carries a fused Doxygen line
// number and the signature line was lost in extraction. Doxygen line 97
// (one argument of the backtrack call, between lines 96 and 98) is also
// missing; restore from the original cycle_collection.cpp.
52 std::set<critical_cyclet> &set_of_cycles,
53 memory_modelt model)
54{
55 /* all the events initially unmarked */
56 for(std::size_t i=0; i<egraph.size(); i++)
57 mark[i]=false;
58
59 std::list<event_idt>* order=nullptr;
60 /* on Power, rfe pairs are also potentially unsafe */
61 switch(model)
62 {
63 case TSO:
64 case PSO:
65 case RMO:
66 order=&egraph.po_order;
67 break;
68 case Power:
69 order=&egraph.poUrfe_order;
70 break;
71
// a concrete memory model must have been selected by the caller
72 case Unknown:
73 assert(false);
74 }
75
76 if(order->empty())
77 return;
78
79 /* if we only consider a limited part of the graph */
80 order=order_filtering(order);
81
82 if(order->empty())
83 return;
84
// explore from each event, in the model-specific order
85 for(std::list<event_idt>::const_iterator
86 st_it=order->begin();
87 st_it!=order->end();
88 ++st_it)
89 {
90 event_idt source=*st_it;
91 egraph.message.debug() << "explore " << egraph[source].id << messaget::eom;
92 backtrack(
93 set_of_cycles,
94 source,
95 source,
96 false,
// NOTE(review): Doxygen line 97 dropped here (numbers jump 96 -> 98).
98 false,
99 false,
100 false,
101 irep_idt(),
102 model);
103
// unmark everything this exploration marked before moving on to the
// next source event
104 while(!marked_stack.empty())
105 {
106 event_idt up=marked_stack.top();
107 mark[up]=false;
108 marked_stack.pop();
109 }
110 }
111
112 /* end of collection -- remove spurious by thin-air cycles */
113 if(egraph.filter_thin_air)
114 filter_thin_air(set_of_cycles);
115}
116
// extract_cycle: rebuilds the cycle that was just closed between 'vertex'
// and 'source'. It walks a COPY of point_stack (so the live exploration
// stack is left untouched), skips everything pushed above 'vertex', and
// prepends the events from 'vertex' down to 'source' so that the resulting
// cycle is in path order. 'number' becomes the cycle's identifier.
// NOTE(review): Doxygen text dump -- each line carries a fused Doxygen line
// number and the signature line was lost in extraction; restore it from the
// original cycle_collection.cpp.
120 event_idt vertex,
121 event_idt source,
122 unsigned number)
123{
124 critical_cyclet new_cycle(egraph, number);
125 bool incycle=false;
126 std::stack<event_idt> stack(point_stack);
127
128 while(!stack.empty())
129 {
130 event_idt current_vertex=stack.top();
131 stack.pop();
132
133 egraph.message.debug() << "extract: "
134 << egraph[current_vertex].get_operation()
135 << egraph[current_vertex].variable << "@"
136 << egraph[current_vertex].thread << "~"
137 << egraph[current_vertex].local
138 << messaget::eom;
139
// remember whether any event of the path carries a user-defined fence
140 if(!new_cycle.has_user_defined_fence)
141 {
142 new_cycle.has_user_defined_fence=egraph[current_vertex].is_fence();
143 }
144
// only the segment from 'vertex' down to 'source' belongs to the cycle;
// entries above 'vertex' on the stack are ignored
145 if(current_vertex==vertex)
146 incycle=true;
147
148 if(incycle)
149 new_cycle.push_front(current_vertex);
150
151 if(current_vertex==source)
152 break;
153 }
154
155 return new_cycle;
156}
157
// backtrack: depth-first exploration (the adapted-Tarjan core of
// collect_cycles) looking for critical cycles that pass through 'vertex'
// and close back at 'source'. Returns true iff at least one cycle was
// found below this call. Maintains point_stack (the current path),
// mark/marked_stack (visited events) and the per-variable / per-thread
// counters that enforce the shape constraints on candidate cycles.
// NOTE(review): Doxygen text dump -- every line carries a fused Doxygen
// line number, the signature line was lost, and several interior lines
// were dropped by the extraction (visible wherever the fused numbers
// jump). Each gap is flagged below; restore the dropped lines from the
// original cycle_collection.cpp before compiling.
160 std::set<critical_cyclet> &set_of_cycles,
161 event_idt source,
162 event_idt vertex,
163 bool unsafe_met, /* unsafe pair for the model met in the visited path */
164 event_idt po_trans, /* po-transition skips still allowed */
165 bool same_var_pair, /* in a thread, tells if we already met one rfi wsi fri */
166 bool lwfence_met, /* if we try to skip a lwsync (only valid for lwsyncWR) */
167 bool has_to_be_unsafe,
168 irep_idt var_to_avoid,
169 memory_modelt model)
170{
// debug-only dump of both stacks; each stack is popped into 'tmp' and then
// restored, so the block is observably side-effect free
171#ifdef DEBUG
172 egraph.message.debug() << std::string(80, '-');
173 egraph.message.debug() << messaget::eom;
174 egraph.message.debug() << "marked size:" << marked_stack.size()
175 << messaget::eom;
176 std::stack<event_idt> tmp;
177 while(!point_stack.empty())
178 {
179 egraph.message.debug() << point_stack.top() << " | ";
180 tmp.push(point_stack.top());
181 point_stack.pop();
182 }
183 egraph.message.debug() << messaget::eom;
184 while(!tmp.empty())
185 {
186 point_stack.push(tmp.top());
187 tmp.pop();
188 }
189 while(!marked_stack.empty())
190 {
191 egraph.message.debug() << marked_stack.top() << " | ";
192 tmp.push(marked_stack.top());
193 marked_stack.pop();
194 }
195 egraph.message.debug() << messaget::eom;
196 while(!tmp.empty())
197 {
198 marked_stack.push(tmp.top());
199 tmp.pop();
200 }
201#endif
202
203 // TO DISCUSS: shouldn't we still allow po-transition through it instead?
204 if(filtering(vertex))
205 return false;
206
207 egraph.message.debug() << "bcktck "<<egraph[vertex].id<<"#"<<vertex<<", "
208 <<egraph[source].id<<"#"<<source<<" lw:"<<lwfence_met<<" unsafe:"
209 <<unsafe_met << messaget::eom;
210 bool f=false;
211 bool get_com_only=false;
212 bool unsafe_met_updated=unsafe_met;
213 bool same_var_pair_updated=same_var_pair;
214
215 bool not_thin_air=true;
216
217 const abstract_eventt &this_vertex=egraph[vertex];
218
219 /* if a thread starts with variable x, the last event of this thread in the
220 cycle cannot be with x */
221 irep_idt avoid_at_the_end=var_to_avoid;
222 bool no_comm=false;
223 if(!avoid_at_the_end.empty() && avoid_at_the_end == this_vertex.variable)
224 no_comm=true;
225
226 /* if specified, maximum number of events reached */
227 if(max_var!=0 && point_stack.size()>max_var*3)
228 return false;
229
230 /* we only explore shared variables */
231 if(!this_vertex.local)
232 {
233 /* only the lwsyncWR can be interpreted as poWR (i.e., skip of the fence) */
234 if(lwfence_met && this_vertex.operation!=abstract_eventt::operationt::Read)
235 return false; // {no_comm=true;get_com_only=false;}//return false;
236
237 bool has_to_be_unsafe_updated=false;
238 // TODO: propagate this constraint within the optimisation
239 // -- no optimisation can strongly affect performances
240 /* tab[] can appear several times */
241 if(egraph.ignore_arrays ||
242 id2string(this_vertex.variable).find("[]")==std::string::npos)
243 {
244 /* no more than 4 events per thread */
// NOTE(review): Doxygen lines 245-247 missing here (jump 244 -> 248) --
// the condition guarding this block was dropped; confirm against the
// original source.
248 {
249 if(events_per_thread[this_vertex.thread]==4)
250 return false;
251 else
252 events_per_thread[this_vertex.thread]++;
253 }
254
255 /* Multiple re-orderings constraint: if the thread on this cycles contains
256 more than one, ensure that an unsafe pair is not protected by another
257 relation in the thread. E.g., in Wx Rx Wy, under TSO, the rfi cannot be
258 delayed, since the only way to make this transformation matter is to
259 re-order also the two writes, which is not permitted on TSO. */
260 if(has_to_be_unsafe && point_stack.size() >= 2)
261 {
262 const event_idt previous=point_stack.top();
263 point_stack.pop();
264 const event_idt preprevious=point_stack.top();
265 point_stack.push(previous);
// NOTE(review): Doxygen lines 267, 269-270, 272-273 and 275 are missing
// below (numbers jump 266 -> 268 -> 271 -> 274 -> 276) -- the right-hand
// operands of these operation comparisons were dropped by extraction.
266 if(!egraph[preprevious].unsafe_pair(this_vertex, model) &&
268 egraph[preprevious].operation==
271 egraph[preprevious].operation==
274 egraph[preprevious].operation==
276 return false;
277 }
278 }
279
280 has_to_be_unsafe_updated=has_to_be_unsafe;
281
282 /* constraint 1.a: there is at most one pair of events per thread
283 with different variables. Given that we cannot have more than
284 three events on the same variable, with two in the same thread,
285 this means that we can have at most 2 consecutive events by po
286 with the same variable, and two variables per thread (fences are
287 not taken into account) */
288 if(!point_stack.empty() &&
289 egraph.are_po_ordered(point_stack.top(), vertex) &&
// NOTE(review): Doxygen lines 290-292 missing (jump 289 -> 293) -- part
// of this condition was dropped.
293 this_vertex.variable==egraph[point_stack.top()].variable)
294 {
295 if(same_var_pair ||
// NOTE(review): Doxygen lines 296 and 298 missing (jumps 295 -> 297 and
// 297 -> 299) -- the rest of this condition was dropped.
297 egraph[point_stack.top()].operation==
299 {
300 events_per_thread[this_vertex.thread]--;
301 return false; // {no_comm=true;get_com_only=false;} //return false;
302 }
303 else
304 {
305 same_var_pair_updated=true;
306 if(events_per_thread[this_vertex.thread]>=3)
307 get_com_only=true;
308 }
309 }
310
311 /* constraint 1.b */
312 if(!point_stack.empty() &&
313 egraph.are_po_ordered(point_stack.top(), vertex) &&
// NOTE(review): Doxygen lines 314-316 missing (jump 313 -> 317).
317 this_vertex.variable!=egraph[point_stack.top()].variable)
318 {
319 same_var_pair_updated=false;
320 }
321
322 /* constraint 2: per variable, either W W, R W, W R, or R W R */
// NOTE(review): Doxygen lines 323-325 missing (jump 322 -> 326) -- the
// condition guarding this block was dropped.
326 {
327 const unsigned char nb_writes=writes_per_variable[this_vertex.variable];
328 const unsigned char nb_reads=reads_per_variable[this_vertex.variable];
329
330 if(nb_writes+nb_reads==3)
331 {
332 events_per_thread[this_vertex.thread]--;
333 return false; // {no_comm=true;get_com_only=false;} //return false;
334 }
// NOTE(review): Doxygen line 335 missing (jump 334 -> 336) -- presumably
// the branch condition matching the Read case below (a Write test);
// confirm against the original source.
336 {
337 if(nb_writes==2)
338 {
339 events_per_thread[this_vertex.thread]--;
340 return false; // {no_comm=true;get_com_only=false;} //return false;
341 }
342 else
343 writes_per_variable[this_vertex.variable]++;
344 }
345 else if(this_vertex.operation==abstract_eventt::operationt::Read)
346 {
347 if(nb_reads==2)
348 {
349 events_per_thread[this_vertex.thread]--;
350 return false; // {no_comm=true;get_com_only=false;} //return false;
351 }
352 else
353 reads_per_variable[this_vertex.variable]++;
354 }
355 }
356
// record whether the pair (top-of-path, this event) is unsafe for the
// model, discounting pairs protected by an intra-thread dependency
357 if(!point_stack.empty())
358 {
359 const abstract_eventt &prev_vertex=egraph[point_stack.top()];
360 unsafe_met_updated|=
361 prev_vertex.unsafe_pair(this_vertex, model) &&
362 !(prev_vertex.thread==this_vertex.thread &&
363 egraph.map_data_dp[this_vertex.thread].dp(prev_vertex, this_vertex));
364 if(unsafe_met_updated &&
365 !unsafe_met &&
366 egraph.are_po_ordered(point_stack.top(), vertex))
367 has_to_be_unsafe_updated=true;
368 }
369
// the event joins the current path and is marked as visited
370 point_stack.push(vertex);
371 mark[vertex]=true;
372 marked_stack.push(vertex);
373
374 if(!get_com_only)
375 {
376 /* we first visit via po transition, if existing */
377 for(wmm_grapht::edgest::const_iterator
378 w_it=egraph.po_out(vertex).begin();
379 w_it!=egraph.po_out(vertex).end(); w_it++)
380 {
381 const event_idt w=w_it->first;
// closing the cycle at 'source' requires at least 4 events on the path
// and at least one unsafe pair along it
382 if(w==source && point_stack.size()>=4
383 && (unsafe_met_updated
384 || this_vertex.unsafe_pair(egraph[source], model)) )
385 {
386 critical_cyclet new_cycle=extract_cycle(vertex, source, cycle_nb++);
387 not_thin_air=!egraph.filter_thin_air || new_cycle.is_not_thin_air();
388 if(!not_thin_air)
389 {
// remember the events of a thin-air cycle; filter_thin_air() uses this
// set after the collection
390 for(critical_cyclet::const_iterator e_it=new_cycle.begin();
391 e_it!=new_cycle.end();
392 ++e_it)
393 thin_air_events.insert(*e_it);
394 }
395 if((!egraph.filter_uniproc || new_cycle.is_not_uniproc(model)) &&
396 not_thin_air && new_cycle.is_cycle() &&
397 new_cycle.is_unsafe(model) /*&& new_cycle.is_unsafe_asm(model)*/)
398 {
399 egraph.message.debug() << new_cycle.print_name(model, false)
400 << messaget::eom;
401 set_of_cycles.insert(new_cycle);
402#if 0
403 const critical_cyclet* reduced=new_cycle.hide_internals();
404 set_of_cycles.insert(*reduced);
405 delete(reduced);
406#endif
407 }
408 f=true;
409 }
410 else if(!mark[w])
411 f|=
412 backtrack(
413 set_of_cycles,
414 source,
415 w,
416 unsafe_met_updated,
417 po_trans,
418 same_var_pair_updated,
419 false,
420 has_to_be_unsafe_updated,
421 avoid_at_the_end, model);
422 }
423 }
424
425 if(!no_comm)
426 {
427 /* we then visit via com transitions, if existing */
428 std::set<event_idt> edges_to_remove;
429
430 for(wmm_grapht::edgest::const_iterator
431 w_it=egraph.com_out(vertex).begin();
432 w_it!=egraph.com_out(vertex).end(); w_it++)
433 {
434 const event_idt w=w_it->first;
// com edges towards events before the current source are pruned (they
// were already explored from those earlier sources)
435 if(w < source)
436 edges_to_remove.insert(w);
437 else if(w==source && point_stack.size()>=4 &&
438 (unsafe_met_updated ||
439 this_vertex.unsafe_pair(egraph[source], model)))
440 {
441 critical_cyclet new_cycle=extract_cycle(vertex, source, cycle_nb++);
442 not_thin_air=!egraph.filter_thin_air || new_cycle.is_not_thin_air();
443 if(!not_thin_air)
444 {
445 for(critical_cyclet::const_iterator e_it=new_cycle.begin();
446 e_it!=new_cycle.end();
447 ++e_it)
448 thin_air_events.insert(*e_it);
449 }
450 if((!egraph.filter_uniproc || new_cycle.is_not_uniproc(model)) &&
451 not_thin_air && new_cycle.is_cycle() &&
452 new_cycle.is_unsafe(model) /*&& new_cycle.is_unsafe_asm(model)*/)
453 {
454 egraph.message.debug() << new_cycle.print_name(model, false)
455 << messaget::eom;
456 set_of_cycles.insert(new_cycle);
457#if 0
458 const critical_cyclet* reduced=new_cycle.hide_internals();
459 set_of_cycles.insert(*reduced);
460 delete(reduced);
461#endif
462 }
463 f=true;
464 }
465 else if(!mark[w])
466 f |= backtrack(
467 set_of_cycles,
468 source,
469 w,
470 unsafe_met_updated,
471 po_trans,
472 false,
473 false,
474 false,
475 irep_idt(),
476 model);
477 }
478
// prune the com edges collected above (deferred so the iteration over
// com_out(vertex) is not invalidated)
479 for(const event_idt e : edges_to_remove)
480 egraph.remove_com_edge(vertex, e);
481 }
482
// a cycle was found below: clear the marks up to (and including) 'vertex'
483 if(f)
484 {
485 while(!marked_stack.empty() && marked_stack.top()!=vertex)
486 {
487 event_idt up=marked_stack.top();
488 marked_stack.pop();
489 mark[up]=false;
490 }
491
492 if(!marked_stack.empty())
493 marked_stack.pop();
494 mark[vertex]=false;
495 }
496
497 assert(!point_stack.empty());
498 point_stack.pop();
499
500 /* removes variable access */
// NOTE(review): Doxygen lines 501-503 and 505 missing (jumps 500 -> 504
// and 504 -> 506) -- the condition guarding this counter roll-back and
// the Write/Read branch test were dropped; confirm against the original.
504 {
506 writes_per_variable[this_vertex.variable]--;
507 else
508 reads_per_variable[this_vertex.variable]--;
509
510 events_per_thread[this_vertex.thread]--;
511 }
512 }
513
514 /* transitivity of po: do the same, but skip this event
515 (except if it is a fence or no more po-transition skips allowed);
516 if the cycle explored so far has a thin-air subcycle, this cycle
517 is not valid: stop this exploration here */
518 if(skip_tracked.find(vertex)==skip_tracked.end()) // 25 oct
519 if(not_thin_air && !get_com_only &&
520 (po_trans > 1 || po_trans==0) &&
521 !point_stack.empty() &&
522 egraph.are_po_ordered(point_stack.top(), vertex) &&
// NOTE(review): Doxygen lines 523-524, 526-527 and 530 missing (jumps
// 522 -> 525 -> 528 and 529 -> 531) -- parts of this condition,
// including the operation comparands, were dropped by extraction.
525 egraph[point_stack.top()].operation==
528 !this_vertex.WRfence ||
529 egraph[point_stack.top()].operation==
531 {
532 skip_tracked.insert(vertex);
533
534 std::stack<event_idt> tmp;
535
// temporarily unmark everything above 'vertex' so the skipped-event
// exploration can revisit those events
536 while(!marked_stack.empty() && marked_stack.top()!=vertex)
537 {
538 tmp.push(marked_stack.top());
539 mark[marked_stack.top()]=false;
540 marked_stack.pop();
541 }
542
543 if(!marked_stack.empty())
544 {
545 assert(marked_stack.top()==vertex);
546 mark[vertex]=true;
547 }
548 else
549 {
550 while(!tmp.empty())
551 {
552 marked_stack.push(tmp.top());
553 mark[tmp.top()]=true;
554 tmp.pop();
555 }
556 mark[vertex]=true;
557 marked_stack.push(vertex);
558 }
559
560 if(!egraph[point_stack.top()].unsafe_pair(this_vertex, model))
561 {
562 /* tab[] should never be avoided */
563 if(egraph.ignore_arrays ||
564 id2string(this_vertex.variable).find("[]")==std::string::npos)
565 avoid_at_the_end=this_vertex.variable;
566 }
567
568 /* skip lwfence by po-transition only if we consider a WR */
569 // TO CHECK
570 const bool is_lwfence=
// NOTE(review): Doxygen lines 571, 573-574 and 577 missing (jumps
// 570 -> 572 -> 575 and 576 -> 578) -- the initializer of is_lwfence is
// incomplete; confirm against the original source.
572 egraph[point_stack.top()].operation==
575 (!this_vertex.WRfence &&
576 egraph[point_stack.top()].operation==
578
579 for(wmm_grapht::edgest::const_iterator w_it=
580 egraph.po_out(vertex).begin();
581 w_it!=egraph.po_out(vertex).end(); w_it++)
582 {
583 const event_idt w=w_it->first;
584 f|=
585 backtrack(
586 set_of_cycles,
587 source,
588 w,
589 unsafe_met/*_updated*/,
590 (po_trans==0?0:po_trans-1),
591 same_var_pair/*_updated*/,
// NOTE(review): Doxygen line 592 missing (jump 591 -> 593) -- presumably
// the lwfence argument (is_lwfence); confirm against the original.
593 has_to_be_unsafe,
594 avoid_at_the_end,
595 model);
596 }
597
// same clean-up as above when a cycle was found in the skipped exploration
598 if(f)
599 {
600 while(!marked_stack.empty() && marked_stack.top()!=vertex)
601 {
602 event_idt up=marked_stack.top();
603 marked_stack.pop();
604 mark[up]=false;
605 }
606
607 if(!marked_stack.empty())
608 marked_stack.pop();
609 mark[vertex]=false;
610 }
611
612 skip_tracked.erase(vertex);
613 }
614
615 return f;
616}
bool unsafe_pair(const abstract_eventt &next, memory_modelt model) const
operationt operation
dstringt has one field, an unsigned integer `no`, which is an index into a static table of strings.
Definition dstring.h:37
bool empty() const
Definition dstring.h:88
void hide_internals(critical_cyclet &reduced) const
data_typet::const_iterator const_iterator
Definition event_graph.h:70
bool is_unsafe(memory_modelt model, bool fast=false)
checks whether there is at least one pair which is unsafe (takes fences and dependencies into account...
std::string print_name(const critical_cyclet &reduced, memory_modelt model) const
std::set< event_idt > thin_air_events
critical_cyclet extract_cycle(event_idt vertex, event_idt source, unsigned number_of_cycles)
extracts a (whole, unreduced) cycle from the stack.
void collect_cycles(std::set< critical_cyclet > &set_of_cycles, memory_modelt model)
Tarjan 1972 adapted and modified for events.
bool backtrack(std::set< critical_cyclet > &set_of_cycles, event_idt source, event_idt vertex, bool unsafe_met, event_idt po_trans, bool same_var_pair, bool lwsync_met, bool has_to_be_unsafe, irep_idt var_to_avoid, memory_modelt model)
see event_grapht::collect_cycles
void filter_thin_air(std::set< critical_cyclet > &set_of_cycles)
after the collection, eliminates the executions forbidden by an indirect thin-air
unsigned max_po_trans
unsigned max_var
messaget & message
bool filter_thin_air
mstreamt & debug() const
Definition message.h:429
static eomt eom
Definition message.h:297
graph of abstract events
wmm_grapht::node_indext event_idt
Definition event_graph.h:32
bool is_lwfence(const goto_programt::instructiont &instruction, const namespacet &ns)
Definition fence.cpp:35
dstringt irep_idt
Definition irep.h:37
const std::string & id2string(const irep_idt &d)
Definition irep.h:47
memory_modelt
Definition wmm.h:18
@ Unknown
Definition wmm.h:19
@ TSO
Definition wmm.h:20
@ RMO
Definition wmm.h:22
@ PSO
Definition wmm.h:21
@ Power
Definition wmm.h:23