/******************************************************************************
 * ring.h
 *
 * Shared producer-consumer ring macros.
 *
 * Tim Deegan and Andrew Warfield November 2004.
 */

#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__

typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2                  : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))

/*
 * Calculate size of a shared ring, given the total available space for the
 * ring and indexes (_sz), and the name tag of the request/response structure.
 * A ring contains as many entries as will fit, rounded down to the nearest
 * power of two (so we can mask with (size-1) to loop around).
 */
#define __CONST_RING_SIZE(_s, _sz)                                      \
    (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) /               \
            sizeof(((struct _s##_sring *)0)->ring[0])))

/*
 * The same for passing in an actual pointer instead of a name tag.
 */
#define __RING_SIZE(_s, _sz)                                            \
    (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
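
/*
 * Worked example (illustrative only, not part of this header): assume a
 * 4096-byte shared page and a 64-byte request/response union.  On a typical
 * build the four RING_IDX fields plus the 48 bytes of padding occupy the
 * first 64 bytes of the sring, so (4096 - 64) / 64 = 63 entries would fit,
 * and __RD32() rounds that down to the nearest power of two, giving a ring
 * of 32 entries.
 */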

/*
 * Macros to make the correct C datatypes for a new kind of ring.
 *
 * To make a new ring datatype, you need to have two message structures,
 * let's say struct request, and struct response already defined.
 *
 * In a header where you want the ring datatype declared, you then do:
 *
 *     DEFINE_RING_TYPES(mytag, struct request, struct response);
 *
 * These expand out to give you a set of types, as you can see below.
 * The most important of these are:
 *
 *     struct mytag_sring      - The shared ring.
 *     struct mytag_front_ring - The 'front' half of the ring.
 *     struct mytag_back_ring  - The 'back' half of the ring.
 *
 * To initialize a ring in your code you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialise
 * the front half:
 *
 *     struct mytag_front_ring front_ring;
 *     SHARED_RING_INIT((struct mytag_sring *)shared_page);
 *     FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
 *                     PAGE_SIZE);
 *
 * Initializing the back follows similarly (note that only the front
 * initializes the shared ring):
 *
 *     struct mytag_back_ring back_ring;
 *     BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
 *                    PAGE_SIZE);
 */

#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)                     \
                                                                        \
/* Shared ring entry */                                                 \
union __name##_sring_entry {                                            \
    __req_t req;                                                        \
    __rsp_t rsp;                                                        \
};                                                                      \
                                                                        \
/* Shared ring page */                                                  \
struct __name##_sring {                                                 \
    RING_IDX req_prod, req_event;                                       \
    RING_IDX rsp_prod, rsp_event;                                       \
    uint8_t pad[48];                                                    \
    union __name##_sring_entry ring[1]; /* variable-length */           \
};                                                                      \
                                                                        \
/* "Front" end's private variables */                                   \
struct __name##_front_ring {                                            \
    RING_IDX req_prod_pvt;                                              \
    RING_IDX rsp_cons;                                                  \
    unsigned int nr_ents;                                               \
    struct __name##_sring *sring;                                       \
};                                                                      \
                                                                        \
/* "Back" end's private variables */                                    \
struct __name##_back_ring {                                             \
    RING_IDX rsp_prod_pvt;                                              \
    RING_IDX req_cons;                                                  \
    unsigned int nr_ents;                                               \
    struct __name##_sring *sring;                                       \
};

/*
 * Macros for manipulating rings.
 *
 * FRONT_RING_whatever works on the "front end" of a ring: here
 * requests are pushed on to the ring and responses taken off it.
 *
 * BACK_RING_whatever works on the "back end" of a ring: here
 * requests are taken off the ring and responses put on.
 *
 * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
 * This is OK in 1-for-1 request-response situations where the
 * requestor (front end) never has more than RING_SIZE()
 * outstanding requests.
 */

/* Initialising empty rings */
#define SHARED_RING_INIT(_s) do {                                       \
    (_s)->req_prod = (_s)->rsp_prod = 0;                                \
    (_s)->req_event = (_s)->rsp_event = 1;                              \
    memset((_s)->pad, 0, sizeof((_s)->pad));                            \
} while (0)

#define FRONT_RING_INIT(_r, _s, __size) do {                            \
    (_r)->req_prod_pvt = 0;                                             \
    (_r)->rsp_cons = 0;                                                 \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
    (_r)->sring = (_s);                                                 \
} while (0)

#define BACK_RING_INIT(_r, _s, __size) do {                             \
    (_r)->rsp_prod_pvt = 0;                                             \
    (_r)->req_cons = 0;                                                 \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
    (_r)->sring = (_s);                                                 \
} while (0)

/* Initialize to existing shared indexes -- for recovery */
#define FRONT_RING_ATTACH(_r, _s, __size) do {                          \
    (_r)->sring = (_s);                                                 \
    (_r)->req_prod_pvt = (_s)->req_prod;                                \
    (_r)->rsp_cons = (_s)->rsp_prod;                                    \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
} while (0)

#define BACK_RING_ATTACH(_r, _s, __size) do {                           \
    (_r)->sring = (_s);                                                 \
    (_r)->rsp_prod_pvt = (_s)->rsp_prod;                                \
    (_r)->req_cons = (_s)->rsp_prod;                                    \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
} while (0)
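
/*
 * Re-attach example (illustrative sketch only; "mytag", shared_page and the
 * backend-restart scenario are assumptions, not part of this header).  A
 * restarted back end can reconstruct its private indexes from the still-live
 * shared ring instead of re-initialising it:
 *
 *     struct mytag_back_ring back_ring;
 *     BACK_RING_ATTACH(&back_ring, (struct mytag_sring *)shared_page,
 *                      PAGE_SIZE);
 *
 * Requests that were published but not yet answered (those between rsp_prod
 * and req_prod) remain visible through RING_HAS_UNCONSUMED_REQUESTS() and
 * are processed as normal.
 */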

/* How big is this ring? */
#define RING_SIZE(_r)                                                   \
    ((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r)                                          \
    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/* Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front.)
 */
#define RING_FULL(_r)                                                   \
    (RING_FREE_REQUESTS(_r) == 0)

/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r)                               \
    ((_r)->sring->rsp_prod - (_r)->rsp_cons)
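
/*
 * The result below is clamped to the number of additional requests the back
 * end can legitimately hold (RING_SIZE() minus requests consumed but not yet
 * responded to), so a front end that publishes a bogus req_prod cannot make
 * the back end walk off the end of the ring.
 */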
#define RING_HAS_UNCONSUMED_REQUESTS(_r)                                \
    ({                                                                  \
        unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;      \
        unsigned int rsp = RING_SIZE(_r) -                              \
            ((_r)->req_cons - (_r)->rsp_prod_pvt);                      \
        req < rsp ? req : rsp;                                          \
    })

/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx)                                      \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

#define RING_GET_RESPONSE(_r, _idx)                                     \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)                           \
    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))

/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)                           \
    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))

#define RING_PUSH_REQUESTS(_r) do {                                     \
    wmb(); /* back sees requests /before/ updated producer index */     \
    (_r)->sring->req_prod = (_r)->req_prod_pvt;                         \
} while (0)

#define RING_PUSH_RESPONSES(_r) do {                                    \
    wmb(); /* front sees responses /before/ updated producer index */   \
    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                         \
} while (0)
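
/*
 * Back-end consumption sketch (illustrative only; process_request() and the
 * request/response layout are assumptions, not part of this header).  With a
 * struct mytag_back_ring "ring" set up by BACK_RING_INIT(), a service loop
 * typically copies each request out of the shared page, queues one response
 * per request, and pushes the responses when done:
 *
 *     RING_IDX rc = ring.req_cons;
 *     while (RING_HAS_UNCONSUMED_REQUESTS(&ring)) {
 *         struct request req;
 *
 *         rmb(); /* read the request body only after seeing req_prod */
 *         req = *RING_GET_REQUEST(&ring, rc);
 *         ring.req_cons = ++rc;
 *
 *         process_request(&req,
 *                         RING_GET_RESPONSE(&ring, ring.rsp_prod_pvt));
 *         ring.rsp_prod_pvt++;
 *     }
 *     RING_PUSH_RESPONSES(&ring);
 */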

/*
 * Notification hold-off (req_event and rsp_event):
 *
 * When queueing requests or responses on a shared ring, it may not always be
 * necessary to notify the remote end. For example, if requests are in flight
 * in a backend, the front may be able to queue further requests without
 * notifying the back (if the back checks for new requests when it queues
 * responses).
 *
 * When enqueuing requests or responses:
 *
 * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
 * is a boolean return value. True indicates that the receiver requires an
 * asynchronous notification.
 *
 * After dequeuing requests or responses (before sleeping the connection):
 *
 * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
 * The second argument is a boolean return value. True indicates that there
 * are pending messages on the ring (i.e., the connection should not be put
 * to sleep).
 *
 * These macros will set the req_event/rsp_event field to trigger a
 * notification on the very next message that is enqueued. If you want to
 * create batches of work (i.e., only receive a notification after several
 * messages have been enqueued) then you will need to create a customised
 * version of the FINAL_CHECK macro in your own code, which sets the event
 * field appropriately.
 */

#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {           \
    RING_IDX __old = (_r)->sring->req_prod;                             \
    RING_IDX __new = (_r)->req_prod_pvt;                                \
    wmb(); /* back sees requests /before/ updated producer index */     \
    (_r)->sring->req_prod = __new;                                      \
    mb(); /* back sees new requests /before/ we check req_event */      \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <           \
                 (RING_IDX)(__new - __old));                            \
} while (0)

#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {          \
    RING_IDX __old = (_r)->sring->rsp_prod;                             \
    RING_IDX __new = (_r)->rsp_prod_pvt;                                \
    wmb(); /* front sees responses /before/ updated producer index */   \
    (_r)->sring->rsp_prod = __new;                                      \
    mb(); /* front sees new responses /before/ we check rsp_event */    \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <           \
                 (RING_IDX)(__new - __old));                            \
} while (0)
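
/*
 * Front-end production sketch (illustrative only; fill_in_request(), the
 * event-channel irq and notify_remote_via_irq() are assumptions, not part of
 * this header).  With a struct mytag_front_ring "ring" set up by
 * FRONT_RING_INIT(), a request is queued and the back end is notified only
 * when it asked to be:
 *
 *     int notify;
 *     struct request *req = RING_GET_REQUEST(&ring, ring.req_prod_pvt);
 *
 *     fill_in_request(req);
 *     ring.req_prod_pvt++;
 *
 *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *     if (notify)
 *         notify_remote_via_irq(irq);
 */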

#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {             \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
    if (_work_to_do) break;                                             \
    (_r)->sring->req_event = (_r)->req_cons + 1;                        \
    mb();                                                               \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
} while (0)

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {            \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
    if (_work_to_do) break;                                             \
    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;                        \
    mb();                                                               \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
} while (0)
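
/*
 * Park/sleep sketch (illustrative only; service_ring() is a hypothetical
 * stand-in for the consumption loop shown earlier).  Before blocking, the
 * back end re-arms req_event via RING_FINAL_CHECK_FOR_REQUESTS() and loops
 * if a request slipped in meanwhile, so no wakeup is lost:
 *
 *     int more_to_do;
 *
 *     do {
 *         service_ring(&ring);
 *         RING_FINAL_CHECK_FOR_REQUESTS(&ring, more_to_do);
 *     } while (more_to_do);
 *     // Safe to block now: the next queued request raises an event.
 */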

#endif /* __XEN_PUBLIC_IO_RING_H__ */