/*
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include <slirp.h>

int if_queued = 0;              /* Number of packets queued so far */

struct mbuf if_fastq;           /* fast queue (for interactive data) */
struct mbuf if_batchq;          /* queue for non-interactive data */
struct mbuf *next_m;            /* Pointer to next mbuf to output */
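
/*
 * Each mbuf also carries ifs_next/ifs_prev links which chain together
 * the packets of a single session.  ifs_init() turns an mbuf into a
 * one-element circular list; ifs_insque()/ifs_remque() below insert
 * into and remove from such a list.
 */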
#define ifs_init(ifm) ((ifm)->ifs_next = (ifm)->ifs_prev = (ifm))

static void
ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
{
    ifm->ifs_next = ifmhead->ifs_next;
    ifmhead->ifs_next = ifm;
    ifm->ifs_prev = ifmhead;
    ifm->ifs_next->ifs_prev = ifm;
}

static void
ifs_remque(struct mbuf *ifm)
{
    ifm->ifs_prev->ifs_next = ifm->ifs_next;
    ifm->ifs_next->ifs_prev = ifm->ifs_prev;
}
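
/*
 * Initialize both output queues as empty circular lists and point
 * next_m at the batchq head, meaning "no batchq packet pending".
 */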
void
if_init()
{
    if_fastq.ifq_next = if_fastq.ifq_prev = &if_fastq;
    if_batchq.ifq_next = if_batchq.ifq_prev = &if_batchq;
    // sl_compress_init(&comp_s);
    next_m = &if_batchq;
}
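
/*
 * The #if 0 block below (writen() and the tty-reading if_input()) is
 * compiled out; the active packet path continues at if_output().
 */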
#if 0
/*
 * This shouldn't be needed since the modem is blocking and
 * we don't expect any signals, but what the hell..
 */
inline int
writen(fd, bptr, n)
    int fd;
    char *bptr;
    int n;
{
    int ret;
    int total;

    /* This should succeed most of the time */
    ret = send(fd, bptr, n, 0);
    if (ret == n || ret <= 0)
        return ret;

    /* Didn't write everything, go into the loop */
    total = ret;
    while (n > total) {
        ret = send(fd, bptr + total, n - total, 0);
        if (ret <= 0)
            return ret;
        total += ret;
    }
    return total;
}

/*
 * if_input - read() the tty, do "top level" processing (i.e. check
 * for any escapes), and pass the data on to (*ttyp->if_input)
 *
 * XXXXX Any zeros arriving by themselves are NOT placed into the arriving packet.
 */
#define INBUFF_SIZE 2048 /* XXX */
void
if_input(ttyp)
    struct ttys *ttyp;
{
    u_char if_inbuff[INBUFF_SIZE];
    int if_n;

    DEBUG_CALL("if_input");
    DEBUG_ARG("ttyp = %lx", (long)ttyp);

    if_n = recv(ttyp->fd, (char *)if_inbuff, INBUFF_SIZE, 0);

    DEBUG_MISC((dfd, " read %d bytes\n", if_n));

    if (if_n <= 0) {
        if (if_n == 0 || (errno != EINTR && errno != EAGAIN)) {
            if (ttyp->up)
                link_up--;
            tty_detached(ttyp, 0);
        }
        return;
    }
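    /*
     * A lone '0' or '1' byte is counted rather than passed on: five
     * consecutive '0's shut slirp down, five consecutive '1's detach
     * this tty.
     */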
    if (if_n == 1) {
        if (*if_inbuff == '0') {
            ttyp->ones = 0;
            if (++ttyp->zeros >= 5)
                slirp_exit(0);
            return;
        }
        if (*if_inbuff == '1') {
            ttyp->zeros = 0;
            if (++ttyp->ones >= 5)
                tty_detached(ttyp, 0);
            return;
        }
    }
    ttyp->ones = ttyp->zeros = 0;

    (*ttyp->if_input)(ttyp, if_inbuff, if_n);
}
#endif

/*
 * if_output: Queue packet into an output queue.
 * There are two output queues: if_fastq and if_batchq.
 * Each output queue is a doubly linked list of doubly linked lists
 * of mbufs, each list belonging to one "session" (socket). This
 * way, we can output packets fairly by sending one packet from each
 * session, instead of all the packets from one session, then all packets
 * from the next session, etc. Packets on the if_fastq get absolute
 * priority, but if one session hogs the link, it gets "downgraded"
 * to the batchq until it runs out of packets, then it'll return
 * to the fastq (e.g. if the user does an ls -alR in a telnet session,
 * it'll temporarily get downgraded to the batchq).
 */
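/*
 * Roughly, the layout is (each session's head packet sits on the
 * ifq_* list; its remaining packets hang off it on the ifs_* list):
 *
 *   if_batchq <-> [sess A pkt 1] <-> [sess B pkt 1] <-> if_batchq
 *                       |                  |
 *                  [A pkt 2..n]       [B pkt 2..n]
 */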
void
if_output(so, ifm)
    struct socket *so;
    struct mbuf *ifm;
{
    struct mbuf *ifq;
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %lx", (long)so);
    DEBUG_ARG("ifm = %lx", (long)ifm);

    /*
     * First remove the mbuf from m_usedlist,
     * since we're gonna use m_next and m_prev ourselves
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }

    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on fastq,
     * but gets too greedy... hence it'll be downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of order)
     * XXX add cache here?
     */
    for (ifq = if_batchq.ifq_prev; ifq != &if_batchq; ifq = ifq->ifq_prev) {
        if (so == ifq->ifq_so) {
            /* A match! */
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    }

    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        ifq = if_fastq.ifq_prev;
        on_fastq = 1;
        /*
         * Check if this packet is a part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else
        ifq = if_batchq.ifq_prev;

    /* Create a new doubly linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(ifm, ifq);

diddit:
    ++if_queued;

    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq. A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
        if (on_fastq && ((so->so_nqueued >= 6) &&
                         (so->so_nqueued - so->so_queued) >= 3)) {

            /* Remove from current queue... */
            remque(ifm->ifs_next);

            /* ...And insert in the new. That'll teach ya! */
            insque(ifm->ifs_next, &if_batchq);
        }
    }
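
    /*
     * Unless FULL_BOLT is defined, try to push the packet out right
     * away; if the link is not up yet it simply stays queued until a
     * later if_start() call.
     */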
#ifndef FULL_BOLT
    /*
     * This prevents us from malloc()ing too many mbufs
     */
    if (link_up) {
        /* if_start will check towrite */
        if_start();
    }
#endif
}

/*
 * Send a packet
 * We choose a packet based on its position in the output queues;
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else. Otherwise we choose the first packet from the
 * batchq and send it. The next packet chosen will be from the session
 * after this one, then the session after that one, and so on.. So,
 * for example, if there are 3 ftp sessions fighting for bandwidth,
 * one packet will be sent from the first session, then one packet
 * from the second session, then one packet from the third, then back
 * to the first, etc. etc.
 */
void
if_start(void)
{
    struct mbuf *ifm, *ifqt;

    DEBUG_CALL("if_start");

    if (if_queued == 0)
        return; /* Nothing to do */
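
    /*
     * Each pass through the loop below sends exactly one packet; keep
     * going until the queues are empty or slirp_can_output() says stop.
     */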
again:
    /* Check if we can really output */
    if (!slirp_can_output())
        return;

    /*
     * See which queue to get next packet from
     * If there's something in the fastq, select it immediately
     */
    if (if_fastq.ifq_next != &if_fastq) {
        ifm = if_fastq.ifq_next;
    } else {
        /* Nothing on fastq, see if next_m is valid */
        if (next_m != &if_batchq)
            ifm = next_m;
        else
            ifm = if_batchq.ifq_next;

        /* Set which packet to send on next iteration */
        next_m = ifm->ifq_next;
    }
    /* Remove it from the queue */
    ifqt = ifm->ifq_prev;
    remque(ifm);
    --if_queued;

    /*
     * If there are more packets for this session, re-queue them
     * in the slot this one occupied, so the session keeps exactly
     * one packet on the main queue at a time
     */
    if (ifm->ifs_next != /* ifm->ifs_prev != */ ifm) {
        insque(ifm->ifs_next, ifqt);
        ifs_remque(ifm);
    }

    /* Update so_queued */
    if (ifm->ifq_so) {
        if (--ifm->ifq_so->so_queued == 0)
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
    }

    /* Encapsulate the packet for sending */
    if_encap((uint8_t *)ifm->m_data, ifm->m_len);

    m_free(ifm);

    if (if_queued)
        goto again;
}