symbian-qemu-0.9.1-12/qemu-symbian-svp/hw/etraxfs_dma.c
/*
 * QEMU ETRAX DMA Controller.
 *
 * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdio.h>
#include <sys/time.h>
#include "hw.h"
#include "qemu-common.h"
#include "sysemu.h"

#include "etraxfs_dma.h"

#define D(x)

#define RW_DATA           0x0
#define RW_SAVED_DATA     0x58
#define RW_SAVED_DATA_BUF 0x5c
#define RW_GROUP          0x60
#define RW_GROUP_DOWN     0x7c
#define RW_CMD            0x80
#define RW_CFG            0x84
#define RW_STAT           0x88
#define RW_INTR_MASK      0x8c
#define RW_ACK_INTR       0x90
#define R_INTR            0x94
#define R_MASKED_INTR     0x98
#define RW_STREAM_CMD     0x9c

#define DMA_REG_MAX   0x100
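
/* Per-channel register offsets.  Each channel decodes DMA_REG_MAX bytes of
   registers, shadowed by byte offset in fs_dma_channel.regs[], and occupies a
   0x2000 byte window in the physical address map (see fs_channel()).  */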
       
/* descriptors */

// ------------------------------------------------------------ dma_descr_group
typedef struct dma_descr_group {
  struct dma_descr_group       *next;
  unsigned                      eol        : 1;
  unsigned                      tol        : 1;
  unsigned                      bol        : 1;
  unsigned                                 : 1;
  unsigned                      intr       : 1;
  unsigned                                 : 2;
  unsigned                      en         : 1;
  unsigned                                 : 7;
  unsigned                      dis        : 1;
  unsigned                      md         : 16;
  struct dma_descr_group       *up;
  union {
    struct dma_descr_context   *context;
    struct dma_descr_group     *group;
  }                             down;
} dma_descr_group;

// ---------------------------------------------------------- dma_descr_context
typedef struct dma_descr_context {
  struct dma_descr_context     *next;
  unsigned                      eol        : 1;
  unsigned                                 : 3;
  unsigned                      intr       : 1;
  unsigned                                 : 1;
  unsigned                      store_mode : 1;
  unsigned                      en         : 1;
  unsigned                                 : 7;
  unsigned                      dis        : 1;
  unsigned                      md0        : 16;
  unsigned                      md1;
  unsigned                      md2;
  unsigned                      md3;
  unsigned                      md4;
  struct dma_descr_data        *saved_data;
  char                         *saved_data_buf;
} dma_descr_context;

// ------------------------------------------------------------- dma_descr_data
typedef struct dma_descr_data {
  struct dma_descr_data        *next;
  char                         *buf;
  unsigned                      eol        : 1;
  unsigned                                 : 2;
  unsigned                      out_eop    : 1;
  unsigned                      intr       : 1;
  unsigned                      wait       : 1;
  unsigned                                 : 2;
  unsigned                                 : 3;
  unsigned                      in_eop     : 1;
  unsigned                                 : 4;
  unsigned                      md         : 16;
  char                         *after;
} dma_descr_data;
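
/* These structs mirror the hardware descriptors as they sit in guest memory.
   The pointer-typed fields (next, buf, after, saved_data, ...) carry guest
   physical addresses rather than host pointers, which is why the code below
   converts them with (uint32_t)(unsigned long) casts.  Besides the endianness
   FIXMEs, note that the layout only matches the 32-bit guest when host
   pointers are 32 bits wide.  */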
       
/* Constants */
enum {
  regk_dma_ack_pkt                         = 0x00000100,
  regk_dma_anytime                         = 0x00000001,
  regk_dma_array                           = 0x00000008,
  regk_dma_burst                           = 0x00000020,
  regk_dma_client                          = 0x00000002,
  regk_dma_copy_next                       = 0x00000010,
  regk_dma_copy_up                         = 0x00000020,
  regk_dma_data_at_eol                     = 0x00000001,
  regk_dma_dis_c                           = 0x00000010,
  regk_dma_dis_g                           = 0x00000020,
  regk_dma_idle                            = 0x00000001,
  regk_dma_intern                          = 0x00000004,
  regk_dma_load_c                          = 0x00000200,
  regk_dma_load_c_n                        = 0x00000280,
  regk_dma_load_c_next                     = 0x00000240,
  regk_dma_load_d                          = 0x00000140,
  regk_dma_load_g                          = 0x00000300,
  regk_dma_load_g_down                     = 0x000003c0,
  regk_dma_load_g_next                     = 0x00000340,
  regk_dma_load_g_up                       = 0x00000380,
  regk_dma_next_en                         = 0x00000010,
  regk_dma_next_pkt                        = 0x00000010,
  regk_dma_no                              = 0x00000000,
  regk_dma_only_at_wait                    = 0x00000000,
  regk_dma_restore                         = 0x00000020,
  regk_dma_rst                             = 0x00000001,
  regk_dma_running                         = 0x00000004,
  regk_dma_rw_cfg_default                  = 0x00000000,
  regk_dma_rw_cmd_default                  = 0x00000000,
  regk_dma_rw_intr_mask_default            = 0x00000000,
  regk_dma_rw_stat_default                 = 0x00000101,
  regk_dma_rw_stream_cmd_default           = 0x00000000,
  regk_dma_save_down                       = 0x00000020,
  regk_dma_save_up                         = 0x00000020,
  regk_dma_set_reg                         = 0x00000050,
  regk_dma_set_w_size1                     = 0x00000190,
  regk_dma_set_w_size2                     = 0x000001a0,
  regk_dma_set_w_size4                     = 0x000001c0,
  regk_dma_stopped                         = 0x00000002,
  regk_dma_store_c                         = 0x00000002,
  regk_dma_store_descr                     = 0x00000000,
  regk_dma_store_g                         = 0x00000004,
  regk_dma_store_md                        = 0x00000001,
  regk_dma_sw                              = 0x00000008,
  regk_dma_update_down                     = 0x00000020,
  regk_dma_yes                             = 0x00000001
};

enum dma_ch_state
{
	RST = 1,
	STOPPED = 2,
	RUNNING = 4
};

struct fs_dma_channel
{
	qemu_irq *irq;
	struct etraxfs_dma_client *client;

	/* Internal status.  */
	int stream_cmd_src;
	enum dma_ch_state state;

	unsigned int input : 1;
	unsigned int eol : 1;

	struct dma_descr_group current_g;
	struct dma_descr_context current_c;
	struct dma_descr_data current_d;

	/* Control registers.  */
	uint32_t regs[DMA_REG_MAX];
};

struct fs_dma_ctrl
{
	int map;
	CPUState *env;

	int nr_channels;
	struct fs_dma_channel *channels;

	QEMUBH *bh;
};
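
/* A single fs_dma_ctrl instance models the whole controller: it owns the
   per-channel shadow state above plus one QEMU bottom-half (bh) that pumps
   all RUNNING channels from DMA_run().  */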
       
static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg)
{
	return ctrl->channels[c].regs[reg];
}

static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c)
{
	return channel_reg(ctrl, c, RW_CFG) & 2;
}

static inline int channel_en(struct fs_dma_ctrl *ctrl, int c)
{
	return (channel_reg(ctrl, c, RW_CFG) & 1)
		&& ctrl->channels[c].client;
}

static inline int fs_channel(target_phys_addr_t addr)
{
	/* Every channel has a 0x2000 ctrl register map.  */
	return addr >> 13;
}

#ifdef USE_THIS_DEAD_CODE
static void channel_load_g(struct fs_dma_ctrl *ctrl, int c)
{
	target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP);

	/* Load and decode. FIXME: handle endianness.  */
	cpu_physical_memory_read (addr,
				  (void *) &ctrl->channels[c].current_g,
				  sizeof ctrl->channels[c].current_g);
}

static void dump_c(int ch, struct dma_descr_context *c)
{
	printf("%s ch=%d\n", __func__, ch);
	printf("next=%p\n", c->next);
	printf("saved_data=%p\n", c->saved_data);
	printf("saved_data_buf=%p\n", c->saved_data_buf);
	printf("eol=%x\n", (uint32_t) c->eol);
}

static void dump_d(int ch, struct dma_descr_data *d)
{
	printf("%s ch=%d\n", __func__, ch);
	printf("next=%p\n", d->next);
	printf("buf=%p\n", d->buf);
	printf("after=%p\n", d->after);
	printf("intr=%x\n", (uint32_t) d->intr);
	printf("out_eop=%x\n", (uint32_t) d->out_eop);
	printf("in_eop=%x\n", (uint32_t) d->in_eop);
	printf("eol=%x\n", (uint32_t) d->eol);
}
#endif

static void channel_load_c(struct fs_dma_ctrl *ctrl, int c)
{
	target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP_DOWN);

	/* Load and decode. FIXME: handle endianness.  */
	cpu_physical_memory_read (addr,
				  (void *) &ctrl->channels[c].current_c,
				  sizeof ctrl->channels[c].current_c);

	D(dump_c(c, &ctrl->channels[c].current_c));
	/* I guess this should update the current pos.  */
	ctrl->channels[c].regs[RW_SAVED_DATA] =
		(uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data;
	ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
		(uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf;
}

static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
{
	target_phys_addr_t addr = channel_reg(ctrl, c, RW_SAVED_DATA);

	/* Load and decode. FIXME: handle endianness.  */
	D(printf("%s ch=%d addr=%x\n", __func__, c, addr));
	cpu_physical_memory_read (addr,
				  (void *) &ctrl->channels[c].current_d,
				  sizeof ctrl->channels[c].current_d);

	D(dump_d(c, &ctrl->channels[c].current_d));
	ctrl->channels[c].regs[RW_DATA] = addr;
}

static void channel_store_c(struct fs_dma_ctrl *ctrl, int c)
{
	target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP_DOWN);

	/* Encode and store. FIXME: handle endianness.  */
	D(printf("%s ch=%d addr=%x\n", __func__, c, addr));
	D(dump_d(c, &ctrl->channels[c].current_d));
	cpu_physical_memory_write (addr,
				  (void *) &ctrl->channels[c].current_c,
				  sizeof ctrl->channels[c].current_c);
}

static void channel_store_d(struct fs_dma_ctrl *ctrl, int c)
{
	target_phys_addr_t addr = channel_reg(ctrl, c, RW_SAVED_DATA);

	/* Encode and store. FIXME: handle endianness.  */
	D(printf("%s ch=%d addr=%x\n", __func__, c, addr));
	cpu_physical_memory_write (addr,
				  (void *) &ctrl->channels[c].current_d,
				  sizeof ctrl->channels[c].current_d);
}

static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c)
{
	/* FIXME:  */
}

static inline void channel_start(struct fs_dma_ctrl *ctrl, int c)
{
	if (ctrl->channels[c].client)
	{
		ctrl->channels[c].eol = 0;
		ctrl->channels[c].state = RUNNING;
	} else
		printf("WARNING: starting DMA ch %d with no client\n", c);

	qemu_bh_schedule_idle(ctrl->bh);
}

static void channel_continue(struct fs_dma_ctrl *ctrl, int c)
{
	if (!channel_en(ctrl, c)
	    || channel_stopped(ctrl, c)
	    || ctrl->channels[c].state != RUNNING
	    /* Only reload the current data descriptor if it has eol set.  */
	    || !ctrl->channels[c].current_d.eol) {
		D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n",
			 c, ctrl->channels[c].state,
			 channel_stopped(ctrl, c),
			 channel_en(ctrl, c),
			 ctrl->channels[c].eol));
		D(dump_d(c, &ctrl->channels[c].current_d));
		return;
	}

	/* Reload the current descriptor.  */
	channel_load_d(ctrl, c);

	/* If the current descriptor cleared the eol flag and we had already
	   reached eol state, do the continue.  */
	if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) {
		D(printf("continue %d ok %p\n", c,
			 ctrl->channels[c].current_d.next));
		ctrl->channels[c].regs[RW_SAVED_DATA] =
			(uint32_t)(unsigned long)ctrl->channels[c].current_d.next;
		channel_load_d(ctrl, c);
		channel_start(ctrl, c);
	}
	ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
		(uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
}

static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v)
{
	unsigned int cmd = v & ((1 << 10) - 1);

	D(printf("%s ch=%d cmd=%x\n",
		 __func__, c, cmd));
	if (cmd & regk_dma_load_d) {
		channel_load_d(ctrl, c);
		if (cmd & regk_dma_burst)
			channel_start(ctrl, c);
	}

	if (cmd & regk_dma_load_c) {
		channel_load_c(ctrl, c);
		channel_start(ctrl, c);
	}
}

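/* R_INTR collects the raw interrupt sources; writes to RW_ACK_INTR clear the
   acknowledged bits, R_MASKED_INTR is recomputed as R_INTR & RW_INTR_MASK and
   the channel's IRQ line is raised or lowered accordingly.  */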
       
static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c)
{
	D(printf("%s %d\n", __func__, c));
	ctrl->channels[c].regs[R_INTR] &=
		~(ctrl->channels[c].regs[RW_ACK_INTR]);

	ctrl->channels[c].regs[R_MASKED_INTR] =
		ctrl->channels[c].regs[R_INTR]
		& ctrl->channels[c].regs[RW_INTR_MASK];

	D(printf("%s: chan=%d masked_intr=%x\n", __func__,
		 c,
		 ctrl->channels[c].regs[R_MASKED_INTR]));

	if (ctrl->channels[c].regs[R_MASKED_INTR])
		qemu_irq_raise(ctrl->channels[c].irq[0]);
	else
		qemu_irq_lower(ctrl->channels[c].irq[0]);
}

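/* Output pump: walk the data descriptor chain of an output channel, copy each
   [buf, after) region out of guest memory in pieces of at most 2 KB (the local
   bounce buffer) and hand them to the attached client's push() callback,
   raising the data interrupt and following .next until eol is reached.  */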
       
static int channel_out_run(struct fs_dma_ctrl *ctrl, int c)
{
	uint32_t len;
	uint32_t saved_data_buf;
	unsigned char buf[2 * 1024];

	if (ctrl->channels[c].eol)
		return 0;

	do {
		saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);

		D(printf("ch=%d buf=%x after=%x saved_data_buf=%x\n",
			 c,
			 (uint32_t)ctrl->channels[c].current_d.buf,
			 (uint32_t)ctrl->channels[c].current_d.after,
			 saved_data_buf));

		len = (uint32_t)(unsigned long)
			ctrl->channels[c].current_d.after;
		len -= saved_data_buf;

		if (len > sizeof buf)
			len = sizeof buf;
		cpu_physical_memory_read (saved_data_buf, buf, len);

		D(printf("channel %d pushes %x %u bytes\n", c,
			 saved_data_buf, len));

		if (ctrl->channels[c].client->client.push)
			ctrl->channels[c].client->client.push(
				ctrl->channels[c].client->client.opaque,
				buf, len);
		else
			printf("WARNING: DMA ch%d dataloss,"
			       " no attached client.\n", c);

		saved_data_buf += len;

		if (saved_data_buf == (uint32_t)(unsigned long)
				ctrl->channels[c].current_d.after) {
			/* Done. Step to next.  */
			if (ctrl->channels[c].current_d.out_eop) {
				/* TODO: signal eop to the client.  */
				D(printf("signal eop\n"));
			}
			if (ctrl->channels[c].current_d.intr) {
				/* TODO: signal eop to the client.  */
				/* data intr.  */
				D(printf("signal intr\n"));
				ctrl->channels[c].regs[R_INTR] |= (1 << 2);
				channel_update_irq(ctrl, c);
			}
			if (ctrl->channels[c].current_d.eol) {
				D(printf("channel %d EOL\n", c));
				ctrl->channels[c].eol = 1;

				/* Mark the context as disabled.  */
				ctrl->channels[c].current_c.dis = 1;
				channel_store_c(ctrl, c);

				channel_stop(ctrl, c);
			} else {
				ctrl->channels[c].regs[RW_SAVED_DATA] =
					(uint32_t)(unsigned long)ctrl->
						channels[c].current_d.next;
				/* Load new descriptor.  */
				channel_load_d(ctrl, c);
				saved_data_buf = (uint32_t)(unsigned long)
					ctrl->channels[c].current_d.buf;
			}

			channel_store_d(ctrl, c);
			ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
							saved_data_buf;
			D(dump_d(c, &ctrl->channels[c].current_d));
		}
		ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
	} while (!ctrl->channels[c].eol);
	return 1;
}

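/* Input path: a client hands the controller a buffer via etraxfs_dmac_input();
   as much as fits in the current data descriptor is written to guest memory at
   RW_SAVED_DATA_BUF, interrupts are raised as requested and, at end of packet
   or end of descriptor, the descriptor is written back and the chain advanced.
   Returns the number of bytes actually consumed.  */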
       
static int channel_in_process(struct fs_dma_ctrl *ctrl, int c,
			      unsigned char *buf, int buflen, int eop)
{
	uint32_t len;
	uint32_t saved_data_buf;

	if (ctrl->channels[c].eol == 1)
		return 0;

	saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
	len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after;
	len -= saved_data_buf;

	if (len > buflen)
		len = buflen;

	cpu_physical_memory_write (saved_data_buf, buf, len);
	saved_data_buf += len;

	if (saved_data_buf ==
	    (uint32_t)(unsigned long)ctrl->channels[c].current_d.after
	    || eop) {
		uint32_t r_intr = ctrl->channels[c].regs[R_INTR];

		D(printf("in dscr end len=%d\n",
			 ctrl->channels[c].current_d.after
			 - ctrl->channels[c].current_d.buf));
		ctrl->channels[c].current_d.after =
			(void *)(unsigned long) saved_data_buf;

		/* Done. Step to next.  */
		if (ctrl->channels[c].current_d.intr) {
			/* TODO: signal eop to the client.  */
			/* data intr.  */
			ctrl->channels[c].regs[R_INTR] |= 3;
		}
		if (eop) {
			ctrl->channels[c].current_d.in_eop = 1;
			ctrl->channels[c].regs[R_INTR] |= 8;
		}
		if (r_intr != ctrl->channels[c].regs[R_INTR])
			channel_update_irq(ctrl, c);

		channel_store_d(ctrl, c);
		D(dump_d(c, &ctrl->channels[c].current_d));

		if (ctrl->channels[c].current_d.eol) {
			D(printf("channel %d EOL\n", c));
			ctrl->channels[c].eol = 1;

			/* Mark the context as disabled.  */
			ctrl->channels[c].current_c.dis = 1;
			channel_store_c(ctrl, c);

			channel_stop(ctrl, c);
		} else {
			ctrl->channels[c].regs[RW_SAVED_DATA] =
				(uint32_t)(unsigned long)ctrl->
					channels[c].current_d.next;
			/* Load new descriptor.  */
			channel_load_d(ctrl, c);
			saved_data_buf = (uint32_t)(unsigned long)
				ctrl->channels[c].current_d.buf;
		}
	}

	ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
	return len;
}

static inline int channel_in_run(struct fs_dma_ctrl *ctrl, int c)
{
	if (ctrl->channels[c].client->client.pull) {
		ctrl->channels[c].client->client.pull(
			ctrl->channels[c].client->client.opaque);
		return 1;
	} else
		return 0;
}

static uint32_t dma_rinvalid (void *opaque, target_phys_addr_t addr)
{
	struct fs_dma_ctrl *ctrl = opaque;
	CPUState *env = ctrl->env;
	cpu_abort(env, "Unsupported short access. reg=" TARGET_FMT_plx "\n",
		  addr);
	return 0;
}

static uint32_t
dma_readl (void *opaque, target_phys_addr_t addr)
{
	struct fs_dma_ctrl *ctrl = opaque;
	int c;
	uint32_t r = 0;

	/* Make addr relative to this channel and bounded to nr regs.  */
	c = fs_channel(addr);
	addr &= 0xff;
	switch (addr)
	{
		case RW_STAT:
			r = ctrl->channels[c].state & 7;
			r |= ctrl->channels[c].eol << 5;
			r |= ctrl->channels[c].stream_cmd_src << 8;
			break;

		default:
			r = ctrl->channels[c].regs[addr];
			D(printf ("%s c=%d addr=%x\n",
				  __func__, c, addr));
			break;
	}
	return r;
}

static void
dma_winvalid (void *opaque, target_phys_addr_t addr, uint32_t value)
{
	struct fs_dma_ctrl *ctrl = opaque;
	CPUState *env = ctrl->env;
	cpu_abort(env, "Unsupported short access. reg=" TARGET_FMT_plx "\n",
		  addr);
}

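/* RW_CFG bit 0 enables the channel and bit 1 requests a stop (see
   channel_en() and channel_stopped()); the channel state shadow is derived
   from those bits here and reported back through RW_STAT in dma_readl().  */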
       
static void
dma_update_state(struct fs_dma_ctrl *ctrl, int c)
{
	if ((ctrl->channels[c].regs[RW_CFG] & 1) != 3) {
		if (ctrl->channels[c].regs[RW_CFG] & 2)
			ctrl->channels[c].state = STOPPED;
		if (!(ctrl->channels[c].regs[RW_CFG] & 1))
			ctrl->channels[c].state = RST;
	}
}

static void
dma_writel (void *opaque, target_phys_addr_t addr, uint32_t value)
{
	struct fs_dma_ctrl *ctrl = opaque;
	int c;

	/* Make addr relative to this channel and bounded to nr regs.  */
	c = fs_channel(addr);
	addr &= 0xff;
	switch (addr)
	{
		case RW_DATA:
			ctrl->channels[c].regs[addr] = value;
			break;

		case RW_CFG:
			ctrl->channels[c].regs[addr] = value;
			dma_update_state(ctrl, c);
			break;
		case RW_CMD:
			/* continue.  */
			if (value & ~1)
				printf("Invalid store to ch=%d RW_CMD %x\n",
				       c, value);
			ctrl->channels[c].regs[addr] = value;
			channel_continue(ctrl, c);
			break;

		case RW_SAVED_DATA:
		case RW_SAVED_DATA_BUF:
		case RW_GROUP:
		case RW_GROUP_DOWN:
			ctrl->channels[c].regs[addr] = value;
			break;

		case RW_ACK_INTR:
		case RW_INTR_MASK:
			ctrl->channels[c].regs[addr] = value;
			channel_update_irq(ctrl, c);
			if (addr == RW_ACK_INTR)
				ctrl->channels[c].regs[RW_ACK_INTR] = 0;
			break;

		case RW_STREAM_CMD:
			if (value & ~1023)
				printf("Invalid store to ch=%d "
				       "RW_STREAMCMD %x\n",
				       c, value);
			ctrl->channels[c].regs[addr] = value;
			D(printf("stream_cmd ch=%d\n", c));
			channel_stream_cmd(ctrl, c, value);
			break;

		default:
			D(printf ("%s c=%d addr=%x\n", __func__, c, addr));
			break;
	}
}

static CPUReadMemoryFunc *dma_read[] = {
	&dma_rinvalid,
	&dma_rinvalid,
	&dma_readl,
};

static CPUWriteMemoryFunc *dma_write[] = {
	&dma_winvalid,
	&dma_winvalid,
	&dma_writel,
};

static int etraxfs_dmac_run(void *opaque)
{
	struct fs_dma_ctrl *ctrl = opaque;
	int i;
	int p = 0;

	for (i = 0;
	     i < ctrl->nr_channels;
	     i++)
	{
		if (ctrl->channels[i].state == RUNNING)
		{
			if (ctrl->channels[i].input) {
				p += channel_in_run(ctrl, i);
			} else {
				p += channel_out_run(ctrl, i);
			}
		}
	}
	return p;
}

int etraxfs_dmac_input(struct etraxfs_dma_client *client,
		       void *buf, int len, int eop)
{
	return channel_in_process(client->ctrl, client->channel,
				  buf, len, eop);
}

/* Connect an IRQ line with a channel.  */
void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input)
{
	struct fs_dma_ctrl *ctrl = opaque;
	ctrl->channels[c].irq = line;
	ctrl->channels[c].input = input;
}

void etraxfs_dmac_connect_client(void *opaque, int c,
				 struct etraxfs_dma_client *cl)
{
	struct fs_dma_ctrl *ctrl = opaque;
	cl->ctrl = ctrl;
	cl->channel = c;
	ctrl->channels[c].client = cl;
}

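/* Bottom-half handler: while the VM is running it pumps every RUNNING channel
   once via etraxfs_dmac_run() and reschedules itself as an idle bottom-half as
   long as there is still progress to make, so transfers proceed without
   busy-waiting.  */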
       
static void DMA_run(void *opaque)
{
	struct fs_dma_ctrl *etraxfs_dmac = opaque;
	int p = 1;

	if (vm_running)
		p = etraxfs_dmac_run(etraxfs_dmac);

	if (p)
		qemu_bh_schedule_idle(etraxfs_dmac->bh);
}

void *etraxfs_dmac_init(CPUState *env,
			target_phys_addr_t base, int nr_channels)
{
	struct fs_dma_ctrl *ctrl = NULL;

	ctrl = qemu_mallocz(sizeof *ctrl);
	if (!ctrl)
		return NULL;

	ctrl->bh = qemu_bh_new(DMA_run, ctrl);

	ctrl->env = env;
	ctrl->nr_channels = nr_channels;
	ctrl->channels = qemu_mallocz(sizeof ctrl->channels[0] * nr_channels);
	if (!ctrl->channels)
		goto err;

	ctrl->map = cpu_register_io_memory(0, dma_read, dma_write, ctrl);
	cpu_register_physical_memory(base, nr_channels * 0x2000, ctrl->map);
	return ctrl;
  err:
	qemu_free(ctrl->channels);
	qemu_free(ctrl);
	return NULL;
}