# Copyright (c) 2001-2002 Joshua Chamas, Chamas Enterprises Inc. All rights reserved.
# Sponsored by development on NodeWorks http://www.nodeworks.com and Apache::ASP
# http://www.apache-asp.org
#
# This program is free software; you can redistribute it
# and/or modify it under the same terms as Perl itself.

package MLDBM::Sync;
$VERSION = '0.30';

use MLDBM;
use MLDBM::Sync::SDBM_File;
use Data::Dumper;
use Fcntl qw(:flock);
use Digest::MD5 qw(md5_hex);
use strict;
use Carp qw(confess);
no strict qw(refs);
use vars qw($AUTOLOAD @EXT $CACHE_ERR $LOCK_SH $LOCK_EX $LOCK_UN);

eval "use Tie::Cache;";
if ($@) {
    $CACHE_ERR = $@;
}

$LOCK_SH = LOCK_SH;
$LOCK_UN = LOCK_UN;
$LOCK_EX = LOCK_EX;

@EXT = ('.pag', '.dir', '');

sub TIEHASH {
    my($class, $file, @args) = @_;

    $file =~ /^(.*)$/s;
    $file = $1;
    my $fh = $file.".lock";

    my $self = bless {
        'file'         => $file,
        'args'         => [ $file, @args ],
        'lock_fh'      => $fh,
        'lock_file'    => $fh,
        'lock_num'     => 0,
        'md5_keys'     => 0,
        'pid'          => $$,
        'keys'         => [],
        'db_type'      => $MLDBM::UseDB,
        'serializer'   => $MLDBM::Serializer,
        'remove_taint' => $MLDBM::RemoveTaint,
    };

    $self;
}

sub DESTROY {
    my $self = shift;
    if ($self->{lock_num}) {
        $self->{lock_num} = 1;
        $self->UnLock;
    }
}

sub AUTOLOAD {
    my($self, $key, $value) = @_;
    $AUTOLOAD =~ /::([^:]+)$/;
    my $func = $1;
    grep($func eq $_, ('FETCH', 'STORE', 'EXISTS', 'DELETE'))
      || die("$func not handled by object $self");

    ## CHECKSUM KEYS
    if (defined $key && $self->{md5_keys}) {
        $key = $self->SyncChecksum($key);
    }

    # CACHE, short circuit if found in cache on FETCH/EXISTS;
    # done after the checksum, since that's what we store
    my $cache = (defined $key) ? $self->{cache} : undef;
    if ($cache && ($func eq 'FETCH' or $func eq 'EXISTS')) {
        my $rv = $cache->$func($key);
        defined($rv) && return($rv);
    }

    my $rv;
    if ($func eq 'FETCH' or $func eq 'EXISTS') {
        $self->read_lock;
    } else {
        $self->lock;
    }

    {
        local $MLDBM::RemoveTaint = $self->{remove_taint};
        if (defined $value) {
            $rv = $self->{dbm}->$func($key, $value);
        } else {
            $rv = $self->{dbm}->$func($key);
        }
    }

    $self->unlock;

    # update the cache after the locked critical section; no point
    # taking any extra time while holding the lock
    $cache && $cache->$func($key, $value);

    $rv;
}

sub CLEAR {
    my $self = shift;

    $self->lock;
    $self->{dbm}->CLEAR;
    $self->{dbm} = undef;

    # delete the files to free disk space
    my $unlinked = 0;
    for (@EXT) {
        my $file = $self->{file}.$_;
        next if (! -e $file);
        if (-d $file) {
            rmdir($file) || warn("can't rmdir dir $file: $!");
        } else {
            unlink($file) || die("can't unlink file $file: $!");
        }
        $unlinked++;
    }
    if ($self->{lock_num} > 1) {
        $self->SyncTie; # recreate, not done with it yet
    }

    $self->unlock;
    if ($self->{lock_num} == 0) {
        # only unlink the lock file once we are clear of all the locks
        unlink($self->{lock_file});
    }

    $self->{cache} && $self->{cache}->CLEAR;

    1;
}

# don't bother with the cache for first/next key, since iterating
# would likely blow the cache away anyway
sub FIRSTKEY {
    my $self = shift;

    if ($self->{md5_keys}) {
        confess("can't get keys() or each() on MLDBM::Sync database ".
                "with SyncKeysChecksum(1) set");
    }

    $self->read_lock;
    my $key = $self->{dbm}->FIRSTKEY();
    my @keys;
    while (1) {
        last if ! defined($key);
        push(@keys, $key);
        $key = $self->{dbm}->NEXTKEY($key);
    }
    $self->unlock;
    $self->{'keys'} = \@keys;

    $self->NEXTKEY;
}

sub NEXTKEY {
    my $self = shift;

    if ($self->{md5_keys}) {
        confess("can't get keys() or each() on MLDBM::Sync database ".
                "with SyncKeysChecksum(1) set");
    }

    my $rv = shift(@{$self->{'keys'}});
}

sub SyncChecksum {
    my($self, $key) = @_;
    if (ref $key) {
        join('g', md5_hex($$key), sprintf("%07d", length($$key)));
    } else {
        join('g', md5_hex($key), sprintf("%07d", length($key)));
    }
}

sub SyncCacheSize {
    my($self, $size) = @_;
    $CACHE_ERR && die("need Tie::Cache installed to use this feature: $CACHE_ERR");

    if ($size =~ /^(\d+)(M|K)$/) {
        my($num, $type) = ($1, $2);
        if ($type eq 'M') {
            $size = $num * 1024 * 1024;
        } elsif ($type eq 'K') {
            $size = $num * 1024;
        } else {
            die "$type symbol not understood for $size";
        }
    } else {
        ($size =~ /^\d+$/) or die("$size must be a byte size for the cache");
    }

    if ($self->{cache}) {
        $self->{cache}->CLEAR(); # purge the old cache to free up its RAM
    }

    my %cache;
    my $cache = tie %cache, 'Tie::Cache', { MaxBytes => $size };
    $self->{cache} = $cache; # use the non-tied interface, it's faster
}

sub SyncTie {
    my $self = shift;
    my %temp_hash;
    my $args = $self->{args};
    local $MLDBM::UseDB = $self->{db_type};
    local $MLDBM::Serializer = $self->{serializer};
    local $MLDBM::RemoveTaint = $self->{remove_taint};
    $self->{dbm} = tie(%temp_hash, 'MLDBM', @$args)
      || die("can't tie to MLDBM with args: ".join(',', @$args)."; error: $!");

    $self->{dbm};
}

#### DOCUMENTED API ################################################################

sub SyncKeysChecksum {
    my($self, $setting) = @_;
    if (defined $setting) {
        $self->{md5_keys} = $setting;
    } else {
        $self->{md5_keys};
    }
}

*read_lock = *ReadLock;
sub ReadLock { shift->Lock(1); }

*lock = *SyncLock = *Lock;
sub Lock {
    my($self, $read_lock) = @_;

    if ($self->{lock_num}++ == 0) {
        my $file = $self->{lock_file};
        open($self->{lock_fh}, "+>$file") || die("can't open file $file: $!");
        flock($self->{lock_fh}, ($read_lock ? $LOCK_SH : $LOCK_EX))
          || die("can't ". ($read_lock ? "read" : "write") ." lock $file: $!");
        $self->{read_lock} = $read_lock;
        $self->SyncTie;
    } else {
        if ($self->{read_lock} and ! $read_lock) {
            $self->{lock_num}--; # roll back lock count
            # confess here to help developer track this down
            confess("Can't upgrade lock type from LOCK_SH to LOCK_EX! ".
                    "This could happen if you tried to write to the MLDBM ".
                    "in a critical section locked by ReadLock(). ".
                    "Also the read expression my \$v = \$db{'key1'}{'key2'} will trigger a write ".
                    "if \$db{'key1'} does not already exist, so this will error in a ReadLock() section"
                   );
        }
        1;
    }
}

*unlock = *SyncUnLock = *UnLock;
sub UnLock {
    my $self = shift;

    if ($self->{lock_num} && $self->{lock_num}-- == 1) {
        $self->{lock_num} = 0;
        undef $self->{dbm};
        flock($self->{'lock_fh'}, $LOCK_UN) || die("can't unlock $self->{'lock_file'}: $!");
        close($self->{'lock_fh'}) || die("can't close $self->{'lock_file'}");
        $self->{read_lock} = undef;
        1;
    } else {
        1;
    }
}

sub SyncSize {
    my $self = shift;
    my $size = 0;
    for (@EXT) {
        my $file = $self->{file}.$_;
        next unless -e $file;
        $size += (stat($file))[7];

        if (-d $file) {
            opendir(DIR, $file) || next;
            my @files = readdir(DIR);
            for my $dir_file (@files) {
                next if $dir_file =~ /^\.\.?$/;
                $size += (stat("$file/$dir_file"))[7];
            }
            closedir(DIR);
        }
    }

    $size;
}

1;

__END__

=head1 NAME

MLDBM::Sync - safe concurrent access to MLDBM databases

=head1 SYNOPSIS

  use MLDBM::Sync;                       # this gets the default, SDBM_File
  use MLDBM qw(DB_File Storable);        # use Storable for serializing
  use MLDBM qw(MLDBM::Sync::SDBM_File);  # use extended SDBM_File, handles values > 1024 bytes
  use Fcntl qw(:DEFAULT);                # import symbols O_CREAT & O_RDWR for use with DBMs

  # NORMAL PROTECTED read/write with implicit locks per i/o request
  my $sync_dbm_obj = tie %cache, 'MLDBM::Sync', ... other DBM args ... or die $!;
  $cache{"AAAA"} = "BBBB";
  my $value = $cache{"AAAA"};

  # SERIALIZED PROTECTED read/write with explicit lock for both i/o requests
  my $sync_dbm_obj = tie %cache, 'MLDBM::Sync', '/tmp/syncdbm', O_CREAT|O_RDWR, 0640;
  $sync_dbm_obj->Lock;
  $cache{"AAAA"} = "BBBB";
  my $value = $cache{"AAAA"};
  $sync_dbm_obj->UnLock;

  # SERIALIZED PROTECTED READ access with explicit read lock for both reads
  $sync_dbm_obj->ReadLock;
  my @keys = keys %cache;
  my $value = $cache{'AAAA'};
  $sync_dbm_obj->UnLock;

  # MEMORY CACHE LAYER with Tie::Cache
  $sync_dbm_obj->SyncCacheSize('100K');

  # KEY CHECKSUMS, for lookups on MD5 checksums of large keys
  my $sync_dbm_obj = tie %cache, 'MLDBM::Sync', '/tmp/syncdbm', O_CREAT|O_RDWR, 0640;
  $sync_dbm_obj->SyncKeysChecksum(1);
  my $large_key = "KEY" x 10000;
  $cache{$large_key} = "LARGE";
  my $value = $cache{$large_key};


=head1 DESCRIPTION

This module wraps around the MLDBM interface, handling concurrent
access to MLDBM databases with file locking, and flushes i/o explicitly
per lock/unlock. The new [Read]Lock()/UnLock() API can be used to serialize
requests logically and improve performance for bundled reads & writes.

  my $sync_dbm_obj = tie %cache, 'MLDBM::Sync', '/tmp/syncdbm', O_CREAT|O_RDWR, 0640;

  # Write locked critical section
  $sync_dbm_obj->Lock;
    ... all accesses to DBM LOCK_EX protected, and go to same tied file handles
    $cache{'KEY'} = 'VALUE';
  $sync_dbm_obj->UnLock;

  # Read locked critical section
  $sync_dbm_obj->ReadLock;
    ... all read accesses to DBM LOCK_SH protected, and go to same tied files
    ... WARNING, cannot write to DBM in ReadLock() section, will die()
    ... WARNING, my $v = $cache{'KEY'}{'SUBKEY'} will trigger a write, so it is
    ...   not safe to use in a ReadLock() section
    my $value = $cache{'KEY'};
  $sync_dbm_obj->UnLock;

  # Normal access OK too, without explicit locking
  $cache{'KEY'} = 'VALUE';
  my $value = $cache{'KEY'};

MLDBM continues to serve as the underlying OO layer that
serializes complex data structures to be stored in the databases.
See the L<MLDBM> BUGS section for important limitations.

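
For example (a short sketch only, reusing the %cache hash tied above):
complex values are serialized whole, so to change part of a nested
structure you fetch the whole value, modify the copy, and store it
back, since substructures cannot be updated in place.

  $cache{'config'} = { color => 'blue', depth => 3 };  # serialized by MLDBM

  my $config = $cache{'config'};    # fetch the whole structure
  $config->{depth} = 5;             # modify the copy
  $cache{'config'} = $config;       # store it back in one STORE
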

MLDBM::Sync also provides built-in RAM caching with Tie::Cache
and MD5 key checksum functionality.

=head1 INSTALL

Like any other CPAN module, either use CPAN.pm, or C<perl -MCPAN -e shell>,
or get the file MLDBM-Sync-x.xx.tar.gz, gunzip, untar and:

  perl Makefile.PL
  make
  make test
  make install


=head1 LOCKING

The MLDBM::Sync wrapper protects MLDBM databases by locking
and unlocking around read and write requests to the databases.
Each new lock also needs to tie() to the database internally,
untie()ing when unlocking. This flushes any i/o for the dbm
to the operating system, and allows for concurrent read/write
access to the databases.

Without any extra effort from the developer, an existing
MLDBM database will benefit from MLDBM::Sync.

  my $dbm_obj = tie %dbm, ...;
  $dbm{"key"} = "value";

As a write or STORE operation, the above will automatically
cause the following:

  $dbm_obj->Lock;   # also ties
  $dbm{"key"} = "value";
  $dbm_obj->UnLock; # also unties

Likewise, a read or FETCH operation like:

  my $value = $dbm{"key"};

will really trigger:

  $dbm_obj->ReadLock; # also ties
  my $value = $dbm{"key"};
  $dbm_obj->UnLock;   # also unties

However, these lock operations are expensive because of the
underlying tie()/untie() that occurs for i/o flushing, so
when bundling reads & writes, a developer may explicitly
use this API for greater performance:

  # tie once to database, write 100 times
  $dbm_obj->Lock;
  for (1..100) {
    $dbm{$_} = $_ * 100;
    ...
  }
  $dbm_obj->UnLock;

  # only tie once to database, and read 100 times
  $dbm_obj->ReadLock;
  for (1..100) {
    my $value = $dbm{$_};
    ...
  }
  $dbm_obj->UnLock;


=head1 CACHING

I built MLDBM::Sync to serve as a fast and robust caching layer
for use in multi-process environments like mod_perl. In order
to provide an additional speed boost when caching static data,
I have added a RAM caching layer with Tie::Cache, which
regulates the size of the memory used with its MaxBytes setting.

To activate this caching, just:

  my $dbm = tie %cache, 'MLDBM::Sync', '/tmp/syncdbm', O_CREAT|O_RDWR, 0640;
  $dbm->SyncCacheSize(100000);  # 100000 bytes max memory used
  $dbm->SyncCacheSize('100K');  # 100 Kbytes max memory used
  $dbm->SyncCacheSize('1M');    # 1 Megabyte max memory used

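
Once a cache size is set, FETCH and EXISTS requests that hit the RAM
layer are answered without locking or re-tieing the DBM; only misses
go to disk. A rough sketch of the effect, reusing the %cache hash tied
above:

  $cache{'page'} = $html;      # written to the DBM and into the Tie::Cache layer
  my $copy = $cache{'page'};   # repeat reads are served from RAM, no lock/tie
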

The ./bench/bench_sync.pl script, run as C<bench_sync.pl -c>, will run
the tests with caching turned on, creating a benchmark with 50%
cache hits.

One run without caching was:

  === INSERT OF 50 BYTE RECORDS ===
  Time for 100 writes + 100 reads for SDBM_File                  0.16 seconds    12288 bytes
  Time for 100 writes + 100 reads for MLDBM::Sync::SDBM_File     0.17 seconds    12288 bytes
  Time for 100 writes + 100 reads for GDBM_File                  3.37 seconds    17980 bytes
  Time for 100 writes + 100 reads for DB_File                    4.45 seconds    20480 bytes

And with caching, with 50% cache hits:

  === INSERT OF 50 BYTE RECORDS ===
  Time for 100 writes + 100 reads for SDBM_File                  0.11 seconds    12288 bytes
  Time for 100 writes + 100 reads for MLDBM::Sync::SDBM_File     0.11 seconds    12288 bytes
  Time for 100 writes + 100 reads for GDBM_File                  2.49 seconds    17980 bytes
  Time for 100 writes + 100 reads for DB_File                    2.55 seconds    20480 bytes

Even for SDBM_File, this speedup is nearly 33%.


=head1 KEYS CHECKSUM

A common operation on database lookups is checksumming
the key prior to the lookup, because the key could be
very large, and all one really wants is the data it maps
to. To enable this functionality automatically with
MLDBM::Sync, just:

  my $sync_dbm_obj = tie %cache, 'MLDBM::Sync', '/tmp/syncdbm', O_CREAT|O_RDWR, 0640;
  $sync_dbm_obj->SyncKeysChecksum(1);

  !! WARNING: keys() & each() do not work on these databases
  !! as of v.03, so the developer will not be fooled into thinking
  !! the stored key values are meaningful to the calling application,
  !! and they will die() if called.
  !!
  !! This behavior could be relaxed in the future.

An example of this might be to cache an XSLT conversion,
which is typically very expensive. You have the
XML data and the XSLT data, so all you do is:

  # $xml_data, $xsl_data are strings
  my $xslt_output;
  unless ($xslt_output = $cache{$xml_data.'&&&&'.$xsl_data}) {
    ... do XSLT conversion here for $xslt_output ...
    $cache{$xml_data.'&&&&'.$xsl_data} = $xslt_output;
  }

What you save by doing this is having to create HUGE keys
to look up, which no DBM is likely to handle efficiently.
This is the same method that File::Cache uses internally to
hash its file lookups in its directories.

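
Internally, the stored key is the MD5 digest of the original key joined
with its length (see SyncChecksum() in the module source above), so the
DBM always looks up a short fixed-size key. Roughly:

  use Digest::MD5 qw(md5_hex);
  # what gets stored in place of the huge key, per SyncChecksum()
  my $stored_key = join('g', md5_hex($large_key), sprintf("%07d", length($large_key)));
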

=head1 New MLDBM::Sync::SDBM_File

SDBM_File, the default used for MLDBM and therefore MLDBM::Sync,
has a limit of 1024 bytes for the size of a record.

SDBM_File is also an order of magnitude faster than DB_File or
GDBM_File for the small records used with MLDBM::Sync, because its
tie()/untie() to the dbm is much faster. Therefore, bundled with the
MLDBM::Sync release is an MLDBM::Sync::SDBM_File layer which works
around this 1024 byte limit. To use it, just:

  use MLDBM qw(MLDBM::Sync::SDBM_File);

It works by breaking up the STORE() values into small 128
byte segments, and spreading those segments across many records,
creating a virtual record layer. It also uses Compress::Zlib
to compress STORED data, reducing the number of these 128 byte
records. In benchmarks, 128 byte record segments seemed to be a
sweet spot for space/time efficiency, as SDBM_File created
very bloated *.pag files for 128+ byte records.

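
The segment-and-reassemble idea can be sketched in plain Perl as
follows. This is an illustration only, with hypothetical helper names
and key naming; the actual on-disk layout is internal to
MLDBM::Sync::SDBM_File.

  use Compress::Zlib;

  my $SEGMENT_SIZE = 128;

  # hypothetical helpers, for illustration only
  sub store_segments {
      my($db, $key, $value) = @_;           # $db: hash ref tied to SDBM_File
      my $data   = compress($value);        # compress first, fewer segments to write
      my @chunks = unpack("(a$SEGMENT_SIZE)*", $data);
      $db->{"$key\0n"}  = scalar @chunks;   # remember how many segments were written
      $db->{"$key\0$_"} = $chunks[$_] for 0 .. $#chunks;
  }

  sub fetch_segments {
      my($db, $key) = @_;
      my $n = $db->{"$key\0n"};
      return uncompress(join '', map { $db->{"$key\0$_"} } 0 .. $n - 1);
  }
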

=head1 BENCHMARKS

In the distribution ./bench directory is a bench_sync.pl script
that can benchmark using the various DBMs with MLDBM::Sync.

The MLDBM::Sync::SDBM_File DBM is special because it uses
SDBM_File for fast small inserts, but slows down linearly
with the size of the data being inserted and read.

The results for a dual PIII-450 running Linux 2.4.7, with an ext3 file
system (blocksize 4096) mounted async on a RAID-1 pair of IDE 7200 RPM
disks, were as follows:

  === INSERT OF 50 BYTE RECORDS ===
  Time for 100 writes + 100 reads for SDBM_File                  0.16 seconds    12288 bytes
  Time for 100 writes + 100 reads for MLDBM::Sync::SDBM_File     0.19 seconds    12288 bytes
  Time for 100 writes + 100 reads for GDBM_File                  1.09 seconds    18066 bytes
  Time for 100 writes + 100 reads for DB_File                    0.67 seconds    12288 bytes
  Time for 100 writes + 100 reads for Tie::TextDir .04           0.31 seconds    13192 bytes

  === INSERT OF 500 BYTE RECORDS ===
  (skipping test for SDBM_File 100 byte limit)
  Time for 100 writes + 100 reads for MLDBM::Sync::SDBM_File     0.52 seconds   110592 bytes
  Time for 100 writes + 100 reads for GDBM_File                  1.20 seconds    63472 bytes
  Time for 100 writes + 100 reads for DB_File                    0.66 seconds    86016 bytes
  Time for 100 writes + 100 reads for Tie::TextDir .04           0.32 seconds    58192 bytes

  === INSERT OF 5000 BYTE RECORDS ===
  (skipping test for SDBM_File 100 byte limit)
  Time for 100 writes + 100 reads for MLDBM::Sync::SDBM_File     1.41 seconds  1163264 bytes
  Time for 100 writes + 100 reads for GDBM_File                  1.38 seconds   832400 bytes
  Time for 100 writes + 100 reads for DB_File                    1.21 seconds   831488 bytes
  Time for 100 writes + 100 reads for Tie::TextDir .04           0.58 seconds   508192 bytes

  === INSERT OF 20000 BYTE RECORDS ===
  (skipping test for SDBM_File 100 byte limit)
  (skipping test for MLDBM::Sync db size > 1M)
  Time for 100 writes + 100 reads for GDBM_File                  2.23 seconds  2063912 bytes
  Time for 100 writes + 100 reads for DB_File                    1.89 seconds  2060288 bytes
  Time for 100 writes + 100 reads for Tie::TextDir .04           1.26 seconds  2008192 bytes

  === INSERT OF 50000 BYTE RECORDS ===
  (skipping test for SDBM_File 100 byte limit)
  (skipping test for MLDBM::Sync db size > 1M)
  Time for 100 writes + 100 reads for GDBM_File                  3.66 seconds  5337944 bytes
  Time for 100 writes + 100 reads for DB_File                    3.64 seconds  5337088 bytes
  Time for 100 writes + 100 reads for Tie::TextDir .04           2.80 seconds  5008192 bytes

=head1 AUTHORS

Copyright (c) 2001-2002 Joshua Chamas, Chamas Enterprises Inc. All rights reserved.
Sponsored by development on NodeWorks http://www.nodeworks.com and Apache::ASP
http://www.apache-asp.org

This program is free software; you can redistribute it
and/or modify it under the same terms as Perl itself.

=head1 SEE ALSO

MLDBM(3), SDBM_File(3), DB_File(3), GDBM_File(3)