my $dryrun          = 0;    # just show the build order; do not actually build
my $help            = 0;    # show help information
my $keepgoing       = "on"; # if a package build fails, do not abort; keep going
my $fail_fast       = 0;    # stop the build immediately if one of the packages fails
my $clean_repos     = 0;    # clean the corresponding local rpm repos
my $create_baselibs = 0;    # create baselibs packages if baselibs.conf exists
my $skip_srcrpm     = 0;    # don't generate a source rpm package when set to 1
"vm-swap=s" => \$vmswapsize,
"disable-debuginfo" => \$disable_debuginfo,
"depends" => \$depends,
- "enable-cluster" => \$enable_cluster,
- "max-partitions" => \$max_partitions,
+ "enable-cluster" => \$enable_cluster,
+ "max-partitions" => \$max_partitions,
);
if ( $help ) {
}
@s = split(' ', $rec{$packs{$pack}} || '');
while (@s) {
- $s = shift @s;
- next if !$dofileprovides && $s =~ /^\//;
- if ($s =~ /^rpmlib\(/) {
- splice(@s, 0, 2);
- next;
- }
- push @rec, $s;
- splice(@s, 0, 2) if @s && $s[0] =~ /^[<=>]/;
+ $s = shift @s;
+ next if !$dofileprovides && $s =~ /^\//;
+ if ($s =~ /^rpmlib\(/) {
+ splice(@s, 0, 2);
+ next;
+ }
+ push @rec, $s;
+ splice(@s, 0, 2) if @s && $s[0] =~ /^[<=>]/;
}
$r->{'provides'} = \@pr;
$r->{'requires'} = \@re;
# Use BFS (breadth-first search) to solve the topological-sorting problem.
#---------------------------------------------------------------------
# Print the packages scheduled at each dependency level, one line per level,
# in the form "level: N:  pkg1 pkg2 ...".  Walks the file-level
# %packages_level hash from level 0 upward until a level has no entry.
sub print_level_packages {
    for (my $lvl = 0; defined $packages_level{$lvl}; $lvl++) {
        print "level: $lvl: ";
        print " $_" for @{$packages_level{$lvl}};
        print "\n";
    }
}
sub get_top_order {
my $level = 0;
# pkgrddeps => pkgrdeps
my @top_order = get_top_order();
if ($get_order == 0) {
- @build_order = @top_order;
- $get_order = 1;
+ @build_order = @top_order;
+ $get_order = 1;
}
%pkgdeps = ();
%pkgrdeps = ();
foreach my $p (keys %to_build) {
- push @packs, $p;
- $pdeps{$p} = \@{$pkgdeps{$p}};
+ push @packs, $p;
+ $pdeps{$p} = \@{$pkgdeps{$p}};
}
@packs = BSSolv::depsort(\%pdeps, undef, undef, @packs);
}
# Handle an exception caught around a Kafka operation.
#
# $error - the caught exception (typically $_ from a Try::Tiny catch block).
#
# Kafka connection and IO errors are treated as transient: log a warning and
# sleep one second so the caller's surrounding loop can retry.  Any other
# Kafka exception is considered fatal: close the file-level $connection and
# exit the process.  Non-Kafka errors are rethrown unchanged to the caller.
sub dealWithException {
    my $error = shift;
    if ( blessed( $error ) && $error->isa( 'Kafka::Exception' ) ) {
        warn 'Error: (', $error->code, ') ', $error->message, "\n";
        if ( $error->isa( 'Kafka::Exception::Connection' ) ) {
            # Specific treatment for 'Kafka::Connection' class errors.
            # FIX: corrected typo "execption" -> "exception" in the message.
            warn 'kafka connection exception, wait for retry...';
            sleep(1);
        } elsif ( $error->isa( 'Kafka::Exception::IO' ) ) {
            # Specific treatment for 'Kafka::IO' class errors
            warn 'kafka io exception, wait for retry...';
            sleep(1);
        } else {
            # Unknown Kafka error: give up and terminate the build driver.
            warn 'other kafka exception, exit...';
            $connection->close;
            exit;
        }
    } else {
        # Not a Kafka exception at all -- propagate to the caller.
        die $error;
    }
}
# Packages that are routed to the dedicated 'tizen-unified-large-packages'
# Kafka topic by writeToKafka instead of the regular 'tizen-unified' topic
# (presumably because their builds are unusually heavy -- TODO confirm).
my @special_packages = qw(
    org.tizen.nlp.service
    chromium-efl
    tensorflow
    capi-media-codec
    odroid-linux-kernel
    rpi3-linux-kernel
    com.samsung.dali-demo
    linux-3.10-sc7730
    linux-3.4-exynos3250
    bcc-tools
);
# Publish every package of the given level to Kafka for the build workers.
#
# $cur_level - index into the file-level %packages_level hash.
#
# Packages on the @special_packages list go to the
# 'tizen-unified-large-packages' topic; all others are spread round-robin
# across $max_partitions partitions of the 'tizen-unified' topic.  Kafka
# errors are routed through dealWithException.
sub writeToKafka {
    my $cur_level = shift;
    my $partition = 0;
    for my $package (@{$packages_level{$cur_level}}) {
        my $is_special = grep { $_ eq $package } @special_packages;
        try {
            if ($is_special) {
                $producer->send('tizen-unified-large-packages', $partition, "$package");
            } else {
                $producer->send('tizen-unified', $partition, "$package");
                # Only regular sends advance the round-robin partition counter.
                $partition = ($partition + 1) % ($max_partitions);
            }
        } catch {
            dealWithException($_);
        };
    }
}
# Return the latest (end) offset of $topic/$partition as reported by the
# file-level $consumer, or undef when the lookup fails with an error that
# dealWithException handled (it logs and may sleep or exit).
sub getCurOffset {
    my ($topic, $partition) = @_;
    my $latest;
    try {
        $latest = $consumer->offset_latest($topic, $partition);
    } catch {
        dealWithException($_);
    };
    return $latest;
}
# Fetch build-status messages from the 'tizen-unified-status' topic.
#
# $partition  - partition to read from
# $cur_offset - offset to start fetching at
#
# Returns whatever $consumer->fetch returns (an arrayref of messages), or
# undef when the fetch failed and dealWithException absorbed the error.
sub getMessage {
    my ($partition, $cur_offset) = @_;
    my $fetched;
    try {
        $fetched = $consumer->fetch(
            'tizen-unified-status',
            $partition,
            $cur_offset,
            $DEFAULT_MAX_BYTES,
        );
    } catch {
        dealWithException($_);
    };
    return $fetched;
}
sub readFromKafka {
my $cur_level = shift;
my $cur_offset = getCurOffset("tizen-unified-status", $partition);
my $packageNum = @{$packages_level{$cur_level}};
my %packages = map {$_ => 1} @{$packages_level{$cur_level}};
- my $fail_num = 0;
+ my $fail_num = 0;
my $succeed_num = 0;
my $curIndex = 0;
- info("current offset: $cur_offset");
- info("current building package number: $packageNum");
+ info("current offset: $cur_offset");
+ info("current building package number: $packageNum");
while($fail_num + $succeed_num < $packageNum ) {
my $messages = getMessage($partition, $cur_offset);
foreach my $message ( @$messages ) {
if ( $message->valid ) {
$cur_offset = $message->next_offset;
- if(!exists($packages{$message->key})) {
- next;
- }
+ if(!exists($packages{$message->key})) {
+ next;
+ }
$curIndex++;
if($message->payload eq "failed") {
push(@fail_packages, $message->key);
info("prepare sources...");
read_not_export($not_export_cf);
- my @data_queue = ();
- foreach my $pack (@pre_packs) {
- if ($not_export_source == 1) {
- my $name = basename($pack->{"project_base_path"});
- my $r = grep /^$name$/, @not_export;
- if ($vmtype eq "kvm") {
- $r = 0;
- }
- if ($r) {
- info("skip export $name for accel...");
- my $specs = $pack->{"filename"};
- my $new_p;
- $new_p->{"project_base_path"} = $pack->{"project_base_path"};
- $new_p->{"packaging_dir"} = $pack->{"packaging_dir"};
- $new_p->{"upstream_branch"} = $pack->{"upstream_branch"};
- $new_p->{"upstream_tag"} = $pack->{"upstream_tag"};
- my @spec_list = split(",", $specs);
- foreach my $spec (@spec_list) {
- $new_p->{"filename"} = $spec;
- push @packs, $new_p;
- }
- } else {
- info("package $name not support skip export source");
- push @data_queue, $pack;
- }
- } else {
- push @data_queue, $pack;
- }
- }
+ my @data_queue = ();
+ foreach my $pack (@pre_packs) {
+ if ($not_export_source == 1) {
+ my $name = basename($pack->{"project_base_path"});
+ my $r = grep /^$name$/, @not_export;
+ if ($vmtype eq "kvm") {
+ $r = 0;
+ }
+ if ($r) {
+ info("skip export $name for accel...");
+ my $specs = $pack->{"filename"};
+ my $new_p;
+ $new_p->{"project_base_path"} = $pack->{"project_base_path"};
+ $new_p->{"packaging_dir"} = $pack->{"packaging_dir"};
+ $new_p->{"upstream_branch"} = $pack->{"upstream_branch"};
+ $new_p->{"upstream_tag"} = $pack->{"upstream_tag"};
+ my @spec_list = split(",", $specs);
+ foreach my $spec (@spec_list) {
+ $new_p->{"filename"} = $spec;
+ push @packs, $new_p;
+ }
+ } else {
+ info("package $name not support skip export source");
+ push @data_queue, $pack;
+ }
+ } else {
+ push @data_queue, $pack;
+ }
+ }
my $thread_num = int(sysconf(SC_NPROCESSORS_ONLN));
if ($thread_num > 28) {
}
}
} elsif($style eq 'tar') {
- File::Find::find({wanted => \&dir_wanted}, $package_path );
- if (@packs == 0) {
- error("No source package found at $package_path");
- }
+ File::Find::find({wanted => \&dir_wanted}, $package_path );
+ if (@packs == 0) {
+ error("No source package found at $package_path");
+ }
} else {
@packs = @ARGV;
if (@packs == 0) {
dealWithException($_);
};
- my $cur_level = 0;
- my $ret = 0;
+ my $cur_level = 0;
+ my $ret = 0;
if($debug) {
print_level_packages();
}
my @fail_packages;
# When every package build in the current level reports success,
# proceed to the next level's packages.
- while(defined $packages_level{$cur_level}) {
- info("current level: $cur_level");
- writeToKafka($cur_level);
- @fail_packages = readFromKafka($cur_level);
- if( @fail_packages ) {
- foreach my $p ( @fail_packages ) {
- print "$p ";
- }
- print "\n";
- error("these @fail_packages packages build failed");
- }
- else {
- info("@{$packages_level{$cur_level}} packages build succeed");
- }
- $cur_level++;
- }
- $connection->close;
- exit $ret;
+ while(defined $packages_level{$cur_level}) {
+ info("current level: $cur_level");
+ writeToKafka($cur_level);
+ @fail_packages = readFromKafka($cur_level);
+ if( @fail_packages ) {
+ foreach my $p ( @fail_packages ) {
+ print "$p ";
+ }
+ print "\n";
+ error("these @fail_packages packages build failed");
+ }
+ else {
+ info("@{$packages_level{$cur_level}} packages build succeed");
+ }
+ $cur_level++;
+ }
+ $connection->close;
+ exit $ret;
}
# Only one package needs to be built; do it directly.
}
if ($add == 1 ) {
push(@order, $name);
- last;
+ last;
}
} else {
- push(@order_clean, $name);
- }
+ push(@order_clean, $name);
+ }
+ }
+ #remove unuseful package name from build_order
+ foreach my $u_name (@order_clean) {
+ @build_order = grep { $_ ne $u_name} @build_order;
}
- #remove unuseful package name from build_order
- foreach my $u_name (@order_clean) {
- @build_order = grep { $_ ne $u_name} @build_order;
- }
# No candidate packages and all thread workers are idle, and pkgdeps
# is updated, in this case, set packages in %tmp_expansion_errors
for (; $needed && ! $TERM; $needed--) {
- my $job ;
- if (scalar (@order) != 0) {
- $job = shift(@order);
- }
- else {
- last ;
- }
+ my $job ;
+ if (scalar (@order) != 0) {
+ $job = shift(@order);
+ }
+ else {
+ last ;
+ }
my $worker = find_idle();
my $index;