Skip to content

Commit

Permalink
Refactor/test volatile (#2039)
Browse files Browse the repository at this point in the history
* refactor: short and log tests
* refactor: properly start nodes
* refactor: return undef when no virtual manager in localhost
  • Loading branch information
frankiejol committed Apr 17, 2024
1 parent c09d0ac commit e3ee172
Show file tree
Hide file tree
Showing 7 changed files with 128 additions and 56 deletions.
12 changes: 9 additions & 3 deletions lib/Ravada.pm
Original file line number Diff line number Diff line change
Expand Up @@ -5684,8 +5684,11 @@ sub _cmd_shutdown_node($self, $request) {

sub _cmd_start_node($self, $request) {
    my $id_node = $request->args('id_node');

    # Opening the node may die (e.g. the virtual manager is unreachable)
    # or return undef, so guard the whole attempt with eval.
    my $node;
    eval {
        $node = Ravada::VM->open($id_node);
        $node->start() if $node;
    };
    warn $@ if $@;    # report the failure instead of swallowing it silently

    # Fall back to Wake-on-LAN when the node could not be reached through
    # its virtual manager.
    Ravada::VM::_wake_on_lan($id_node) if !$node;
}

sub _cmd_connect_node($self, $request) {
Expand Down Expand Up @@ -6505,7 +6508,10 @@ sub search_vm {
);
$sth->execute($type, $host);
my ($id) = $sth->fetchrow();
return Ravada::VM->open($id) if $id;
my $vm;
$vm = Ravada::VM->open($id) if $id;
return if $host eq 'localhost' && $vm && !$vm->vm;

return if $host ne 'localhost';

my $vms = $self->_create_vm($type);
Expand Down
1 change: 1 addition & 0 deletions lib/Ravada/Request.pm
Original file line number Diff line number Diff line change
Expand Up @@ -253,6 +253,7 @@ our %COMMAND = (
,priority => 4
,commands => ['shutdown','shutdown_now', 'enforce_limits', 'set_time'
,'remove_domain', 'remove', 'refresh_machine_ports'
,'connect_node','start_node','shutdown_node'
]
}

Expand Down
38 changes: 28 additions & 10 deletions lib/Ravada/VM.pm
Original file line number Diff line number Diff line change
Expand Up @@ -234,8 +234,13 @@ sub open {
$args{security} = decode_json($row->{security}) if $row->{security};

my $vm = $self->new(%args);
return if !$vm || !$vm->vm;
$VM{$args{id}} = $vm unless $args{readonly};

my $internal_vm;
eval {
$internal_vm = $vm->vm;
};
$VM{$args{id}} = $vm unless $args{readonly} || !$internal_vm;
return if $self->is_local && !$internal_vm;
return $vm;

}
Expand Down Expand Up @@ -1283,9 +1288,10 @@ Returns wether this virtual manager is in the local host
=cut

sub is_local($self) {
    # A virtual manager is local when no host is configured or the host
    # is the loopback address.
    # Bug fix: the loopback literal was '127.0.0,1' (comma instead of
    # dot), so a VM configured with 127.0.0.1 was never detected as local.
    return 1 if !$self->host
        || $self->host eq 'localhost'
        || $self->host eq '127.0.0.1'
    ;
    return 0;
}

Expand Down Expand Up @@ -1527,7 +1533,8 @@ sub _around_create_network($orig, $self,$data, $id_owner, $request=undef) {
my ($found) = grep { $_->{$field} eq $data->{$field} }
$self->list_virtual_networks();

die "Error: network $field=$data->{$field} already exists\n"
die "Error: network $field=$data->{$field} already exists in "
.$self->name."\n"
if $found;
}

Expand Down Expand Up @@ -1655,7 +1662,9 @@ sub is_active($self, $force=0) {
sub _do_is_active($self, $force=undef) {
my $ret = 0;
if ( $self->is_local ) {
eval {
$ret = 1 if $self->vm;
};
} else {
my @ping_args = ();
@ping_args = (undef,0) if $force; # no cache
Expand Down Expand Up @@ -2398,17 +2407,26 @@ sub _store_mac_address($self, $force=0 ) {
}
}

sub _wake_on_lan( $self ) {
return if $self->is_local;
sub _wake_on_lan( $self=undef ) {
return if $self && ref($self) && $self->is_local;

die "Error: I don't know the MAC address for node ".$self->name
if !$self->_data('mac');
my ($mac_addr, $id_node);
if (ref($self)) {
$mac_addr = $self->_data('mac');
$id_node = $self->id;
} else {
$id_node = $self;
my $sth = $$CONNECTOR->dbh->prepare("SELECT mac FROM vms WHERE id=?");
$sth->execute($id_node);
$mac_addr = $sth->fetchrow();
}
die "Error: I don't know the MAC address for node $id_node"
if !$mac_addr;

my $sock = new IO::Socket::INET(Proto=>'udp', Timeout => 60)
or die "Error: I can't create an UDP socket";
my $host = '255.255.255.255';
my $port = 9;
my $mac_addr = $self->_data('mac');

my $ip_addr = inet_aton($host);
my $sock_addr = sockaddr_in($port, $ip_addr);
Expand Down
3 changes: 2 additions & 1 deletion lib/Ravada/VM/Void.pm
Original file line number Diff line number Diff line change
Expand Up @@ -471,7 +471,8 @@ sub create_network($self, $data, $id_owner=undef, $request=undef) {

sub remove_network($self, $name) {
    # Remove the on-disk YAML definition of a virtual network.
    # Silently a no-op when the network file does not exist; removal is
    # delegated to remove_file() so it also works on remote nodes
    # (a plain unlink would only work on the local filesystem).
    my $file_out = $self->dir_img."/networks/$name.yml";
    return if !$self->file_exists($file_out);
    $self->remove_file($file_out);
}


Expand Down
3 changes: 3 additions & 0 deletions public/js/ravada.js
Original file line number Diff line number Diff line change
Expand Up @@ -355,6 +355,9 @@
};

$scope.topology_changed = function() {
if (!$scope.showmachine.hardware['cpu']) {
return;
}
var cpu = $scope.showmachine.hardware.cpu[0];
var item = cpu.cpu.topology;
if(typeof(item) == undefined || !item) {
Expand Down
9 changes: 6 additions & 3 deletions t/lib/Test/Ravada.pm
Original file line number Diff line number Diff line change
Expand Up @@ -671,7 +671,10 @@ sub _discover() {
uid => user_admin->id
,id_vm => $id_vm
);
wait_request();
for ( 1 .. 10 ) {
wait_request();
last if $req->status('done');
}
my $out = $req->output;
warn $req->error if $req->error;
next if !$out;
Expand Down Expand Up @@ -1379,7 +1382,7 @@ sub wait_request {
my $error = ($req->error or '');
next if $error =~ /waiting for processes/i;
if ($req->command =~ m{rsync_back|set_base_vm|start}) {
like($error,qr{^($|.*port \d+ already used|rsync done)}) or confess $req->command;
like($error,qr{^($|.*port \d+ already used|.*rsync)}) or confess $req->command;
} elsif($req->command eq 'refresh_machine_ports') {
like($error,qr{^($|.*is not up|.*has ports down|nc: |Connection)});
$req->status('done');
Expand All @@ -1389,7 +1392,7 @@ sub wait_request {
like($error,qr{^($|.*compacted)});
} elsif($req->command eq 'refresh_machine') {
like($error,qr{^($|.*port.*already used|.*Domain not found)});
} elsif($req->command eq 'force_shutdown') {
} elsif($req->command =~ /shutdown/) {
like($error,qr{^($|.*Unknown domain)});
} elsif($req->command eq 'connect_node') {
like($error,qr{^($|Connection OK)});
Expand Down
Loading

0 comments on commit e3ee172

Please sign in to comment.