| /* |
| * CDDL HEADER START |
| * |
| * The contents of this file are subject to the terms of the |
| * Common Development and Distribution License (the "License"). |
| * You may not use this file except in compliance with the License. |
| * |
| * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE |
| * or http://www.opensolaris.org/os/licensing. |
| * See the License for the specific language governing permissions |
| * and limitations under the License. |
| * |
| * When distributing Covered Code, include this CDDL HEADER in each |
| * file and include the License file at usr/src/OPENSOLARIS.LICENSE. |
| * If applicable, add the following below this CDDL HEADER, with the |
| * fields enclosed by brackets "[]" replaced with your own identifying |
| * information: Portions Copyright [yyyy] [name of copyright owner] |
| * |
| * CDDL HEADER END |
| */ |
| |
| /* |
| * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. |
| * Copyright (c) 2015, Syneto S.R.L. All rights reserved. |
| * Copyright 2016 Toomas Soome <tsoome@me.com> |
| * Copyright 2016 RackTop Systems. |
| */ |
| |
| /* |
| * graph.c - master restarter graph engine |
| * |
| * The graph engine keeps a dependency graph of all service instances on the |
| * system, as recorded in the repository. It decides when services should |
| * be brought up or down based on service states and dependencies and sends |
| * commands to restarters to effect any changes. It also executes |
| * administrator commands sent by svcadm via the repository. |
| * |
| * The graph is stored in uu_list_t *dgraph and its vertices are |
| * graph_vertex_t's, each of which has a name and an integer id unique to |
| * its name (see dict.c). A vertex's type attribute designates the type |
| * of object it represents: GVT_INST for service instances, GVT_SVC for |
| * service objects (since service instances may depend on another service, |
| * rather than service instance), GVT_FILE for files (which services may |
| * depend on), and GVT_GROUP for dependencies on multiple objects. GVT_GROUP |
| * vertices are necessary because dependency lists may have particular |
| * grouping types (require any, require all, optional, or exclude) and |
| * event-propagation characteristics. |
| * |
| * The initial graph is built by libscf_populate_graph() invoking |
| * dgraph_add_instance() for each instance in the repository. The function |
| * adds a GVT_SVC vertex for the service if one does not already exist, adds |
| * a GVT_INST vertex named by the FMRI of the instance, and sets up the edges. |
| * The resulting web of vertices & edges associated with an instance's vertex |
| * includes |
| * |
| * - an edge from the GVT_SVC vertex for the instance's service |
| * |
 * - an edge to the GVT_INST vertex of the instance's restarter, if its
| * restarter is not svc.startd |
| * |
| * - edges from other GVT_INST vertices if the instance is a restarter |
| * |
| * - for each dependency property group in the instance's "running" |
| * snapshot, an edge to a GVT_GROUP vertex named by the FMRI of the |
| * instance and the name of the property group |
| * |
| * - for each value of the "entities" property in each dependency property |
| * group, an edge from the corresponding GVT_GROUP vertex to a |
| * GVT_INST, GVT_SVC, or GVT_FILE vertex |
| * |
| * - edges from GVT_GROUP vertices for each dependent instance |
| * |
| * After the edges are set up the vertex's GV_CONFIGURED flag is set. If |
| * there are problems, or if a service is mentioned in a dependency but does |
| * not exist in the repository, the GV_CONFIGURED flag will be clear. |
| * |
| * The graph and all of its vertices are protected by the dgraph_lock mutex. |
| * See restarter.c for more information. |
| * |
| * The properties of an instance fall into two classes: immediate and |
| * snapshotted. Immediate properties should have an immediate effect when |
| * changed. Snapshotted properties should be read from a snapshot, so they |
| * only change when the snapshot changes. The immediate properties used by |
| * the graph engine are general/enabled, general/restarter, and the properties |
| * in the restarter_actions property group. Since they are immediate, they |
| * are not read out of a snapshot. The snapshotted properties used by the |
| * graph engine are those in the property groups with type "dependency" and |
| * are read out of the "running" snapshot. The "running" snapshot is created |
 * by the graph engine as soon as possible, and it is updated, along with
| * in-core copies of the data (dependency information for the graph engine) on |
| * receipt of the refresh command from svcadm. In addition, the graph engine |
| * updates the "start" snapshot from the "running" snapshot whenever a service |
| * comes online. |
| * |
 * When a DISABLE event is requested by the administrator, svc.startd shuts
 * down the dependents first before shutting down the requested service.
| * In graph_enable_by_vertex, we create a subtree that contains the dependent |
| * vertices by marking those vertices with the GV_TOOFFLINE flag. And we mark |
| * the vertex to disable with the GV_TODISABLE flag. Once the tree is created, |
| * we send the _ADMIN_DISABLE event to the leaves. The leaves will then |
| * transition from STATE_ONLINE/STATE_DEGRADED to STATE_OFFLINE/STATE_MAINT. |
| * In gt_enter_offline and gt_enter_maint if the vertex was in a subtree then |
| * we clear the GV_TOOFFLINE flag and walk the dependencies to offline the new |
| * exposed leaves. We do the same until we reach the last leaf (the one with |
| * the GV_TODISABLE flag). If the vertex to disable is also part of a larger |
| * subtree (eg. multiple DISABLE events on vertices in the same subtree) then |
| * once the first vertex is disabled (GV_TODISABLE flag is removed), we |
| * continue to propagate the offline event to the vertex's dependencies. |
| * |
| * |
| * SMF state transition notifications |
| * |
| * When an instance of a service managed by SMF changes state, svc.startd may |
 * publish a GPEC sysevent. All transitions to or from maintenance, and any
 * transition caused by a hardware error, will generate an event.
 * Other transitions will generate an event if there exist notification
 * parameters for that transition. Notification parameters are stored in the
| * SMF repository for the service/instance they refer to. System-wide |
| * notification parameters are stored in the global instance. |
 * svc.startd can be told to send events for all SMF state transitions,
 * regardless of notification parameters, by setting options/info_events_all
 * to true in restarter:default.
| * |
| * The set of transitions that generate events is cached in the |
| * dgraph_vertex_t gv_stn_tset for service/instance and in the global |
| * stn_global for the system-wide set. They are re-read when instances are |
| * refreshed. |
| * |
| * The GPEC events published by svc.startd are consumed by fmd(1M). After |
| * processing these events, fmd(1M) publishes the processed events to |
| * notification agents. The notification agents read the notification |
| * parameters from the SMF repository through libscf(3LIB) interfaces and send |
| * the notification, or not, based on those parameters. |
| * |
| * Subscription and publishing to the GPEC channels is done with the |
| * libfmevent(3LIB) wrappers fmev_[r]publish_*() and |
| * fmev_shdl_(un)subscribe(). |
| * |
| */ |
| |
| #include <sys/uadmin.h> |
| #include <sys/wait.h> |
| |
| #include <assert.h> |
| #include <errno.h> |
| #include <fcntl.h> |
| #include <fm/libfmevent.h> |
| #include <libscf.h> |
| #include <libscf_priv.h> |
| #include <librestart.h> |
| #include <libuutil.h> |
| #include <locale.h> |
| #include <poll.h> |
| #include <pthread.h> |
| #include <signal.h> |
| #include <stddef.h> |
| #include <stdio.h> |
| #include <stdlib.h> |
| #include <string.h> |
| #include <strings.h> |
| #include <sys/statvfs.h> |
| #include <sys/uadmin.h> |
| #include <zone.h> |
| #if defined(__x86) |
| #include <libbe.h> |
| #endif /* __x86 */ |
| |
| #include "startd.h" |
| #include "protocol.h" |
| |
| |
| #define MILESTONE_NONE ((graph_vertex_t *)1) |
| |
| #define CONSOLE_LOGIN_FMRI "svc:/system/console-login:default" |
| #define FS_MINIMAL_FMRI "svc:/system/filesystem/minimal:default" |
| |
| #define VERTEX_REMOVED 0 /* vertex has been freed */ |
| #define VERTEX_INUSE 1 /* vertex is still in use */ |
| |
| #define IS_ENABLED(v) ((v)->gv_flags & (GV_ENABLED | GV_ENBLD_NOOVR)) |
| |
| /* |
| * stn_global holds the tset for the system wide notification parameters. |
| * It is updated on refresh of svc:/system/svc/global:default |
| * |
| * There are two assumptions that relax the need for a mutex: |
| * 1. 32-bit value assignments are atomic |
| * 2. Its value is consumed only in one point at |
| * dgraph_state_transition_notify(). There are no test and set races. |
| * |
| * If either assumption is broken, we'll need a mutex to synchronize |
| * access to stn_global |
| */ |
| int32_t stn_global; |
/*
 * info_events_all holds a flag to override notification parameters and send
 * Information events for all state transitions.
 * The same assumptions that relax the need for a mutex apply here as well.
 */
| int info_events_all; |
| |
| /* |
| * Services in these states are not considered 'down' by the |
| * milestone/shutdown code. |
| */ |
| #define up_state(state) ((state) == RESTARTER_STATE_ONLINE || \ |
| (state) == RESTARTER_STATE_DEGRADED || \ |
| (state) == RESTARTER_STATE_OFFLINE) |
| |
| #define is_depgrp_bypassed(v) ((v->gv_type == GVT_GROUP) && \ |
| ((v->gv_depgroup == DEPGRP_EXCLUDE_ALL) || \ |
| (v->gv_restart < RERR_RESTART))) |
| |
| #define is_inst_bypassed(v) ((v->gv_type == GVT_INST) && \ |
| ((v->gv_flags & GV_TODISABLE) || \ |
| (v->gv_flags & GV_TOOFFLINE))) |
| |
| static uu_list_pool_t *graph_edge_pool, *graph_vertex_pool; |
| static uu_list_t *dgraph; |
| static pthread_mutex_t dgraph_lock; |
| |
| /* |
| * milestone indicates the current subgraph. When NULL, it is the entire |
| * graph. When MILESTONE_NONE, it is the empty graph. Otherwise, it is all |
| * services on which the target vertex depends. |
| */ |
| static graph_vertex_t *milestone = NULL; |
| static boolean_t initial_milestone_set = B_FALSE; |
| static pthread_cond_t initial_milestone_cv = PTHREAD_COND_INITIALIZER; |
| |
| /* protected by dgraph_lock */ |
| static boolean_t sulogin_thread_running = B_FALSE; |
| static boolean_t sulogin_running = B_FALSE; |
| static boolean_t console_login_ready = B_FALSE; |
| |
| /* Number of services to come down to complete milestone transition. */ |
| static uint_t non_subgraph_svcs; |
| |
| /* |
| * These variables indicate what should be done when we reach the milestone |
| * target milestone, i.e., when non_subgraph_svcs == 0. They are acted upon in |
| * dgraph_set_instance_state(). |
| */ |
| static int halting = -1; |
| static boolean_t go_single_user_mode = B_FALSE; |
| static boolean_t go_to_level1 = B_FALSE; |
| |
| /* |
| * Tracks when we started halting. |
| */ |
| static time_t halting_time = 0; |
| |
| /* |
| * This tracks the legacy runlevel to ensure we signal init and manage |
| * utmpx entries correctly. |
| */ |
| static char current_runlevel = '\0'; |
| |
| /* Number of single user threads currently running */ |
| static pthread_mutex_t single_user_thread_lock; |
| static int single_user_thread_count = 0; |
| |
| /* Statistics for dependency cycle-checking */ |
| static u_longlong_t dep_inserts = 0; |
| static u_longlong_t dep_cycle_ns = 0; |
| static u_longlong_t dep_insert_ns = 0; |
| |
| |
| static const char * const emsg_invalid_restarter = |
| "Transitioning %s to maintenance, restarter FMRI %s is invalid " |
| "(see 'svcs -xv' for details).\n"; |
| static const char * const console_login_fmri = CONSOLE_LOGIN_FMRI; |
| static const char * const single_user_fmri = SCF_MILESTONE_SINGLE_USER; |
| static const char * const multi_user_fmri = SCF_MILESTONE_MULTI_USER; |
| static const char * const multi_user_svr_fmri = SCF_MILESTONE_MULTI_USER_SERVER; |
| |
| |
| /* |
| * These services define the system being "up". If none of them can come |
| * online, then we will run sulogin on the console. Note that the install ones |
| * are for the miniroot and when installing CDs after the first. can_come_up() |
| * does the decision making, and an sulogin_thread() runs sulogin, which can be |
| * started by dgraph_set_instance_state() or single_user_thread(). |
| * |
| * NOTE: can_come_up() relies on SCF_MILESTONE_SINGLE_USER being the first |
| * entry, which is only used when booting_to_single_user (boot -s) is set. |
| * This is because when doing a "boot -s", sulogin is started from specials.c |
| * after milestone/single-user comes online, for backwards compatibility. |
| * In this case, SCF_MILESTONE_SINGLE_USER needs to be part of up_svcs |
| * to ensure sulogin will be spawned if milestone/single-user cannot be reached. |
| */ |
| static const char * const up_svcs[] = { |
| SCF_MILESTONE_SINGLE_USER, |
| CONSOLE_LOGIN_FMRI, |
| "svc:/system/install-setup:default", |
| "svc:/system/install:default", |
| NULL |
| }; |
| |
| /* This array must have an element for each non-NULL element of up_svcs[]. */ |
| static graph_vertex_t *up_svcs_p[] = { NULL, NULL, NULL, NULL }; |
| |
| /* These are for seed repository magic. See can_come_up(). */ |
| static const char * const manifest_import = SCF_INSTANCE_MI; |
| static graph_vertex_t *manifest_import_p = NULL; |
| |
| |
| static char target_milestone_as_runlevel(void); |
| static void graph_runlevel_changed(char rl, int online); |
| static int dgraph_set_milestone(const char *, scf_handle_t *, boolean_t); |
| static boolean_t should_be_in_subgraph(graph_vertex_t *v); |
| static int mark_subtree(graph_edge_t *, void *); |
| static boolean_t insubtree_dependents_down(graph_vertex_t *); |
| |
/*
 * graph_vertex_compare()
 * This function can compare either an int *id or a graph_vertex_t *gv
 * value, as the vertex id is always the first element of a
 * graph_vertex structure.
 */
| /* ARGSUSED */ |
| static int |
| graph_vertex_compare(const void *lc_arg, const void *rc_arg, void *private) |
| { |
| int lc_id = ((const graph_vertex_t *)lc_arg)->gv_id; |
| int rc_id = *(int *)rc_arg; |
| |
| if (lc_id > rc_id) |
| return (1); |
| if (lc_id < rc_id) |
| return (-1); |
| return (0); |
| } |
| |
void
graph_init()
{
	/*
	 * Create the uu_list pools backing the dependency graph: one for
	 * edges (unsorted) and one for vertices, sorted by gv_id via
	 * graph_vertex_compare().
	 */
	graph_edge_pool = startd_list_pool_create("graph_edges",
	    sizeof (graph_edge_t), offsetof(graph_edge_t, ge_link), NULL,
	    UU_LIST_POOL_DEBUG);
	assert(graph_edge_pool != NULL);

	graph_vertex_pool = startd_list_pool_create("graph_vertices",
	    sizeof (graph_vertex_t), offsetof(graph_vertex_t, gv_link),
	    graph_vertex_compare, UU_LIST_POOL_DEBUG);
	assert(graph_vertex_pool != NULL);

	(void) pthread_mutex_init(&dgraph_lock, &mutex_attrs);
	(void) pthread_mutex_init(&single_user_thread_lock, &mutex_attrs);
	dgraph = startd_list_create(graph_vertex_pool, NULL, UU_LIST_SORTED);
	assert(dgraph != NULL);

	/*
	 * If this is a restart of svc.startd rather than initial boot,
	 * pick up the current legacy runlevel from utmpx so runlevel
	 * bookkeeping continues from the correct state.
	 */
	if (!st->st_initial)
		current_runlevel = utmpx_get_runlevel();

	log_framework(LOG_DEBUG, "Initialized graph\n");
}
| |
| static graph_vertex_t * |
| vertex_get_by_name(const char *name) |
| { |
| int id; |
| |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| id = dict_lookup_byname(name); |
| if (id == -1) |
| return (NULL); |
| |
| return (uu_list_find(dgraph, &id, NULL, NULL)); |
| } |
| |
| static graph_vertex_t * |
| vertex_get_by_id(int id) |
| { |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| if (id == -1) |
| return (NULL); |
| |
| return (uu_list_find(dgraph, &id, NULL, NULL)); |
| } |
| |
| /* |
| * Creates a new vertex with the given name, adds it to the graph, and returns |
| * a pointer to it. The graph lock must be held by this thread on entry. |
| */ |
| static graph_vertex_t * |
| graph_add_vertex(const char *name) |
| { |
| int id; |
| graph_vertex_t *v; |
| void *p; |
| uu_list_index_t idx; |
| |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| id = dict_insert(name); |
| |
| v = startd_zalloc(sizeof (*v)); |
| |
| v->gv_id = id; |
| |
| v->gv_name = startd_alloc(strlen(name) + 1); |
| (void) strcpy(v->gv_name, name); |
| |
| v->gv_dependencies = startd_list_create(graph_edge_pool, v, 0); |
| v->gv_dependents = startd_list_create(graph_edge_pool, v, 0); |
| |
| p = uu_list_find(dgraph, &id, NULL, &idx); |
| assert(p == NULL); |
| |
| uu_list_node_init(v, &v->gv_link, graph_vertex_pool); |
| uu_list_insert(dgraph, v, idx); |
| |
| return (v); |
| } |
| |
| /* |
| * Removes v from the graph and frees it. The graph should be locked by this |
| * thread, and v should have no edges associated with it. |
| */ |
| static void |
| graph_remove_vertex(graph_vertex_t *v) |
| { |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| assert(uu_list_numnodes(v->gv_dependencies) == 0); |
| assert(uu_list_numnodes(v->gv_dependents) == 0); |
| assert(v->gv_refs == 0); |
| |
| startd_free(v->gv_name, strlen(v->gv_name) + 1); |
| uu_list_destroy(v->gv_dependencies); |
| uu_list_destroy(v->gv_dependents); |
| uu_list_remove(dgraph, v); |
| |
| startd_free(v, sizeof (graph_vertex_t)); |
| } |
| |
| static void |
| graph_add_edge(graph_vertex_t *fv, graph_vertex_t *tv) |
| { |
| graph_edge_t *e, *re; |
| int r; |
| |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| e = startd_alloc(sizeof (graph_edge_t)); |
| re = startd_alloc(sizeof (graph_edge_t)); |
| |
| e->ge_parent = fv; |
| e->ge_vertex = tv; |
| |
| re->ge_parent = tv; |
| re->ge_vertex = fv; |
| |
| uu_list_node_init(e, &e->ge_link, graph_edge_pool); |
| r = uu_list_insert_before(fv->gv_dependencies, NULL, e); |
| assert(r == 0); |
| |
| uu_list_node_init(re, &re->ge_link, graph_edge_pool); |
| r = uu_list_insert_before(tv->gv_dependents, NULL, re); |
| assert(r == 0); |
| } |
| |
| static void |
| graph_remove_edge(graph_vertex_t *v, graph_vertex_t *dv) |
| { |
| graph_edge_t *e; |
| |
| for (e = uu_list_first(v->gv_dependencies); |
| e != NULL; |
| e = uu_list_next(v->gv_dependencies, e)) { |
| if (e->ge_vertex == dv) { |
| uu_list_remove(v->gv_dependencies, e); |
| startd_free(e, sizeof (graph_edge_t)); |
| break; |
| } |
| } |
| |
| for (e = uu_list_first(dv->gv_dependents); |
| e != NULL; |
| e = uu_list_next(dv->gv_dependents, e)) { |
| if (e->ge_vertex == v) { |
| uu_list_remove(dv->gv_dependents, e); |
| startd_free(e, sizeof (graph_edge_t)); |
| break; |
| } |
| } |
| } |
| |
/*
 * Remove an unconfigured GVT_INST vertex from the graph.  On entry the
 * vertex must have no dependencies, no references, and exactly one
 * dependent: the edge from its GVT_SVC service vertex.  If removing the
 * instance leaves the service vertex unreferenced, remove that as well.
 */
static void
remove_inst_vertex(graph_vertex_t *v)
{
	graph_edge_t *e;
	graph_vertex_t *sv;
	int i;

	assert(MUTEX_HELD(&dgraph_lock));
	assert(uu_list_numnodes(v->gv_dependents) == 1);
	assert(uu_list_numnodes(v->gv_dependencies) == 0);
	assert(v->gv_refs == 0);
	assert((v->gv_flags & GV_CONFIGURED) == 0);

	/* The single dependent edge leads back to the service vertex. */
	e = uu_list_first(v->gv_dependents);
	sv = e->ge_vertex;
	graph_remove_edge(sv, v);

	/* Clear any cached pointers to this vertex before freeing it. */
	for (i = 0; up_svcs[i] != NULL; ++i) {
		if (up_svcs_p[i] == v)
			up_svcs_p[i] = NULL;
	}

	if (manifest_import_p == v)
		manifest_import_p = NULL;

	graph_remove_vertex(v);

	/* Remove the service vertex too if nothing references it anymore. */
	if (uu_list_numnodes(sv->gv_dependencies) == 0 &&
	    uu_list_numnodes(sv->gv_dependents) == 0 &&
	    sv->gv_refs == 0)
		graph_remove_vertex(sv);
}
| |
| static void |
| graph_walk_dependents(graph_vertex_t *v, void (*func)(graph_vertex_t *, void *), |
| void *arg) |
| { |
| graph_edge_t *e; |
| |
| for (e = uu_list_first(v->gv_dependents); |
| e != NULL; |
| e = uu_list_next(v->gv_dependents, e)) |
| func(e->ge_vertex, arg); |
| } |
| |
| static void |
| graph_walk_dependencies(graph_vertex_t *v, |
| void (*func)(graph_vertex_t *, void *), void *arg) |
| { |
| graph_edge_t *e; |
| |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| for (e = uu_list_first(v->gv_dependencies); |
| e != NULL; |
| e = uu_list_next(v->gv_dependencies, e)) { |
| |
| func(e->ge_vertex, arg); |
| } |
| } |
| |
| /* |
| * Generic graph walking function. |
| * |
| * Given a vertex, this function will walk either dependencies |
| * (WALK_DEPENDENCIES) or dependents (WALK_DEPENDENTS) of a vertex recursively |
| * for the entire graph. It will avoid cycles and never visit the same vertex |
| * twice. |
| * |
| * We avoid traversing exclusion dependencies, because they are allowed to |
| * create cycles in the graph. When propagating satisfiability, there is no |
| * need to walk exclusion dependencies because exclude_all_satisfied() doesn't |
| * test for satisfiability. |
| * |
| * The walker takes two callbacks. The first is called before examining the |
| * dependents of each vertex. The second is called on each vertex after |
| * examining its dependents. This allows is_path_to() to construct a path only |
| * after the target vertex has been found. |
| */ |
typedef enum {
	WALK_DEPENDENTS,	/* follow gv_dependents edges */
	WALK_DEPENDENCIES	/* follow gv_dependencies edges */
} graph_walk_dir_t;

typedef int (*graph_walk_cb_t)(graph_vertex_t *, void *);

/* Per-walk state threaded through graph_walk_recurse(). */
typedef struct graph_walk_info {
	graph_walk_dir_t gi_dir;	/* walk direction */
	uchar_t *gi_visited;	/* vertex bitmap */
	int (*gi_pre)(graph_vertex_t *, void *);	/* pre-visit callback */
	void (*gi_post)(graph_vertex_t *, void *);	/* post-visit callback */
	void *gi_arg;		/* callback arg */
	int gi_ret;		/* return value */
} graph_walk_info_t;
| |
/*
 * Recursive worker for graph_walk(): visit the vertex at the far end of
 * edge e, skipping vertices already seen and exclusion groups, then
 * recurse over its dependents or dependencies according to gip->gi_dir.
 * Returns UU_WALK_NEXT to continue or UU_WALK_DONE (as dictated by the
 * pre-visit callback) to terminate the walk.
 */
static int
graph_walk_recurse(graph_edge_t *e, graph_walk_info_t *gip)
{
	uu_list_t *list;
	int r;
	graph_vertex_t *v = e->ge_vertex;
	int i;
	uint_t b;

	/* Locate this vertex's bit in the visited bitmap. */
	i = v->gv_id / 8;
	b = 1 << (v->gv_id % 8);

	/*
	 * Check to see if we've visited this vertex already.
	 */
	if (gip->gi_visited[i] & b)
		return (UU_WALK_NEXT);

	gip->gi_visited[i] |= b;

	/*
	 * Don't follow exclusions: they may legitimately form cycles.
	 */
	if (v->gv_type == GVT_GROUP && v->gv_depgroup == DEPGRP_EXCLUDE_ALL)
		return (UU_WALK_NEXT);

	/*
	 * Call pre-visit callback.  If this doesn't terminate the walk,
	 * continue search.
	 */
	if ((gip->gi_ret = gip->gi_pre(v, gip->gi_arg)) == UU_WALK_NEXT) {
		/*
		 * Recurse using appropriate list.
		 */
		if (gip->gi_dir == WALK_DEPENDENTS)
			list = v->gv_dependents;
		else
			list = v->gv_dependencies;

		r = uu_list_walk(list, (uu_walk_fn_t *)graph_walk_recurse,
		    gip, 0);
		assert(r == 0);
	}

	/*
	 * Callbacks must return either UU_WALK_NEXT or UU_WALK_DONE.
	 */
	assert(gip->gi_ret == UU_WALK_NEXT || gip->gi_ret == UU_WALK_DONE);

	/*
	 * If given a post-callback, call the function for every vertex.
	 */
	if (gip->gi_post != NULL)
		(void) gip->gi_post(v, gip->gi_arg);

	/*
	 * Preserve the callback's return value.  If the callback returns
	 * UU_WALK_DONE, then we propagate that to the caller in order to
	 * terminate the walk.
	 */
	return (gip->gi_ret);
}
| |
| static void |
| graph_walk(graph_vertex_t *v, graph_walk_dir_t dir, |
| int (*pre)(graph_vertex_t *, void *), |
| void (*post)(graph_vertex_t *, void *), void *arg) |
| { |
| graph_walk_info_t gi; |
| graph_edge_t fake; |
| size_t sz = dictionary->dict_new_id / 8 + 1; |
| |
| gi.gi_visited = startd_zalloc(sz); |
| gi.gi_pre = pre; |
| gi.gi_post = post; |
| gi.gi_arg = arg; |
| gi.gi_dir = dir; |
| gi.gi_ret = 0; |
| |
| /* |
| * Fake up an edge for the first iteration |
| */ |
| fake.ge_vertex = v; |
| (void) graph_walk_recurse(&fake, &gi); |
| |
| startd_free(gi.gi_visited, sz); |
| } |
| |
/* State for the is_path_to() walk; see child_pre() and child_post(). */
typedef struct child_search {
	int id;		/* id of vertex to look for */
	uint_t depth;	/* recursion depth */
	/*
	 * While the vertex is not found, path is NULL. After the search, if
	 * the vertex was found then path should point to a -1-terminated
	 * array of vertex id's which constitute the path to the vertex.
	 */
	int *path;
} child_search_t;
| |
| static int |
| child_pre(graph_vertex_t *v, void *arg) |
| { |
| child_search_t *cs = arg; |
| |
| cs->depth++; |
| |
| if (v->gv_id == cs->id) { |
| cs->path = startd_alloc((cs->depth + 1) * sizeof (int)); |
| cs->path[cs->depth] = -1; |
| return (UU_WALK_DONE); |
| } |
| |
| return (UU_WALK_NEXT); |
| } |
| |
| static void |
| child_post(graph_vertex_t *v, void *arg) |
| { |
| child_search_t *cs = arg; |
| |
| cs->depth--; |
| |
| if (cs->path != NULL) |
| cs->path[cs->depth] = v->gv_id; |
| } |
| |
/*
 * Look for a path from from to to.  If one exists, returns a pointer to
 * a -1-terminated array of the integer ids of the vertices along the path.
 * If there is no path, returns NULL.
 */
| static int * |
| is_path_to(graph_vertex_t *from, graph_vertex_t *to) |
| { |
| child_search_t cs; |
| |
| cs.id = to->gv_id; |
| cs.depth = 0; |
| cs.path = NULL; |
| |
| graph_walk(from, WALK_DEPENDENCIES, child_pre, child_post, &cs); |
| |
| return (cs.path); |
| } |
| |
| /* |
| * Given an array of int's as returned by is_path_to, allocates a string of |
| * their names joined by newlines. Returns the size of the allocated buffer |
| * in *sz and frees path. |
| */ |
static void
path_to_str(int *path, char **cpp, size_t *sz)
{
	int i;
	graph_vertex_t *v;
	size_t allocd, new_allocd;
	char *new, *name;

	assert(MUTEX_HELD(&dgraph_lock));
	assert(path[0] != -1);

	/* Start with an empty string and grow it one name at a time. */
	allocd = 1;
	*cpp = startd_alloc(1);
	(*cpp)[0] = '\0';

	for (i = 0; path[i] != -1; ++i) {
		name = NULL;

		v = vertex_get_by_id(path[i]);

		if (v == NULL)
			name = "<deleted>";
		else if (v->gv_type == GVT_INST || v->gv_type == GVT_SVC)
			name = v->gv_name;

		/* Vertices of other types (e.g. GVT_GROUP) are skipped. */
		if (name != NULL) {
			/* Grow by strlen(name) plus one for the newline. */
			new_allocd = allocd + strlen(name) + 1;
			new = startd_alloc(new_allocd);
			(void) strcpy(new, *cpp);
			(void) strcat(new, name);
			(void) strcat(new, "\n");

			startd_free(*cpp, allocd);

			*cpp = new;
			allocd = new_allocd;
		}
	}

	/* Consume the path array, as documented above. */
	startd_free(path, sizeof (int) * (i + 1));

	*sz = allocd;
}
| |
| |
| /* |
| * This function along with run_sulogin() implements an exclusion relationship |
| * between system/console-login and sulogin. run_sulogin() will fail if |
| * system/console-login is online, and the graph engine should call |
| * graph_clogin_start() to bring system/console-login online, which defers the |
| * start if sulogin is running. |
| */ |
| static void |
| graph_clogin_start(graph_vertex_t *v) |
| { |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| if (sulogin_running) |
| console_login_ready = B_TRUE; |
| else |
| vertex_send_event(v, RESTARTER_EVENT_TYPE_START); |
| } |
| |
/*
 * Start hook for milestone/single-user (see special_vertices[]): record
 * the legacy 'S' runlevel transition before sending the start event.
 */
static void
graph_su_start(graph_vertex_t *v)
{
	/*
	 * /etc/inittab used to have the initial /sbin/rcS as a 'sysinit'
	 * entry with a runlevel of 'S', before jumping to the final
	 * target runlevel (as set in initdefault). We mimic that legacy
	 * behavior here.
	 */
	utmpx_set_runlevel('S', '0', B_FALSE);
	vertex_send_event(v, RESTARTER_EVENT_TYPE_START);
}
| |
/* Post-online hook for milestone/single-user: runlevel 'S' is up. */
static void
graph_post_su_online(void)
{
	graph_runlevel_changed('S', 1);
}
| |
/* Post-disable hook for milestone/single-user: runlevel 'S' is down. */
static void
graph_post_su_disable(void)
{
	graph_runlevel_changed('S', 0);
}
| |
/* Post-online hook for milestone/multi-user: runlevel '2' is up. */
static void
graph_post_mu_online(void)
{
	graph_runlevel_changed('2', 1);
}
| |
/* Post-disable hook for milestone/multi-user: runlevel '2' is down. */
static void
graph_post_mu_disable(void)
{
	graph_runlevel_changed('2', 0);
}
| |
/* Post-online hook for milestone/multi-user-server: runlevel '3' is up. */
static void
graph_post_mus_online(void)
{
	graph_runlevel_changed('3', 1);
}
| |
/* Post-disable hook for milestone/multi-user-server: runlevel '3' down. */
static void
graph_post_mus_disable(void)
{
	graph_runlevel_changed('3', 0);
}
| |
/*
 * Table of vertices with special handling.  start_f is invoked in place of
 * the default start event (see graph_clogin_start()/graph_su_start());
 * post_online_f and post_disable_f presumably run after the service comes
 * online or is disabled, respectively -- the call sites are elsewhere in
 * this file.  The table is terminated by a NULL name.
 */
static struct special_vertex_info {
	const char	*name;
	void		(*start_f)(graph_vertex_t *);
	void		(*post_online_f)(void);
	void		(*post_disable_f)(void);
} special_vertices[] = {
	{ CONSOLE_LOGIN_FMRI, graph_clogin_start, NULL, NULL },
	{ SCF_MILESTONE_SINGLE_USER, graph_su_start,
	    graph_post_su_online, graph_post_su_disable },
	{ SCF_MILESTONE_MULTI_USER, NULL,
	    graph_post_mu_online, graph_post_mu_disable },
	{ SCF_MILESTONE_MULTI_USER_SERVER, NULL,
	    graph_post_mus_online, graph_post_mus_disable },
	{ NULL },
};
| |
| |
/*
 * Validate that event e is legal for vertex v's current state (aborting on
 * a state-machine violation) and forward it to v's restarter over its
 * event channel.
 */
void
vertex_send_event(graph_vertex_t *v, restarter_event_type_t e)
{
	switch (e) {
	case RESTARTER_EVENT_TYPE_ADD_INSTANCE:
		assert(v->gv_state == RESTARTER_STATE_UNINIT);

		/* Count this instance as an outstanding load. */
		MUTEX_LOCK(&st->st_load_lock);
		st->st_load_instances++;
		MUTEX_UNLOCK(&st->st_load_lock);
		break;

	case RESTARTER_EVENT_TYPE_ENABLE:
		log_framework(LOG_DEBUG, "Enabling %s.\n", v->gv_name);
		assert(v->gv_state == RESTARTER_STATE_UNINIT ||
		    v->gv_state == RESTARTER_STATE_DISABLED ||
		    v->gv_state == RESTARTER_STATE_MAINT);
		break;

	case RESTARTER_EVENT_TYPE_DISABLE:
	case RESTARTER_EVENT_TYPE_ADMIN_DISABLE:
		log_framework(LOG_DEBUG, "Disabling %s.\n", v->gv_name);
		assert(v->gv_state != RESTARTER_STATE_DISABLED);
		break;

	case RESTARTER_EVENT_TYPE_STOP_RESET:
	case RESTARTER_EVENT_TYPE_STOP:
		log_framework(LOG_DEBUG, "Stopping %s.\n", v->gv_name);
		assert(v->gv_state == RESTARTER_STATE_DEGRADED ||
		    v->gv_state == RESTARTER_STATE_ONLINE);
		break;

	case RESTARTER_EVENT_TYPE_START:
		log_framework(LOG_DEBUG, "Starting %s.\n", v->gv_name);
		assert(v->gv_state == RESTARTER_STATE_OFFLINE);
		break;

	/* These events carry no state precondition. */
	case RESTARTER_EVENT_TYPE_REMOVE_INSTANCE:
	case RESTARTER_EVENT_TYPE_ADMIN_DEGRADED:
	case RESTARTER_EVENT_TYPE_ADMIN_REFRESH:
	case RESTARTER_EVENT_TYPE_ADMIN_RESTART:
	case RESTARTER_EVENT_TYPE_ADMIN_MAINT_OFF:
	case RESTARTER_EVENT_TYPE_ADMIN_MAINT_ON:
	case RESTARTER_EVENT_TYPE_ADMIN_MAINT_ON_IMMEDIATE:
	case RESTARTER_EVENT_TYPE_DEPENDENCY_CYCLE:
	case RESTARTER_EVENT_TYPE_INVALID_DEPENDENCY:
		break;

	default:
#ifndef NDEBUG
		uu_warn("%s:%d: Bad event %d.\n", __FILE__, __LINE__, e);
#endif
		abort();
	}

	restarter_protocol_send_event(v->gv_name, v->gv_restarter_channel, e,
	    v->gv_reason);
}
| |
| static void |
| graph_unset_restarter(graph_vertex_t *v) |
| { |
| assert(MUTEX_HELD(&dgraph_lock)); |
| assert(v->gv_flags & GV_CONFIGURED); |
| |
| vertex_send_event(v, RESTARTER_EVENT_TYPE_REMOVE_INSTANCE); |
| |
| if (v->gv_restarter_id != -1) { |
| graph_vertex_t *rv; |
| |
| rv = vertex_get_by_id(v->gv_restarter_id); |
| graph_remove_edge(v, rv); |
| } |
| |
| v->gv_restarter_id = -1; |
| v->gv_restarter_channel = NULL; |
| } |
| |
| /* |
| * Return VERTEX_REMOVED when the vertex passed in argument is deleted from the |
| * dgraph otherwise return VERTEX_INUSE. |
| */ |
| static int |
| free_if_unrefed(graph_vertex_t *v) |
| { |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| if (v->gv_refs > 0) |
| return (VERTEX_INUSE); |
| |
| if (v->gv_type == GVT_SVC && |
| uu_list_numnodes(v->gv_dependents) == 0 && |
| uu_list_numnodes(v->gv_dependencies) == 0) { |
| graph_remove_vertex(v); |
| return (VERTEX_REMOVED); |
| } else if (v->gv_type == GVT_INST && |
| (v->gv_flags & GV_CONFIGURED) == 0 && |
| uu_list_numnodes(v->gv_dependents) == 1 && |
| uu_list_numnodes(v->gv_dependencies) == 0) { |
| remove_inst_vertex(v); |
| return (VERTEX_REMOVED); |
| } |
| |
| return (VERTEX_INUSE); |
| } |
| |
/*
 * Delete a GVT_GROUP (dependency group) vertex that has no dependents
 * left: remove each of its outgoing edges, freeing any dependency
 * vertices that become unreferenced as a result, then remove the group
 * vertex itself.
 */
static void
delete_depgroup(graph_vertex_t *v)
{
	graph_edge_t *e;
	graph_vertex_t *dv;

	assert(MUTEX_HELD(&dgraph_lock));
	assert(v->gv_type == GVT_GROUP);
	assert(uu_list_numnodes(v->gv_dependents) == 0);

	while ((e = uu_list_first(v->gv_dependencies)) != NULL) {
		dv = e->ge_vertex;

		graph_remove_edge(v, dv);

		switch (dv->gv_type) {
		case GVT_INST:		/* instance dependency */
		case GVT_SVC:		/* service dependency */
			(void) free_if_unrefed(dv);
			break;

		case GVT_FILE:		/* file dependency */
			assert(uu_list_numnodes(dv->gv_dependencies) == 0);
			if (uu_list_numnodes(dv->gv_dependents) == 0)
				graph_remove_vertex(dv);
			break;

		default:
#ifndef NDEBUG
			uu_warn("%s:%d: Unexpected node type %d", __FILE__,
			    __LINE__, dv->gv_type);
#endif
			abort();
		}
	}

	graph_remove_vertex(v);
}
| |
/*
 * uu_list_walk() callback used by delete_instance_dependencies(): delete one
 * outgoing dependency edge of an instance vertex.  ptrs[0] is the instance
 * vertex being processed; ptrs[1] is a boolean (cast through void *) saying
 * whether the edge to the instance's restarter should be removed as well.
 */
static int
delete_instance_deps_cb(graph_edge_t *e, void **ptrs)
{
	graph_vertex_t *v = ptrs[0];
	boolean_t delete_restarter_dep = (boolean_t)ptrs[1];
	graph_vertex_t *dv;

	dv = e->ge_vertex;

	/*
	 * We have four possibilities here:
	 *   - GVT_INST: restarter
	 *   - GVT_GROUP - GVT_INST: instance dependency
	 *   - GVT_GROUP - GVT_SVC - GV_INST: service dependency
	 *   - GVT_GROUP - GVT_FILE: file dependency
	 */
	switch (dv->gv_type) {
	case GVT_INST:	/* restarter */
		assert(dv->gv_id == v->gv_restarter_id);
		if (delete_restarter_dep)
			graph_remove_edge(v, dv);
		break;

	case GVT_GROUP:	/* pg dependency */
		graph_remove_edge(v, dv);
		/* The group vertex and its own targets go away too. */
		delete_depgroup(dv);
		break;

	case GVT_FILE:
		/* These are currently not direct dependencies */

	default:
#ifndef NDEBUG
		uu_warn("%s:%d: Bad vertex type %d.\n", __FILE__, __LINE__,
		    dv->gv_type);
#endif
		abort();
	}

	return (UU_WALK_NEXT);
}
| |
| static void |
| delete_instance_dependencies(graph_vertex_t *v, boolean_t delete_restarter_dep) |
| { |
| void *ptrs[2]; |
| int r; |
| |
| assert(MUTEX_HELD(&dgraph_lock)); |
| assert(v->gv_type == GVT_INST); |
| |
| ptrs[0] = v; |
| ptrs[1] = (void *)delete_restarter_dep; |
| |
| r = uu_list_walk(v->gv_dependencies, |
| (uu_walk_fn_t *)delete_instance_deps_cb, &ptrs, UU_WALK_ROBUST); |
| assert(r == 0); |
| } |
| |
| /* |
| * int graph_insert_vertex_unconfigured() |
| * Insert a vertex without sending any restarter events. If the vertex |
| * already exists or creation is successful, return a pointer to it in *vp. |
| * |
| * If type is not GVT_GROUP, dt can remain unset. |
| * |
| * Returns 0, EEXIST, or EINVAL if the arguments are invalid (i.e., fmri |
| * doesn't agree with type, or type doesn't agree with dt). |
| */ |
| static int |
| graph_insert_vertex_unconfigured(const char *fmri, gv_type_t type, |
| depgroup_type_t dt, restarter_error_t rt, graph_vertex_t **vp) |
| { |
| int r; |
| int i; |
| |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| switch (type) { |
| case GVT_SVC: |
| case GVT_INST: |
| if (strncmp(fmri, "svc:", sizeof ("svc:") - 1) != 0) |
| return (EINVAL); |
| break; |
| |
| case GVT_FILE: |
| if (strncmp(fmri, "file:", sizeof ("file:") - 1) != 0) |
| return (EINVAL); |
| break; |
| |
| case GVT_GROUP: |
| if (dt <= 0 || rt < 0) |
| return (EINVAL); |
| break; |
| |
| default: |
| #ifndef NDEBUG |
| uu_warn("%s:%d: Unknown type %d.\n", __FILE__, __LINE__, type); |
| #endif |
| abort(); |
| } |
| |
| *vp = vertex_get_by_name(fmri); |
| if (*vp != NULL) |
| return (EEXIST); |
| |
| *vp = graph_add_vertex(fmri); |
| |
| (*vp)->gv_type = type; |
| (*vp)->gv_depgroup = dt; |
| (*vp)->gv_restart = rt; |
| |
| (*vp)->gv_flags = 0; |
| (*vp)->gv_state = RESTARTER_STATE_NONE; |
| |
| for (i = 0; special_vertices[i].name != NULL; ++i) { |
| if (strcmp(fmri, special_vertices[i].name) == 0) { |
| (*vp)->gv_start_f = special_vertices[i].start_f; |
| (*vp)->gv_post_online_f = |
| special_vertices[i].post_online_f; |
| (*vp)->gv_post_disable_f = |
| special_vertices[i].post_disable_f; |
| break; |
| } |
| } |
| |
| (*vp)->gv_restarter_id = -1; |
| (*vp)->gv_restarter_channel = 0; |
| |
| if (type == GVT_INST) { |
| char *sfmri; |
| graph_vertex_t *sv; |
| |
| sfmri = inst_fmri_to_svc_fmri(fmri); |
| sv = vertex_get_by_name(sfmri); |
| if (sv == NULL) { |
| r = graph_insert_vertex_unconfigured(sfmri, GVT_SVC, 0, |
| 0, &sv); |
| assert(r == 0); |
| } |
| startd_free(sfmri, max_scf_fmri_size); |
| |
| graph_add_edge(sv, *vp); |
| } |
| |
| /* |
| * If this vertex is in the subgraph, mark it as so, for both |
| * GVT_INST and GVT_SERVICE verteces. |
| * A GVT_SERVICE vertex can only be in the subgraph if another instance |
| * depends on it, in which case it's already been added to the graph |
| * and marked as in the subgraph (by refresh_vertex()). If a |
| * GVT_SERVICE vertex was freshly added (by the code above), it means |
| * that it has no dependents, and cannot be in the subgraph. |
| * Regardless of this, we still check that gv_flags includes |
| * GV_INSUBGRAPH in the event that future behavior causes the above |
| * code to add a GVT_SERVICE vertex which should be in the subgraph. |
| */ |
| |
| (*vp)->gv_flags |= (should_be_in_subgraph(*vp)? GV_INSUBGRAPH : 0); |
| |
| return (0); |
| } |
| |
| /* |
| * Returns 0 on success or ELOOP if the dependency would create a cycle. |
| */ |
| static int |
| graph_insert_dependency(graph_vertex_t *fv, graph_vertex_t *tv, int **pathp) |
| { |
| hrtime_t now; |
| |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| /* cycle detection */ |
| now = gethrtime(); |
| |
| /* Don't follow exclusions. */ |
| if (!(fv->gv_type == GVT_GROUP && |
| fv->gv_depgroup == DEPGRP_EXCLUDE_ALL)) { |
| *pathp = is_path_to(tv, fv); |
| if (*pathp) |
| return (ELOOP); |
| } |
| |
| dep_cycle_ns += gethrtime() - now; |
| ++dep_inserts; |
| now = gethrtime(); |
| |
| graph_add_edge(fv, tv); |
| |
| dep_insert_ns += gethrtime() - now; |
| |
| /* Check if the dependency adds the "to" vertex to the subgraph */ |
| tv->gv_flags |= (should_be_in_subgraph(tv) ? GV_INSUBGRAPH : 0); |
| |
| return (0); |
| } |
| |
| static int |
| inst_running(graph_vertex_t *v) |
| { |
| assert(v->gv_type == GVT_INST); |
| |
| if (v->gv_state == RESTARTER_STATE_ONLINE || |
| v->gv_state == RESTARTER_STATE_DEGRADED) |
| return (1); |
| |
| return (0); |
| } |
| |
| /* |
| * The dependency evaluation functions return |
| * 1 - dependency satisfied |
| * 0 - dependency unsatisfied |
| * -1 - dependency unsatisfiable (without administrator intervention) |
| * |
| * The functions also take a boolean satbility argument. When true, the |
| * functions may recurse in order to determine satisfiability. |
| */ |
| static int require_any_satisfied(graph_vertex_t *, boolean_t); |
| static int dependency_satisfied(graph_vertex_t *, boolean_t); |
| |
| /* |
| * A require_all dependency is unsatisfied if any elements are unsatisfied. It |
| * is unsatisfiable if any elements are unsatisfiable. |
| */ |
| static int |
| require_all_satisfied(graph_vertex_t *groupv, boolean_t satbility) |
| { |
| graph_edge_t *edge; |
| int i; |
| boolean_t any_unsatisfied; |
| |
| if (uu_list_numnodes(groupv->gv_dependencies) == 0) |
| return (1); |
| |
| any_unsatisfied = B_FALSE; |
| |
| for (edge = uu_list_first(groupv->gv_dependencies); |
| edge != NULL; |
| edge = uu_list_next(groupv->gv_dependencies, edge)) { |
| i = dependency_satisfied(edge->ge_vertex, satbility); |
| if (i == 1) |
| continue; |
| |
| log_framework2(LOG_DEBUG, DEBUG_DEPENDENCIES, |
| "require_all(%s): %s is unsatisfi%s.\n", groupv->gv_name, |
| edge->ge_vertex->gv_name, i == 0 ? "ed" : "able"); |
| |
| if (!satbility) |
| return (0); |
| |
| if (i == -1) |
| return (-1); |
| |
| any_unsatisfied = B_TRUE; |
| } |
| |
| return (any_unsatisfied ? 0 : 1); |
| } |
| |
| /* |
| * A require_any dependency is satisfied if any element is satisfied. It is |
| * satisfiable if any element is satisfiable. |
| */ |
| static int |
| require_any_satisfied(graph_vertex_t *groupv, boolean_t satbility) |
| { |
| graph_edge_t *edge; |
| int s; |
| boolean_t satisfiable; |
| |
| if (uu_list_numnodes(groupv->gv_dependencies) == 0) |
| return (1); |
| |
| satisfiable = B_FALSE; |
| |
| for (edge = uu_list_first(groupv->gv_dependencies); |
| edge != NULL; |
| edge = uu_list_next(groupv->gv_dependencies, edge)) { |
| s = dependency_satisfied(edge->ge_vertex, satbility); |
| |
| if (s == 1) |
| return (1); |
| |
| log_framework2(LOG_DEBUG, DEBUG_DEPENDENCIES, |
| "require_any(%s): %s is unsatisfi%s.\n", |
| groupv->gv_name, edge->ge_vertex->gv_name, |
| s == 0 ? "ed" : "able"); |
| |
| if (satbility && s == 0) |
| satisfiable = B_TRUE; |
| } |
| |
| return ((!satbility || satisfiable) ? 0 : -1); |
| } |
| |
| /* |
| * An optional_all dependency only considers elements which are configured, |
| * enabled, and not in maintenance. If any are unsatisfied, then the dependency |
| * is unsatisfied. |
| * |
| * Offline dependencies which are waiting for a dependency to come online are |
| * unsatisfied. Offline dependences which cannot possibly come online |
| * (unsatisfiable) are always considered satisfied. |
| */ |
| static int |
| optional_all_satisfied(graph_vertex_t *groupv, boolean_t satbility) |
| { |
| graph_edge_t *edge; |
| graph_vertex_t *v; |
| boolean_t any_qualified; |
| boolean_t any_unsatisfied; |
| int i; |
| |
| any_qualified = B_FALSE; |
| any_unsatisfied = B_FALSE; |
| |
| for (edge = uu_list_first(groupv->gv_dependencies); |
| edge != NULL; |
| edge = uu_list_next(groupv->gv_dependencies, edge)) { |
| v = edge->ge_vertex; |
| |
| switch (v->gv_type) { |
| case GVT_INST: |
| /* Skip missing instances */ |
| if ((v->gv_flags & GV_CONFIGURED) == 0) |
| continue; |
| |
| if (v->gv_state == RESTARTER_STATE_MAINT) |
| continue; |
| |
| any_qualified = B_TRUE; |
| if (v->gv_state == RESTARTER_STATE_OFFLINE || |
| v->gv_state == RESTARTER_STATE_DISABLED) { |
| /* |
| * For offline/disabled dependencies, |
| * treat unsatisfiable as satisfied. |
| */ |
| i = dependency_satisfied(v, B_TRUE); |
| if (i == -1) |
| i = 1; |
| } else { |
| i = dependency_satisfied(v, satbility); |
| } |
| break; |
| |
| case GVT_FILE: |
| any_qualified = B_TRUE; |
| i = dependency_satisfied(v, satbility); |
| |
| break; |
| |
| case GVT_SVC: { |
| any_qualified = B_TRUE; |
| i = optional_all_satisfied(v, satbility); |
| |
| break; |
| } |
| |
| case GVT_GROUP: |
| default: |
| #ifndef NDEBUG |
| uu_warn("%s:%d: Unexpected vertex type %d.\n", __FILE__, |
| __LINE__, v->gv_type); |
| #endif |
| abort(); |
| } |
| |
| if (i == 1) |
| continue; |
| |
| log_framework2(LOG_DEBUG, DEBUG_DEPENDENCIES, |
| "optional_all(%s): %s is unsatisfi%s.\n", groupv->gv_name, |
| v->gv_name, i == 0 ? "ed" : "able"); |
| |
| if (!satbility) |
| return (0); |
| if (i == -1) |
| return (-1); |
| any_unsatisfied = B_TRUE; |
| } |
| |
| if (!any_qualified) |
| return (1); |
| |
| return (any_unsatisfied ? 0 : 1); |
| } |
| |
| /* |
| * An exclude_all dependency is unsatisfied if any non-service element is |
| * satisfied or any service instance which is configured, enabled, and not in |
| * maintenance is satisfied. Usually when unsatisfied, it is also |
| * unsatisfiable. |
| */ |
| #define LOG_EXCLUDE(u, v) \ |
| log_framework2(LOG_DEBUG, DEBUG_DEPENDENCIES, \ |
| "exclude_all(%s): %s is satisfied.\n", \ |
| (u)->gv_name, (v)->gv_name) |
| |
| /* ARGSUSED */ |
| static int |
| exclude_all_satisfied(graph_vertex_t *groupv, boolean_t satbility) |
| { |
| graph_edge_t *edge, *e2; |
| graph_vertex_t *v, *v2; |
| |
| for (edge = uu_list_first(groupv->gv_dependencies); |
| edge != NULL; |
| edge = uu_list_next(groupv->gv_dependencies, edge)) { |
| v = edge->ge_vertex; |
| |
| switch (v->gv_type) { |
| case GVT_INST: |
| if ((v->gv_flags & GV_CONFIGURED) == 0) |
| continue; |
| |
| switch (v->gv_state) { |
| case RESTARTER_STATE_ONLINE: |
| case RESTARTER_STATE_DEGRADED: |
| LOG_EXCLUDE(groupv, v); |
| return (v->gv_flags & GV_ENABLED ? -1 : 0); |
| |
| case RESTARTER_STATE_OFFLINE: |
| case RESTARTER_STATE_UNINIT: |
| LOG_EXCLUDE(groupv, v); |
| return (0); |
| |
| case RESTARTER_STATE_DISABLED: |
| case RESTARTER_STATE_MAINT: |
| continue; |
| |
| default: |
| #ifndef NDEBUG |
| uu_warn("%s:%d: Unexpected vertex state %d.\n", |
| __FILE__, __LINE__, v->gv_state); |
| #endif |
| abort(); |
| } |
| /* NOTREACHED */ |
| |
| case GVT_SVC: |
| break; |
| |
| case GVT_FILE: |
| if (!file_ready(v)) |
| continue; |
| LOG_EXCLUDE(groupv, v); |
| return (-1); |
| |
| case GVT_GROUP: |
| default: |
| #ifndef NDEBUG |
| uu_warn("%s:%d: Unexpected vertex type %d.\n", __FILE__, |
| __LINE__, v->gv_type); |
| #endif |
| abort(); |
| } |
| |
| /* v represents a service */ |
| if (uu_list_numnodes(v->gv_dependencies) == 0) |
| continue; |
| |
| for (e2 = uu_list_first(v->gv_dependencies); |
| e2 != NULL; |
| e2 = uu_list_next(v->gv_dependencies, e2)) { |
| v2 = e2->ge_vertex; |
| assert(v2->gv_type == GVT_INST); |
| |
| if ((v2->gv_flags & GV_CONFIGURED) == 0) |
| continue; |
| |
| switch (v2->gv_state) { |
| case RESTARTER_STATE_ONLINE: |
| case RESTARTER_STATE_DEGRADED: |
| LOG_EXCLUDE(groupv, v2); |
| return (v2->gv_flags & GV_ENABLED ? -1 : 0); |
| |
| case RESTARTER_STATE_OFFLINE: |
| case RESTARTER_STATE_UNINIT: |
| LOG_EXCLUDE(groupv, v2); |
| return (0); |
| |
| case RESTARTER_STATE_DISABLED: |
| case RESTARTER_STATE_MAINT: |
| continue; |
| |
| default: |
| #ifndef NDEBUG |
| uu_warn("%s:%d: Unexpected vertex type %d.\n", |
| __FILE__, __LINE__, v2->gv_type); |
| #endif |
| abort(); |
| } |
| } |
| } |
| |
| return (1); |
| } |
| |
| /* |
| * int instance_satisfied() |
| * Determine if all the dependencies are satisfied for the supplied instance |
| * vertex. Return 1 if they are, 0 if they aren't, and -1 if they won't be |
| * without administrator intervention. |
| */ |
| static int |
| instance_satisfied(graph_vertex_t *v, boolean_t satbility) |
| { |
| assert(v->gv_type == GVT_INST); |
| assert(!inst_running(v)); |
| |
| return (require_all_satisfied(v, satbility)); |
| } |
| |
| /* |
| * Decide whether v can satisfy a dependency. v can either be a child of |
| * a group vertex, or of an instance vertex. |
| */ |
| static int |
| dependency_satisfied(graph_vertex_t *v, boolean_t satbility) |
| { |
| switch (v->gv_type) { |
| case GVT_INST: |
| if ((v->gv_flags & GV_CONFIGURED) == 0) { |
| if (v->gv_flags & GV_DEATHROW) { |
| /* |
| * A dependency on an instance with GV_DEATHROW |
| * flag is always considered as satisfied. |
| */ |
| return (1); |
| } |
| return (-1); |
| } |
| |
| /* |
| * Vertices may be transitioning so we try to figure out if |
| * the end state is likely to satisfy the dependency instead |
| * of assuming the dependency is unsatisfied/unsatisfiable. |
| * |
| * Support for optional_all dependencies depends on us getting |
| * this right because unsatisfiable dependencies are treated |
| * as being satisfied. |
| */ |
| switch (v->gv_state) { |
| case RESTARTER_STATE_ONLINE: |
| case RESTARTER_STATE_DEGRADED: |
| if (v->gv_flags & GV_TODISABLE) |
| return (-1); |
| if (v->gv_flags & GV_TOOFFLINE) |
| return (0); |
| return (1); |
| |
| case RESTARTER_STATE_OFFLINE: |
| if (!satbility || v->gv_flags & GV_TODISABLE) |
| return (satbility ? -1 : 0); |
| return (instance_satisfied(v, satbility) != -1 ? |
| 0 : -1); |
| |
| case RESTARTER_STATE_DISABLED: |
| if (!satbility || !(v->gv_flags & GV_ENABLED)) |
| return (satbility ? -1 : 0); |
| return (instance_satisfied(v, satbility) != -1 ? |
| 0 : -1); |
| |
| case RESTARTER_STATE_MAINT: |
| return (-1); |
| |
| case RESTARTER_STATE_UNINIT: |
| return (0); |
| |
| default: |
| #ifndef NDEBUG |
| uu_warn("%s:%d: Unexpected vertex state %d.\n", |
| __FILE__, __LINE__, v->gv_state); |
| #endif |
| abort(); |
| /* NOTREACHED */ |
| } |
| |
| case GVT_SVC: |
| if (uu_list_numnodes(v->gv_dependencies) == 0) |
| return (-1); |
| return (require_any_satisfied(v, satbility)); |
| |
| case GVT_FILE: |
| /* i.e., we assume files will not be automatically generated */ |
| return (file_ready(v) ? 1 : -1); |
| |
| case GVT_GROUP: |
| break; |
| |
| default: |
| #ifndef NDEBUG |
| uu_warn("%s:%d: Unexpected node type %d.\n", __FILE__, __LINE__, |
| v->gv_type); |
| #endif |
| abort(); |
| /* NOTREACHED */ |
| } |
| |
| switch (v->gv_depgroup) { |
| case DEPGRP_REQUIRE_ANY: |
| return (require_any_satisfied(v, satbility)); |
| |
| case DEPGRP_REQUIRE_ALL: |
| return (require_all_satisfied(v, satbility)); |
| |
| case DEPGRP_OPTIONAL_ALL: |
| return (optional_all_satisfied(v, satbility)); |
| |
| case DEPGRP_EXCLUDE_ALL: |
| return (exclude_all_satisfied(v, satbility)); |
| |
| default: |
| #ifndef NDEBUG |
| uu_warn("%s:%d: Unknown dependency grouping %d.\n", __FILE__, |
| __LINE__, v->gv_depgroup); |
| #endif |
| abort(); |
| } |
| } |
| |
| void |
| graph_start_if_satisfied(graph_vertex_t *v) |
| { |
| if (v->gv_state == RESTARTER_STATE_OFFLINE && |
| instance_satisfied(v, B_FALSE) == 1) { |
| if (v->gv_start_f == NULL) |
| vertex_send_event(v, RESTARTER_EVENT_TYPE_START); |
| else |
| v->gv_start_f(v); |
| } |
| } |
| |
| /* |
| * propagate_satbility() |
| * |
| * This function is used when the given vertex changes state in such a way that |
| * one of its dependents may become unsatisfiable. This happens when an |
| * instance transitions between offline -> online, or from !running -> |
| * maintenance, as well as when an instance is removed from the graph. |
| * |
| * We have to walk all the dependents, since optional_all dependencies several |
| * levels up could become (un)satisfied, instead of unsatisfiable. For example, |
| * |
| * +-----+ optional_all +-----+ require_all +-----+ |
| * | A |--------------->| B |-------------->| C | |
| * +-----+ +-----+ +-----+ |
| * |
| * offline -> maintenance |
| * |
| * If C goes into maintenance, it's not enough simply to check B. Because A has |
| * an optional dependency, what was previously an unsatisfiable situation is now |
| * satisfied (B will never come online, even though its state hasn't changed). |
| * |
| * Note that it's not necessary to continue examining dependents after reaching |
| * an optional_all dependency. It's not possible for an optional_all dependency |
| * to change satisfiability without also coming online, in which case we get a |
| * start event and propagation continues naturally. However, it does no harm to |
| * continue propagating satisfiability (as it is a relatively rare event), and |
| * keeps the walker code simple and generic. |
| */ |
| /*ARGSUSED*/ |
| static int |
| satbility_cb(graph_vertex_t *v, void *arg) |
| { |
| if (is_inst_bypassed(v)) |
| return (UU_WALK_NEXT); |
| |
| if (v->gv_type == GVT_INST) |
| graph_start_if_satisfied(v); |
| |
| return (UU_WALK_NEXT); |
| } |
| |
/*
 * Walk every (transitive) dependent of v, starting any offline instance
 * whose dependencies are now satisfied.  See the block comment above
 * satbility_cb() for why the entire dependent subtree must be walked.
 */
static void
propagate_satbility(graph_vertex_t *v)
{
	graph_walk(v, WALK_DEPENDENTS, satbility_cb, NULL, NULL);
}
| |
| static void propagate_stop(graph_vertex_t *, void *); |
| |
| /* |
| * propagate_start() |
| * |
| * This function is used to propagate a start event to the dependents of the |
| * given vertex. Any dependents that are offline but have their dependencies |
| * satisfied are started. Any dependents that are online and have restart_on |
| * set to "restart" or "refresh" are restarted because their dependencies have |
| * just changed. This only happens with optional_all dependencies. |
| */ |
| static void |
| propagate_start(graph_vertex_t *v, void *arg) |
| { |
| restarter_error_t err = (restarter_error_t)arg; |
| |
| if (is_inst_bypassed(v)) |
| return; |
| |
| switch (v->gv_type) { |
| case GVT_INST: |
| /* Restarter */ |
| if (inst_running(v)) { |
| if (err == RERR_RESTART || err == RERR_REFRESH) { |
| vertex_send_event(v, |
| RESTARTER_EVENT_TYPE_STOP_RESET); |
| } |
| } else { |
| graph_start_if_satisfied(v); |
| } |
| break; |
| |
| case GVT_GROUP: |
| if (v->gv_depgroup == DEPGRP_EXCLUDE_ALL) { |
| graph_walk_dependents(v, propagate_stop, |
| (void *)RERR_RESTART); |
| break; |
| } |
| err = v->gv_restart; |
| /* FALLTHROUGH */ |
| |
| case GVT_SVC: |
| graph_walk_dependents(v, propagate_start, (void *)err); |
| break; |
| |
| case GVT_FILE: |
| #ifndef NDEBUG |
| uu_warn("%s:%d: propagate_start() encountered GVT_FILE.\n", |
| __FILE__, __LINE__); |
| #endif |
| abort(); |
| /* NOTREACHED */ |
| |
| default: |
| #ifndef NDEBUG |
| uu_warn("%s:%d: Unknown vertex type %d.\n", __FILE__, __LINE__, |
| v->gv_type); |
| #endif |
| abort(); |
| } |
| } |
| |
| /* |
| * propagate_stop() |
| * |
| * This function is used to propagate a stop event to the dependents of the |
| * given vertex. Any dependents that are online (or in degraded state) with |
| * the restart_on property set to "restart" or "refresh" will be stopped as |
| * their dependencies have just changed, propagate_start() will start them |
| * again once their dependencies have been re-satisfied. |
| */ |
| static void |
| propagate_stop(graph_vertex_t *v, void *arg) |
| { |
| restarter_error_t err = (restarter_error_t)arg; |
| |
| if (is_inst_bypassed(v)) |
| return; |
| |
| switch (v->gv_type) { |
| case GVT_INST: |
| /* Restarter */ |
| if (err > RERR_NONE && inst_running(v)) { |
| if (err == RERR_RESTART || err == RERR_REFRESH) { |
| vertex_send_event(v, |
| RESTARTER_EVENT_TYPE_STOP_RESET); |
| } else { |
| vertex_send_event(v, RESTARTER_EVENT_TYPE_STOP); |
| } |
| } |
| break; |
| |
| case GVT_SVC: |
| graph_walk_dependents(v, propagate_stop, arg); |
| break; |
| |
| case GVT_FILE: |
| #ifndef NDEBUG |
| uu_warn("%s:%d: propagate_stop() encountered GVT_FILE.\n", |
| __FILE__, __LINE__); |
| #endif |
| abort(); |
| /* NOTREACHED */ |
| |
| case GVT_GROUP: |
| if (v->gv_depgroup == DEPGRP_EXCLUDE_ALL) { |
| graph_walk_dependents(v, propagate_start, |
| (void *)RERR_NONE); |
| break; |
| } |
| |
| if (err == RERR_NONE || err > v->gv_restart) |
| break; |
| |
| graph_walk_dependents(v, propagate_stop, arg); |
| break; |
| |
| default: |
| #ifndef NDEBUG |
| uu_warn("%s:%d: Unknown vertex type %d.\n", __FILE__, __LINE__, |
| v->gv_type); |
| #endif |
| abort(); |
| } |
| } |
| |
| void |
| offline_vertex(graph_vertex_t *v) |
| { |
| scf_handle_t *h = libscf_handle_create_bound_loop(); |
| scf_instance_t *scf_inst = safe_scf_instance_create(h); |
| scf_propertygroup_t *pg = safe_scf_pg_create(h); |
| restarter_instance_state_t state, next_state; |
| int r; |
| |
| assert(v->gv_type == GVT_INST); |
| |
| if (scf_inst == NULL) |
| bad_error("safe_scf_instance_create", scf_error()); |
| if (pg == NULL) |
| bad_error("safe_scf_pg_create", scf_error()); |
| |
| /* if the vertex is already going offline, return */ |
| rep_retry: |
| if (scf_handle_decode_fmri(h, v->gv_name, NULL, NULL, scf_inst, NULL, |
| NULL, SCF_DECODE_FMRI_EXACT) != 0) { |
| switch (scf_error()) { |
| case SCF_ERROR_CONNECTION_BROKEN: |
| libscf_handle_rebind(h); |
| goto rep_retry; |
| |
| case SCF_ERROR_NOT_FOUND: |
| scf_pg_destroy(pg); |
| scf_instance_destroy(scf_inst); |
| (void) scf_handle_unbind(h); |
| scf_handle_destroy(h); |
| return; |
| } |
| uu_die("Can't decode FMRI %s: %s\n", v->gv_name, |
| scf_strerror(scf_error())); |
| } |
| |
| r = scf_instance_get_pg(scf_inst, SCF_PG_RESTARTER, pg); |
| if (r != 0) { |
| switch (scf_error()) { |
| case SCF_ERROR_CONNECTION_BROKEN: |
| libscf_handle_rebind(h); |
| goto rep_retry; |
| |
| case SCF_ERROR_NOT_SET: |
| case SCF_ERROR_NOT_FOUND: |
| scf_pg_destroy(pg); |
| scf_instance_destroy(scf_inst); |
| (void) scf_handle_unbind(h); |
| scf_handle_destroy(h); |
| return; |
| |
| default: |
| bad_error("scf_instance_get_pg", scf_error()); |
| } |
| } else { |
| r = libscf_read_states(pg, &state, &next_state); |
| if (r == 0 && (next_state == RESTARTER_STATE_OFFLINE || |
| next_state == RESTARTER_STATE_DISABLED)) { |
| log_framework(LOG_DEBUG, |
| "%s: instance is already going down.\n", |
| v->gv_name); |
| scf_pg_destroy(pg); |
| scf_instance_destroy(scf_inst); |
| (void) scf_handle_unbind(h); |
| scf_handle_destroy(h); |
| return; |
| } |
| } |
| |
| scf_pg_destroy(pg); |
| scf_instance_destroy(scf_inst); |
| (void) scf_handle_unbind(h); |
| scf_handle_destroy(h); |
| |
| vertex_send_event(v, RESTARTER_EVENT_TYPE_STOP_RESET); |
| } |
| |
| /* |
| * void graph_enable_by_vertex() |
| * If admin is non-zero, this is an administrative request for change |
| * of the enabled property. Thus, send the ADMIN_DISABLE rather than |
| * a plain DISABLE restarter event. |
| */ |
| void |
| graph_enable_by_vertex(graph_vertex_t *vertex, int enable, int admin) |
| { |
| graph_vertex_t *v; |
| int r; |
| |
| assert(MUTEX_HELD(&dgraph_lock)); |
| assert((vertex->gv_flags & GV_CONFIGURED)); |
| |
| vertex->gv_flags = (vertex->gv_flags & ~GV_ENABLED) | |
| (enable ? GV_ENABLED : 0); |
| |
| if (enable) { |
| if (vertex->gv_state != RESTARTER_STATE_OFFLINE && |
| vertex->gv_state != RESTARTER_STATE_DEGRADED && |
| vertex->gv_state != RESTARTER_STATE_ONLINE) { |
| /* |
| * In case the vertex was notified to go down, |
| * but now can return online, clear the _TOOFFLINE |
| * and _TODISABLE flags. |
| */ |
| vertex->gv_flags &= ~GV_TOOFFLINE; |
| vertex->gv_flags &= ~GV_TODISABLE; |
| |
| vertex_send_event(vertex, RESTARTER_EVENT_TYPE_ENABLE); |
| } |
| |
| /* |
| * Wait for state update from restarter before sending _START or |
| * _STOP. |
| */ |
| |
| return; |
| } |
| |
| if (vertex->gv_state == RESTARTER_STATE_DISABLED) |
| return; |
| |
| if (!admin) { |
| vertex_send_event(vertex, RESTARTER_EVENT_TYPE_DISABLE); |
| |
| /* |
| * Wait for state update from restarter before sending _START or |
| * _STOP. |
| */ |
| |
| return; |
| } |
| |
| /* |
| * If it is a DISABLE event requested by the administrator then we are |
| * offlining the dependents first. |
| */ |
| |
| /* |
| * Set GV_TOOFFLINE for the services we are offlining. We cannot |
| * clear the GV_TOOFFLINE bits from all the services because |
| * other DISABLE events might be handled at the same time. |
| */ |
| vertex->gv_flags |= GV_TOOFFLINE; |
| |
| /* remember which vertex to disable... */ |
| vertex->gv_flags |= GV_TODISABLE; |
| |
| log_framework(LOG_DEBUG, "Marking in-subtree vertices before " |
| "disabling %s.\n", vertex->gv_name); |
| |
| /* set GV_TOOFFLINE for its dependents */ |
| r = uu_list_walk(vertex->gv_dependents, (uu_walk_fn_t *)mark_subtree, |
| NULL, 0); |
| assert(r == 0); |
| |
| /* disable the instance now if there is nothing else to offline */ |
| if (insubtree_dependents_down(vertex) == B_TRUE) { |
| vertex_send_event(vertex, RESTARTER_EVENT_TYPE_ADMIN_DISABLE); |
| return; |
| } |
| |
| /* |
| * This loop is similar to the one used for the graph reversal shutdown |
| * and could be improved in term of performance for the subtree reversal |
| * disable case. |
| */ |
| for (v = uu_list_first(dgraph); v != NULL; |
| v = uu_list_next(dgraph, v)) { |
| /* skip the vertex we are disabling for now */ |
| if (v == vertex) |
| continue; |
| |
| if (v->gv_type != GVT_INST || |
| (v->gv_flags & GV_CONFIGURED) == 0 || |
| (v->gv_flags & GV_ENABLED) == 0 || |
| (v->gv_flags & GV_TOOFFLINE) == 0) |
| continue; |
| |
| if ((v->gv_state != RESTARTER_STATE_ONLINE) && |
| (v->gv_state != RESTARTER_STATE_DEGRADED)) { |
| /* continue if there is nothing to offline */ |
| continue; |
| } |
| |
| /* |
| * Instances which are up need to come down before we're |
| * done, but we can only offline the leaves here. An |
| * instance is a leaf when all its dependents are down. |
| */ |
| if (insubtree_dependents_down(v) == B_TRUE) { |
| log_framework(LOG_DEBUG, "Offlining in-subtree " |
| "instance %s for %s.\n", |
| v->gv_name, vertex->gv_name); |
| offline_vertex(v); |
| } |
| } |
| } |
| |
| static int configure_vertex(graph_vertex_t *, scf_instance_t *); |
| |
| /* |
| * Set the restarter for v to fmri_arg. That is, make sure a vertex for |
| * fmri_arg exists, make v depend on it, and send _ADD_INSTANCE for v. If |
| * v is already configured and fmri_arg indicates the current restarter, do |
| * nothing. If v is configured and fmri_arg is a new restarter, delete v's |
| * dependency on the restarter, send _REMOVE_INSTANCE for v, and set the new |
| * restarter. Returns 0 on success, EINVAL if the FMRI is invalid, |
| * ECONNABORTED if the repository connection is broken, and ELOOP |
| * if the dependency would create a cycle. In the last case, *pathp will |
| * point to a -1-terminated array of ids which compose the path from v to |
| * restarter_fmri. |
| */ |
| int |
| graph_change_restarter(graph_vertex_t *v, const char *fmri_arg, scf_handle_t *h, |
| int **pathp) |
| { |
| char *restarter_fmri = NULL; |
| graph_vertex_t *rv; |
| int err; |
| int id; |
| |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| if (fmri_arg[0] != '\0') { |
| err = fmri_canonify(fmri_arg, &restarter_fmri, B_TRUE); |
| if (err != 0) { |
| assert(err == EINVAL); |
| return (err); |
| } |
| } |
| |
| if (restarter_fmri == NULL || |
| strcmp(restarter_fmri, SCF_SERVICE_STARTD) == 0) { |
| if (v->gv_flags & GV_CONFIGURED) { |
| if (v->gv_restarter_id == -1) { |
| if (restarter_fmri != NULL) |
| startd_free(restarter_fmri, |
| max_scf_fmri_size); |
| return (0); |
| } |
| |
| graph_unset_restarter(v); |
| } |
| |
| /* Master restarter, nothing to do. */ |
| v->gv_restarter_id = -1; |
| v->gv_restarter_channel = NULL; |
| vertex_send_event(v, RESTARTER_EVENT_TYPE_ADD_INSTANCE); |
| return (0); |
| } |
| |
| if (v->gv_flags & GV_CONFIGURED) { |
| id = dict_lookup_byname(restarter_fmri); |
| if (id != -1 && v->gv_restarter_id == id) { |
| startd_free(restarter_fmri, max_scf_fmri_size); |
| return (0); |
| } |
| |
| graph_unset_restarter(v); |
| } |
| |
| err = graph_insert_vertex_unconfigured(restarter_fmri, GVT_INST, 0, |
| RERR_NONE, &rv); |
| startd_free(restarter_fmri, max_scf_fmri_size); |
| assert(err == 0 || err == EEXIST); |
| |
| if (rv->gv_delegate_initialized == 0) { |
| if ((rv->gv_delegate_channel = restarter_protocol_init_delegate( |
| rv->gv_name)) == NULL) |
| return (EINVAL); |
| rv->gv_delegate_initialized = 1; |
| } |
| v->gv_restarter_id = rv->gv_id; |
| v->gv_restarter_channel = rv->gv_delegate_channel; |
| |
| err = graph_insert_dependency(v, rv, pathp); |
| if (err != 0) { |
| assert(err == ELOOP); |
| return (ELOOP); |
| } |
| |
| vertex_send_event(v, RESTARTER_EVENT_TYPE_ADD_INSTANCE); |
| |
| if (!(rv->gv_flags & GV_CONFIGURED)) { |
| scf_instance_t *inst; |
| |
| err = libscf_fmri_get_instance(h, rv->gv_name, &inst); |
| switch (err) { |
| case 0: |
| err = configure_vertex(rv, inst); |
| scf_instance_destroy(inst); |
| switch (err) { |
| case 0: |
| case ECANCELED: |
| break; |
| |
| case ECONNABORTED: |
| return (ECONNABORTED); |
| |
| default: |
| bad_error("configure_vertex", err); |
| } |
| break; |
| |
| case ECONNABORTED: |
| return (ECONNABORTED); |
| |
| case ENOENT: |
| break; |
| |
| case ENOTSUP: |
| /* |
| * The fmri doesn't specify an instance - translate |
| * to EINVAL. |
| */ |
| return (EINVAL); |
| |
| case EINVAL: |
| default: |
| bad_error("libscf_fmri_get_instance", err); |
| } |
| } |
| |
| return (0); |
| } |
| |
| |
| /* |
| * Add all of the instances of the service named by fmri to the graph. |
| * Returns |
| * 0 - success |
| * ENOENT - service indicated by fmri does not exist |
| * |
| * In both cases *reboundp will be B_TRUE if the handle was rebound, or B_FALSE |
| * otherwise. |
| */ |
| static int |
| add_service(const char *fmri, scf_handle_t *h, boolean_t *reboundp) |
| { |
| scf_service_t *svc; |
| scf_instance_t *inst; |
| scf_iter_t *iter; |
| char *inst_fmri; |
| int ret, r; |
| |
| *reboundp = B_FALSE; |
| |
| svc = safe_scf_service_create(h); |
| inst = safe_scf_instance_create(h); |
| iter = safe_scf_iter_create(h); |
| inst_fmri = startd_alloc(max_scf_fmri_size); |
| |
| rebound: |
| if (scf_handle_decode_fmri(h, fmri, NULL, svc, NULL, NULL, NULL, |
| SCF_DECODE_FMRI_EXACT) != 0) { |
| switch (scf_error()) { |
| case SCF_ERROR_CONNECTION_BROKEN: |
| default: |
| libscf_handle_rebind(h); |
| *reboundp = B_TRUE; |
| goto rebound; |
| |
| case SCF_ERROR_NOT_FOUND: |
| ret = ENOENT; |
| goto out; |
| |
| case SCF_ERROR_INVALID_ARGUMENT: |
| case SCF_ERROR_CONSTRAINT_VIOLATED: |
| case SCF_ERROR_NOT_BOUND: |
| case SCF_ERROR_HANDLE_MISMATCH: |
| bad_error("scf_handle_decode_fmri", scf_error()); |
| } |
| } |
| |
| if (scf_iter_service_instances(iter, svc) != 0) { |
| switch (scf_error()) { |
| case SCF_ERROR_CONNECTION_BROKEN: |
| default: |
| libscf_handle_rebind(h); |
| *reboundp = B_TRUE; |
| goto rebound; |
| |
| case SCF_ERROR_DELETED: |
| ret = ENOENT; |
| goto out; |
| |
| case SCF_ERROR_HANDLE_MISMATCH: |
| case SCF_ERROR_NOT_BOUND: |
| case SCF_ERROR_NOT_SET: |
| bad_error("scf_iter_service_instances", scf_error()); |
| } |
| } |
| |
| for (;;) { |
| r = scf_iter_next_instance(iter, inst); |
| if (r == 0) |
| break; |
| if (r != 1) { |
| switch (scf_error()) { |
| case SCF_ERROR_CONNECTION_BROKEN: |
| default: |
| libscf_handle_rebind(h); |
| *reboundp = B_TRUE; |
| goto rebound; |
| |
| case SCF_ERROR_DELETED: |
| ret = ENOENT; |
| goto out; |
| |
| case SCF_ERROR_HANDLE_MISMATCH: |
| case SCF_ERROR_NOT_BOUND: |
| case SCF_ERROR_NOT_SET: |
| case SCF_ERROR_INVALID_ARGUMENT: |
| bad_error("scf_iter_next_instance", |
| scf_error()); |
| } |
| } |
| |
| if (scf_instance_to_fmri(inst, inst_fmri, max_scf_fmri_size) < |
| 0) { |
| switch (scf_error()) { |
| case SCF_ERROR_CONNECTION_BROKEN: |
| libscf_handle_rebind(h); |
| *reboundp = B_TRUE; |
| goto rebound; |
| |
| case SCF_ERROR_DELETED: |
| continue; |
| |
| case SCF_ERROR_NOT_BOUND: |
| case SCF_ERROR_NOT_SET: |
| bad_error("scf_instance_to_fmri", scf_error()); |
| } |
| } |
| |
| r = dgraph_add_instance(inst_fmri, inst, B_FALSE); |
| switch (r) { |
| case 0: |
| case ECANCELED: |
| break; |
| |
| case EEXIST: |
| continue; |
| |
| case ECONNABORTED: |
| libscf_handle_rebind(h); |
| *reboundp = B_TRUE; |
| goto rebound; |
| |
| case EINVAL: |
| default: |
| bad_error("dgraph_add_instance", r); |
| } |
| } |
| |
| ret = 0; |
| |
| out: |
| startd_free(inst_fmri, max_scf_fmri_size); |
| scf_iter_destroy(iter); |
| scf_instance_destroy(inst); |
| scf_service_destroy(svc); |
| return (ret); |
| } |
| |
/*
 * Walk context for process_dependency_fmri(): carries the dependency-group
 * vertex being populated plus output fields for errors and cycle paths.
 */
struct depfmri_info {
	graph_vertex_t	*v;		/* GVT_GROUP vertex */
	gv_type_t	type;		/* type of dependency */
	const char	*inst_fmri;	/* FMRI of parental GVT_INST vert. */
	const char	*pg_name;	/* Name of dependency pg */
	scf_handle_t	*h;		/* repository handle */
	int		err;		/* return error code */
	int		**pathp;	/* return circular dependency path */
};
| |
| /* |
| * Find or create a vertex for fmri and make info->v depend on it. |
| * Returns |
| * 0 - success |
| * nonzero - failure |
| * |
| * On failure, sets info->err to |
| * EINVAL - fmri is invalid |
| * fmri does not match info->type |
| * ELOOP - Adding the dependency creates a circular dependency. *info->pathp |
| * will point to an array of the ids of the members of the cycle. |
| * ECONNABORTED - repository connection was broken |
| * ECONNRESET - succeeded, but repository connection was reset |
| */ |
| static int |
| process_dependency_fmri(const char *fmri, struct depfmri_info *info) |
| { |
| int err; |
| graph_vertex_t *depgroup_v, *v; |
| char *fmri_copy, *cfmri; |
| size_t fmri_copy_sz; |
| const char *scope, *service, *instance, *pg; |
| scf_instance_t *inst; |
| boolean_t rebound; |
| |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| /* Get or create vertex for FMRI */ |
| depgroup_v = info->v; |
| |
| if (strncmp(fmri, "file:", sizeof ("file:") - 1) == 0) { |
| if (info->type != GVT_FILE) { |
| log_framework(LOG_NOTICE, |
| "FMRI \"%s\" is not allowed for the \"%s\" " |
| "dependency's type of instance %s.\n", fmri, |
| info->pg_name, info->inst_fmri); |
| return (info->err = EINVAL); |
| } |
| |
| err = graph_insert_vertex_unconfigured(fmri, info->type, 0, |
| RERR_NONE, &v); |
| switch (err) { |
| case 0: |
| break; |
| |
| case EEXIST: |
| assert(v->gv_type == GVT_FILE); |
| break; |
| |
| case EINVAL: /* prevented above */ |
| default: |
| bad_error("graph_insert_vertex_unconfigured", err); |
| } |
| } else { |
| if (info->type != GVT_INST) { |
| log_framework(LOG_NOTICE, |
| "FMRI \"%s\" is not allowed for the \"%s\" " |
| "dependency's type of instance %s.\n", fmri, |
| info->pg_name, info->inst_fmri); |
| return (info->err = EINVAL); |
| } |
| |
| /* |
| * We must canonify fmri & add a vertex for it. |
| */ |
| fmri_copy_sz = strlen(fmri) + 1; |
| fmri_copy = startd_alloc(fmri_copy_sz); |
| (void) strcpy(fmri_copy, fmri); |
| |
| /* Determine if the FMRI is a property group or instance */ |
| if (scf_parse_svc_fmri(fmri_copy, &scope, &service, |
| &instance, &pg, NULL) != 0) { |
| startd_free(fmri_copy, fmri_copy_sz); |
| log_framework(LOG_NOTICE, |
| "Dependency \"%s\" of %s has invalid FMRI " |
| "\"%s\".\n", info->pg_name, info->inst_fmri, |
| fmri); |
| return (info->err = EINVAL); |
| } |
| |
| if (service == NULL || pg != NULL) { |
| startd_free(fmri_copy, fmri_copy_sz); |
| log_framework(LOG_NOTICE, |
| "Dependency \"%s\" of %s does not designate a " |
| "service or instance.\n", info->pg_name, |
| info->inst_fmri); |
| return (info->err = EINVAL); |
| } |
| |
| if (scope == NULL || strcmp(scope, SCF_SCOPE_LOCAL) == 0) { |
| cfmri = uu_msprintf("svc:/%s%s%s", |
| service, instance ? ":" : "", instance ? instance : |
| ""); |
| } else { |
| cfmri = uu_msprintf("svc://%s/%s%s%s", |
| scope, service, instance ? ":" : "", instance ? |
| instance : ""); |
| } |
| |
| startd_free(fmri_copy, fmri_copy_sz); |
| |
| err = graph_insert_vertex_unconfigured(cfmri, instance ? |
| GVT_INST : GVT_SVC, instance ? 0 : DEPGRP_REQUIRE_ANY, |
| RERR_NONE, &v); |
| uu_free(cfmri); |
| switch (err) { |
| case 0: |
| break; |
| |
| case EEXIST: |
| /* Verify v. */ |
| if (instance != NULL) |
| assert(v->gv_type == GVT_INST); |
| else |
| assert(v->gv_type == GVT_SVC); |
| break; |
| |
| default: |
| bad_error("graph_insert_vertex_unconfigured", err); |
| } |
| } |
| |
| /* Add dependency from depgroup_v to new vertex */ |
| info->err = graph_insert_dependency(depgroup_v, v, info->pathp); |
| switch (info->err) { |
| case 0: |
| break; |
| |
| case ELOOP: |
| return (ELOOP); |
| |
| default: |
| bad_error("graph_insert_dependency", info->err); |
| } |
| |
| /* This must be after we insert the dependency, to avoid looping. */ |
| switch (v->gv_type) { |
| case GVT_INST: |
| if ((v->gv_flags & GV_CONFIGURED) != 0) |
| break; |
| |
| inst = safe_scf_instance_create(info->h); |
| |
| rebound = B_FALSE; |
| |
| rebound: |
| err = libscf_lookup_instance(v->gv_name, inst); |
| switch (err) { |
| case 0: |
| err = configure_vertex(v, inst); |
| switch (err) { |
| case 0: |
| case ECANCELED: |
| break; |
| |
| case ECONNABORTED: |
| libscf_handle_rebind(info->h); |
| rebound = B_TRUE; |
| goto rebound; |
| |
| default: |
| bad_error("configure_vertex", err); |
| } |
| break; |
| |
| case ENOENT: |
| break; |
| |
| case ECONNABORTED: |
| libscf_handle_rebind(info->h); |
| rebound = B_TRUE; |
| goto rebound; |
| |
| case EINVAL: |
| case ENOTSUP: |
| default: |
| bad_error("libscf_fmri_get_instance", err); |
| } |
| |
| scf_instance_destroy(inst); |
| |
| if (rebound) |
| return (info->err = ECONNRESET); |
| break; |
| |
| case GVT_SVC: |
| (void) add_service(v->gv_name, info->h, &rebound); |
| if (rebound) |
| return (info->err = ECONNRESET); |
| } |
| |
| return (0); |
| } |
| |
/*
 * Walk context for process_dependency_pg(): the instance vertex whose
 * dependency pgs are being processed, plus error/cycle-path outputs.
 */
struct deppg_info {
	graph_vertex_t	*v;		/* GVT_INST vertex */
	int		err;		/* return error */
	int		**pathp;	/* return circular dependency path */
};
| |
| /* |
| * Make info->v depend on a new GVT_GROUP node for this property group, |
| * and then call process_dependency_fmri() for the values of the entity |
| * property. Return 0 on success, or if something goes wrong return nonzero |
| * and set info->err to ECONNABORTED, EINVAL, or the error code returned by |
| * process_dependency_fmri(). |
| */ |
| static int |
| process_dependency_pg(scf_propertygroup_t *pg, struct deppg_info *info) |
| { |
| scf_handle_t *h; |
| depgroup_type_t deptype; |
| restarter_error_t rerr; |
| struct depfmri_info linfo; |
| char *fmri, *pg_name; |
| size_t fmri_sz; |
| graph_vertex_t *depgrp; |
| scf_property_t *prop; |
| int err; |
| int empty; |
| scf_error_t scferr; |
| ssize_t len; |
| |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| h = scf_pg_handle(pg); |
| |
| pg_name = startd_alloc(max_scf_name_size); |
| |
| len = scf_pg_get_name(pg, pg_name, max_scf_name_size); |
| if (len < 0) { |
| startd_free(pg_name, max_scf_name_size); |
| switch (scf_error()) { |
| case SCF_ERROR_CONNECTION_BROKEN: |
| default: |
| return (info->err = ECONNABORTED); |
| |
| case SCF_ERROR_DELETED: |
| return (info->err = 0); |
| |
| case SCF_ERROR_NOT_SET: |
| bad_error("scf_pg_get_name", scf_error()); |
| } |
| } |
| |
| /* |
| * Skip over empty dependency groups. Since dependency property |
| * groups are updated atomically, they are either empty or |
| * fully populated. |
| */ |
| empty = depgroup_empty(h, pg); |
| if (empty < 0) { |
| log_error(LOG_INFO, |
| "Error reading dependency group \"%s\" of %s: %s\n", |
| pg_name, info->v->gv_name, scf_strerror(scf_error())); |
| startd_free(pg_name, max_scf_name_size); |
| return (info->err = EINVAL); |
| |
| } else if (empty == 1) { |
| log_framework(LOG_DEBUG, |
| "Ignoring empty dependency group \"%s\" of %s\n", |
| pg_name, info->v->gv_name); |
| startd_free(pg_name, max_scf_name_size); |
| return (info->err = 0); |
| } |
| |
| fmri_sz = strlen(info->v->gv_name) + 1 + len + 1; |
| fmri = startd_alloc(fmri_sz); |
| |
| (void) snprintf(fmri, fmri_sz, "%s>%s", info->v->gv_name, |
| pg_name); |
| |
| /* Validate the pg before modifying the graph */ |
| deptype = depgroup_read_grouping(h, pg); |
| if (deptype == DEPGRP_UNSUPPORTED) { |
| log_error(LOG_INFO, |
| "Dependency \"%s\" of %s has an unknown grouping value.\n", |
| pg_name, info->v->gv_name); |
| startd_free(fmri, fmri_sz); |
| startd_free(pg_name, max_scf_name_size); |
| return (info->err = EINVAL); |
| } |
| |
| rerr = depgroup_read_restart(h, pg); |
| if (rerr == RERR_UNSUPPORTED) { |
| log_error(LOG_INFO, |
| "Dependency \"%s\" of %s has an unknown restart_on value." |
| "\n", pg_name, info->v->gv_name); |
| startd_free(fmri, fmri_sz); |
| startd_free(pg_name, max_scf_name_size); |
| return (info->err = EINVAL); |
| } |
| |
| prop = safe_scf_property_create(h); |
| |
| if (scf_pg_get_property(pg, SCF_PROPERTY_ENTITIES, prop) != 0) { |
| scferr = scf_error(); |
| scf_property_destroy(prop); |
| if (scferr == SCF_ERROR_DELETED) { |
| startd_free(fmri, fmri_sz); |
| startd_free(pg_name, max_scf_name_size); |
| return (info->err = 0); |
| } else if (scferr != SCF_ERROR_NOT_FOUND) { |
| startd_free(fmri, fmri_sz); |
| startd_free(pg_name, max_scf_name_size); |
| return (info->err = ECONNABORTED); |
| } |
| |
| log_error(LOG_INFO, |
| "Dependency \"%s\" of %s is missing a \"%s\" property.\n", |
| pg_name, info->v->gv_name, SCF_PROPERTY_ENTITIES); |
| |
| startd_free(fmri, fmri_sz); |
| startd_free(pg_name, max_scf_name_size); |
| |
| return (info->err = EINVAL); |
| } |
| |
| /* Create depgroup vertex for pg */ |
| err = graph_insert_vertex_unconfigured(fmri, GVT_GROUP, deptype, |
| rerr, &depgrp); |
| assert(err == 0); |
| startd_free(fmri, fmri_sz); |
| |
| /* Add dependency from inst vertex to new vertex */ |
| err = graph_insert_dependency(info->v, depgrp, info->pathp); |
| /* ELOOP can't happen because this should be a new vertex */ |
| assert(err == 0); |
| |
| linfo.v = depgrp; |
| linfo.type = depgroup_read_scheme(h, pg); |
| linfo.inst_fmri = info->v->gv_name; |
| linfo.pg_name = pg_name; |
| linfo.h = h; |
| linfo.err = 0; |
| linfo.pathp = info->pathp; |
| err = walk_property_astrings(prop, (callback_t)process_dependency_fmri, |
| &linfo); |
| |
| scf_property_destroy(prop); |
| startd_free(pg_name, max_scf_name_size); |
| |
| switch (err) { |
| case 0: |
| case EINTR: |
| return (info->err = linfo.err); |
| |
| case ECONNABORTED: |
| case EINVAL: |
| return (info->err = err); |
| |
| case ECANCELED: |
| return (info->err = 0); |
| |
| case ECONNRESET: |
| return (info->err = ECONNABORTED); |
| |
| default: |
| bad_error("walk_property_astrings", err); |
| /* NOTREACHED */ |
| } |
| } |
| |
| /* |
| * Build the dependency info for v from the repository. Returns 0 on success, |
| * ECONNABORTED on repository disconnection, EINVAL if the repository |
| * configuration is invalid, and ELOOP if a dependency would cause a cycle. |
| * In the last case, *pathp will point to a -1-terminated array of ids which |
| * constitute the rest of the dependency cycle. |
| */ |
| static int |
| set_dependencies(graph_vertex_t *v, scf_instance_t *inst, int **pathp) |
| { |
| struct deppg_info info; |
| int err; |
| uint_t old_configured; |
| |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| /* |
| * Mark the vertex as configured during dependency insertion to avoid |
| * dependency cycles (which can appear in the graph if one of the |
| * vertices is an exclusion-group). |
| */ |
| old_configured = v->gv_flags & GV_CONFIGURED; |
| v->gv_flags |= GV_CONFIGURED; |
| |
| info.err = 0; |
| info.v = v; |
| info.pathp = pathp; |
| |
| err = walk_dependency_pgs(inst, (callback_t)process_dependency_pg, |
| &info); |
| |
| if (!old_configured) |
| v->gv_flags &= ~GV_CONFIGURED; |
| |
| switch (err) { |
| case 0: |
| case EINTR: |
| return (info.err); |
| |
| case ECONNABORTED: |
| return (ECONNABORTED); |
| |
| case ECANCELED: |
| /* Should get delete event, so return 0. */ |
| return (0); |
| |
| default: |
| bad_error("walk_dependency_pgs", err); |
| /* NOTREACHED */ |
| } |
| } |
| |
| |
| static void |
| handle_cycle(const char *fmri, int *path) |
| { |
| const char *cp; |
| size_t sz; |
| |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| path_to_str(path, (char **)&cp, &sz); |
| |
| log_error(LOG_ERR, "Transitioning %s to maintenance " |
| "because it completes a dependency cycle (see svcs -xv for " |
| "details):\n%s", fmri ? fmri : "?", cp); |
| |
| startd_free((void *)cp, sz); |
| } |
| |
| /* |
| * Increment the vertex's reference count to prevent the vertex removal |
| * from the dgraph. |
| */ |
| static void |
| vertex_ref(graph_vertex_t *v) |
| { |
| assert(MUTEX_HELD(&dgraph_lock)); |
| |
| v->gv_refs++; |
| } |
| |
| /* |
| * Decrement the vertex's reference count and remove the vertex from |
| * the dgraph when possible. |
| * |
| * Return VERTEX_REMOVED when the vertex has been removed otherwise |
| * return VERTEX_INUSE. |
| */ |
| static int |
| vertex_unref(graph_vertex_t *v) |
| { |
| assert(MUTEX_HELD(&dgraph_lock)); |
| assert(v->gv_refs > 0); |
| |
| v->gv_refs--; |
| |
| return (free_if_unrefed(v)); |
| } |
| |
| /* |
| * When run on the dependencies of a vertex, populates list with |
| * graph_edge_t's which point to the service vertices or the instance |
| * vertices (no GVT_GROUP nodes) on which the vertex depends. |
| * |
| * Increment the vertex's reference count once the vertex is inserted |
| * in the list. The vertex won't be able to be deleted from the dgraph |
| * while it is referenced. |
| */ |
| static int |
| append_svcs_or_insts(graph_edge_t *e, uu_list_t *list) |
| { |
| graph_vertex_t *v = e->ge_vertex; |
| graph_edge_t *new; |
| int r; |
| |
| switch (v->gv_type) { |
| case GVT_INST: |
| case GVT_SVC: |
| break; |
| |
| case GVT_GROUP: |
| r = uu_list_walk(v->gv_dependencies, |
| (uu_walk_fn_t *)append_svcs_or_insts, list, 0); |
| assert(r == 0); |
| return (UU_WALK_NEXT); |
| |
| case GVT_FILE: |
| return (UU_WALK_NEXT); |
| |
| default: |
| #ifndef NDEBUG |
| uu_warn("%s:%d: Unexpected vertex type %d.\n", __FILE__, |
| __LINE__, v->gv_type); |
| #endif |
| abort(); |
| } |
| |
| new = startd_alloc(sizeof (*new)); |
| new->ge_vertex = v; |
| uu_list_node_init(new, &new->ge_link, graph_edge_pool); |
| r = uu_list_insert_before(list, NULL, new); |
| assert(r == 0); |
| |
| /* |
| * Because we are inserting the vertex in a list, we don't want |
| * the vertex to be freed while the list is in use. In order to |
| * achieve that, increment the vertex's reference count. |
| */ |
| vertex_ref(v); |
| |
| return (UU_WALK_NEXT); |
| } |
| |
| static boolean_t |
| should_be_in_subgraph(graph_vertex_t *v) |
| { |
| graph_edge_t *e; |
| |
| if (v == milestone) |
| return (B_TRUE); |
| |
| /* |
| * v is in the subgraph if any of its dependents are in the subgraph. |
| * Except for EXCLUDE_ALL dependents. And OPTIONAL dependents only |
| * count if we're enabled. |
| */ |
| for (e = uu_list_first(v->gv_dependents); |
| e != NULL; |
| e = uu_list_next(v->gv_dependents, e)) { |
| graph_vertex_t *dv = e->ge_vertex; |
| |
| if (!(dv->gv_flags & GV_INSUBGRAPH)) |
| continue; |
| |
| /* |
| * Don't include instances that are optional and disabled. |
| */ |
| if (v->gv_type == GVT_INST && dv->gv_type == GVT_SVC) { |
| |
| int in = 0; |
| graph_edge_t *ee; |
| |
| for (ee = uu_list_first(dv->gv_dependents); |
| ee != NULL; |
| ee = uu_list_next(dv->gv_dependents, ee)) { |
| |
| graph_vertex_t *ddv = e->ge_vertex; |
| |
| if (ddv->gv_type == GVT_GROUP && |
| ddv->gv_depgroup == DEPGRP_EXCLUDE_ALL) |
| continue; |
| |
| if (ddv->gv_type == GVT_GROUP && |
| ddv->gv_depgroup == DEPGRP_OPTIONAL_ALL && |
| !(v->gv_flags & GV_ENBLD_NOOVR)) |
| continue; |
| |
| in = 1; |
| } |
| if (!in) |
| continue; |
| } |
| if (v->gv_type == GVT_INST && |
| dv->gv_type == GVT_GROUP && |
| dv->gv_depgroup == DEPGRP_OPTIONAL_ALL && |
| !(v->gv_flags & GV_ENBLD_NOOVR)) |
| continue; |
| |
| /* Don't include excluded services and instances */ |
| if (dv->gv_type == GVT_GROUP && |
| dv->gv_depgroup == DEPGRP_EXCLUDE_ALL) |
| continue; |
| |
| return (B_TRUE); |
| } |
| |
| return (B_FALSE); |
| } |
| |
| /* |
| * Ensures that GV_INSUBGRAPH is set properly for v and its descendents. If |
| * any bits change, manipulate the repository appropriately. Returns 0 or |
| * ECONNABORTED. |
| */ |
| static int |
| eval_subgraph(graph_vertex_t *v, scf_handle_t *h) |
| { |
| boolean_t old = (v->gv_flags & GV_INSUBGRAPH) != 0; |
| boolean_t new; |
| graph_edge_t *e; |
| scf_instance_t *inst; |
| int ret = 0, r; |
| |
| assert(milestone != NULL && milestone != MILESTONE_NONE); |
| |
| new = should_be_in_subgraph(v); |
| |
| if (new == old) |
| return (0); |
| |
| log_framework(LOG_DEBUG, new ? "Adding %s to the subgraph.\n" : |
| "Removing %s from the subgraph.\n", v->gv_name); |
| |
| v->gv_flags = (v->gv_flags & ~GV_INSUBGRAPH) | |
| (new ? GV_INSUBGRAPH : 0); |
| |
| if (v->gv_type == GVT_INST && (v->gv_flags & GV_CONFIGURED)) { |
| int err; |
| |
| get_inst: |
| err = libscf_fmri_get_instance(h, v->gv_name, &inst); |
| if (err != 0) { |
| switch (err) { |
| case ECONNABORTED: |
| libscf_handle_rebind(h); |
| ret = ECONNABORTED; |
| goto get_inst; |
| |
| case ENOENT: |
| break; |
| |
| case EINVAL: |
| case ENOTSUP: |
| default: |
| bad_error("libscf_fmri_get_instance", err); |
| } |
| } else { |
| const char * |