˓→PBDaemon.RemoteException'>: : Traceback (most recent call last): File "/opt/zenoss/Products/ZenHub/PBDaemon.py", line 85, in inner return callable(*args, **kw) File "/opt/zenoss/Products/ZenHub/services/ModelerService.py", line 144, in remote_
˓→applyDataMaps result = inner(map) (continues on next page)
5.9. Modeling for Zenpacks: Selected Topics 135 ZenPackers Documentation
(continued from previous page) File "/opt/zenoss/Products/ZenHub/services/ModelerService.py", line 140, in inner return self._do_with_retries(action) File "/opt/zenoss/Products/ZenHub/services/ModelerService.py", line 166, in _do_with_
˓→retries return action() File "/opt/zenoss/Products/ZenHub/services/ModelerService.py", line 131, in action completed= bool(adm._applyDataMap(device, map)) File "/opt/zenoss/lib/python/ZODB/transact.py", line 44, in g r = f(*args, **kwargs) File "/opt/zenoss/Products/DataCollector/ApplyDataMap.py", line 217, in _applyDataMap changed = self._updateRelationship(tobj, datamap) File "/z/ZenPacks.zenoss.PythonCollector/ZenPacks/zenoss/PythonCollector/patches/
˓→platform.py", line 38, in _updateRelationship return original(self, device, relmap) File "/opt/zenoss/Products/DataCollector/ApplyDataMap.py", line 271, in _
˓→updateRelationship objchange = self._updateObject(obj, objmap) File "/opt/zenoss/Products/DataCollector/ApplyDataMap.py", line 402, in _updateObject setter(*args) TypeError: name() takes exactly 1 argument (2 given)
There is a good chance you’ve tried to set an attribute using “name”. This is a reserved word. You probably meant to use “title” for your attribute.
AttributeError: networks
You see this when modeling:
2016-10-19 10:31:02,765 DEBUG zen.ZenModeler: Running1 clients 2016-10-19 10:31:02,770 ERROR zen.ZenModeler: : Traceback (most recent call last): File"/opt/zenoss/Products/ZenHub/PBDaemon.py", line 97, in inner return callable(*args, **kw) File"/opt/zenoss/Products/ZenHub/services/ModelerService.py", line 157, in remote_
˓→applyDataMaps result= inner(map) File"/opt/zenoss/Products/ZenHub/services/ModelerService.py", line 153, in inner return self._do_with_retries(action) File"/opt/zenoss/Products/ZenHub/services/ModelerService.py", line 179, in _do_with_
˓→retries return action() File"/opt/zenoss/Products/ZenHub/services/ModelerService.py", line 144, in action completed= bool(adm._applyDataMap(device, map)) File"/opt/zenoss/lib/python/ZODB/transact.py", line 44, in g r= f( *args, **kwargs) File"/opt/zenoss/Products/DataCollector/ApplyDataMap.py", line 217, in _applyDataMap changed= self._updateRelationship(tobj, datamap) File"/z/ZenPacks.zenoss.PythonCollector/ZenPacks/zenoss/PythonCollector/patches/
˓→platform.py", line 38, in _updateRelationship return original(self, device, relmap) File"/opt/zenoss/Products/DataCollector/ApplyDataMap.py", line 280, in _
˓→updateRelationship objchange, obj= self._createRelObject(device, objmap, rname) File"/opt/zenoss/Products/DataCollector/ApplyDataMap.py", line 477, in _
˓→createRelObject (continues on next page)
136 Chapter 5. General Zenpack Development ZenPackers Documentation
(continued from previous page) up_changed= self._updateObject(remoteObj, objmap) File"/opt/zenoss/Products/DataCollector/ApplyDataMap.py", line 400, in _updateObject setter(*args) File"/opt/zenoss/Products/ZenModel/IpInterface.py", line 335, in setIpAddresses self.addIpAddress(ip) File"/opt/zenoss/Products/ZenModel/IpInterface.py", line 282, in addIpAddress ipobj= networks.createIp(ip, netmask) File"/opt/zenoss/Products/ZenModel/IpNetwork.py", line 329, in createIp netobj= self.createNet(ip, netmask) File"/opt/zenoss/Products/ZenModel/IpNetwork.py", line 253, in createNet self.dmd.ZenLinkManager.networks.add_net(net) AttributeError: networks :
Somehow the ZenLinkManager (class Products.ZenModel.LinkManager) has not been instantiated correctly and its networks attribute is missing. Fix this in zendmd:
[zenoss@mp4:/home/zenoss]: zendmd >>> zlm= dmd.ZenLinkManager >>> from Products.ZenModel.LinkManager import DrawNetworks >>> zlm.networks= DrawNetworks() >>> commit()
You can resume modeling now.
You see consecutive “Changes in configuration applied” messages
Once you model a device, any subsequent model should not see any changes. You should see a “No change in configuration detected” message on the 2nd model, assuming nothing changed. This is important because if the modeler thinks there are changes to be recorded, it will go through and run ApplyDatamaps() on all the model data. This can be an expensive operation. If you continue to see changes, try these steps: • Ensure no changes show up after setting zCollectorLogChanges to true as above. – Inspect your Events for the /Change event class, – Do a verbose model, extract the datamaps and compare them using the Unix diff command for example. If no difference is visible, go to the next sections. • Short-circuit your modeler maps until you finally get the “no-changes” message after the 2nd model. Then add back in each map sequentially, modeling twice each try until you identify the guilty map. • Ensure your property types match what you are setting in the modeler: – If your property is the default type (string) and you feed the modeler an int, it will show the changes message. • Look for attribute setters of None or empty strings. Those should only be set if there are actual values.
5.9.9 Simulating Modeling
Gathering Modeler Results
There are a number of ways to do this:
5.9. Modeling for Zenpacks: Selected Topics 137 ZenPackers Documentation
• run the modeler with --save_raw_results or --save_processed_results • Use a pickler to grab the maps directly
Playing Back the Modeler
If you use --save_raw_results or --save_processed_results you can use the playback tool at https://github.com/zenoss/solutions-private/blob/develop/apply-zenmodeler-results If you used a pickler, you have to insert that data in your modeler.
5.10 Collection: General Concepts
5.10.1 General Considerations
Collecting data is the fundamental goal of Zenoss. This section explores specific tasks related to data collection and parsing that data.
Fig. 13: Zenoss Collector Subsystem
Prerequisites
• Zenoss ZenPack Development Guide We assume that you are familiar with ZenPack development and Python coding. We further assume that we work from the base of ZP_DIR. For NetBotz for example:
138 Chapter 5. General Zenpack Development ZenPackers Documentation
ZP_DIR_TOP=$ZENHOME/ZenPacks/ZenPacks.training.NetBotz ZP_DIR=$ZP_DIR_TOP/ZenPacks/training/NetBotz
As you should know, collectors and parsers typically live in the folder:
$ZP_DIR/datasources $ZP_DIR/parsers
Debugging Tips in General
• Run the collector manually like this:
zencommand run -v10 -d mp3.zenoss.loc |& tee collect.log
• If you don’t get any output, you can try these basic steps: – Restart zenhub: it may have given up loading the modeler – Rerun zencommand and also monitor /opt/zenoss/log/zenhub.log for good measure. You may want to run zenhub in the foreground. • To test your parser command at a low level use:
cd /opt/zenoss/Products/ZenHub/services python CommandPerformanceConfig.py -d mp1.zenoss.loc
General Background Information
Collection process has the following steps: • A collection process (a collector) is started, often with ZenCommand against a collector name (db2.zenoss.loc). • The collector contacts Zenhub and loads the commands to be run against devices for that collector (target) • Zencommand runs the collection command on the remote target. • If and when results are returned, a parser is created. • Zencommand passes results to the processResults() method of the parser. • The processResults() method is passed the command configuration fetched from ZenHub, and also an object into which parsed results will be placed. • Zencommand takes the returned Python dictionary from the parser and updates the ZODB. Collection flow (reconstructed from the original diagram): HOST —Collect(targets)→ (Results); ZC/ZP ↔ Zenhub ← (Parser, Result) → ZODB. That is, the collector fetches its targets through Zenhub, collects Results from the HOST, hands them to the parser, and the parsed result is written back to the ZODB via Zenhub. Ref: http://docs.huihoo.com/zenoss/dev-guide/2.4.2/ch12s05.html
5.10. Collection: General Concepts 139 ZenPackers Documentation
5.10.2 Custom Collectors and Custom Parsers
Collectors/Pollers
Collection can happen with a native plugin like [ssh, snmp, ping, https, etc.] or a custom plugin that you create. The output can come back in several formats like [Nagios, Cacti, JSON] or a custom format that you specify. Custom formats must be handled by you using a custom parser. Sometimes we refer to collectors as pollers or collection plugins. Since the collector/plugin information is passed to Zenhub, it must be an executable program or script. An example of a custom poller that outputs JSON is shown below. The example is pilfered from ZenPacks.zenoss.DB2. Note the following: • The poller is self-contained and self-calling. There are no magic functions that Zenoss calls automatically. • The poller can import from global and local modules • You normally set up your plugins in your __init__.py so that they have proper scope and permissions at installation. • In your Monitoring Templates setup, you must use the “COMMAND” type for this datasource. • When you specify the datasource command, you will have to specify the full path (using TALES) to the poller. For example: ${here/ZenPackManager/packs/ZenPacks.zenoss.DB2/path}/poll_db2.py • See the PostgreSQL, DB2, and DatabaseMonitor zenpacks for more examples.
1 #!/usr/bin/env python
2 import sys
3 from lib import txdb2jdbc
4
5 class db2Poller(object):
6 _connectionString= None # DB2 JDBC ConnectionStrings only
7 _query= None # A Valid DB2 SQL query
8 _data= None # This is JSON data from Java connector
9
10 def __init__(self, conString, myQuery):
11 self._connectionString= conString
12 self._query= myQuery
13
14 def getData(self):
15 db2= txdb2jdbc.Database(self._connectionString)
16 self._data= db2.query_json(self._query)
17 return self._data
18
19 def printJSON(self):
20 data= None
21 try:
22 data= self.getData()
23
24 except Exception, ex:
25 print "Exception", ex
26 print data
27
28 if __name__ =='__main__':
29 usage="Usage: {0} "
30 connectionString= None
31
32 try: (continues on next page)
140 Chapter 5. General Zenpack Development ZenPackers Documentation
(continued from previous page)
33 connectionString= sys.argv[1]
34 query= sys.argv[2]
35
36 except IndexError:
37 print "poll_db2 : insufficient parameters"
38 print >> sys.stderr, usage.format(sys.argv[0])
39 sys.exit(1)
40
41 except ValueError:
42 print >> sys.stderr, usage.format(sys.argv[0])
43 sys.exit(1)
44
45 poller= db2Poller(connectionString, query)
46 poller.printJSON()
Custom Parsers
The parser is invoked after a successful collection has occurred. If you are not using one of the standard parsers like [Nagios, Cacti, JSON], then you must create your own custom parser. Custom parsers are usually located in the $ZP_DIR/parsers folder. Whatever parser you create can only be used when configured for the datasource (for your device) in the Monitoring Templates area. This information is typically stored in the $ZP_DIR/objects/objects.xml file. Our example is from the DatabaseMonitor zenpack (OracleDB): • Starting on line 10 we see the processResults() method definition. • On line 13, we try to determine if the returned data is valid data. • On line 38, we start to process the validated data • At 55, we return results determined by status returned from probed targets • Finally on line 69, we update our datapoints.
1 # ------
2 # File: $ZP_DIR/parsers/tablespaces.py ------
3 # ------
4 import json
5
6 from Products.ZenRRD.CommandParser import CommandParser
7 from ZenPacks.zenoss.DatabaseMonitor.lib import locallibs
8
9 class tablespace(CommandParser):
10 def processResults(self, cmd, result):
11
12 data= None
13 try:
14 data= json.loads(cmd.result.output)
15 # Auto-clear if possible
16 result.events.append({
17 'severity':0,
18 'summary':'Command parser status',
19 'eventKey':'tablespace.parser.key',
20 'eventClassKey':'tablespace.parse.class',
21 'component': cmd.component, (continues on next page)
5.10. Collection: General Concepts 141 ZenPackers Documentation
(continued from previous page)
22 })
23 except Exception, ex:
24 result.events.append({
25 'severity': cmd.severity,
26 'summary':'Command parser status',
27 'eventKey':'tablespace.parser.key',
28 'eventClassKey':'tablespace.parse.class',
29 'command_output': cmd.result.output,
30 'component': cmd.component,
31 'exception': str(ex),
32 })
33
34 return result
35
36 # Data is a list of dict: Iterate over them to find the right row
37 tbsp= None
38 for row in data:
39 if tbsp is not None:
40 break
41
42 inst_name= row['INSTANCE_NAME']
43 tbsp_name= row['TABLESPACE_NAME']
44 component_id='{0}_{1}'.format(inst_name, tbsp_name)
45
46 # Select the correct row here. Break when found. Set Status
47 if component_id == cmd.component:
48 tbsp= row
49
50 # If the TS reports an error, mark it as Critical.
51 ts_message='Tablespace Status is: %s'% tbsp['ONLINE_STATUS']
52
53 severity= locallibs.tbsp_status_map(tbsp['ONLINE_STATUS'])
54
55 result.events.append({
56 'severity': severity,
57 'summary': ts_message,
58 'eventKey':'tablespace.status.Key',
59 'eventClassKey':'oracle.tablespace.ClassKey',
60 'eventClass':"/Status",
61 'component': component_id,
62 })
63
64 break # Break the "for row" , component found.
65
66 #------
67 # Update/Filter on all datapoints. No need to check for non-numericals.
68 #------
69 for point in cmd.points:
70 if tbsp and point.id in tbsp:
71 result.values.append((point, tbsp[point.id]))
72
73 return result
142 Chapter 5. General Zenpack Development ZenPackers Documentation
5.11 Datasources in Detail
5.11.1 How to Determine what Daemon Runs a DataSource
The ZenPack documentation should list the daemons and datasource types. If that is not quite enough, then try this: • First look at the collector daemon’s class, it will specify a configurationService. – For a custom daemon, look in the daemons folder. • Then look at that service, to see what kinds of datasources it includes. – For custom services, look in the services folder. • If your datasource inherits from PythonCollector, it probably runs under zenpython
5.11.2 How to Determine How Templates Set the DataSource
• Look for the sourcetypes variable in objects.xml or datasource.type in zenpack.yaml. • Now compare that to existing datasources’ sourcetype variable.
5.11.3 General Questions and Answers
• Question: What’s the difference between a datasource and a datasource plugin? A datasource is an instance of RRDDataSource. It’s the object that’s part of a monitoring template and stored in ZODB. A datasource plugin is not a persistent ZODB object. It’s a class that is used to perform collection of a specific subclass of RRDDataSource instance: PythonDataSource. A PythonDataSource has a plugin_classname property that must be set to the fully qualified classname of a PythonDataSourcePlugin class such as “ZenPacks.zenoss.AWS.dsplugins.S3BucketPlugin”. Also datasource plugins are a zenpython concept. So all PythonDataSources have PythonDataSourcePlugins, but other kinds of RRDDataSources do not. So for instance, vsphere has a datasource, but not a plugin.
5.11.4 PythonCollector
When using PythonCollector there are two ways to configure collection as described in PythonCollector’s documentation. 1. Using the existing PythonDataSource. http://wiki.zenoss.org/ZenPack:PythonCollector#Using_the_Python_Data_Source_Type_Directly 2. Subclassing PythonDataSource. http://wiki.zenoss.org/ZenPack:PythonCollector#Extending_the_Python_Data_Source_Type • How do you decide which method to use? It’s pretty easy to figure out. Do you expect that a Zenoss admin would ever be adding one of your datasources to a monitoring template? If so, the answer is to subclass PythonDataSource to create your own type. If the answer is no, the answer is to not create a PythonDataSource subclass, but to just deliver your own monitoring templates that use the Python data source type with the plugin_classname populated.
5.11. Datasources in Detail 143 ZenPackers Documentation
Zenoss adds each PythonDataSource subclass as an option in the datasource type drop-down when creating a new datasource in a monitoring template. So don’t pollute that drop-down with things the user will never use, but don’t make the user create “Python” type datasources where they have to provide the correct value for plugin_classname. Using the Ceph ZenPack as an example, the suggestion would be that none of the PythonDataSourcePlugins implemented therein are generic enough to deserve a PythonDataSource subclass. Good examples of plugins that deserve a PythonDataSource subclass are “Windows Perfmon”, “Windows Event Log”, “Windows Process”.
5.11.5 Invalid config_keys() in Datasources: Clearing Redis
Sometimes if you work on a datasource, you can accidentally insert a bad entry into your config_key() method. This can get cached in redis and result in your datasource using the cached value, even if you change the config_key() method later. You may see messages like this in your collector logs:
2017-08-17 13:12:08,839 DEBUG zen.collector.config: Fetching daemon configuration
˓→properties Traceback (most recent call last): File"/usr/lib64/python2.7/logging/__init__.py", line 851, in emit msg= self.format(record) File"/usr/lib64/python2.7/logging/__init__.py", line 724, in format return fmt.format(record) File"/usr/lib64/python2.7/logging/__init__.py", line 464, in format record.message= record.getMessage() File"/usr/lib64/python2.7/logging/__init__.py", line 328, in getMessage msg= msg% self.args TypeError:%d format: a number is required, not str
This probably means that the cached config_keys are not in the proper order. Ensure your config_key returns items in the following order:
* context.device().id, * datasource.getCycleTime(context), * * *
You must ensure that the first and second fields are (str, int). If you get a broken config key in collectorredis or redis, you may have to flush redis’ cache: • connect to the redis container(s) • list the contents of table 2:
(zenoss) [zenoss@6f913fd44dbb ~]$ redis-cli -n 2 --scan --pattern '*' collector_cache_zenpython-0_devices@@nutanix64 collector_cache_zenpython-0_threshold_classes collector_cache_zenpython-0_property_items collector_cache_zenpython-0_thresholds
• Open up redis table 2 in redis-cli:
144 Chapter 5. General Zenpack Development ZenPackers Documentation
(zenoss) [zenoss@6f913fd44dbb ~]$ redis-cli -n 2 127.0.0.1:6379[2]> 127.0.0.1:6379[2]> del collector_cache_zenpython-0_devices@@nutanix64 (integer) 1 127.0.0.1:6379[2]> del collector_cache_zenpython-0_property_item (integer) 1 127.0.0.1:6379[2]> exit (zenoss) [zenoss@6f913fd44dbb ~]$ (zenoss) [zenoss@6f913fd44dbb ~]$ redis-cli -n 2 --scan --pattern '*'
• Now retry your datasource
5.12 Graphs
5.12.1 Special Examples
5.12.2 Putting Events in Things
• Jos: does anyone know if it’s possible to include a variable (like component id) in a Graph title? • Bri: Yes • Bri: I’ll give you an example • Bri: https://github.com/zenoss/ZenPacks.zenoss.UCSCapacity/blob/develop/ZenPacks/zenoss/UCSCapacity/__ init__.py#L212-L225 • Bri: Note that this one is monkeypatched, but you can just include it in your regular class • Bri: All you need to do is update the graph object before you release the list • Che: That seems dangerous. Wouldn’t Zope automatically commit that after processing the request? • Che: According to Joseph you have to implement getDefaultGraphDefs for Zenoss 4 and getGraphObjects for Zenoss 5. Also, you should do it like this. Note setting the volatile _v_title property. https://github.com/zenoss/ZenPacks.zenoss.CiscoUCS/blob/develop/ZenPacks/zenoss/CiscoUCS/ServiceProfile.py# L193
5.13 ZenPython (PythonCollector) Topics
This article by Jane Curry is excellent: http://www.skills-1st.co.uk/papers/jane/PythonZenPacks.pdf Source code and docs: http://wiki.zenoss.org/ZenPack:PythonCollector
5.13.1 Method Notes
• def config_key(cls, datasource, context): Return list that is used to filter/split configurations at the collector. Check zenhub.log for errors. Note: restart zenhub after modification
5.12. Graphs 145 ZenPackers Documentation
• def params(cls, datasource, context): Return params dictionary needed for plugin. Executed in zenhub. Note: restart zenhub after modification • def __init__(self, config=None): Initialize the plugin with a configuration. • def collect(self, config): No default collect() behaviour. You must implement this method. This method *MUST* return a Twisted deferred. Results will be sent to the onResult then either onSuccess() or onError() callbacks below. Check zenpython.log for any log messages.
Note: collect()’s deferred return = result for onSuccess(), onError(), and onComplete()
• def onSuccess(self, result, config): Called only on success of collect(). After onResult, before onComplete. Return a data structure with zero or more events, values and maps. Note: values is a dictionary and events and maps are lists.
Note: The result input is the output of collect()’s deferred return.
It is easier to send events from onError()/onSuccess(), since you can just return them, along with maps and data values. But you can do them from collect() if you use the service directly. The thing to be careful about with collect() is that you need to return (or yield, if using inlineCallbacks) a deferred as soon as possible — don’t do anything CPU-intensive before you do this, or your plugin will get blocked. Even though this is really only critical on that first deferred you return, stylistically it’s best to keep the IO in collect() and the processing in onSuccess()/onError(), just so that you’re not tempted to do things in collect() that you probably shouldn’t. • def onError(self, result, config): Called only on error. After onResult, before onComplete. You can omit this method if you want the error result of the collect() to be used without further processing. Recommended to implement this method to capture errors. • def onComplete(self, result, config): Called last for success and error. You can omit this method if you want the result of either the onSuccess() or onError() method to be used without further processing. • def cleanup(self, config): Called when collector exits, or task is deleted or recreated. May be omitted.
5.13.2 Errors and Debugging
• Configuration for XYZ unavailable – is that the correct name? If you see an error like this in the zenpython logs:
146 Chapter 5. General Zenpack Development ZenPackers Documentation
DEBUG zen.collector.config: Fetching configurations ERROR zen.collector.config: Configuration for XYZ unavailable -- is that the
˓→correct name? DEBUG zen.collector.scheduler: Task configLoader finished, result: 'Configuration
˓→loaded' ... etc ...
Yes, you checked that your device name is correct, so what gives? This usually indicates a massive error somewhere in config_key() or params(). Check zenhub’s log for obvious clues and errors. Don’t forget to restart zenhub after your fixes.
5.14 ZenPack Security
We take the security of our systems and customers seriously. This means that we provide the most secure access possible to our customers’ devices. The following outlines some of the general concepts.
5.14.1 General Stuff
• Always use password type of zProperty for passwords • Use TALES expressions with password zProps in composite credentials like ConnectionStrings • Document the creation of special restricted accounts on targets for more secure access.
5.14.2 SSH and Sudo Access
Often we use SSH in combination with sudo to access system data for Unix type systems. Here are some guidelines to mitigate customers having objections or other access problems. 1. Clearly document that we assume sudo NOPASSWD access for full functionality. 2. Document the exact minimal sudoers configuration we require. 3. Verify that we fail gracefully during modeling and monitoring in the following cases. (a) sudo not installed. (b) user not in sudoers. (c) sudo requiring password. Below is an example of an /etc/sudoers file that has minimal access for the commands: • /sbin/service --status-all • /sbin/initctl list This is the /etc/sudoers file:
# This file MUST be edited with the 'visudo' command as root. # # Please consider adding local content in /etc/sudoers.d/ instead of # directly modifying this file. Defaults mail_badpass Defaults secure_path="/usr/sbin:/usr/bin:/sbin:/bin"
(continues on next page)
5.14. ZenPack Security 147 ZenPackers Documentation
(continued from previous page) Cmnd_Alias STATUS = /sbin/service --status-all, /sbin/initctl list
# User privilege specification root ALL=(ALL:ALL) ALL
# Allow members of group sudo to execute any command %sudo ALL=(ALL:ALL) ALL
# Allow zenoss user access to STATUS commands zenoss ALL= NOPASSWD: STATUS
On newer systems, it is often better to take the advice listed in /etc/sudoers and put local commands into /etc/sudoers.d/zenoss because there is less chance that this setup is lost on upgrades:
# file: /etc/sudoers.d/zenoss for zenoss access # This file won't get overwritten by upgrades
# Commands for getting system status Cmnd_Alias STATUS = /sbin/service --status-all, /sbin/initctl list # Allow zenoss user access to STATUS commands zenoss ALL= NOPASSWD: STATUS
5.15 Reserved Words in Zenoss Modeler
Here is a list of all reserved words except those that start with double-underscore: absolute_url absolute_url_path access_debug_info ac_inherited_permissions acquiredRolesAreUsedBy addDeviceGroup addLocalTemplate addLocation addManufacturer addRelation _addRole addSystem addToZenPack all_meta_types applyDataMap _applyProdContext applyProductContext aqBaseHasAttr availability bindTemplates bobobase_modification_time breadCrumbs buildMenus buildRelations cacheComponents cacheRRDValue callZenScreen _canCopy (continues on next page)
148 Chapter 5. General Zenpack Development ZenPackers Documentation
(continued from previous page) cb_dataItems cb_dataValid cb_isCopyable cb_isMoveable cb_userHasCopyOrMovePermission changeDeviceClass changeOwnership changePythonClass _checkId checkRelations checkRemotePerm checkValidId collectDevice compile COPY _create_componentSearch _createComponentSearchPathIndex createHierarchyObj creator custPropertyIds custPropertyMap dav__init dav__simpleifhandler dav__validate DELETE deleteDevice _deleteOwnershipAfterAdd deleteZenProperty _delOb _delObject _delProperty _delPropValue _delRoles device doCommandForTarget editableDeviceList eligiblePacks exportXml exportXmlHook exportXmlProperties exportXmlRelationships exportZProperties fetchRRDValue fetchRRDValues filterAutomaticCreation filtered_manage_options filtered_meta_types findChild findObject _findParentWithProperty followNextHopIps fullRRDPath get getAdministrativeRoles getAllPaths getAqChainForUserCommands getAqProperty (continues on next page)
5.15. Reserved Words in Zenoss Modeler 149 ZenPackers Documentation
(continued from previous page) getAttribute getAttributeNode getAttributes getAvailableTemplates getBreadCrumbName getBreadCrumbUrlPath getChildNodes getClassObject getCollectorName getCollectors getComments _getCopy getCreatedTimeString get_csrf_token _getCurrentUserName getDataForJSON getDataSourceOptions getDefaultGraphDefs getDescription getDevice getDeviceClassName getDeviceClassPath getDeviceComponents getDeviceComponentsNoIndexGen getDeviceGroupNames getDeviceIp getDeviceIpAddress getDeviceLink getDeviceMacaddress getDeviceName getDeviceUrl getDmd getDmdKey getDmdObj getDmdRoot getElementsByTagName getEventManager getEventSeverities getEventSeveritiesCount getEventSummary getExpandedLinks getFirstChild getGraphDef getGraphDefUrl getHierarchyObj getHTMLEventSummary getHWManufacturerName getHWProductClass getHWProductKey getHWProductName getHWSerialNumber getHWTag getIconPath _get_id getId getIdLink _getImportPaths (continues on next page)
150 Chapter 5. General Zenpack Development ZenPackers Documentation
(continued from previous page) getInstDescription getIpAddress getLastChange getLastChangeString getLastChild getLastPollSnmpUpTime getLinks get_local_roles get_local_roles_for_userid getLocationLink getLocationName getMacAddressCache getMacAddresses getMaintenanceWindows getManageInterface getManageIp getManageIpObj getMenus getModificationTimeString getMonitoredComponents getNagiosTemplate getName getNetworkRoot getNextLockableParent getNextSibling getNodeName getNodeType getNodeValue getNonLoopbackIpAddresses getNowString _getOb getObjByPath getOSManufacturerName getOSProductKey getOSProductName _getOtherExpandedLinks getOverriddenObjects getOwner getOwnerDocument getOwnerTuple getParentDeviceName getParentDeviceTitle getParentDeviceUrl getParentNode getPeerDeviceClassNames getPerformanceServer getPerformanceServerName getPhysicalPath getPingStatus getPingStatusNumber getPingStatusString getPreMWProductionState getPrettyLink getPreviousSibling getPrimaryDmdId getPrimaryId getPrimaryParent (continues on next page)
5.15. Reserved Words in Zenoss Modeler 151 ZenPackers Documentation
(continued from previous page) getPrimaryPath getPrimaryUrlPath getPriority getPriorityString getProdState getProdStateManager getProductionState getProductionStateString getProperty getPropertyType getRackSlot getRelationshipManagerId getRelationshipNames getRelationships _getRelName getReportableComponents _get_request_var_or_attr getRRDContextData getRRDDataPoint getRRDDataPoints _getRRDDataPointsGen getRRDFileName getRRDNames getRRDPaths getRRDSum getRRDTemplate getRRDTemplateByName getRRDTemplateName getRRDTemplates getRRDValue getRRDValues getSerialNumber getSiteManager getSnmpAgent getSnmpConnInfo getSnmpContact getSnmpLastCollection getSnmpLastCollectionString getSnmpLocation getSnmpStatus getSnmpStatusNumber getSnmpStatusString getSnmpSysName getSnmpV3EngineId _getSourceObjects getStatus getStatusCssClass getStatusImgSrc getStatusString getSubComponentsNoIndexGen getSubObjects getSystemNames getSystemNamesString getTagName getTagNumber getThresholdInstances getThresholds (continues on next page)
152 Chapter 5. General Zenpack Development ZenPackers Documentation
(continued from previous page) getTreeItems getUnusedId getUrlForUserCommands getUserCommand getUserCommandEnvironment getUserCommandIds getUserCommands getUserCommandTargets getUUID get_valid_userids getWorstEventSeverity getWrappedOwner getXMLEdges getZ getZenRootNode getZopeObj hasChildNodes has_local_roles hasObject hasProperty _has_user_defined_role HEAD helpLink hostname http__etag http__parseMatchList http__processMatchHeaders http__refreshEtag _importObjectFromFile id index_object ipAddressAsInt _isBeingUsedAsAMethod _isDuplicateIp isLocal isLocalName isLockedFromDeletion isLockedFromUpdates isResultLockedFromDeletion isResultLockedFromUpdates isTempDevice isUnlocked items keys listDAVObjects list_imports LOCK lockFromDeletion lockFromUpdates lockStatus lockWarning lookupSchema makeLocalRRDTemplate manage manage_access manage_ackEvents manage_acquiredForm (continues on next page)
5.15. Reserved Words in Zenoss Modeler 153 ZenPackers Documentation
(continued from previous page) manage_acquiredPermissions manage_addAdministrativeRole manage_addItemsToZenMenu manage_addLocalRoles manage_addMaintenanceWindow manage_addProperty manage_addRelation manage_addUserCommand manage_addZenMenu manage_addZenMenuItem manage_afterAdd manage_afterClone manage_afterHistoryCopy manage_beforeDelete manage_beforeHistoryCopy manage_change_history manage_change_history_page manage_changeOwnershipType manage_changePermissions manage_changeProperties manage_changePropertyTypes manage_clone manage_CopyContainerAllItems manage_CopyContainerFirstItem manage_copyObjects manage_copyright manage_cutObjects manage_DAVget manage_defined_roles manage_deleteAdministrativeRole manage_deleteComponent manage_deleteEvents manage_deleteMaintenanceWindow manage_deleteObjects manage_deleteUserCommand manage_deleteZenMenu manage_deleteZenMenuItem manage_delLocalRoles manage_delObjects manage_delProperties manage_doUserCommand manage_editAdministrativeRoles manage_editDevice _manage_editedDialog manage_editedDialog manage_editLocalRoles manage_editProperties manage_editRoles manage_editUserCommand manage_exportObject manage_fixupOwnershipAfterAdd manage_form_title manage_FTPlist manage_FTPstat manage_getPermissionMapping manage_getUserRolesAndPermissions manage_hasId (continues on next page)
154 Chapter 5. General Zenpack Development ZenPackers Documentation
(continued from previous page) manage_historicalComparison manage_historyCompare _manage_historyComparePage manage_historyCopy manage_importExportForm manage_importObject manage_index_main manageIpVersion manage_linkObjects manage_listAdministrativeRoles manage_listLocalRoles manage_main manage_menu manage_owner manage_page_footer manage_page_header manage_page_style.css manage_pasteObjects manage_permission manage_permissionForm manage_propertiesForm manage_propertyTypeForm manage_removeRelation manage_renameForm manage_renameObject manage_renameObjects manage_reportUserPermissions manage_role manage_roleForm manage_saveMenuItemOrdering manage_setLocalRoles manage_setPermissionMapping manage_snmpCommunity manage_tabs manage_takeOwnership manage_top_frame manage_undeleteEvents manage_UndoForm manage_undo_transactions manage_unlinkObjects manage_workspace manage_zmi_logout manage_zmi_prefs MKCOL monitorDevice monitored MOVE moveMeBetweenRels moveObject name _normal_manage_access _notifyOfCopyTo objectIds objectIds_d objectItems objectItems_d objectMap (continues on next page)
5.15. Reserved Words in Zenoss Modeler 155 ZenPackers Documentation
(continued from previous page) objectMap_d objectValues objectValues_d _onlystars OPTIONS _organizerInfo osProcessClassMatchData owner_info _p_activate pastSnmpMaxFailures path _p_deactivate _p_delattr permission_settings permissionsOfRole _p_getattr pingCommand _p_invalidate possible_permissions _postCopy prepId primaryAq primarySortKey propdict propertyDescription propertyIds propertyItems propertyLabel propertyMap _propertyMap propertyValues PROPFIND PROPPATCH _p_setattr pushConfig PUT _redirectToEventConsole redirectToUserCommands reindex_all removeLocalRRDTemplate removeRelation removeZDeviceTemplates rename renameDevice renameDeviceInPerformance resetProductionState restoreCurrentProdStates restrictedTraverse rolesOfPermission rrdPath _sanitizeIPaddress saveCurrentProdStates saveCustProperties saveZenProperties _selectedTabName sendEventWhenBlocked sendEventWhenResultBlocked (continues on next page)
156 Chapter 5. General Zenpack Development ZenPackers Documentation
(continued from previous page) setAdminLocalRoles setAqProperty setCollector setComments setDescription setEventSeverities setGroups setHWProduct setHWProductKey setHWSerialNumber setHWTag _setId setIpAddress setLastChange setLastPollSnmpUpTime setLocation setManageIp setName _setOb _setObject setOSProduct setOSProductKey setPerformanceMonitor setPreMWProductionState setPriority setProdState setProductInfo setProductionState _setProductionState _setProperty _setPropValue setRackSlot _setRelations _setRoles setSendEventWhenBlockedFlag setSerialNumber setSiteManager setSnmpAgent setSnmpContact setSnmpLastCollection setSnmpLocation setSnmpSysName setSnmpV3EngineId setSystems setTagNumber setTerminalServer setTitle setWorstEventSeverity setZenProperty snmpAgeCheck snmpIgnore snmpMonitorDevice snmpwalkPrefix _subobject_permissions superValues sysUpTime tabs_path_default (continues on next page)
5.15. Reserved Words in Zenoss Modeler 157 ZenPackers Documentation
(continued from previous page) tabs_path_info this title_and_id title_or_id titleOrId todayDate tpURL tpValues TRACE traceRoute tracerouteCommand undoable_transactions unindex_ips unindex_object unlock UNLOCK unrestrictedTraverse unsetSendEventWhenBlockedFlag updateDevice _updateProperty uptimeStr upToOrganizerBreadCrumbs urlLink userCanTakeOwnership userdefined_roles users_with_local_role uuid validate_roles validClipData valid_property_id valid_roles validRoles values _verifyObjectLink _verifyObjectPaste view viewName virtual_url_path visibleCustPropertyMap wl_clearLocks wl_delLock wl_getLock wl_hasLock wl_isLocked wl_lockItems wl_lockmapping wl_lockTokens wl_lockValues wl_setLock _wrapperCheck write yesterdayDate zenpathjoin zenpathsplit zenPropertyIds zenPropertyItems zenPropertyMap (continues on next page)
158 Chapter 5. General Zenpack Development ZenPackers Documentation
(continued from previous page) zenPropertyOptions zenPropertyPath zenPropertyString zenPropIsPassword zenScreenUrl zentinelTabs zmanage_addProperty zmanage_delObjects zmanage_delProperties zmanage_editProperties zmanage_exportObject zmanage_importObject zmanage_importObjects zope_quick_start
5.16 Solr Catalogs (ModelIndex) Review
5.16.1 What is Solr? http://lucene.apache.org/solr/ Solr is the popular, blazing-fast, open source enterprise search platform built on Apache Lucene™. Solr is highly reliable, scalable and fault tolerant, providing distributed indexing, replication and load-balanced query- ing, automated failover and recovery, centralized configuration and more. Solr powers the search and navigation features of many of the world’s largest internet sites.
5.16.2 What do we call it?
• Solr: – Zenoss.resmgr/Infrastructure/Solr service in Control Center – Show Solr UI • modelindex: – Python library: https://github.com/zenoss/modelindex • model_catalog: – The Solr “collection” in which all of this is stored – Currently the only Solr collection we use • ModelCatalog: – Products.Zuul.catalog.model_catalog – Provides an API to index, unindex, and search Zenoss objects using modelindex – Adds ZODB transactional layer via two-phase commit • ModelCatalogTool: – Products.Zuul.catalog.model_catalog_tool – Provides an interface similar to ICatalogTool (IModelCatalogTool)
5.16. Solr Catalogs (ModelIndex) Review 159 ZenPackers Documentation
5.16.3 What catalogs are moved to Solr?
• global_catalog (ICatalogTool still works) • deviceSearch (via LegacyCatalogAdapter) • layer2_catalog (via LegacyCatalogAdapter) • layer3_catalog (via LegacyCatalogAdapter)
5.16.4 What is LegacyCatalogAdapter?
• A ZODB “SimpleItem” that behaves like ZCatalog, but uses ModelCatalog.
5.16.5 What is ZenPackLib going to do?
• Move all “index_scope: global” catalogs into Solr using LegacyCatalogAdapter. • Migrate existing global catalogs to Solr? Maybe, but when? • Backwards compatibility can be maintained. • Going into ZenPackLib 2.1.0.
5.16.6 Other
• Some things that were indexed via obj.index_object() now require notify(IndexingEvent(object))
5.16.7 Performance
• Queries in general have a much more stable response time. • “Hot” queries are intrinsically slower due to networking overhead to Solr. • There are effectively no “cold” queries.
6.1.8 How is this different from CatalogService?
• Same in respect to performance trade-offs: Reliable response times, but often slower. • CatalogService uses Lucene. ModelCatalog uses Solr. • Think of Solr as a clustered (distributed) Lucene. • Removes one of the remaining single points of failure. • Enables moving to a future of active/active HA. • ModelCatalog is in the Core platform. CatalogService is only in RM/UCS-PM.
5.16.9 Show LegacyCatalogAdapter usage in zendmd.
160 Chapter 5. General Zenpack Development CHAPTER 6
Special Zenpack Development Topics
6.1 Analytics Bundle Topics
6.1.1 Introduction and General Notes
The following documents outline how Analytics support is built into our Zenpacks. If you see any errors, please contribute to these docs as needed.
Warning: The Analytics Server must have a valid DNS resolution of the Zenoss system that it collects from. This means that either 1) the DNS servers know about your Zenoss system or 2) the /etc/hosts on Analytics has an entry for your Zenoss system. Without this host resolution, collection will not take place.
ZenETL
Issues related to troubleshooting ETL and expediting model load and aggregation should be directed to: • https://github.com/zenoss/ZenPacks.zenoss.psSelfMonitoring Specifically the “Additional Analytics related scripts provided by the zenpack” section. To install ZenETL, you must find the correct version for your version of Zenoss: • Always use the latest version of the Analytics Server • ZenETL version must match your version of Analytics Server! • You will see the server RPM and Zenoss ZenPack egg in the folder: – http://artifacts.zenoss.loc/releases/X.Y.Z/ga/analytics/
* Ex: http://artifacts.zenoss.loc/releases/5.0.0/analytics/ca1/ – grab the egg and install that on your Zenoss system ZenPacks.zenoss.ZenETL-X.Y.Z.a.b.cccc-py2.7.egg – Replace the numerical values: X.Y.Z and a.b.cccc
161 ZenPackers Documentation
Analytics Server
We briefly mention that TCP network connectivity must be allowed TO the Analytics server. The following table lists Analytics pieces that typically must communicate, and the default network ports that should be open for communication:
From To Default Port Numbers Analytics Analytics database server 3306 Analytics Resource Manager 8080 Resource Manager Analytics 7070 Analytics Web Users zenperfetl Analytics 7070 (on remote collectors) Analytics 7070
6.1.2 Analytics Aliases
Analytics relies on aliases for datapoints. Alias must conform to the following rules: • Aliases must be a single word • Aliases must be less than 30 characters • Aliases should be as descriptive as possible • Aliases should end with double-underscore and unit (see below) These aliases can be added easily in ZPL: sysUpTime: aliases: sys_uptime__secs:"100,/" cpuUtilization: aliases: cpu__pct: null datapoints: numOfCpus: aliases: cpu_socket__count:""
numOfCores: aliases: cpu_core__count:""
Non-ZPL aliases are added by modifying the objects/objects.xml files.
Note: Aliases can have an optional RPN operation to normalize data. You can see this in the first example for sysUpTime.
Alias Units
Make sure to use the following unit postfix conventions for aliases:
162 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
Unit Description pct percent (0-100) bytes bytes (base 1024) secs seconds persec (something) per second bytessec bytes per second (none) where the unit is inherent (eg: load) count discrete object count (eg: packets) hz hertz severity Zenoss severity number (0-5) watts power amps electrical current volts electrical voltage celsius Temperature in celsius fahrenheit Temperature in fahrenheit pktssec packets per second
Instance Modifiers
Instance modifiers are used to modify the context or meaning of an alias name. See examples below as well.
Instance Modifier Name Description admin Administrative avg Average value current Current value min Minimum value max Maximum value oper Operational state in Input traffic out Output traffic
Reserved Aliases
It is important to make aliases as uniform as possible, so that report builders can compare similar quantities across multiple ZenPacks. Here is an incomplete set of reserved aliases for common items. Use these aliases when possible:
Alias Name Description cpu__pct CPU percentage use cpu_idle__pct CPU idle percent cpu_nice__pct CPU nice use cpu_system__pct CPU system use cpu_user__pct CPU user use cpu_wait__pct CPU wait time percent disk_read__bytessec Disk read rate disk_write__bytessec Disk write rate fan_oper_severity Fan operational state fanmodule_oper_severity Fan module operational state Continued on next page
6.1. Analytics Bundle Topics 163 ZenPackers Documentation
Table 1 – continued from previous page Alias Name Description fs__pct filesystem used percent fs_used__pct filesystem used percent (repeat) mem__pct Memory percentage use mem_used__pct Memory percentage use (repeat) mem_buffers__pct Memory percentage use mem_cached__pct Memory cache use mem_free__pct Memory free percent mem_swap_free__pct Memory swap free mem_swap_used__pct Memory swap used percent if_in__pct Interface input traffic percent if_out__pct Interface output traffic percent in__pct input RX % for interfaces out__pct output TX % for interfaces inputOctets__bytes interface RX traffic in bytes outputOcteds__bytes interface TX traffic in bytes power_oper_severity Power operational state bytes_free disk_free__bytes bytes_used disk_used__bytes cache_hit_ratio cache_hit_ratio__pct physicalreads physicalreads__bytes physicalwrites physicalwrites__bytes reads reads__count total_size total_size__bytes writes writes__count writetime writetime__sec
6.1.3 Setting up Zenoss Analytics
Setting up Zenoss Analytics on your Zenoss system has a few steps. Note that we don’t discuss how to setup the actual Analytics Server here.
How to point your Zenoss 5.X system to an existing analytics 5.0.X server
Setup the ZenETL ZenPack and Services
• wget ZenPacks.zenoss.ZenETL-5.0.2-py2.7.egg from your local repo • serviced service run zope zenpack-manager install ZenPacks.zenoss.ZenETL-5.0.2-py2.7.egg • serviced service stop Zenoss.resmgr • Wait until all services are fully stopped • serviced service start Zenoss.resmgr • Ensure that all AnalyticsETL services are started
Setup Analytics Server Information in RM
• Log into a non-admin account.
164 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
• Go to Resource Manager interface navigation bar, select Reports > Configuration. • For Internal and External url, fill in url for analytics and resource manager
Note: The Zenoss entry is usually auto-populated. If not, use the address of the RM endpoint, typically https://zenoss5.yoursite.com
• Example:
Analytics: http://analytics.example.com:7070 Zenoss: https://zenoss5.zenoss.example.com
• Fill in the User and Pass for the query service.
Note: For Query service, input zenoss username/password with Manager access or higher (don’t use the admin account)
How to point your Zenoss 4.2.X system to an existing analytics 5.0.X server
• Become the Zenoss user • wget ZenPacks.zenoss.ZenETL-5.0.2-py2.7.egg from your local repo • zenpack --install ZenPacks.zenoss.ZenETL-5.0.0.0.0-py2.7.egg • zenoss stop; zenoss start • Go to Resource Manager interface navigation bar, select Reports > Configuration. • For Internal and External url, fill in url for analytics and resource manager • Example:
Analytics: http://analytics.example.com:7070 Zenoss: http://zenoss4.example.com:8080
Note: The Zenoss entry is usually auto-populated
References: – https://docs.zenoss.com https://www.zenoss.com/sites/default/files/documentation/Zenoss_Analytics_ Release_Notes_r5.0.2_d1003.16.088.pdf
6.1.4 Loading the Bundle into Analytics
Once you have created the analytics bundle, you can load and test it as follows: • Log into Analytics UI as superuser • Go to Manage => Server Setting • Select Import • Deselect all options • Browse: Select your zip file • Make sure you deselect all options
6.1. Analytics Bundle Topics 165 ZenPackers Documentation
• Hit OK
6.1.5 Steps to create bundle with create-analytics-bundle script: http://zenpackers.readthedocs.io/en/latest/analytics/create-analytics-bundle.html 1. Setup your aliases in the ZP • ensure all aliases are unique per device and component 2. Setup your labels in each component:
_properties= BaseComponent._properties+( {'id':'path','label':'Full Path','type':'string','mode':''}, {'id':'replication','label':'Replication','type':'string','mode':''}, {'id':'did_status','label':'DID status','type':'string','mode':''}, )
• See: https://github.com/zenoss/ZenPacks.zenoss.SolarisMonitor/blob/develop/ZenPacks/zenoss/ SolarisMonitor/ClusterDID.py#L25 • In a ZenPackLib ZP, you simply set valid labels on your properties. 1. Model a device that has all possible components available.
Note: Note you must have a device fully modelled and with all the aliases in place to get the measures to come out!
1. You can also simplify creation of the bundle in the Makefile: https://github.com/zenoss/ZenPacks.zenoss.CiscoAPIC/blob/develop/GNUmakefile#L29 https://github.com/zenoss/ZenPacks.zenoss.Microsoft.HyperV/blob/develop/GNUmakefile https://github.com/zenoss/ZenPacks.zenoss.Microsoft.HyperV/blob/develop/get_aliases_dsv.xsl 2. In order to control what properties and aliases get created in the bundle, see the reportables information in Reportables: Modify what is Reported. 3. Run the create-analytics-bundle • Note: the resulting bundle only needs a fully modeled device to work. 4. Install ZenETL, and allow it to run for 24 to 48 hours • Ensure that all your tables (including raw_v2_*, hourly_*, daily_* tables) are in the reporting database on the analytics server. 1. Check that the Analytics server has your tables created 2. After editing, the .dsv file can be added to dmd using the chkaliases script, and from there exported to objects.xml, so you don’t have to edit it from the UI or by hand in XML. 1. Check results
6.1.6 Debug a Domain Created with a Tool or Script
This test will show if a schema file is correctly formatted and can be used later for Ad-Hoc view creation.
166 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
Assumptions
• You have a domain bundle (.zip) or a schema.xml.data from a domain • Your Zenoss system is configured to talk to Analytics • You have data tables in the Analytics server for your domain
Debugging
• First extract the domain file and copy schema.xml.data to /tmp/schema.xml Find it in your analytics bundle files (approximate):
./resources/public/XYZ_ZenPack/XYZ_Domain_files/schema.xml.data
• Log into the analytics UI as superuser – Click on Domain -> Create – Fill in:
* Name (give it a dummy name) * Resource ID (Should auto populate with Name) * Data Source: /organizations '-> zenoss '-> Data_Sources '-> zenoss_reporting
– Click Upload – Browse: select your schema.xml file • You should see a window with any errors • Look for missing: – tables – fields – other errors
6.1.7 Creating Analytics Bundles Manually
Create your Aliases
First create the aliases for each datapoint in your zenpack. They should conform to the naming convention in: http: //goo.gl/WKOUKI * Alias length should be <= 32 For each subcomponent, join it to its parent component This will possibly do the trick: • dim_oracle_instance.oracle_instance_key -> dim_oracle_table_space.instance_key
6.1. Analytics Bundle Topics 167 ZenPackers Documentation
Configure Analytics in your Zenpack
This step requires you to setup the analytics data in the zenpack so that the metric data can be collected. • Install the ZenPacks.zenoss.ZenETL zenpack if not already • Restart services if needed • Go to Reports and add your Analytics server in the Configuration:
Internal URLS: Analytics: http://your-analytics.zenoss.lab:7070 Zenoss: Should be auto-populated with your Zenoss URL
External URLS: Analytics: http://your-analytics.zenoss.lab:7070 Zenoss: Should be auto-populated with your Zenoss URL
Create the Joins
Create the required joins between your data sources, metrics, and dimension tables. See Creating Joins in UI for details.
Create the Reports
• From Domain-Designer • Click on create -> Ad-Hoc Report • Change pulldown “Crosstab” to “Table”. You should see Groups now. – Add major headings to groups like device_name or component_name – Add metrics to rows
Save the Bundle
Save the data bundle
6.1.8 Creating Joins in UI
Before we start, let’s talk about the overall strategy: • Always use left outer joins • Do not join dim_device directly to component-level metrics • Joins are made in a hierarchical fashion to your metrics. This means that if your metric is device-level, you join it to the device. With component metrics, you join your component to the device, then join the metric to that component.
Device-Component Joins
Join dim_device to dim_component dimension tables on device key: device.device_key-> component.device_key
168 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
Component-SubComponent Joins
You’ll want to join your related components in some way. For each subcomponent, join it to its parent component. This will possibly do the trick: dim_oracle_instance.oracle_instance_key-> dim_oracle_table_space.instance_key
Metric-Date Joins
For each metric in your domain, join its date_key to dim_date.date_key: metric.date_key-> dim_date.date_key
Device Metric Joins
If you have device-level metrics, join them to device_key: device metric.device_key-> dim_device.device_key
Metric-Component Joins
For each of your metrics you’ll want to join them to the right device. You’ll need the dim_device table: metric.device_key-> dim_device.device_key metric.date_key-> dim_date.date_key
Metric-Component Joins
For each metric in your domain, join its component key to your component’s key: instance_metric.component_key-> dim_oracle_instance.oracle_instance_key tablespace_metric.component_key-> dim_oracle_table_space.oracle_table_space_key
You may have to check exactly what keys match up: Example:
SELECT component_key FROM reporting.daily_cache_hit_ratio__pct A inner join reporting.dim_oracle_instance B where A.component_key=B.oracle_instance_key;
6.1.9 Kicking off Quick Aggregation Jobs
Assumptions:
• There are raw_v2_* tables containing yesterday’s data. • There are no records in meta_batch where batch_status is not COMPLETED or FAILED with a timestamp less than today. The aggregation won’t run if it finds incomplete batches.
6.1. Analytics Bundle Topics 169 ZenPackers Documentation
Database Adjustment to Start Aggregation
This involves 3 database-modifying steps: 1. Make sure that Analytics is running. 2. reset yesterday’s aggregation status (‘reporting’ database):
update meta_agg_daily set status='UNRUN' where date_key= to_days(now())-1;
• If you don’t have a record, you can insert one:
insert into meta_agg_daily (date_key) values (to_days(now())-1);
3. Kick off the aggregation job (‘reporting’ database):
update QRTZ_TRIGGERS set next_fire_time= now() where trigger_name="AggregateDataTrigger";
Using the Tools from ZenPacks.zenoss.psSelfMonitoring
• Clone the repo from https://github.com/zenoss/ZenPacks.zenoss.psSelfMonitoring • On the Analytics server, copy the files as zenoss user:
git clone [email protected]:zenoss/ZenPacks.zenoss.psSelfMonitoring.git cd ZenPacks.zenoss.psSelfMonitoring/ZenPacks/zenoss/psSelfMonitoring cp scripts/* /opt/zenoss_analytics/bin
• Ensure you have /opt/zenoss_analytics/bin in your $PATH env variable. Once the files are in place you can use them. Of note are: • set_analytics_env.sh This one sets up all the DB settings for other scripts. Its worth looking at. In fact you can use a similar script to quickly enter the MySql db:
#!/bin/sh # Name: enter_mysql.sh # Function: Enter the analytics database # Sets environment variables based on analytics configuration
if [[ -x $(which mysql) ]]; then MYSQL_PATH=$(which mysql) else echo "No apparent MySQL client exists! exiting..." exit1 fi
CONFIG_FILE='/etc/zenoss_analytics/zenoss_analytics.conf' ETLHOSTNAME=`grep "^etl.jdbc.hostname" $CONFIG_FILE | cut -d"=" -f2` ETLPORT=`grep "^etl.jdbc.port" $CONFIG_FILE | cut -d"=" -f2` DBUSER=`grep "^etl.jdbc.username" $CONFIG_FILE | cut -d"=" -f2` DBPASS=`grep "^etl.jdbc.password" $CONFIG_FILE | cut -d"=" -f2` (continues on next page)
170 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
(continued from previous page) ETLDB=`grep "^etl.jdbc.dbname" $CONFIG_FILE | cut -d"=" -f2` $MYSQL_PATH -u $DBUSER --password=$DBPASS -h $ETLHOSTNAME-P$ETLPORT $ETLDB-
˓→A
• start_model_load_now.sh If you haven’t yet loaded your model onto the server, this will expedite. Sets triggering to 10 minutes in the future. • start_aggregation_now.sh If you are ready to aggregate and have setup batch jobs for it, this will speed up that process by setting the trigger time to 10 minutes. • remove_extractor.sh This one can remove some of your unwanted batches The others we list without comment: • show_aggregation_detail.sh • show_aggregation_config.sh • show_aggregation_highlevel.sh • show_analytics_task_status.sh • show_batch_status.sh • show_processlist.sh • show_queries.sh • show_settings.sh • monitor_aggregation_counts.sh • monitor_analytics_counts.sh • monitor_db_connection_counts.sh • monitor_etl_batch_states.sh • monitor_etl_performance.sh • remove_metric.sh • remove_zenoss_instance.sh • retry_batch.sh
Dealing with Time: Forcing Aggregation
Make sure your current time is correct. Analytics is very sensitive to time in terms of aggregation triggering. Things to remember: • If you started your Analytics server from a snapshot, you should IMMEDIATELY reboot that snapshot to ensure all services are running normally. • Check that NTP is running • Check the current date and ensure it is correct. • Analytics will only create daily and hourly tables from yesterday’s data! – One dirty trick you can do to force faster aggregation is to set the system time to tomorrow’s date, making Analytics think the data you currently have is from yesterday. – Once aggregation is finished to your satisfaction, start/restart NTP.
6.1. Analytics Bundle Topics 171 ZenPackers Documentation
6.1.10 Reportables: Modify what is Reported
Reportables allow you to control what data Analytics will extract from your zenpack. To invoke reportables you need to create a reportable.py in your zenpack toplevel directory. You can see an example of this in: • ZenPacks.zenoss.OpenStackInfrastructure
Things to consider about Reportables:
1. Reportables are reporting on properties that come from metric data, which bogs down zenmodeletl: Sometimes this is unavoidable, but sometimes the property isn’t really that important, or worth the extra load on zenmodeletl, so it can be removed to save load. 2. Datasource configs are using information from metric data, which bogs down zenperfetl: This is much more likely to be something that is a bug in the zenpack in question, as such values probably are not being propagated to the collector properly anyway, since there would be no invalidations associated with them changing — it is, at best, a waste of time, and at worst, a bug that will bite us in subtle ways later.
Setup
Reportables are configured by creating a reportable.py which inherits from either: • ZenPacks.zenoss.ZenETL.reportable.Reportable • ZenPacks.zenoss.ZenETL.reportable.BaseReportable In addition you will probably need to import a Factory from: • ZenPacks.zenoss.ZenETL.reportable.BaseReportableFactory Once you have defined a reportable class for your device or component, you will need to register it in configure.zcml or your equivalent.
Note: Many ZenPacks have a basereportable.py that defines a normalized set of properties. This basereportable is then imported into reportable.py. The purpose of this is to create a simpler reportable.py and provide a more appropriate default set of reportables.
Examples
Below is a simple self-contained example which modifies a PortChannel: Note the following: 1. entity_class_name must be defined for each property you modify and it must be less than 28 characters long. Exceeding this creates dimension (dim_*) table names that cause problems in Analytics. 2. reportProperties() is the main class method that provides the change. import logging log= logging.getLogger("zen.CiscoMonitor.etl") from zope.interface import implements from zope.component import adapts from ZenPacks.zenoss.CiscoMonitor.PortChannel import PortChannel from Products.Zuul.interfaces import ( (continues on next page)
172 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
(continued from previous page) IReportable ) from ZenPacks.zenoss.ZenETL.reportable import ( MARKER_LENGTH, BaseReportableFactory, IpInterfaceReportable, Reportable ) class CiscoMonitorBaseRelReportable(Reportable): implements(IReportable)
def __init__(self, fromObject, toObject): super(CiscoMonitorBaseRelReportable, self).__init__() self.fromObject= fromObject self.toObject= toObject
@property def id(self): return "%s__%s"%(self.fromObject.id, self.toObject.id)
@property def uid(self): return "%s__%s"%(self.fromObject.uid, self.toObject.uid)
@property def sid(self): return "%s__%s"% (IReportable(self.fromObject).sid, IReportable(self.toObject).
˓→sid) class PortChannelReportableFactory(BaseReportableFactory): adapts(PortChannel)
def exports(self): # The interface as Interface yield IpInterfaceReportable(self.context)
for child in self.context.member_interfaces(): yield PortChannelToMemberInterfaceReportable(self.context, child) class PortChannelToMemberInterfaceReportable(CiscoMonitorBaseRelReportable): @property def entity_class_name(self): return 'cisco_portchannel_interfaces'
def reportProperties(self): return [ ('portchannel_ip_interface_key','reference', IReportable(self.
˓→fromObject).sid, MARKER_LENGTH), ('ip_interface_key','reference', IReportable(self.toObject).sid, MARKER_
˓→LENGTH), ]
6.1. Analytics Bundle Topics 173 ZenPackers Documentation
This is the associated ZCML entry:
6.1.11 Analytics troubleshooting guide
Based on docs by William Gerber.
What problem do you have?
• I get no reports.
I get no reports
Are your batches working? • Yes: Can you log into Zenoss Analytics at all? • No: My batches are not working
My batches are not working
See at what state they are stuck in at the web UI: • Unstarted: Zenetl or zenperfetl daemon fails • Extracting: Zenetl or zenperfetl daemon fails • Staging: Is there model data in staging? • Failed: ????
Note: Bi-Directional DNS Resolution Make sure your DNS on both the RM and Analytics side can fully resolve each other. If required, add lines in /etc/hosts on the Analytics server and RM so that they can resolve whatever configuration URLs you have set. Once you set /etc/hosts on Zenoss, make sure to install, configure, and restart dnsmasq so that containers can get the right DNS data.
Can you log into Zenoss Analytics at all?
Zenetl or zenperfetl daemon fails
Have you configured the 4 URLS for Analytics through the WEB UI? If no, then go configure them.
174 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
Update each Hub and Collector so it gets the proper config properties. Check to see if the collector can hit the Analytics server. From the command prompt do the following: wget http://analytics_server_url/reporting Did you get a text file without error? • Yes: Is there enough space on the hard drives to write files to? • No: Check analytics availability from collector
Is there enough space on the hard drives to write files to?
If no, then get more space or edit the config file for the daemon to point to a drive with more space. Is the log file complaining that it can’t hit the Hub? - Yes: Check daemon connection with the HUB - No: Does daemon try to upload a file at all?
Check analytics availability from collector
Double check that the Analytics server is running by opening a browser to the URL http://analytics_server_url/reporting. If you got a text file, then your Analytics server is not reachable from the collector and you should check firewalls, proxies, and ports. If you got an error, you should figure out why your Analytics server is not running.
Is there model data in staging?
• If debugging just tail ALL the logs:
cd /opt/zenoss_analytics/logs tail -f *.log *.out --or-- tail -F -n100 *.out *.log | grep -iE '(failed|error)'
• No: Check triggers – Also check the log /opt/zenoss_analytics/catalina.out – Ensure Analytics can resolve (via DNS or /etc/hosts) the Zenoss server! • Yes: Has it been in staging over 4 hours? – No: We only check for Model batches ready to load into DB once every 4 hours. – Yes: Check triggers
Check triggers
Log into the reporting database and run this query a few times over a couple of minutes:
select trigger_name, from_unixtime(next_fire_time/1000) as next_time, from_unixtime(prev_fire_time/1000) as last_time, trigger_state from reporting.QRTZ_TRIGGERS;
6.1. Analytics Bundle Topics 175 ZenPackers Documentation
Are some of the triggers state stuck in blocked? If yes, then something caused one or more of the triggers to fail repeatedly and the system finally gave up. Fix the root cause and then update the QRTZ_TRIGGERS table to a state of WAITING and bounce the zenoss_analytics service. Go to the Analytics application server and look through the logs:
/opt/zenoss_analytics/logs/zenoss_analytics.log: file for glaring errors.
**or**
/opt/zenoss_analytics/webapps/reports/WEB-INF/logs/jasperserver.log
After Importing Bundle, I Get this Error when I edit my bundle
• Cannot get metadata for /organizations/zenoss/Data_Sources/zenoss_reporting: com.mysql.jdbc.Driver This means you imported your bundle without turning off the checkboxes on the import. The Zenoss_reporting datasource is now corrupted. To fix this, you must restore the zenoss_reporting datasource from a working install, e.g. export it and reimport it into your broken install, or revert your Analytics server to a clean state. This can be done in a number of ways: 1. Revert your Analytics instance to the default state: You’ll lose your dim tables though. 2. This way restores zenoss_reporting without destroying the Reporting db. – Stop Analytics – drop zenoss_analytics db from mysql – As zenoss, run update_db.py – Start Analytics
Check daemon connection with the HUB
–tbd–
Does daemon try to upload a file at all?
–tbd–
6.1.12 Accessing Mysql in Analytics
Quick Access to the Mysql DB
Use this script to get quick access to the Mysql DB:
#!/bin/sh # # Quickly enter MySQL # Sets environment variables based on config
if [[ -x $(which mysql) ]]; then (continues on next page)
176 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
(continued from previous page) MYSQL_PATH=$(which mysql) else echo "No apparent MySQL client exists! exiting..." exit 1 fi
CONFIG_FILE='/etc/zenoss_analytics/zenoss_analytics.conf' ETLHOSTNAME=`grep "^etl.jdbc.hostname" $CONFIG_FILE | cut -d"=" -f2` ETLPORT=`grep "^etl.jdbc.port" $CONFIG_FILE | cut -d"=" -f2` DBUSER=`grep "^etl.jdbc.username" $CONFIG_FILE | cut -d"=" -f2` DBPASS=`grep "^etl.jdbc.password" $CONFIG_FILE | cut -d"=" -f2` ETLDB=`grep "^etl.jdbc.dbname" $CONFIG_FILE | cut -d"=" -f2`
# Start mysql in this way to get to the right database: $MYSQL_PATH -u $DBUSER --password=$DBPASS \ -h $ETLHOSTNAME -P $ETLPORT $ETLDB --skip-column-names -A
Dealing with Job Triggers
• To see the Cronjobs for triggers from inside mysql:
select * from QRTZ_CRON_TRIGGERS;
• Look at the Triggers:
select * from QRTZ_TRIGGERS; select trigger_name, trigger_state from QRTZ_TRIGGERS;
• Remove all your batches from DB directly:
select extractor_key from meta_extractor where extractor_fqdn='mine.zenoss.loc';
# find the numbers it outputs : Say (20,22,31) delete from meta_batch where extractor_key in (20,22,31);
6.1.13 Tips and Tricks
Disable Session Timeout
By default timeout value is set to 20 minutes. To make sessions endless you can decrease session-timeout property in /opt/zenoss_analytics/webapps/reports/WEB-INF/web.xml file.
0
List Registered RM Systems in Analytics curl http://analytics_server.zenoss.loc:7070/etl/ | jq
6.1. Analytics Bundle Topics 177 ZenPackers Documentation
This will show you the UUIDs for various RM systems communicating with the Analytics server. If you know the UUID, you can specify it on the command line: curl http://solutions-analytics.zenoss.loc:7070/etl/ | jq eg: curl http://solutions-analytics.zenoss.loc:7070/etl/8054d966-97ec-11e7-a03a-
˓→0242ac110017| jq
Remove All Batches
From the reporting database: delete from meta_batch; delete from meta_batch_file;
Then use the script: show_batch_status.sh
6.1.14 References
Docs https://docs.google.com/document/d/1I6QeEjzKTyg1d-SAFY5iF_YHlE582_UfGutROvMjpkM/edit#
Videos
We have some local MP4 vids at: https://zenpackers.zenoss.loc/videos/
6.2 Catalog Tool
For fast searching and discovery, Zenoss uses Zope’s Catalog Service. Most catalogs must be created by indexing zProperties, although see the note later about ComponentBase searches.
6.2.1 Debugging Catalog Indexing
This is done in exactly the same way as Debugging Impact.
6.2.2 Setting Index Properties
In order to put a ZenPack property into the catalog you must set its index_type to field. In ZenPackLib it would be done as follows (eg: macAddress):
178 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
classes: !ZenPackSpec DEFAULTS: base: [zenpacklib.Component] ControlCenter: base: [zenpacklib.Device] meta_type: ZenossControlCenter Host: meta_type: ZenossControlCenterHost properties: ipaddr: label: IP Address macAddress: label: Mac Address index_type: field index_scope: device
Notice that index_scope is set to “device” by default, and is not needed. It can also be set to global, which will allow it to be searched globally. When a catalog’s scope is set to global, the actual catalog object will be created on dmd.Devices. The name of the catalog will be the fully-qualified module name of the class under which the property with index_type and index_scope are set with a “Search suffix”. So let’s say you had a ZenPack named ZenPacks.example.Test that defined a class named Widget. The catalog would be dmd.Devices.ZenPacks_example_Test_WidgetSearch.
Note: There is a default device-scoped catalog created by any ZenPack that uses zenpacklib to create a “Component” class. That’s the ComponentBase catalog. This catalog does not require any index_type property to be set. It’s created because the standard componentSearch catalog doesn’t have an id index, and some of the relationship helper methods require an id index to more efficiently find components. It’s normal to see a ComponentBaseSearch on any device associated with a zenpacklib ZenPack.
Note: Removing a ZenPack would remove any associated catalog items.
6.2.3 Examples of Catalog Use
Here is an example of how you use the Catalog:
############################################################################ # Copyright (C) Zenoss, Inc. 2014, all rights reserved. ############################################################################
# Zenoss Imports from Products.Zuul.interfaces import ICatalogTool
# ZenPack Imports from ..APIC import APIC def cisco_apic_devs(self): """Return associated Cisco APIC instances."""
catalog= ICatalogTool(self.getDmdRoot('Devices')) apic_devices=[] (continues on next page)
6.2. Catalog Tool 179 ZenPackers Documentation
(continued from previous page)
for apic_result in catalog.search(types=[APIC]): try: apic= apic_result.getObject() apic_devices.append(apic) except Exception: continue
return apic_devices
Here is a self contained python search:
#!/usr/bin/env zendmd # # Print details of all WebTx datasources. from Products.Zuul.interfaces import ICatalogTool from ZenPacks.zenoss.ZenWebTx.datasources.WebTxDataSource import WebTxDataSource catalog= ICatalogTool(dmd.Devices) for result in catalog.search(WebTxDataSource): datasource= result.getObject() template= datasource.rrdTemplate() label="{} - {}".format(template.getUIPath(), datasource.id) print "--[ {} ]{}".format(label,"-" * (73- len(label))) print "Initial URL: {}".format(datasource.initialURL) print "Initial User: {}".format(datasource.initialUser) print "Timeout: {}".format(datasource.webTxTimeout) print "Interval: {}".format(datasource.cycletime)
if datasource.commandTemplate: print "Twill Script:" print print datasource.commandTemplate else: print "Twill Script: n/a"
print
Here is class method:
from zope.event import notify from Products.Zuul.interfaces import ICatalogTool
@classmethod def reindex_implementation_components(cls, dmd): device_class= dmd.Devices.getOrganizer('/Network/OpenvSwitch') results= ICatalogTool(device_class).search( ('ZenPacks.zenoss.OpenvSwitch.Port.Port', 'ZenPacks.zenoss.OpenvSwitch.Interface.Interface',) )
for brain in results: obj= brain.getObject() obj.index_object() notify(IndexingEvent(obj))
180 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
Here is a (truncated) class method example from ZenPackLib:
def remove(self, app, leaveObjects=False): from Products.Zuul.interfaces import ICatalogTool if not leaveObjects: dc= app.Devices for catalog in self.GLOBAL_CATALOGS: catObj= getattr(dc, catalog, None) if catObj: LOG.info('Removing Catalog %s'% catalog) dc._delObject(catalog)
if self.NEW_COMPONENT_TYPES: LOG.info('Removing %s components'% self.id) cat= ICatalogTool(app.zport.dmd) for brain in cat.search(types=self.NEW_COMPONENT_TYPES): component= brain.getObject() component.getPrimaryParent()._delObject(component.id)
# Remove our Device relations additions. from Products.ZenUtils.Utils import importClass for device_module_id in self.NEW_RELATIONS: Device= importClass(device_module_id) Device._relations= tuple([x for x in Device._relations if x[0] not in self.NEW_RELATIONS[device_
˓→module_id]])
LOG.info('Removing %s relationships from existing devices.'% self.id) self._buildDeviceRelations()
6.2.4 References
• http://docs.zope.org/zope2/zope2book/SearchingZCatalog.html • https://pypi.python.org/pypi/Products.AdvancedQuery • http://community.zenoss.org/docs/DOC-2535 (Removes items from the catalog) • See AdvancedQuery.html in the doc subfolder. • http://wiki.zenoss.org/ZenDMD_Tip_-_Refresh_DeviceSearch_Catalog
6.3 Migration Guide for Slackers
6.3.1 Introduction
Zenpacks that get upgraded often need some sort of migration of configuration data from the older version to the new. This is achieved via migration scripts that are described in this doc.
Note: • zSetZenProperty(): is safest way to set a zproperty. . . • getTitleOrId(): Best way to get name of device. • getOrganizerName(): is best way to get Device Class name.
6.3. Migration Guide for Slackers 181 ZenPackers Documentation
Warning: Never ever set a zProperty directly on a device or device class. Always use setZenProperty() which takes care of ZoDB bookkeeping items.
General Information on Template Migration
Any template migration might do the following things for XYZ ZenPack: • Identify the old zenpack objects, bound to a device. • Find all device classes and devices where “XYZ” template is bound. • Look for zDeviceTemplates that are overridden, you don’t want to migrate templates that a user has customized, they probably want to keep their changes. • Extract the old information from those templates – Leave them bound. . . for continuity – Enable our modeler plugin for device class or device (zCollectorPlugins gets invoked, gets added to list). – Populate zProperties – Create new Instance components from the old templates and populate the new. • Leave the old ZP objects alone: Provide documentation on how to un-bind the old templates. • XYZ Template needs to be bound to device
General Information on Relationship Migration
In general, we try to avoid changing existing relationships on a device class because that requires users to completely remove those devices, upgrade, and then re-model their devices. We do this because existing devices have their relations embedded in ZODB and can’t easily be changed without significant database surgery. On the other hand, adding new relations is considered acceptable behavior. Once you add those relationships on a device you must ensure that you execute: device.buildRelations()
6.3.2 Examples
We assume this basic set of knowledge for this article: • Zenoss ZenPack Development • Python 2.7 • Familiarity with ZenPack development and Python coding. • We work from the base of ZP_DIR. For NetBotz for example:
export ZP_DIR_TOP=$ZENHOME/ZenPacks/ZenPacks.training.NetBotz export ZP_DIR=$ZP_DIR_TOP/ZenPacks/training/NetBotz
Relative to this folder all migration scripts will reside in:
182 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
$ZP_DIR/migrate/
In the first example, we have a simple migration of a zProperty (zOraclePassword) to the new ZP version. In the second example we will see a much more significant migrate() implementation.
6.3.3 Migrating Properties
In this example DatabaseMonitor (OracleDB) is a device that inherits its /Device base from the parent server, be it Linux, AIX, Solaris, or some “other” operating system. This means that it needs to be able to patch itself underneath the device tree of that server target type and not have a stand-alone device root. The basic code strategy is to create a class that has a “migrate” method. The migrate() method gets called automatically by the ZenPack Installer. The first example is a very old version that does nothing but migrate the password from one version to the next. We only need to set a single zProperty value. In order to do so, we use: MigratePassword.py:
############################################################################## # Copyright (C) Zenoss, Inc. 2009, all rights reserved. ##############################################################################
import logging log= logging.getLogger("zen.migrate")
import Globals from Products.ZenModel.migrate.Migrate import Version from Products.ZenModel.ZenPack import ZenPackMigration from Products.ZenModel.migrate.MigrateUtils import migratePropertyType
class MigratePassword(ZenPackMigration): version= Version(2,2,0)
def migrate(self, dmd): log.info("Migrating zOraclePassword") migratePropertyType("zOraclePassword", dmd,"string")
MigratePassword()
Notice that there is a “version” line just after the class definition. This version must identify the new version number of the ZP being migrated to. The migrate() method is very simple; in fact just one line that uses the migratePropertyType() method to migrate the zOraclePassword.
6.3.4 Migration of Relations
Here we added a ThinPool relationship for device in ControlCenter. Notice the cc_device.buildRelations():
import logging log= logging.getLogger("zen.migrate")
from Acquisition import aq_base from Products.ZenModel.migrate.Migrate import Version from Products.ZenModel.ZenPack import ZenPackMigration from Products.Zuul.utils import safe_hasattr (continues on next page)
6.3. Migration Guide for Slackers 183 ZenPackers Documentation
(continued from previous page)
class AddThinPoolsRel(ZenPackMigration): version= Version(1,2,2)
def migrate(self, pack): dmd= pack.dmd
if dmd.Devices._getOb("ControlCenter", None) is not None: for cc_device in dmd.Devices.ControlCenter.getSubDevicesGen(): if not safe_hasattr(aq_base(cc_device),'thinPools'): log.info("Updating missing relationship for ControlCenter device
˓→{0}" .format(cc_device.id)) cc_device.buildRelations()
6.3.5 Migration of Modeling Templates
This section is specific to modeling templates. The basic idea behind this migration scenario is as follows: • Identify the old zenpack objects, bound to a device. – Find all device classes and devices where “Oracle” modeling template is bound. – Look for all zDeviceTemplates that are overridden • Extract the old information from those templates – Leave them bound, for continuity’s sake. – Enable our modeler plugin for device class or device (uses zCollectorPlugins) • Populate the New ZP Data Structures – Create new Instance components from the old templates – Populate the new instances or components with data – As always, test your migration script by installing the new ZP over the old. 4. Give Users Instructions on Removing Old Object Templates • Since you may have left the old ZP objects intact, provide documentation on how to un-bind the old templates.
Migration Example for DatabaseMonitor
This migration updates the older 2.X version to the 3.X version of the ZP, and transitions from a dedicated Device to a pure component model (Instance). You don’t need to worry about handling component binding because that is taken care of by the actual modeler. Here is the ~migration/AddInstances.py code:
1 ############################################################################
2 # Copyright (C) Zenoss, Inc. 2013, all rights reserved.
3 # File: ~migration/AddInstances.py
4 ############################################################################
5
6 import logging (continues on next page)
184 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
(continued from previous page)
7 log= logging.getLogger("zen.migrate")
8
9 from Products.ZenModel.DeviceClass import DeviceClass
10 from Products.ZenModel.migrate.Migrate import Version
11 from Products.ZenModel.ZenPack import ZenPackMigration
12
13 # You must have Oracle's modeling template bound for migration to work
14 TEMPLATE_NAME='Oracle'
15 MODELER_PLUGIN_NAME='zenoss.ojdbc.Instances'
16
17 def name_for_thing(thing):
18 ''' Helper function to provide the name of the Device or DeviceClass '''
19
20 if isinstance(thing, DeviceClass):
21 return thing.getOrganizerName()
22
23 return thing.titleOrId()
24
25 class AddInstances(ZenPackMigration):
26 '''
27 Main class that contains the migrate() method. Note version setting.
28 '''
29 version= Version(3,0,0)
30
31 def migrate(self, dmd):
32 '''
33 This is the main method. It searches for overridden objects (templates)
34 and then migrates the data to the new format or properties.
35 In this case bound objects get assigned the new modeler plugin.
36 '''
37 overridden_on= dmd.Devices.getOverriddenObjects(
38 'zDeviceTemplates', showDevices=True)
39
40 for thing in overridden_on:
41 if TEMPLATE_NAME in thing.zDeviceTemplates:
42 self.enable_plugin(thing)
43 self.populate_connection_strings(thing)
44
45 def enable_plugin(self, thing):
46 ''' Associate a collector plugin with the thing we have found.
47 zCollectorPlugins is used by ModelerService.createDeviceProxy()
48 to add associated (modeler) plugins to the list for self-discovery.
49 ModelerService.remote_getDeviceConfig() actually calls the modelers.
50 '''
51 current_plugins= thing.zCollectorPlugins
52 if MODELER_PLUGIN_NAME in current_plugins:
53 return
54
55 log.info(
56 "Adding %s modeler plugin to %s",
57 MODELER_PLUGIN_NAME, name_for_thing(thing))
58
59 current_plugins.append(MODELER_PLUGIN_NAME)
60 thing.setZenProperty('zCollectorPlugins', current_plugins)
61
62 def populate_connection_strings(self, thing):
63 ''' Just a helper method to collect data for this ZP ''' (continues on next page)
6.3. Migration Guide for Slackers 185 ZenPackers Documentation
(continued from previous page)
64 if thing.zOracleConnectionStrings:
65 return
66
67 connection_string=(
68 'jdbc:oracle:thin:'
69 '${here/zOracleUser}'
70 '/${here/zOraclePassword}'
71 '@${here/manageIp}'
72 ':${here/zOraclePort}'
73 ':${here/zOracleInstance}'
74 )
75
76 log.info(
77 "Setting zOracleConnectionStrings for %s",
78 name_for_thing(thing))
79
80 thing.setZenProperty('zOracleConnectionStrings', [connection_string])
81
82 AddInstances()
6.3.6 Pre-Install and Post-Install Migration Events
There are certain actions that need to happen before or after the installation process, due to different dependency requirements. We discuss both cases. In the file __init__.py we have an install method that looks like this:
def install(self, app):
self.pre_install(app) super(ZenPack, self).install(app) self.post_install(app)
The super method is responsible for calling all the migration scripts.
Pre-Installation and Monitoring Templates
This section is specific to monitoring (collection) templates. Monitoring templates get generated at install time, so it is important to have all the correct templates you need in the right place. It’s also important to not have any wrong templates.
Attention: This works because it’s ultimately the super() method that uses objects.xml to create the monitoring templates you’ve added to the ZenPack. So if you delete what exists before calling super, you’ll be sure to end up with exactly what the objects.xml contains.
Note that the following: • The migration scripts [ in ~/migration/* ] are invoked only after the initial installation has taken place. • We must remove/replace older monitoring templates in __init__.py before the migration scripts are invoked. • We use ZenPacks.zenoss.CiscoMonitor as the prime example
186 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
• We must implement the pre_install() method For our purposes the pre_install() method performs the following inside __init__.py:
1 REPLACED_MONITORING_TEMPLATES=(
2 'Network/Cisco/ACE/rrdTemplates/Device',
3 'Network/Cisco/ASA/rrdTemplates/Device',
4 'Network/Cisco/Nexus/rrdTemplates/Device',
5 'Network/Cisco/rrdTemplates/Device',
6 'Network/Cisco/rrdTemplates/ethernetCsmacd',
7 )
8
9 class ZenPack(ZenPackBase):
10 """CiscoMonitor loader."""
11
12 ...some code...
13
14 def install(self, app):
15 self.pre_install(app)
16 super(ZenPack, self).install(app)
17 self.post_install(app)
18
19 def pre_install(self, app):
20 """Perform work that must be done before normal ZenPack install."""
21 # objects.xml assumes /Reports/Enterprise Reports exists.
22 # Validating the organizer exists here is cleaner than defining a
23 # hard requirement on the EnterpriseReports ZenPack.
24 app.zport.dmd.Reports.createOrganizer('Enterprise Reports')
25
26 # Allow objects.xml to replace all the following monitoring templates.
27 LOG.info('Preparing monitoring templates for updates')
28 for subpath in REPLACED_MONITORING_TEMPLATES:
29 try:
30 template= app.zport.dmd.Devices.getObjByPath(subpath)
31 template.getPrimaryParent()._delObject(template.id)
32 except (KeyError, NotFound):
33 pass
Post-Installation Migration
The post-installation has its own dependencies and is implemented as:
def post_install(self, app): """Perform work that can be done after normal ZenPack install."""
self.symlinkPlugins()
# Remove event class mappings that this ZenPack supersedes. try: net_link_ec= app.zport.dmd.Events.Net.Link for mapping_id in ('snmp_linkDown','snmp_linkUp'):
if mapping_id in net_link_ec.instances.objectIds(): LOG.info("Removing standard %s mapping", mapping_id) net_link_ec.removeInstances((mapping_id,))
(continues on next page)
6.3. Migration Guide for Slackers 187 ZenPackers Documentation
(continued from previous page) except (AttributeError, KeyError): pass
Dec 10, 2018
6.4 Monkey Patching in ZenPacks
Sometimes you need to monkey-patch platform code, or code in other ZenPacks when developing ZenPacks. Typically you should look for ways to accomplish your objective without monkey-patching first. Once you’ve decided that your ZenPack should do monkey-patching, you should consider the following convention. It has a couple of advantages such as making it clear what patches your ZenPack is performing, separating them into the areas of code they patch, and handling cases where the code to be patched may not even be installed on the user’s system.
6.4.1 Where do the patches go?
Historically ZenPacks have tended to do all of their monkey-patching in their main __init__.py. This was done because __init__.py is the only code in a ZenPack that is run automatically when any Zenoss Python process starts. This makes it a great place to monkey-patch because you make sure your patch gets installed before it would be called. My advice to you is to not put your patches into __init__.py, but to build out the following structure instead. 1. Create a patches/ directory in the same directory as the ZenPack’s main __init__.py:
[bash]: mkdir -p patches/
2. Add an __init__.py into the patches directory with the following contents. Note that the calls to optional_import() at the bottom should only include modules that you want to patch. This mechanism makes sure that the module in the first argument exists before it imports the module named by the second argument from the patches/ directory:
from importlib import import_module
def optional_import(module_name, patch_module_name): """Import patch_module_name only if module_name is importable.""" try: import_module(module_name) except ImportError: pass else: import_module( '.{0}'.format(patch_module_name), 'ZenPacks.zenoss.CiscoAPIC.patches')
optional_import('Products.ZenModel','platform') optional_import('ZenPacks.zenoss.vSphere','vSphere')
3. Add a module (file) into the patches/ directory for each module you want to patch.
188 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
For the above example this would mean adding platform.py and vSphere.py into the patches/ directory:
[bash]: touch patches/platform.py [bash]: touch patches/vSphere.py
4. Add your platform patches into patches/platform.py. The following example adds a new method to the standard Device class:
from Products.ZenUtils.Utils import monkeypatch
@monkeypatch('Products.ZenModel.Device.Device') def mycustom_usefulMethod(self): """Return something useful.""" return "This is a useful return value."
5. Lastly import the patches module from the ZenPack’s main __init__.py. The best practice is to import the patches module as the last thing you do in the ZenPack’s __init__.py. This can solve some import recursion problems. If your __init__.py is going to trigger code that needs to execute the monkey-patched functionality, that part would have to go after the patches import:
# Patch last to avoid import recursion problems. from Products.ZenUtils.Utils import unused from. import patches unused(patches)
The first and last line are just there to keep pyflakes happy. In the end you should have a directory and file structure something like the following. Files irrelevant to this monkey- patching exercise have been omitted:
ZenPacks example/ MonkeyPatching/ __init__.py patches/ __init__.py platform.py vSphere.py
6.5 Templating Topics
6.5.1 Override Standard Monitoring Templates
This question about how a user would override standard monitoring templates has come up a few times lately.
Here’s the best way of doing this:
1. Create a ZenPack such as ZenPacks.telus.vSphereExtensions 2. Specifically configure said ZenPack to depend on ZenPacks.zenoss.vSphere 3. Create the threshold in one of the monitoring templates delivered by ZenPacks.zenoss.vSphere
6.5. Templating Topics 189 ZenPackers Documentation
4. Add said monitoring template to the extension ZenPack 5. Export the extension ZenPack 6. Put the extension ZenPack in version control You must be aware and considerate that there’s now an ordering dependency on installing the original and extension ZenPacks. Anytime the original ZenPack is installed, you must make sure that the extension ZenPack is subsequently installed. Circumstances where this will happen are: • Zenoss upgrade (only if the original ZenPack is installed by default with Zenoss) • Zenoss RPS application (only if the original ZenPack is contained in the RPS) • Manual installation of the original ZenPack All of this assumes that you don’t think the extension would make for a good out of the box default for the ZenPack. Let someone know if you think the change would be widely beneficial so we can incorporate it and save you the trouble.
6.5.2 Load-Templates Script
This script is designed to take a yaml file as an input on the commandline and update a live instance of Zenoss. See this link first: http://docs.zenosslabs.com/en/latest/zenpack_development/monitoring_http_api/using_yaml_templates.html Note: load-templates requires PyYAML to be installed. The commandline accepts a single argument, the filename: python load-templates.py template_definition_file.yaml The YAML formatting can be inferred from the zenoss doc for performance monitoring Doc-9385 yaml formatting: headers
The formatting section headers are the paths to the ZenPack. For example, Cisco lives under /Devices/Network, but CiscoUCS exists under /Devices. The CiscoUCS heading might be:
/CiscoUCS:
The Cisco heading might be:
/Network/Cisco:
For a new hypothetical SpecialExample device that lives under /Devices/Network, the Yaml heading might be:
/Network/SpecialExample:
In addition, each of the components and subcomponents may also have a heading. SpecialExample has three components: Server, Client and Services. The headings for each of these components would look something like the following:
/Network/SpecialExample/SpecialExampleServer: /Network/SpecialExample/SpecialExampleClient: /Network/SpecialExample/SpecialExampleServices:
190 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation yaml formatting: sections
Each section may contain three sub-sections: datasources, thresholds and graphs.
Datasources:
The datasources sub-section contains configuration necessary to capture datapoints. The datasources section has a named source with several attributes associated with that source:
... /*header/path*: ... datasources: *name_of_source*: type: datasource_type_ *attributes_associated_with_type01*: *attribute_value* *attributes_associated_with_type02*: *attribute_value* ... datapoints: *name_of_source*: source_type_ *another_name*: type: source_type_ rrdmin: *value* rrdmax: *value* aliases: *alias*: *value* ...
The hypothetical /Network/SpecialExample has an SNMP datasource, a command datasource and a JMX datasource:
/Network/SpecialExample: datasources: hrMemoryUsed: type: SNMP oid:"1.3.6.1.2.1.25.2.3.1.6" datapoints: hrMemoryUsed: GAUGE_MIN_0
hrProcessorLoad: type: SNMP oid:"1.3.6.1.2.1.25.3.3.1.2.1" datapoints: hrProcessorLoad: GAUGE_MIN_0
specialExampleCommand: datasources: echo: type: COMMAND commandTemplate: 'echo "OK|val1=123 val2=987.6"' parser: Nagios severity: info cycletime: 10 datapoints: val1: rrdmin:0 aliases: (continues on next page)
6.5. Templating Topics 191 ZenPackers Documentation
(continued from previous page) value1:"100,/" val2: DERIVE_MIN_0
heapMemoryUsage: type: JMX jmxPort:"12345" authenticate: True objectName:"java.lang:type=Memory" attributeName:"HeapMemoryUsage" datapoints: committed: GAUGE_MIN_0 used: GAUGE_MIN_0
nonHeapMemoryUsage: type: JMX jmxPort:"12345" authenticate: True objectName:"java.lang:type=Memory" attributeName:"NonHeapMemoryUsage" datapoints: committed: GAUGE_MIN_0 used: GAUGE_MIN_0
• SNMP: • COMMAND: • JMX:
RRD types:
• COUNTER - Saves the rate of change of the value over a step period. This assumes that the value is always increasing (the difference between the current and the previous value is greater than 0). Traffic counters on a router are an ideal candidate for using COUNTER. • GAUGE - Does not save the rate of change, but saves the actual value. There are no divisions or calculations. To see memory consumption in a server, for example, you might want to select this value.
**NOTE** Rather than COUNTER, you may want to define a data point using DERIVED and with a minimum of zero. This creates the same conditions as COUNTER, with one exception. Because COUNTER is a"smart" data type, it can wrap the data when a maximum number of values is reached in the system. An issue can occur when there is a loss of reporting and the system (when looking at COUNTER values) thinks it should wrap the data. This creates an artificial spike in the system and creates statistical anomalies.
• DERIVE - Same as COUNTER, but additionally allows negative values. If you want to see the rate of change in free disk space on your server, for example, then you might want to select this value. • ABSOLUTE - Saves the rate of change, but assumes that the previous value is set to 0. The difference between the current and the previous value is always equal to the current value. Thus, ABSOLUTE stores the current value, divided by the step interval.
192 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
RRD suffixes:
In addition, suffixes can be added to reduce YAML: • _MIN_*value* - sets rrdmin to value • _MAX_*value* - sets rrdmax to value
Examples:
• GAUGE_MIN_0_MAX_100 - sets the rrd type to gauge; rrd minimum to 0 and the rrd maximum to 100 • DERIVE_MAX_10 - sets the rrd type to derive; rrd maximum to 10
Thresholds:
• The thresholds sub-section contains configuration necessary to capture thresholds with relation to the datasources datapoints. Note:
*Make sure that the threshold is really needed. Too many extra events may be overwhelming to a user.*
The general format for the thresholds is as follows:
... /*header/path*: ... thresholds: *human friendly name*: type: threshold_type_ dsnames: ["*datasource_name*_*datapoint_name*"] *attributes_associated_with_type01*: *attribute_value* *attributes_associated_with_type02*: *attribute_value* ...
The hypothetical /Network/SpecialExample has an SNMP threshold, and a Command threshold. The SNMP threshold looks for a processor load of greater than 95%. The Command threshold looks for a value greater than 99.
/Network/SpecialExample: datasources: ...
thresholds: high load: type: MinMaxThreshold dsnames: ["hrProcessorLoad_hrProcessorLoad"] maxval:"95"
high values: type: MinMaxThreshold dsnames: ["ds1_val1","ds1_val2"] maxval:"99"
Standard Types:
6.5. Templating Topics 193 ZenPackers Documentation
• MinMaxThreshold - • ValueChangeThreshold - • CiscoStatus - • HoltWintersFailure -
Graphs:
• The graphs sub-section contains the configuration necessary to capture graphs with relation to the thresholds and datasources datapoints:
... /*header/path*: ... graphs: *human friendly graph title*: units:"human friendly units" miny: *y-axis minimum value* maxy: *y-axis maximum value* graphpoints: *human friendly datapoint name*: dpName:" *datasource*_*datapoint*" format: rrd_graph_type_format_ rpn: *reverse_polish_notation*
The /Network/SpecialExample device has several graphs that need to be displayed. More specifically, the Server components utilize the SNMP, the clients utilize JMX and the Services require a Command.
/Network/SpecialExample/SpecialExampleServer: graphs: CPU Utilization: units:"percent" miny:0 maxy: 100 graphpoints: Used: dpName:"hrProcessorLoad_hrProcessorLoad" format:" %4.0lf%%" Memory Utilization: units:"percent" miny:0 maxy: 100 graphpoints: Used: dpName:"hrMemoryUsed_hrMemoryUsed" format:" %7.2lf%%" rpn:"1024, *,${here/hw/totalMemory},/,100,*"
/Network/SpecialExample/SpecialExampleClient: graphs: Values: units: number miny:0 graphpoints: Value1: dpName:"ds1_val1" (continues on next page)
194 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
(continued from previous page) format:" %7.2lf%s" Value2: dpName:"ds1_val2" format:" %7.2lf%s"
/Network/SpecialExample/SpecialExampleServices: graphs: JVM Memory Usage: units: bytes base: true miny:0 graphpoints: Heap Committed: dpName: heapMemoryUsage_committed Heap Used: dpName: heapMemoryUsage_used NonHeap Committed: dpName: nonHeapMemoryUsage_committed NonHeap Used: dpName: nonHeapMemoryUsage_used
Stolen from: http://oss.oetiker.ch/rrdtool/doc/rrdgraph_graph.en.html
%% Just prints a literal ‘%’ character %#.#le Prints numbers like 1.2346e+04. The optional integers # denote field width and decimal precision. %#.#lf Prints numbers like 12345.6789, with optional field width and precision. %s Place this after %le, %lf or %lg. This will be replaced by the appropriate SI magnitude unit and the value will be scaled accordingly (123456 -> 123.456 k). %S is similar to %s. It does, however, use a previously defined magnitude unit. If there is no such unit yet, it tries to define one (just like %s) unless the value is zero, in which case the magnitude unit stays undefined. Thus, formatter strings using %S and no %s will all use the same magnitude unit except for zero values. If you PRINT a VDEF value, you can also print the time associated with it by appending the string :strftime to the format. Note that RRDtool uses the strftime function of your OSs C library. This means that the conversion specifier may vary. Check the manual page if you are uncertain. The following is a list of conversion specifiers usually supported across the board. %a The abbreviated weekday name according to the current locale. %A The full weekday name according to the current locale. %b The abbreviated month name according to the current locale. %B The full month name according to the current locale. %c The preferred date and time representation for the current locale. %d The day of the month as a decimal number (range 01 to 31). %H The hour as a decimal number using a 24-hour clock (range 00 to 23). %I The hour as a decimal number using a 12-hour clock (range 01 to 12). %j The day of the year as a decimal number (range 001 to 366). %m The month as a decimal number (range 01 to 12).
6.5. Templating Topics 195 ZenPackers Documentation
%M The minute as a decimal number (range 00 to 59). %p Either ‘AM’ or ‘PM’ according to the given time value, or the corresponding strings for the current locale. Noon is treated as ‘pm’ and midnight as ‘am’. Note that in many locales the ‘am’ and ‘pm’ notation is unsupported and in such cases %p will return an empty string. %S The second as a decimal number (range 00 to 61). %s The seconds since the epoch (1.1.1970) (libc dependent non standard!) %U The week number of the current year as a decimal number, range 00 to 53, starting with the first Sunday as the first day of week 01. See also %V and %W. %V The ISO 8601:1988 week number of the current year as a decimal number, range 01 to 53, where week 1 is the first week that has at least 4 days in the current year, and with Monday as the first day of the week. See also %U and %W. %w The day of the week as a decimal, range 0 to 6, Sunday being 0. See also %u. %W The week number of the current year as a decimal number, range 00 to 53, starting with the first Monday as the first day of week 01. %x The preferred date representation for the current locale without the time. %X The preferred time representation for the current locale without the date. %y The year as a decimal number without a century (range 00 to 99). %Y The year as a decimal number including the century. %Z The time zone name or abbreviation. %% A literal ‘%’ character.
Events:
• Not Yet Implemented
6.5.3 Automatic Templates for Zenpacks
Templating is an important aspect of data modeling and collection. This article explores automatic template construction for modeling.
Prerequisites
• The usual
Automatically Applied Setters and Getters on Class Objects
In Products/DataCollector/ApplyDataMap.py there is code that automatically executes all methods in your class that start with “set”. In order for this to work, follow these general steps: • Create Methods getXYZ(self, data), setXYZ(self, data) • Create data attribute ‘setXYZ’ (in your datamap) for the setter in modeling • Apply that datamap to your model. ApplyDataMap will process and apply it. Some details: • getXYZ(self, data)
196 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
• setXYZ(self, data) The data that is referenced must be provided by your modeling template in the form of a map attribute with the same name as the method:
{ 'setMonitoringTemplateData': attributes['MonitoringProfile'], 'someOtherAttribute': somedata, ... etc... }
Example of Auto-Generated and Applied Local Templates
The context of this is using ZenPackLib and the ZenPacks.zenoss.ControlPlane ZenPack. The goal is to have an automatically generated template to be locally applied to every component object that gets created. Based on an API call to the ControlPlane, you need to:
* Create the model data, based on API defs. * Insert Data into datamap. * Append the map to the object map. ApplyDataMap processes all maps. * Object Instantiation: Create the Local Template based on ObjectMaps * Ensure template attribute's name matches a getter in object class.
Create the Model Data
In our example we create a “MonitoringTemplateData” based on the API’s return value of MonitoringProfile data: def map_pool(attributes): """Return ObjectMap data given classname and attributes. """ return { 'id': get_pool_id(attributes['ID']), 'title': attributes['ID'], 'set_parentPool': get_pool_id(attributes['ParentID']), 'priority': attributes['Priority'], 'coreLimit': attributes['CoreLimit'], 'memoryLimit': attributes['MemoryLimit'], 'setMonitoringTemplateData': attributes['MonitoringProfile'], }
Insert Data into Datamap
Later on this data is processed in DataMapProducer class (abbreviated): class DataMapProducer(object): """Produce the DataMap objects required to model """
def __init__(self, client): self.client= client
@inlineCallbacks def getmaps(self): (continues on next page)
6.5. Templating Topics 197 ZenPackers Documentation
(continued from previous page) """ Return datamaps to modeler. """
maps=[]
pools= yield self.client.pools() pool_maps=[] for attributes in pools: pool_map= map_pool(attributes) if pool_map: pool_maps.append(pool_map)
Understand that these modules are called by the Modeler and get fed directly to the other core services. The next section is also part of the same modeler:
Append the Map to the Object Map
This is the usual and traditional step for appending object maps. Once you have the dictionary data (pool_maps in our case) for your object or component, process it in the usual way: maps.append( RelationshipMap( relname='pools', modname='ZenPacks.zenoss.ControlPlane.Pool', objmaps=pool_maps)) returnValue(maps)
Ensure Template Attribute’s Name Matches a Getter in Object Class
After the modeler hands these maps off to the other ancillary parts of the core, the data in the object classes gets initialized with the map data. Consider the Pool.py class code:
# ZenPack Imports from. import schema from .utils import replaceLocalTemplate class Pool(schema.Pool):
"""Custom model code for Pool class."""
_monitoringTemplateData= None
def getMonitoringTemplateData(self): """Return last set monitoring template data.""" return self._monitoringTemplateData
def setMonitoringTemplateData(self, data): """Create local monitoring template using data.""" replaceLocalTemplate(self, data,'ZenPacks.zenoss.ControlPlane.Pool')
Notice that the setMonitoringTemplateData is exactly the same name as that of the mapped data above. This is critical since the ApplyDataMap class is looking for this match, and without it the process fails.
198 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
Object Instantiation: Create the Local Template based on ObjectMap Data
Once ApplyDataMap gets the data above, it calls setMonitoringTemplateData() with the associated data and configures all the templates. If we look in the utils.py import you’ll find the replaceLocalTemplate(obj, data, targetPythonClass) helper utility which configures the actual Template: ( See reference: https://github.com/zenoss/ZenPacks.zenoss.Microsoft.Windows/blob/develop/load-templates) def replaceLocalTemplate(obj, data, targetPythonClass): """Replace local monitoring template using data.""" obj._monitoringTemplateData= data
# Delete the local template if it already exists. template_name= obj.getRRDTemplateName() if not template_name: return
template= getattr(aq_base(obj), template_name, None) if template: obj._delObject(template_name)
# Create the template. template= RRDTemplate(template_name) obj._setObject(template.id, template) template= obj._getOb(template_name)
# Configure the template. template.description= data.get('Description','') template.targetPythonClass= targetPythonClass
# Add datasources to the template. for metricConfig in data.get('MetricConfigs', []): datasource= template.manage_addRRDDataSource( metricConfig['ID'], 'ControlPlaneDataSource.Control Plane')
# Configure the datasource. datasource.component='${here/id}' datasource.eventClass='/Ignore' datasource.severity=0 datasource.cycletime= 300 datasource.perfURL= metricConfig['PerfURL']
# Add datapoints to the datasource. for metric in metricConfig['Metrics']: datapoint= datasource.manage_addRRDDataPoint(metric['ID'])
# Add a default alias for each datapoint. datapoint.addAlias(metric['ID'])
6.5. Templating Topics 199 ZenPackers Documentation
6.6 Unit Testing Rough Guide
6.6.1 Description
Zenpacks need to be tested for internal consistency at build time by Jenkins.
6.6.2 Introduction
Unit Tests are becoming more important as continuous deployment technology grows. These tests provide a sanity check for any software that gets moved into production. Unit Tests in Zenoss are intended to test major components of ZenPacks whenever possible.
6.6.3 Jenkins
Jenkins builds a Zenoss environment in order to test the Zenpacks at build time. This means that it does have a live version of Zenoss to test against. At this time however, Jenkins is not configured to run Unit Tests. You will have to test them manually (see below)
6.6.4 Manual Testing
You can test Unit Tests manually with runtests as follows: • Your test class TestCheckOracle is part of ZenPacks.zenoss.DatabaseMonitor (see below for example). • You wish to test this entire ZenPack’s unit tests. • You are on the zenoss system as user zenoss:
[zenoss@cdev:$zpdir/tests]: runtests --type unit ZenPacks.zenoss.DatabaseMonitor ======Packages to be tested: ZenPacks.zenoss.DatabaseMonitor ======Parsing /opt/zenoss/etc/zope.conf Running tests at level 1 Running Products.ZenTestCase.BaseTestCase.ZenossTestCaseLayer tests:
Set up Testing.ZopeTestCase.layer.ZopeLite in 0.422 seconds. Set up Products.ZenTestCase.BaseTestCase.ZenossTestCaseLayer in 0.002 seconds. Running: .. Ran 2 tests with 0 failures and 0 errors in 4.450 seconds. Tearing down left over layers: Tear down Products.ZenTestCase.BaseTestCase.ZenossTestCaseLayer in 0.000
˓→seconds. Tear down Testing.ZopeTestCase.layer.ZopeLite in 0.000 seconds.
In this case we ran 2 tests and had 0 failures. We got lucky. You can also specify the module to run: runtests--type unit ZenPacks.zenoss.DatabaseMonitor-m test_modeler
200 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
6.6.5 Unit Test Framework
The unit test framework tests functionality in isolation of any monitored devices. It inherits from the class BaseTestCase and uses its assert methods for determining pass or failure. • Unit tests inherit from BaseTestCase • All tests go in the $ZPDIR/tests/ folder • Inside that folder you need to create your test classes which will be called from the following method:
from Products.ZenTestCase.BaseTestCase import BaseTestCase from ZenPacks.zenoss.GoodieMonitor.Candy import Candy
class TestGoodies(BaseTestCase):
def testCheckGoodies(self): item= Candy("chocolate") state= item.getSweetOrSour() self.assertTrue("sweet" in state,"Item %s not sweet"% item)
def test_suite(): from unittest import TestSuite, makeSuite suite= TestSuite() suite.addTest( makeSuite(TestGoodies) ) return suite
6.6.6 DataSource Example
This example shows the test code we created above. We assume: • You are in your $ZP_DIR/tests/ folder • You are in your dev environment. • You have created an empty (or otherwise) __init__.py file • Your test has a fake or simulated set of data, ie. • You can’t rely on a real device to gather data. • The filename we create is test_datasources_plugin.py:
########################################################################## # Copyright (C) Zenoss, Inc. 2013, all rights reserved. # test_datasources_plugin.py ##########################################################################
from Products.ZenTestCase.BaseTestCase import BaseTestCase
class TestCheckOracle(BaseTestCase): ''' This calls check_oracle.py -c "connectionString" -q "query" -t Rewrote check_oracle with -t (test) flag, adjusted txojdbc.py '''
def testCheckOracle(self): import subprocess import os (continues on next page)
6.6. Unit Testing Rough Guide 201 ZenPackers Documentation
(continued from previous page)
connectionString="zenoss/[email protected]:1521:XE" query='select * from v$sysstat' path=os.path.join(os.path.dirname(__file__),"..") checkOracle=os.path.join(path,"check_oracle.py")
output=subprocess.check_output(["python", checkOracle,"-c", connectionString,"-q", query,"-t"]) outputRequired="logonscumulative"
# BaseTestCase.assertTrue is the method that determines pass/fail self.assertTrue(outputRequired in output, "Output does not contain valid data %s"% outputRequired)
def test_suite(): from unittest import TestSuite, makeSuite suite= TestSuite() suite.addTest(makeSuite(TestCheckOracle)) return suite
In this example, the testCheckOracle method of TestCheckOracle will be tested. The check_oracle.py will call a routine (txojdbc.py) that has some pre-made flat files of JSON data, so there is no dependency on an actual device to monitor for data. This is critical because eventually Jenkins will have to run the unit tests in a vacuum environment. Note: The BaseTestCase.assertTrue is the key method that you need to determine pass/fail of your test. If this test finds the "logonscumulative" string, it passes, otherwise it fails. In general you need one of the assert methods in the BaseTestCase class
6.6.7 Impact Example
This example shows how to test Impact. We assume • You are in your $ZP_DIR/tests/ folder • You are in your dev environment. • You have created an empty (or otherwise) __init__.py file • You have your environment setup with Impact installed (for testing). This example uses a lot of boilerplate code. It is much simpler than the XenServer unit tests though. It can be considered one of the simplest impact tests you will find, because the Instance class is only dependent on the containing server. Nothing depends on Instance. Most of the code is simply building a node-link tree diagram. The two methods that are non-boilerplate are: • create_endpoint() • The (decorated) test_Instance(): Notice also that the tests will always pass if Impact ZP is not installed so you won’t be able to test it properly.
############################################################################## # # Copyright (C) Zenoss, Inc. 2013, all rights reserved. # # This content is made available according to terms specified in (continues on next page)
202 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
(continued from previous page) # License.zenoss under the directory where your Zenoss product is installed. # ##############################################################################
''' Unit test for all-things-Impact. ''' import transaction from zope.component import subscribers from Products.Five import zcml from Products.ZenTestCase.BaseTestCase import BaseTestCase from Products.ZenUtils.guid.interfaces import IGUIDManager from Products.ZenUtils.Utils import monkeypatch from ZenPacks.zenoss.DatabaseMonitor.utils import guid, require_zenpack from ZenPacks.zenoss.DatabaseMonitor.tests.utils import ( add_contained, add_noncontained, )
@monkeypatch('Products.Zuul') def get_dmd(): ''' Retrieve the DMD object. Handle unit test connection oddities.
This has to be monkeypatched on Products.Zuul instead of Products.Zuul.utils because it's already imported into Products.Zuul by the time this monkeypatch happens. ''' try: # original is injected by the monkeypatch decorator. return original()
except AttributeError: connections= transaction.get()._synchronizers.data.values()[:] for cxn in connections: app= cxn.root()['Application'] if hasattr(app,'zport'): return app.zport.dmd def impacts_for(thing): ''' Return a two element tuple.
First element is a list of object ids impacted by thing. Second element is a list of object ids impacting thing. ''' from ZenPacks.zenoss.Impact.impactd.interfaces \ import IRelationshipDataProvider
impacted_by=[] impacting=[]
guid_manager= IGUIDManager(thing.getDmd()) for subscriber in subscribers([thing], IRelationshipDataProvider): (continues on next page)
6.6. Unit Testing Rough Guide 203 ZenPackers Documentation
(continued from previous page) for edge in subscriber.getEdges(): if edge.source == guid(thing): impacted_by.append(guid_manager.getObject(edge.impacted).id) elif edge.impacted == guid(thing): impacting.append(guid_manager.getObject(edge.source).id)
return (impacted_by, impacting) def triggers_for(thing): ''' Return a dictionary of triggers for thing.
Returned dictionary keys will be triggerId of a Trigger instance and values will be the corresponding Trigger instance. ''' from ZenPacks.zenoss.Impact.impactd.interfaces import INodeTriggers
triggers={}
for sub in subscribers((thing,), INodeTriggers): for trigger in sub.get_triggers(): triggers[trigger.triggerId]= trigger
return triggers def create_endpoint(dmd): ''' Return an Endpoint suitable for Impact functional testing. This is non-boilerplate code... ''' # DeviceClass dc= dmd.Devices.createOrganizer('/Server/Linux') dc.setZenProperty('zPythonClass','') linux= dc.createInstance('linux')
# Instance from ZenPacks.zenoss.DatabaseMonitor.Instance import Instance add_contained(linux,'instances', Instance('instance1'))
return linux class TestImpact(BaseTestCase): def afterSetUp(self): super(TestImpact, self).afterSetUp()
import Products.ZenEvents zcml.load_config('meta.zcml', Products.ZenEvents)
try: import ZenPacks.zenoss.DynamicView zcml.load_config('configure.zcml', ZenPacks.zenoss.DynamicView) except ImportError: return
(continues on next page)
204 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
(continued from previous page) try: import ZenPacks.zenoss.Impact zcml.load_config('meta.zcml', ZenPacks.zenoss.Impact) zcml.load_config('configure.zcml', ZenPacks.zenoss.Impact) except ImportError: return
import ZenPacks.zenoss.DatabaseMonitor zcml.load_config('configure.zcml', ZenPacks.zenoss.DatabaseMonitor)
def endpoint(self): ''' Return a DatabaseMonitor endpoint device populated in a suitable way for Impact testing. ''' if not hasattr(self,'_endpoint'): self._endpoint= create_endpoint(self.dmd)
return self._endpoint
def assertTriggersExist(self, triggers, expected_trigger_ids): ''' Assert that each expected_trigger_id exists in triggers. ''' for trigger_id in expected_trigger_ids: self.assertTrue( trigger_id in triggers,'missing trigger: %s'% trigger_id)
@require_zenpack('ZenPacks.zenoss.Impact') def test_Instance(self): ''' Decorator will disable tests if required ZenPacks are not installed! ZenPacks.zenoss.Impact and ZenPacks.zenoss.DynamicView must be installed! Jenkins will eventually be setup to do unit tests at build time..... ''' instance1= self.endpoint().getObjByPath('instances/instance1') impacts, impacted_by= impacts_for(instance1)
# Host -> Instance self.assertTrue( 'linux' in impacted_by, 'missing impact: {} -> {}'.format('linux', instance1)) def test_suite(): from unittest import TestSuite, makeSuite suite= TestSuite() suite.addTest(makeSuite(TestImpact)) return suite
6.6.8 Another Simple Example
Here is another simple example that may help:
6.6. Unit Testing Rough Guide 205 ZenPackers Documentation
from Products.Five import zcml from Products.ZenTestCase.BaseTestCase import BaseTestCase from Products.Zuul.interfaces import IReportable from ZenPacks.zenoss.OpenVZ.Container import Container class TestAnalytics(BaseTestCase): def afterSetUp(self): super(TestAnalytics, self).afterSetUp()
# Required to prevent erroring out when trying to define viewlets in # ../browser/configure.zcml. import Products.ZenUI3.navigation zcml.load_config('testing.zcml', Products.ZenUI3.navigation)
import ZenPacks.zenoss.OpenVZ zcml.load_config('configure.zcml', ZenPacks.zenoss.OpenVZ)
def testContainerReportable(self): device= self.dmd.Devices.createInstance('openvz_host')
container= Container('101') device.openvz_containers._setObject(container.id, container) container= device.openvz_containers._getOb(container.id)
reportable= IReportable(container) report_properties= reportable.reportProperties()
self.assertEqual(reportable.entity_class_name,'container')
self.assertEqual(len(report_properties),3) self.assertEqual(report_properties[0][0],'id') # .. and so on.. def test_suite(): from unittest import TestSuite, makeSuite suite= TestSuite() suite.addTest(makeSuite(TestAnalytics)) return suite
6.6.9 DMD fixtures
You could use dmd in descendants of BaseTestCase after its afterSetUp method was executed. But, you need to remember that this dmd will be clean, so, if you want to use something like dmd.Devices.Network.Router it will give you AttributeError: Network. To create a device class you need to call dmd.Devices.createOrganizer('/Network').
6.6.10 Testing IEventService with Crochet and Twisted
References: * http://docs.zope.org/zope.component/narr.html* Special Events and Their Consequences
206 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
If you need to test a Modeling or Collection that has an IEvent service, you have code that looks like this: import zope.component from Products.ZenCollector.interfaces import IEventService ...... class Temper(object): def __init__(self, calamari, graphite, device): self._eventService= zope.component.queryUtility(IEventService)
evt= dict( device=device.id, component='', summary=summary, severity=severity, eventClass='/Status', eventClassKey='CephTemperConfigurationClass', ) self._eventService.sendEvent(evt) ...... def someMethod():
evt= dict( device=device.id, component='', summary=summary, severity=severity, eventClass='/Status', eventClassKey='CephTemperConfigurationClass', )
self._eventService.sendEvent(evt)
If you try to execute a test on this code from UnitTests you may encounter this type of traceback:
... File"/opt/zenoss/lib/python2.7/site-packages/crochet-1.5.0-py2.7.egg/crochet/_
˓→eventloop.py", line 461, in wrapper return eventual_result.wait(timeout) File"/opt/zenoss/lib/python2.7/site-packages/crochet-1.5.0-py2.7.egg/crochet/_
˓→eventloop.py", line 231, in wait result.raiseException() File"/opt/zenoss/lib/python/twisted/python/failure.py", line 338, in raiseException raise self.type, self.value, self.tb AttributeError:'NoneType' object has no attribute'sendEvent'
The problem is that there is no sendEvent() method inside the Unit Test environment. That is to say you need to provide the IEventService as a singleton utility. You also need to define the sendEvent() method. The corrected Unit Test code should look like this now: import zope.component from Products.ZenCollector.interfaces import IEventService class TestOSI(zenpacklib.TestCase):
def setUp(self): super(TestOSI, self).setUp() (continues on next page)
6.6. Unit Testing Rough Guide 207 ZenPackers Documentation
(continued from previous page) zope.component.provideUtility(self, IEventService)
def sendEvent(self, event, **kw): pass
...... -- code that tests your original (modeler, collector)---
6.7 ZEP Topics
ZEP is the Zenoss Event Processor. We’ll just include a few snippets that are floating around.
6.7.1 Close Old Acknowledged Events import time from zenoss.protocols.protobufs.zep_pb2 import ( SEVERITY_CRITICAL, SEVERITY_ERROR, STATUS_ACKNOWLEDGED, ) from Products.Zuul import getFacade now= int(time.time()) * 1000 one_hour_ago= now-(3600 * 1000)
# last_seen can be a range: beginning of range, end of range last_seen= (one_hour_ago, now) zep= getFacade('zep') event_filter= zep.createEventFilter( status=[STATUS_ACKNOWLEDGED], severity=[SEVERITY_ERROR, SEVERITY_CRITICAL], last_seen=last_seen, ) zep.closeEventSummaries(eventFilter=event_filter)
6.8 Bread and Butter: A Quick Primer on Pickles
Sometimes you need a way to preserve objects in their exact state to be reproduced later. This is especially useful for testing, or recreating customer issues. Fortunately, Python offers a facility to do just this: Pickles. For a bit more about pickles, check the Python docs: https://docs.python.org/2/library/pickle.html
6.8.1 Mix Up Some Brine: How to Make a Pickle
Any object in Python can be pickled simply by calling:
208 Chapter 6. Special Zenpack Development Topics ZenPackers Documentation
import pickle with open(pickle_filename,'wb') as pickle_file: pickle.dump(results, pickle_file,2)
The following patch, if applied to zenmodeler.py, will create pickle files for all the datamaps generated during model- ing: diff --git a/Products/DataCollector/zenmodeler.py b/Products/DataCollector/zenmodeler.
˓→py index 386847f..ef4eb54 100644 --- a/Products/DataCollector/zenmodeler.py +++ b/Products/DataCollector/zenmodeler.py @@ -634,6 +634,26 @@ class ZenModeler(PBDaemon): try: results = plugin.preprocess(results, self.log) if results: +############################################################################### + import os + if not os.path.isdir(device.id): + os.mkdir(device.id) + + plugin_prefix_length = None + if plugin.__module__.startswith('ZenPacks'): + plugin_prefix_length = 5 + else: + plugin_prefix_length = 0 + + pickle_filename = '%s/%s.pickle' % ( + device.id, '.'.join( + plugin.__module__.split( + '.')[plugin_prefix_length:])) + + self.log.info("Dumping results to %s", pickle_filename) + with open(pickle_filename, 'wb') as pickle_file: + pickle.dump(results, pickle_file, 2) +############################################################################### datamaps = plugin.process(device, results, self.log) if datamaps: pluginStats.setdefault(plugin.name(), plugin.weight)
6.8.2 Opening the Pickle Jar
After you have saved an object to a Pickle file, you can load it back again as a Python object as follows: import pickle datamaps= pickle.load(open("filename","rb"))
Once loaded, you can interact with the object just as you would any other object.
6.8.3 I Wanted Sweet, Not Dill: Modifying Pickles
Pickles are saved as binary files, which can be easily copied and transferred. However, they are not so easy to read, or edit. In some cases you may want to take an existing pickle and inspect its contents, or even update the object’s attributes. The following script is a sort of editor for pickle files. It loads the pickle file from the specified path to an
6.8. Bread and Butter: A Quick Primer on Pickles 209 ZenPackers Documentation object, and then drops you to an interactive Python shell, where you can interact with the object however you need to. Once finished, you can write the object back to the original pickle file. This was something I created for a specific purpose, but should be generally useful. However, it has not been tested extensively. Please update this doc if you find any bugs.
#!/usr/bin/env python
# from IPython import embed from pprint import pprint import pickle import os.path import os import sys from datetime import datetime
# Greeting print "Welcome to edPick: a simple editor for pickle files\n"
# Get file path path= sys.argv[1] if 1< len(sys.argv) else '' valid_path= os.path.isfile(path) while not valid_path: path= raw_input('Enter file path [./modeling_data.pickle]:') path= path or './modeling_data.pickle' if os.path.isfile(path) : valid_path= True else: print "Weird. I can't find that file. Try again.\n"
# Load pickled data data= pickle.load( open( path,"rb")) pprint(data)
# Modify data print "Modify the object as needed.\n" print "Then hit Ctrl-D to exit interpreter and write out your new pickle file.\n" print "Or hit Ctrl-Z to abort.\n" # embed() import readline # optional, will allow Up/Down/History in the console import code vars= globals().copy() vars.update(locals()) shell= code.InteractiveConsole(vars) shell.interact()
# Backup old file suffix='.'+datetime.fromtimestamp(os.path.getmtime(path)).strftime('%Y%m %d-%H%M')+'.
˓→old' os.rename(path,path+suffix)
# Write out new pickle data pickle.dump( data, open( path,"wb")) print "'%s' has been written in place of the file you specified.\n"% path print "Your original pickle file has been moved to '%s%s'. You can remove it if you
˓→do not need it.\n"% (path, suffix)
Just save it to your path, and you can run it whenever you need it.
210 Chapter 6. Special Zenpack Development Topics CHAPTER 7
Topics for ZenpackLib
Documentation for ZenPackLib
7.1 Welcome to ZenPack Library
The ZenPackLib allows you to automate much of the ZenPack creation process. It does this by: • Automating boilerplate code into dynamically created classes and modules • Automating certain Zenpack components like the GUI and ZCML • Defining standard classes and modules needed for Zenpack modification
7.1.1 ZenPackLib Overview
The ZenPackLib allows you to automate much of the ZenPack creation process. Main Reference: http://zenpacklib.zenoss.com
Description
ZenPackLib’s use of automatic class creation requires a bit of explanation. We will attempt to cover some of this here. The official documentation will be listed at . . . .
Prerequisites
• Zenoss ZenPack Development • Python 2.7 • ZenPackLib familiarity
211 ZenPackers Documentation
We assume that you are familiar with ZenPack development and Python coding. We work from the base of $ZP_DIR. A few things to ensure: • You have created a DeviceClass for your ZP • You have set zPythonClass for your DeviceClass to Zenpacks.zenoss.ZP.Class. For example: Zen- packs.zenoss.ControlCenter.ControlCenter
Class Definition and Overview (Simplified Form)
In your class definition (possibly __init__.py) you will have definitions of your Pool classes like:
1 RELATIONSHIPS_YUML= """
2 // ------
3 // Containing Relations
4 // ------
5 [ControlCenter]++pools -controlcenter[Pool]
6 // ------
7 // Non-containing Relations
8 // ------9 [Pool]*parentPool -.-childPools 0..1[Pool] 10 """
11
12 CFG= zenpacklib.ZenPackSpec(
13 name=__name__,
14
15 zProperties={
16 'DEFAULTS':{'category':'Control Plane'},
17
18 'zControlCenterHost': {},
19 'zControlCenterPort':{'default':'8787'},
20 },
21
22 classes={
23 'DEFAULTS':{
24 'base': zenpacklib.Component,
25 },
26
27 'ControlCenter':{
28 'base': zenpacklib.Device,
29 'meta_type':'ZenossControlCenter',
30 'label':'Zenoss Control Plane',
31 },
32
33 'Pool':{
34 'meta_type':'ZenossControlCenterPool', 35 'label':'CP-Pool', # <-- *** Note this label is ZPL magic. 36 # ZenpackLib: Properties that Auto-magically appear in GUI
37 'properties':{
38 'priority':{'label':'Priority'},
39 'coreLimit':{'label':'CPU Core Limit'},
40 'memoryLimit':{'label':'Memory Limit'},
41 },
42 'relationships':{
43 # This is required since the pool <=> pool relationship
44 # can cause recursive ambiguity problems. This breaks that symmetry.
45 'parentPool':{'label':'Parent','order': 1.0}, (continues on next page)
212 Chapter 7. Topics for ZenpackLib ZenPackers Documentation
(continued from previous page)
46 'childPools':{'label':'Children','order': 1.1},
47 }
48 },
49 },
50 class_relationships=zenpacklib.relationships_from_yuml(RELATIONSHIPS_YUML),
51 )
52 CFG.create()
Special things to note and observe: • Note: When you add any new zProperties to your Zenpack you MUST re-install your Zenpack. This is because those properties get created in the ZODB only when you install. New classes create relationship maps that subsequently are stored in ZODB. If your new class doesn’t create a new zProperty then you can get away with a zenoss restart and a remodel.
• name is set to the name of the class which will resolve to ZenPacks.zenoss.ControlCenter . This should typically not be changed. • DEFAULTS is a special value that will cause its properties to be added as the default for all of the other listed zProperties. Otherwise you have to add them manually to each zProp. • The zProp entry for zControlCenterHost is a shorthand for the more verbose:
'zControlCenterHost':{'type':'string','default':''}
• The class DEFAULTS specifies that all classes will be sub-classes of the standard zenpacklib.Component by default. We could choose from: – zenpacklib.Device – zenpacklib.Component – A user defined class • The Pools class is automatically created based on YAML or YUML definition • Go to Advanced->Monitoring Templates: Hit the + at bottom left • The ‘relationships’ labels are added in order to disambiguate parent-child names. The parent and child name prefix are ZPL automagically determined from the YUML spec defined in RELATIONSHIPS_YUML. • The label attached to Pool is “CP-Pool”. It exists to disambiguate the relationship between Pool and contained- Pool objects. • In particular, you will need to. – Create: A template for each label with the EXACT same name as label. – Ensure: Template is in the appropriate Template Path (/ControlCenter) – Ensure: All relationship names are unique in the YUML spec
About YAML Relationships Map
See the official documentation for this at • http://zenpacklib.zenoss.com/en/latest/ In general the relationships look like this:
7.1. Welcome to ZenPack Library 213 ZenPackers Documentation
class_relationships: - ComponentA Cardinality:Cardinality ComponentB
Cardinality can be one of the following: • 1:MC => One-to-Many containing • 1:M => One-to-Many • M:M => Many-to-Many • 1:1 => One-to-One For example: class_relationships: - CephEndpoint1:MC CephComponent - CephHost1:M CephService - CephPool M:M CephOSD - CephPool1:M CephRBD - CephPool1:M CephSnap - CephRGW1:M CephPool
About YUML Relationships Map (Deprecated)
The YUML relationship maps have a very specific format. In the following generic form:
[LeftClass](l_cardinality)leftToRightName (separator) rightToLeftName(r_
˓→cardinality)[RightClass]
For example:
[Service]0..1serviceRuns-.-serviceDef *[Running]
• [LeftClass] and [RightClass] are classes • The cardinalities can be: (*, 0..1, 1..7, 1, +) • LeftToRightName and RightToLeftName are the labels that identify the relationships created. For example: – The pools relationship on ControlCenter defines the contained pools. – The controlcenter relationship on Pool defines the containing controlcenter. • Relationships do not need a name unless there is ambiguity in relations. I recommend naming all your relations though just in case you later add a relationship that makes your schema ambiguous. • Note: Make Sure All Relationships Have Unique Names! If relationships don’t have unique names ZPL will not be able to process the relationships in a predictable way. Make sure all relation names are unique and you should be ok.
Class Definition: Advanced Topics
In the beginning there is the ZenPacks.zenoss.XYZ class. It’s created by Zenoss when you create the class and install the __init__.py. ZPL creates these two objects by default:
214 Chapter 7. Topics for ZenpackLib ZenPackers Documentation
.schema: A module that allows customization (overrides) of the ZPL created Zenpack class .ZenPack: The class that contains all the properties, install(), remove(), and cleanup methods for the Zenpack.
When ZPL creates any components (for example, Pool), it creates several objects relative to ZenPacks.zenoss.XYZ:
.Pool : The Pool component class itself .schema.Pool : The Pool schema space for class modification
If you don’t create your own Pool.py class file (analogous to .ZenPack), ZPL will do this for you. Again, this is for property management and initializations.
Attribute Definition
In order to modify attributes you must change those attributes in your __init__.py. The various properties you can change are: • base: Base Class Type • meta_type: Component-level identifier • label: The display label in the GUI • index_type: index types for component Catalog search efficiency: (field, keyword) • impacts: What this component impacts: can be list or list-output of a function • impacted_by: What this component is impacted by: can be list or function • order: Order of display in the grid
ZPL Modeling
ZPL Automatic set_ and get_ for Non-Containing Relations
You will automatically get a set_var() and get_var() method when you invoke them in the modeler. The set_ method will create or update the relationship. The set_* method takes a string or list of strings for its values. You use it by first creating a non-containing relationship in the YUML like this:
Tenant1:M Floatingip Floatingip1:M Networks
Now you set the relationship up in the modeler by using set_tenant:
1 floatingips=[]
2 for floatingip in results['floatingips']:
3
4 network_list=[]
5 for net in results['networks']:
6 if net in some_net_list:
7 network_list.append(net)
8
9 floatingips.append(ObjectMap(
10 modname='ZenPacks.zenoss.OpenStackInfrastructure.FloatingIp',
11 data= dict( (continues on next page)
7.1. Welcome to ZenPack Library 215 ZenPackers Documentation
(continued from previous page)
12 id='floatingip-{0}'.format(floatingip['id']),
13 floatingipId= floatingip['id'],
14 set_tenant= tenant_name[0],
15 set_networks= network_list,
16 )))
17
18 tenants=[]
19 ... similar to floatingips above...
20 ... etc...
21
22 objmaps={
23 'tenants': tenants,
24 'floatingips': floatingips,
25 }
26
27 # Apply the objmaps in the right order.
28 componentsMap= RelationshipMap(relname='components')
29 for i in ('tenants','floatingips'):
30 for objmap in objmaps[i]:
31 componentsMap.append(objmap)
32
33 return (componentsMap)
Note: ApplyDataMap does not accept string or list objects by default. ZPL does this for us. If you override the set_ functions in your Component class you need to either make a call to ZPL’s super(ComponentClass, self).set_networks() or write your function as per ApplyDataMap()’s requirements.
ZPL Modeling Templates
Our modeling example is a very simplified version of the ControlCenter ZenPack. The modeler itself grabs a pre-made ObjectMap from the helper class in $ZP_DIR/modeling: • $ZPDIR/modeler/plugins/zenoss/ControlCenter.py (wrapper for modeling.py) • $ZPDIR/modeling (Does the heavy lifting) In the modeler wrapper, ControlCenter.py we have:
1 import logging
2 LOG= logging.getLogger('zen.ControlCenter')
3
4 from twisted.internet.defer import inlineCallbacks, returnValue
5 from Products.DataCollector.plugins.CollectorPlugin import PythonPlugin
6 from ZenPacks.zenoss.ControlCenter import modeling, txcpz
7
8 class ControlCenter(PythonPlugin):
9
10 """ControlCenter modeler plugin."""
11
12 required_properties=(
13 'zControlCenterHost',
14 'zControlCenterPort',
15 'zControlCenterUser',
16 'zControlCenterPassword', (continues on next page)
216 Chapter 7. Topics for ZenpackLib ZenPackers Documentation
(continued from previous page)
17 )
18
19 deviceProperties= PythonPlugin.deviceProperties+ required_properties
20
21 @inlineCallbacks
22 def collect(self, device, unused):
23 """Asynchronously collect data from device. Return a deferred."""
24 LOG.info("%s: Collecting data", device.id)
25
26 # Loop through the required_properties and balk if missing.
27 for required_property in self.required_properties:
28 if not getattr(device, required_property, None):
29 LOG.warn(
30 "%s: %s not set. Modeling aborted",
31 device.id,
32 required_property)
33
34 returnValue(None)
35
36 client= txcpz.Client(
37 device.zControlCenterHost,
38 device.zControlCenterPort,
39 device.zControlCenterUser,
40 device.zControlCenterPassword)
41
42 producer= modeling.DataMapProducer(client)
43
44 try:
45 results= yield producer.getmaps()
46 except Exception as e:
47 LOG.exception(
48 "%s %s ControlCenter error: %s",
49 device.id, self.name(), e)
50
51 returnValue(None)
52
53 returnValue(results)
54
55 def process(self, device, results, unused):
56 """Process results. Return iterable of datamaps or None."""
57 if results is None:
58 return None
59
60 LOG.info("%s: Processing data", device.id)
61 results= tuple(results)
62 return results
In the helper class, $ZPDIR/modeling we have (abbreviated to Pools). Notice in line 26, the set_parentPool attribute is processed by ZPL as a ManyToOne relationship between Pools and sub-Pools.
1 #------
2 # Zenpacks.zenoss.ControlCenter.modeling
3 # ControlCenter Modeling: Modeling code for ControlCenter.
4 #------
5 from twisted.internet.defer import inlineCallbacks, returnValue
6 from Products.DataCollector.plugins.DataMaps import RelationshipMap (continues on next page)
7.1. Welcome to ZenPack Library 217 ZenPackers Documentation
(continued from previous page)
7 from .util import get_pool_id, get_host_id, get_service_id, get_running_id
8
9 def map_pool(attributes):
10 """Return ObjectMap data given attributes.
11
12 Example attributes:
13
14 {
15 "Id": "Alternate",
16 "ParentId": "default",
17 "Priority": 0,
18 "CoreLimit": 1,
19 "MemoryLimit": 1,
20 }
21 """
22 return {
23 'id': get_pool_id(attributes['Id']),
24 'title': attributes['Id'],
25 'set_parentPool': get_pool_id(attributes['ParentId']),
26 'priority': attributes['Priority'],
27 'coreLimit': attributes['CoreLimit'],
28 'memoryLimit': attributes['MemoryLimit'],
29 }
30
31
32 class DataMapProducer(object):
33 """Produce the DataMap objects required to model """
34
35 def __init__(self, client):
36 self.client= client
37
38 @inlineCallbacks
39 def getmaps(self):
40 """Return a datamaps map. """
41 maps=[]
42
43 pools= yield self.client.pools()
44 pool_maps=[]
45 for pool in pools:
46 pool_map= map_pool(pool)
47 if pool_map:
48 pool_maps.append(pool_map)
49
50 maps.append(
51 RelationshipMap(
52 relname='pools',
53 modname='ZenPacks.zenoss.ControlCenter.Pool',
54 objmaps=pool_maps))
55
56 returnValue(maps)
ZPL Monitoring Templates
The datapoints for this model are essentially the dictionary keys of the JSON data sources. That means the datapoints must match the keys exactly.
218 Chapter 7. Topics for ZenpackLib ZenPackers Documentation
• Create a Template: the name must match the label in __init__.py: CP-Pool • Add a DataSource: The name is arbitrary • Add a DataPoint to that DataSource: The name must match an attribute (ZPL) • Some example points: – Priority – CoreLimit – MemoryLimit
ZPL Details Auto-Rendering
Thu May 29 16:01:30 CDT 2014
You can now use the same rendering in the details that are used elsewhere. In your __init__.py you set the renderer property in the class properties section:
1 classes={....
2
3 'Flavor':{
4 'base':'LogicalComponent',
5 'meta_type':'OpenStackFlavor',
6 'label':'Flavor',
7 'order':1,
8 'properties':{
9 'flavorId':{'grid_display': False}, # 1
10 'flavorRAM':{'type_':'int',
11 'renderer':'Zenoss.render.bytesString',
12 'label':'RAM'}, # bytes
13 'flavorDisk':{'type_':'int',
14 'renderer':'Zenoss.render.bytesString',
15 'label':'Disk'} # bytes
16 }
17 },
18 ... etc...
19 }
The ZPL will take care of setting this renderer wherever those variables are used. Ref: https://github.com/zenoss/ZenPacks.zenoss.OpenStack
Dynamic Classes
There are several classes that are created on the fly when ZPL is instantiated. This includes: • All the classes created from your YUML description • schema: Classes schema created from the YUML spec. You’ll see this in your class files outside of __init__.py
from. import schema
7.1. Welcome to ZenPack Library 219 ZenPackers Documentation
Impact in ZPL
Impact adapters are provided for in the ZPL. In order to get them to work you must provide the impacts and impacted_by attributes in the class specification in __init__.py. The values of these attributes can be one of the following: • A valid relationship name as defined in the YUML • A valid function that returns a list of component ID’s.
Creating a ZenPack (and git repo) using ZPL
In order to do this all from the commandline, you need the prerequisites: • You use github SSH authentication, and it authenticates without prompting • You use the hub tool https://github.com/github/hub We first start by creating the ZP data using ZPL’s tools: zenpacklib--create ZenPacks.zenoss.SomeUniqueZP cd ZenPacks.zenoss.SomeUniqueZP
Next we create the repo using hub: hub create zenoss/ZenPacks.zenoss.SomeUniqueZP
Now initialize the repo data and push up to origin: git init git commit-m"first commit" git remote add origin [email protected]:zenoss/ZenPacks.zenoss.SomeUniqueZP.git git push-u origin master
7.1.2 Relationship Management in the ZPL
Within the context of Zenoss, a relationship establishes a connection between Zope objects. A relationship MUST be bi-directional: a left side object and a right side object. However, if one side is containing, then the other side is contained. Currently ZenPacks supports three types of relationships: • ToMany • ToOne • ToManyCont Read zenosslabs section Background Information for an explanation of Relationship, and what is meant to be ‘con- taining’. Here is the webpage link: http://docs.zenosslabs.com/en/latest/zenpack_development/background.html# relationship-types The Relationship classes are defined in /opt/zenoss/Products/ZenRelations/RelSchema.py. A relationship is expressed via a yUML statement in ZenPack’s __init__.py. • For a left side to be ToManyCont, there must be ‘++’ on the left. • For a right side to be ToManyCont, there must be ‘++’ on the right.
220 Chapter 7. Topics for ZenpackLib ZenPackers Documentation
• For a left side to be ToMany, there must be ‘*’ on the right. • For a right side to be ToMany, there must be ‘*’ on the left. • If there is no ‘++’ on the left AND no ‘*’ on the right, the left is ToOne. • If there is no ‘++’ on the right AND no ‘*’ on the left, the right is ToOne. • You can specify a number for a fixed quantity for a non-containing relationship • You can also specify a numerical range (0..2) on ends of non-containing relationships.
Example 1:
[Image]1-.-*[Instance]
Here Image and Instance are class names. This yUML statement says an Image object can have many Instance objects; whereas an Instance object can only have one Image object. An Image object has a ToMany relationship w.r.t Instance objects; whereas an Instance object a ToOne relationship w.r.t. Image object. The relationship between Image and Instance is non-containing. ‘-‘ and/or ‘.’ separates left side from right side.
Example 2:
[Endpoint]++components-endpoint1[OpenstackComponent] • Endpoint and OpenstackComponent are again class names. components and endpoint here are relationship names. As pointed out in zenosslabs, relationships are themselves objects. • This yUML statement says Endpoint can contain multiple OpenstackComponent(s); whereas an Openstack- Component can only belong to one Endpoint. • Endpoint has a containing ToMany relationship w.r.t. OpenstackComponent; whereas OpenstackComponent has a contained ToOne relationship w.r.t. Endpoint. • The relationship between Endpoint and OpenstackComponent is containing. zendmd can be used to find the relationship object components:
>>> dev=find('stack') >>> dev.components
Example 3:
[Hypervisor]1-.-1[Host]
• Hypervisor and Host has a mutual ToOne non-containing relationship. • Hypervisor and Host are class names. • The relationship between Hypervisor and Host is non-containing. For those who went through the SNMP ZenPack Development guide, http://docs.zenosslabs.com/en/latest/zenpack_ development/index.html, just as a comparison, the relation expressed in NetBotzDevice.py:
7.1. Welcome to ZenPack Library 221 ZenPackers Documentation
_relations= Device._relations+( ('temperature_sensors', ToManyCont(ToOne, 'ZenPacks.training.NetBotz.TemperatureSensor', 'sensor_device', )), ) and in TemperatureSensor.py:
_relations= ManagedEntity._relations+( ('sensor_device', ToOne(ToManyCont, 'ZenPacks.training.NetBotz.NetBotzDevice', 'temperature_sensors', )), ) is equivalent to:
[NetBotzDevice]++temperature_sensors-.-sensor_device1[TemperatureSensor]
Example 4:
[EtherCard]0..1 switches-.-ecards 0..1 [Switch]
• The endpoint relationships are now named • Named relationships can have their properties changed (GUI, order, etc) • Each side can handle 0 or 1 connections
TODO: Examples of illegal yUML statements.
7.1.3 ZPL Errors
Certain errors are unique to ZPL.
Warning: Never Never Never use mutable values as default arguments! It causes a horrible side-effect of mutating subsequent call parameters! Use None instead. • => mostly this means default values of {} and [] types • String, booleans, int, float are all safe because they’re immutable • The reason this happens is that default argument values are evaluated only once, at function definition time, so a single object is shared across all calls to the function. In other words, once you mutate it in the function body, that mutated value will be used in all subsequent calls to that function. That’s probably not what you want. • In terms of ZPL, this means that you NEVER set a default value of [] or {} in the YAML. {} is normally not an option in the ZPL YAML spec, but still . . .
222 Chapter 7. Topics for ZenpackLib ZenPackers Documentation
• Best practice for this case (default empty list) you make the default value in the method signature None, then set the default value inside: def genericMethod(a, x= None) if x is None: x=[] ... do something with x...
Reference: https://twitter.com/raymondh/status/576047090905677824
ClassRelationshipSpec Error
If you get a ZPL schema error like this:
zenoss@zenoss> zenpack--list ERROR:zen.ZenMessaging:Error encountered while processing ZenPacks.zenoss.
˓→OpenStackInfrastructure Traceback (most recent call last): File"/opt/zenoss/Products/ZenMessaging/queuemessaging/schema.py", line 58, in _
˓→getZenPackSchemas pkg_path= zpkg.load().__path__[0] File"/opt/zenoss/lib/python/pkg_resources.py", line 1954, in load entry= __import__(self.module_name, globals(),globals(), ['__name__']) File"/zenpacks/ZenPacks.zenoss.OpenStackInfrastructure/ZenPacks/zenoss/
˓→OpenStackInfrastructure/__init__.py", line 624, in class_relationships= zenpacklib.relationships_from_yuml(RELATIONSHIPS_YUML), File"/zenpacks/ZenPacks.zenoss.OpenStackInfrastructure/ZenPacks/zenoss/
˓→OpenStackInfrastructure/zenpacklib.py", line 937, in __init__ if relationship.schema.remoteClass in self.imported_classes.keys(): AttributeError:'ClassRelationshipSpec' object has no attribute'schema' ERROR:zen.ZenossStartup:Error encountered while processing ZenPacks.zenoss.
˓→OpenStackInfrastructure Traceback (most recent call last): File"/opt/zenoss/Products/ZenossStartup/__init__.py", line 27, in pkg_path= zpkg.load().__path__[0] File"/opt/zenoss/lib/python/pkg_resources.py", line 1954, in load entry= __import__(self.module_name, globals(),globals(), ['__name__']) File"/zenpacks/ZenPacks.zenoss.OpenStackInfrastructure/ZenPacks/zenoss/
˓→OpenStackInfrastructure/__init__.py", line 21, in from. import zenpacklib ImportError: cannot import name zenpacklib ERROR:zen.ZenPackCmd:zenpack command failed Traceback ... etc...
This error may indicate you changed your internal class relationship (YUML or JSON) without first removing existing devices from your system. You therefore have an inconsistency between ZODB and your current schema.
One way to fix this is to revert the changes: • Revert your changes to the schema, • Restart your Z services (make sure this works) • Remove the device or zenpack • Make your changes again to your __init__.py • Install/re-install the zenpack
7.1. Welcome to ZenPack Library 223 ZenPackers Documentation
7.1.4 Converting from Old-Style ZenPackLib to the New-Style.
If you started out using the original ZPL, you have your YUML specified in your $ZP/__init__.py, and a dictionary style of class specification. The new style moves almost all of this to a YAML file called $ZP/zenpack.yaml. Only special overridden classes get left behind in $ZP/__init__.py. You might eventually want to convert to the new format. Fortunately there are tools and notes here to help you do so. • Assumptions: – Your zenpack is named ZenPacks.zenoss.XYZ – Your zenpack uses an older form of ZPL that has YUML class structure and properties specified inside of $ZP/__init__.py – You want to convert __init__.py’s YUML+dict to YAML format inside of $ZP/zenpack.yaml • Create zenpack.yaml (based on monitoring_templates.yaml.. or an existing ZP) – First create a dummy zenpack (or use the existing one), and get a new copy of zenpacklib.py installed. – Execute:
python zenpacklib py_to_yaml ZenPacks.zenoss.XYZ
– basically nest the existing content under a device class) • For Local Inherited Classes Only: Ensure: In zenpack.yaml: locally inherited $ZP/modules.classes in separate files must (for now) use the fully qualified path specification:
ZenPacks.zenoss.XYZ.module.class
The current py_to_yaml can interpret your local base class modules incorrectly as simply:
class
For example:
'Router':{ 'base': [SomeComponent], .... }
can get incorrectly translated into YAML as:
Router: base: [SomeComponent]
whereas you really need:
Router: base: [ZenPacks.zenoss.OpenStackInfrastructure.NeutronIntegrationComponent.
˓→SomeComponent]
• In $ZP/__init__.py: – Remove all unnecessary imports to local modules
224 Chapter 7. Topics for ZenpackLib ZenPackers Documentation
– Add line to init.py to make it load it. In place of:
RELATIONSHIPS_YUML= """ ... some YUML class spec stuff ... """
CFG= zenpacklib.ZenPackSpec( ... lots of old stuff...... lots more old stuff...... etc... class_relationships=zenpacklib.relationships_from_yuml(RELATIONSHIPS_
˓→YUML), ) CFG.create()
You will have something of this sort:
CFG= zenpacklib.load_yaml()
– restart zenoss,
Warning: Some of the next steps will eventually be automatic. You should be able to simply export your objects.xml after setting up your zenpack.yaml and the correct components will be included.
• Pruning Objects.xml You need to remove all the superfluous junk that now resides in objects.xml. This is because zenpack.yaml takes care of most of it. Follow these guidelines for pruning: – Navigate to Advanced -> ZenPacks -> YourZenPack – Go to the Zenpack’s “Provides” section to reduce objects.xml – Remove all device classes listed in zenpack.yaml:device_classes – Remove all the rrdTemplates items – Ensure: Leave all Event related items in your Provides section for now. – Hint: To get all contiguous Provides, click top item, then Shift-click bottom item. – Export zenpack to regenerate objects.xml, – Check that objects.xml is indeed smaller and has no rrdTemplate items – Finally: Re-Export the ZP if other changes were made.
• Save the new Objects.xml This only needs to be done if your development environment is not mounted/linked to your git repo. Otherwise skip this step. – Copy the new $ZP/objects/objects.xml from above to your ZP source tree
7.1. Welcome to ZenPack Library 225 ZenPackers Documentation
• Remove Old Files that Are No Longer Needed: – remove no-longer-needed monitoring_templates.yaml – remove load-templates files.
• You need to be careful that nothing breaks, but it should be really obvious breakage, – Try zendmd between each change, watching for errors – For example: all monitoring templates vanish or something. – It should not be subtle if it’s not working
• You may wish to manually prune out at least one monitoring template to convince yourself that ZPL is re- creating them at install time. • Though it would also be clear when you tried to export.. Either they’d get pruned out or they would not. - (if not, ZPL doesn’t think it’s managing the monitoring templates!) • Double Checking the Results – Remove the existing ZP completely – Restart all services – Install your ZP – Restart all services (again) – Install a device on your ZP class – Check that all is correct
References https://github.com/zenoss/ZenPacks.zenoss.OpenStackInfrastructure https://github.com/zenoss/ZenPacks.zenoss. OpenvSwitch https://github.com/zenoss/ZenPacks.zenoss.ControlCenter
7.1.5 Component-Only Modeling: Add to Existing Device
If your zenpacklib created zenpack does not need a device class and will just be adding components to an existing device, you will need to add a relationship between the base device class and your new component. In your zen- pack.yaml: classes:
ExchangeServer: base: [zenpacklib.Component] class_relationships: (continues on next page)
226 Chapter 7. Topics for ZenpackLib ZenPackers Documentation
(continued from previous page)
- Products.ZenModel.Device.Device1:MC ExchangeServer
... etc...
As always, be sure to either install or reinstall the zenpack and restart your services when making this change. If not, you’ll see this in the info zenhub log: If you see a warning in zenhub.log saying you have no relationship when applying the data map for your new compo- nent objects, you may just need to reinstall your zenpack. If you’ve done things correctly, you’ll see this in the zenhub log next time you model:
2015-08-31 11:28:06,967 INFO zen.zenpacklib: Adding ZenPacks.zenoss.Microsoft.Exchange relationships to existing devices
Now, in your modeler, you can set class level vars that apply to the current (the one that is being processed in the plugin) component: • relname • modname This applies to self.ObjectMap() only just as in http://zenpacklib.zenoss.com/en/latest/tutorial-snmp-device/ component-modeling.html. Alternatively, you can set relname and modname on a per-component basis using local ObjectMap() as in the follow- ing: class WinExchange(WinRMPlugin):
relname='exchangeServers' modname='ZenPacks.zenoss.Microsoft.Exchange.ExchangeServer'
@defer.inlineCallbacks def collect(self, device, log):
.... etc....
maps={}
device_om= ObjectMap() maps['device']= device_om
results= yield get_exchange_server
... etc...
exchange_om= ObjectMap() try: ... etc... exchange_om.title= exchange_om.id= self.prepId(results.stdout[0]) exchange_om.role= results.stdout[1] exchange_om.version= results.stdout[2] exchange_om.relname='exchangeServers' exchange_om.modname='ZenPacks.
˓→zenoss.Microsoft.Exchange.ExchangeServer' (continues on next page)
7.1. Welcome to ZenPack Library 227 ZenPackers Documentation
(continued from previous page) ... etc... except IndexError: log.info("Invalid data returned from Exchange Server: }")
maps['device']= device_om maps['exchange_server']= exchange_om
.... etc etc....
defer.returnValue(maps)
References: https://github.com/zenoss/ZenPacks.zenoss.Microsoft.Windows
228 Chapter 7. Topics for ZenpackLib CHAPTER 8
Topics for ControlCenter (ControlPlane)
Documentation for ZenPackLib
8.1 Welcome to ControlCenter!
Many of the documents for ControlPlane are in flux, so good luck if you find something useful that stays relevant for more than a few weeks.
8.1.1 General Concepts for CP
Links for Serviced
• https://github.com/zenoss/serviced/wiki • https://github.com/zenoss/serviced/wiki/Starting-CP-Beta
General Links for Zendev
• http://zenoss.github.io/zendev • http://zenoss.github.io/zendev/devimg.html
Development Workflow Cycle
Normally, once you deploy zendev, your workflow is very much the same as it always is: • Create a feature against develop • git flow feature publish (once only) • Fix Fix Fix, Commit, Push
229 ZenPackers Documentation
• Pull Requests etc. . . • Someone merges • All rejoice and continue! However, during release we have a special workflow in Zendev: • zendev restore europa-release • [ cd to right repo ] • CURRENTBRANCH=$(git rev-parse –abbrev-ref HEAD) • git flow feature start CC-1234 $CURRENTBRANCH • git flow feature publish (once only) • [. . . CODE FIX CODE FIX, COMMIT, push. . . ] • Someone merges • All rejoice and continue!
General ZenDev Tasks
ZOPE=$(serviced service list | grep Zope | awk '{print $2}')
# Open zendmd serviced service run -i $ZOPE zendmd
# Run zenup serviced service run $ZOPE zenup [args...]
# Install a zenpack in Zendev: Log into container, then do install. zendev devshell zenpack --link --install /mnt/src/zenpacks/ZenPacks.zenoss.ControlCenter
# Install a zenpack serviced service run $ZOPE zenpack --link --install ZenPacks.zenoss.ControlCenter
# Report daemon stats serviced service action $DAEMON stats
# Set daemon to debug serviced service action $DAEMON debug
# Attach serviced service attach $SERVICEID bash
# Shell serviced service shell -i $SERVICEID bash
Container Creation Specifics
# zendev devshell: defaults to a Zope imports zendev devshell
(continues on next page)
230 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
(continued from previous page) # Create a Zenhub imports environment this way zendev devshell zenhub
8.1.2 Data Storage and Retrieval: OpenTSDB and Friends
Data Storage is done through two primary services: • CentralQuery • OpenTSDB
Logical Data Flow for Europa GUI Process
When you view the GUI, the system processes in the general order.
[퐺푈퐼] ⇒ JS/Perf ⇓ Central Query ⇓ OpenTSDB:Reader ⇒ HBase
Logical Data Flow for Europa Modeler
This model is extremely simplified. Data flow for the Modeler looks similar to before.
[푍푒푛푃 푎푐푘] ⇒ Modeler ⇕ Central Query ⇕ ZenHub ⇔ ZODB
Logical Data Flow for Europa Collection
Data flow for Collection looks like this: [푍푒푛푃 푎푐푘] ⇒ Collectors:Various ⇓ CC- Central Query ⇓ MetricShipper ⇐= Redis ⇓ MetricConsumer ⇒ OpenTSDB:Writer ⇓ Hbase
8.1. Welcome to ControlCenter! 231 ZenPackers Documentation
8.2 Setup and Configuration
8.2.1 Startup: Serviced A-la-Carte
This describes how to run and update serviced in a non-development environment. Later we show how to update this environment in a similar way.
Requirements
• You must have setup zendev on an Ubuntu 14.04 system or better. • Make sure your zenoss user is in the docker group, otherwise you need sudo for all docker commands..
Note: See https://github.com/zenoss/serviced/wiki/Starting-CP-Beta
Host Prep
INTERFACE=eth0 IPADDR=$(ifconfig $INTERFACE | grep -o "inet addr:[\.0-9]*" | cut -f2 -d":") BOXNAME=$(uname -n) sudo su -c "echo $IPADDR $BOXNAME zenoss5x.$BOXNAME hbase.$BOXNAME >> /etc/hosts"
Docker Prep
Only do this once. Docker will store credentials in your account: docker login-u zenossinc+alphaeval-e"[email protected]"\ -p GETTHEMAGICKEYFROMTHESOURCELUKE https://quay.io/v1/
Set these local Variables as needed:
BUILD=521 IMAGE=resmgr IMAGE=core
You need to grab the magic key from your docker manager. Only do once per revision or as needed:
# Pull the images... (Make sure to tag the zenoss image). docker pull quay.io/zenossinc/daily-zenoss5-${IMAGE}:5.0.0_${BUILD} docker tag quay.io/zenossinc/daily-zenoss5-${IMAGE}:5.0.0_${BUILD} zenoss/zenoss5x docker pull quay.io/zenossinc/opentsdb:latest docker pull quay.io/zenossinc/hbase docker pull quay.io/zenossinc/isvcs:v9
Setup Zendev Services:
Execute the following:
232 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
zendev use europa cdz serviced
Starting Serviced
You can do it one of 2 ways, I prefer the first, which requires you install serviced.init in your ~/bin. It also logs to /tmp/serviced.log • serviced.init start • cdz serviced ; serviced -master -agent (Don’t use this please) Add a Host: export IP_ADDRESS=$(ifconfig eth0 | grep 'inet addr:'| cut -d: -f2 | awk '{ print $1}
˓→') serviced host add $IP_ADDRESS:4979 default
Compile Template: serviced template compile $(zendev root)/src/service/services/Zenoss.${IMAGE} > /tmp/
˓→Zenoss.xxx.tpl
( or pipe it into the following command )
Add the Template:
TEMPLATE_ID=$(serviced template add /tmp/Zenoss.xxx.tpl)
Note: Make sure you keep track of the TEMPLATE_ID number
Deploy the Template:
Method 1: CLI
Examples: serviced template deploy $TEMPLATE_ID default zebra serviced template deploy $TEMPLATE_ID default zenoss
Method 2: GUI
• Go to the UI at: https://you.own.ip/ and Log in as zenoss/zenoss. • Deploy template. • Done.
8.2. Setup and Configuration 233 ZenPackers Documentation
8.2.2 Cleaning out all Docker Images
• cdz build; ./services/repos/docker_mrclean.sh • Now go through manual install of all docker images • Manually rebuild serviced
8.2.3 Upgrading/Cleaning the Docker Folder /var/lib/docker
If you need to upgrade or purify Docker you can do: sudo stop docker umount $(grep 'aufs' /proc/mounts | awk '{print$2}' | sort -r) rm -rf /var/lib/docker # => [Upgrade Docker if needed] # => Re-install your images or rebuild zendev
8.2.4 Cleaning out Serviced Templates
For now these live in /tmp/serviced-root . Here are the steps: • stop serviced • rm -rf /tmp/serviced-root • Re-deploy your templates
8.2.5 Compiling Serviced in Go
Environmental Setting
These environmental settings are required: zendev use europa export GOPATH=$(zendev root)/src/golang export PATH=$(zendev root)/src/golang/bin:${PATH} export ZENHOME=$(zendev root)/zenhome
Now clear out old data: serviced.init stop sudo rm-rf/tmp/serviced-root czd serviced git pull make
Update Compatibility
Update Europa Build Environment (Before updating template )
Contains service templates. . . .
234 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
• cdz • cd build • git pull • Now rebuild the template and re-deploy
8.2.6 Updating Serviced in Go
Execute these commands: zendev use europa # I'd probably put these into .bashrc export IPADDR=$(ifconfig eth0 | grep 'inet addr:'| cut -d: -f2 | awk '{ print $1}') export GOPATH=$(zendev root)/src/golang export PATH=$(zendev root)/src/golang/bin:${PATH} export ZENHOME=$(zendev root)/zenhome
Clear out old Serviced data:
# stop serviced serviced.init stop sudo rm-rf/tmp/serviced-root
Clear out old Docker data (you may have to do this more than once):
$(zendev root)/build/services/repos/docker_mrclean.sh
Normal Update Serviced:
# Make sure docker is running cdz serviced git pull make
Enhanced Update Serviced:
# go into the europa environment cdz serviced git status # ( This next line may be required if you can't pull properly ) git checkout HEAD isvcs/resources/logstash/logstash.conf git pull make
Update Europa Build Environment (Before updating template )
First you need the OpenTSDB and Hbase images to start. Remember, you just blew those away: docker pull quay.io/zenossinc/opentsdb:v1 docker pull quay.io/zenossinc/hbase:v1
You must update Europa too:
8.2. Setup and Configuration 235 ZenPackers Documentation
cdz cd build git pull # Now rebuild/update the templates and re-deploy
Update Templates Method I (Preferred): Tag Docker image to match template
First you need the template for the generic service (Do this only once): serviced template compile $(zendev root)/build/services/Zenoss.${IMAGE} > /tmp/Zenoss.
˓→xxx.tpl serviced.init start TEMPLATE_ID=$(serviced template add /tmp/Zenoss.xxx.tpl) serviced host add $IPADDR:4979 default
This method pulls the docker image and tags it:
# Alternative to mapping the template: Tag the image: docker pull quay.io/zenossinc/zenoss-${IMAGE}-testing:5.0.0b1_${BUILD} docker tag quay.io/zenossinc/zenoss-${IMAGE}-testing:5.0.0b1_${BUILD} zenoss/zenoss5x
You don’t need to deploy the template since it already matches your docker image. If this is your first time to deploy though: serviced template deploy $TEMPLATE_ID default zenoss
Update Templates Method II (Hard Way): Map the template to match Docker
Set these local Variables as needed:
BUILD=521 IMAGE=resmgr IMAGE=core
Build the Template:
ZVER=daily-zenoss5-${IMAGE}:5.0.0_${BUILD} cdz serviced serviced template compile -map zenoss/zenoss5x,quay.io/zenossinc/$ZVER \ $(zendev root)/build/services/Zenoss.${IMAGE} > /tmp/x.tpl
# serviced template compile $(zendev root)/build/services/Zenoss.${IMAGE} > /tmp/x.tpl
( you probably need to start serviced now ) TEMPLATE_ID=$(serviced template add /tmp/x.tpl) serviced host add $IPADDR:4979 default
Deploy Templates: serviced template deploy $TEMPLATE_ID default zenoss
236 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
Updating Platform-Build with Json-only Changes
Surgically, you could remove the template from the UI, then compile/add template and start all services. You could also serviced service edit each of those services and restart those services after editting, which is probably easiest since there are only a few minor changes The incantation to compile/add: serviced template compile \ -map zenoss/zenoss5x,zendev/devimg $(zendev root)/build/services/Zenoss.core \ | serviced template add
8.2.7 CP Beta and Release Images
Installing Zenpacks in Beta Images
Typical commands in the Beta and Release containers are different from Zendev For example, there is no link-install in the following: serviced service run-i zope zenpack [args] or serviced service run-i zope zenpack install ZenPacks
For example: serviced service run-i zope zenpack install ZenPacks.zenoss.DB2-XYZ.egg
Note: Don’t install from /root where perms are not readable by non-root. The Zenpack egg must be in a readable folder like /tmp Also, note there are NO double-dashes on the zenpack options.
8.2.8 Managing Containers
This section covers various Serviced container topics.
How to Customize and Commit a Container
To customize a container and make those changes permanent:
[zenoss@cc]: serviced service shell-i-s mychange zope bash
[root@ZOPE]: su- zenoss [zenoss@ZOPE]: [make your change change changes in the shell] [zenoss@ZOPE]: exit [root@ZOPE]: exit
[zenoss@cc]: serviced snapshot commit mychange [zenoss@cc]: serviced service restart...
For example: To add PyYAML to the zope containers:
8.2. Setup and Configuration 237 ZenPackers Documentation
[zenoss@cc]: serviced service shell-i-s mychange zope bash
[root@ZOPE]: su- zenoss [zenoss@ZOPE]: easy_install pyyaml [zenoss@ZOPE]: zendmd
In [1]: import yaml =>( if no errors, its installed correctly)
[zenoss@ZOPE]: exit [root@ZOPE]: exit
[zenoss@cc]: serviced snapshot commit mychange [zenoss@cc]: serviced service restart...
To customize a remote container you need to provide the full path to the container instance: serviced service shell-i-s solutions_xxx \ golfballs/Zenoss.resmgr/Zenoss/Collection/localhost/GOLF1/zenpython \ bash
To reconnect to your container after you exit, you need to start it: docker start solutions_xxx docker attach solutions_xxx
Installing Linked Zenpack that needs Maven, Java, etc. . .
ZenPacks that need maven offer a slight problem, because you need to install Maven and Java (OpenJDK) to get them installed in link-mode. There is also a minor package dependency problem in zenoss-centos-deps. They can be installed in the following way in a container:
[zenoss@cc]:serviced service attach zenhub
[root@Zenhub]: yum remove zenoss-centos-deps [root@Zenhub]: yum install maven [root@Zenhub]: su- zenoss
[zenoss@Zenhub]: zenpack--link--install/z/ZenPacks.zenoss.XYZ ... exit out of the containers... [zenoss@cc]: serviced service restart zenoss.core
Installing Impact into RM with other Link-Mounted ZP’s
Overview: 1. Ensure you have the Zenoss Impact docker image installed (assume true) 2. Ensure ImpactServer and Impact zenpacks are in /z on host; (assume true) 3. Create container that has ImpactServer zenpack installed; commit container 4. Start ImpactServer 5. Create a container that has the Impact zenpack installed; commit container 6. Restart services
238 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
Start
• You should first ensure you have the following 2 zp’s in /z: – ZenPacks.zenoss.ImpactServer-5.x.y.z-py2.7.egg – ZenPacks.zenoss.Impact-5.x.y.z-py2.7.egg • Create container that has ImpactServer One issue here is that your /z folder may not be properly mounted. We work around this by manual mounting it in a container:
[you@host]: serviced service shell -i -s impacter --mount /z,/z zope bash [root@65b1f2340c77 /]# su - zenoss Last login: Thu Jul 7 18:27:21 UTC 2016 [zenoss@65b1f2340c77 ~]$ cd /z [zenoss@65b1f2340c77 /z]$ zenpack --install ZenPacks.zenoss.ImpactServer-5.x.y.z-
˓→py2.7.egg [zenoss@65b1f2340c77 /z]$ exit [root@65b1f2340c77 /]# exit [you@host]: serviced snapshot commit impacter
• Start ImpactServer: serviced service start impact • Create container that has Impact: (Similar as for ImpactServer); commit • Restart core zenoss services You should see Impact under Infrastructure and Impactstate under Events in UI
Installing ZenETL as a Linked ZP in a ZSD Installation
This requires some care because you have to ensure the zen*etl tools get installed into the Zope image. This normally does not happen because in a link install, the normal tools don’t get copied to /opt/zenoss/bin. • First compile the ZP with mvn support externally. Copy the ZP in place. • Edit GNUMakefile and comment out the mvn commands so the container doesn’t barf. • Execute:
serviced service shell-i-s mychange--mount/z,/z zope su- zenoss
[zenoss@612b45db88f8]: cd/z/ZenPacks.zenoss.ZenETL/ZenPacks/zenoss/ZenETL [zenoss@612b45db88f8]: cp daemons/\*.py/opt/zenoss/bin/ [zenoss@612b45db88f8]: cp zen\*etl.py/opt/zenoss/ [zenoss@612b45db88f8]: exit
• Grab the container number from the shell’s prompt, say: 612b45db88f8 • cleanup:
serviced service commit 612b45db88f8 docker rm mychange
8.2. Setup and Configuration 239 ZenPackers Documentation
8.2.9 Backups and Snapshots
Backups
Backups for ControlCenter are the most durable way to preserve your Z5 installation. It is larger and more time consuming than a Snapshot but it is more reliable in preserving all (Docker) images and configurations. Backups are done as follows: • First, decide what folder backups should be stored. Often: /opt/serviced/var/backups/ • Second, note that backups can be large. Make sure you have room. • To create a backup from the 5.X system host:
serviced backup/opt/serviced/var/backups/
• Once finished, its advisable to copy that file to another safe system:
scp/opt/serviced/var/backups/file.tgz me @somehost.com:/tmp/
• To restore:
serviced restore/path/to/file.tgz
SnapShots
Snapshots are normally temporary and have a limited lifetime in the system. • To perform a snapshot: serviced snapshot commit Zenoss.core
8.3 Zenpack Development
8.3.1 ZenPack Development with Zendev: Setup in ControlCenter
Welcome to the Control Center central for Zenpackers! The following sections are hoped to bring clarity and peace to you in your quest for ZenPack development. First let us establish the base directory for Zendev: export ZENDEV_ROOT=$(zendev root) $ZENDEV_ROOT/src/service/services
This folder is typically in $HOME/src/europa, but it could be elsewhere.
Setting up the Development Environment
Setup for ZenPack development in ControlCenter requires a fully functional ZenDev environment as specified in items 1-10, but stop before starting serviced on the last line of item 10: • https://github.com/zenoss/zendev/blob/zendev2/docs/installation.rst
240 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
Warning: Only Zendev2 is supported now. Everything here relates to Zendev2. When in doubt, refer to the Zendev2 documentation above.
Once you have zendev fully installed but before starting serviced, you need to do the following items: • cd into your zendev home folder’s Zenpack folder:
cd $ZENDEV_ROOT/src/zenpacks/
• Now git clone all the ZenPacks you need for your project • Edit the following file and add (or subtract) the Zenpack names from the ZENPACKS environment var ZEN- PACKS in $ZENDEV_ROOT/build/devimg/install_core.sh:
ZENPACKS="PythonCollector ControlCenter"
• Note: In order to have a valid Resmgr image, you’ll need a LOT more zenpacks built into your image. This means that the above line MUST look more like: ZENPACKS= “PythonCollector ControlCenter ZenJMX DynamicView AdvancedSearch EnterpriseCollector EnterpriseSkin DistributedCollector ImpactServer Impact “
• Now rebuild the devimg:
zendev build devimg
• [Optional but recommended]: Pull docker images to avoid timeouts at deployment : docker pull zenoss/serviced-isvcs:v16 docker pull zendev/devimg:latest docker pull zenoss/hbase:v2 docker pull zenoss/opentsdb:v3 • Now start serviced:
zendev serviced--reset--deploy
• Then start the application (yes, case-insensitive):
zendev serviced service start zenoss.core
• Watch the services with:
watch serviced service status
• Warning: Don’t use the GUI to start the application as that currently uses a lot of resources. This could eat up a lot of CPU and memory just to render a few graphs.
• Now Zenoss will run on the same IP but with a virtual name: ‘zenoss5x.*’: So if your host is xyz.zenoss.loc, your Zenoss will run on:
zenoss5x.xyz.zenoss.loc
You will need to either do one of two things to connect to Zenoss: 1. Add an entry into your /etc/hosts:
8.3. Zenpack Development 241 ZenPackers Documentation
192.168.1.45 xyz.zenoss.loc zenoss5x.xyz.zenoss.loc hbase.xyz.zenoss.loc
2. Add a CNAME entry in your DNS that points zenoss5x to xyz.zenoss.loc • You should be able to connect now to: https://zenoss5x.xyz.zenoss.loc If not, go back and check your networking setup. • Once you can connect, you must connect to the container for the daemon you are debugging. For example, to debug modeling you connect to the modeler container:
zendev attach zenmodeler -or- serviced service attach zenmodeler
Note: Since you can have multiple containers running a service you may want to reduce that to a single service. This is done in the ControlCenter GUI by changing the Instance value, saving, and restarting.
Warning: The normal user to attach to a container with is root! This will cause you many sleepless nights and untold problems because zenoss commands as root will change file ownership. Instead use the “zenoss” user. To make this easy, you can use this bash function described in section: Attaching to Containers
• References – http://zenoss.github.io/zendev/devimg.html – http://zenoss.github.io/zendev/installation.html#ubuntu
Updating Zendev, Bare Bones Style
Updating Zendev is getting simpler. Eventually there will be a single button to push. Until that time try these directions: • Stop docker:
sudo stop docker sudo umount $(grep 'aufs' /proc/mounts | awk '{print$2}' | sort -r) sudo rm -fr /var/lib/docker
• Log back in to host system:
sudo reboot (Host System) sudo start docker (if not started) zendev selfupdate; zendev sync
• Create devimg and pull in isvcs:
cdz serviced&& make clean&& make zendev build devimg--clean
• start serviced and pull other images:
zendev serviced-dx
To cut-n-paste:
242 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
sudo stop docker sudo umount $(grep 'aufs' /proc/mounts | awk '{print$2}' | sort -r) sudo rm -fr /var/lib/docker sudo reboot # Log in to host sudo start docker zendev selfupdate; zendev sync # Now time to build serviced and zendev cdz serviced && make clean && make zendev build devimg --clean zendev serviced -dx
Installing Zenpacks for Development
In development we usually need to install the zenpacks in link-mode. To do this note that zenpacks in your zendev: $ZENDEV_ROOT/src/zenpacks/* will be located in the container at /mnt/src/zenpacks/* . So here is the process: 1. Attach to the Zope Container. If you have more than one, use the UUID:
serviced service attach Zope
2. cd /mnt/src/zenpacks 3. Make sure your zenpack is present 4. Execute the zenpack command:
zenpack--link--install ZenPacks.zenoss.XYZ
Sometimes you have no choice but to install using Egg. In that case you must be in the host system (zendev or otherwise): serviced service run zope zenpack install ZenPacks.zenoss.OpenStack-XXX.egg
Serviced Essentials
Here are some Serviced topics are relevant.
Getting Listings
You’ll want to remove all non-ascii characters from a serviced command output. This is because serviced service list will output some non-ascii “tree” characters that can make the awk error prone. Do it like this: serviced service list| tr-cd' \11\12\40-\176'
Now use that output to capture any SERVICE_ID like this:
ID=$(serviced service list | grep zenmodeler | tr -cd '\11\12\40-\176' | awk '{print
˓→$2}')
8.3. Zenpack Development 243 ZenPackers Documentation
Attaching to Containers
Serviced has a utility to attach to containers. By default the user you attach with is root, which is BAD if you intend to issue zenoss commands. You can attach to a container as root by simply doing: serviced service attach where is one of the services (zendev, zeneventserver, Zope, etc..). But as mentioned above, doing anything that involves Zenoss will change the ownership of files in /opt/zenoss and potentially BREAK your install. Instead, place this bash function in your .bashrc: attach() { local target=$1 serviced service attach $target su - zenoss } then you can just do a: attach zenhub
You can also just do it manually: serviced service attach zenhub su- zenoss
Editing Serviced Service Definitions From CLI
If you are unwilling or unable to use the GUI to edit services, this will be an invaluable tool for 5X. The method is simple, find the ID, and use serviced to edit the serviced template. • Find the ID for a service. In our example Zope:
ZOPE_SERVICE_ID=$(serviced service list | grep Zope | awk '{print $2}')
• To edit the Zope service definition:
serviced service edit zope # or the old fashioned way: serviced service edit $ZOPE_SERVICE_ID
• Once you have finished editing the service you can verify it by either looking at the GUI or re-editing the GUI. • Restart the Service. There are two ways, the first way in the link serviced.init is preferred: – Using the script:
serviced.init restart
– Manually:
* Kill serviced manually * zendev serviced
244 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
Note: You must restart Zope to activate your changes.
Monitoring Logs in Zendev
Monitoring logs in Zendev is easier than one might think. That is because the entire Zenoss core folder is bind- mounted from the Zendev environment across ALL Zope containers. You don’t need to access ANY container to see them. The logs are located in: $ZENDEV_ROOT/zenhome/log/ . If Serviced and Zenoss are active you should see these files being updated often.
Testing Modelers, Collectors, and Services
In the 4.X world we usually turn off the services and run them manually. This still can work in 5.X. First you must stop the container that has the service you want to test, then you run it manually from another container like Zope. Here are the steps: • Identify the service you want to test, and grab the ID. We use zenmodeler for example: • Turn off the zenmodeler container in the GUI or manually:
[zenoss@mp6:~]: serviced service stop zenmodeler
• Attach to another service like Zope and run zenmodeler manually:
[zenoss@mp6:~]: zendev attach Zope Yo, you can probably just use serviced attach
[root@zope /]# zenmodeler run -d xyz.zenoss.loc -v10
2014-07-05 00:56:58 DEBUG zen.ZenModeler: Run in foreground, starting
˓→immediately. 2014-07-05 00:56:58 DEBUG zen.ZenModeler: Starting PBDaemon initialization ...etc...... etc...
• When you are finished with your debug session just exit the container and restart your zenmodeler service (if you want it to run):
(zenoss)[root@zope /]# exit [zenoss@mp6:~]: serviced service stop 24x2cfz4b16ww8gakhgcgnv87
Cross Mounted Directories!
Experimentation shows that there are several shared directories in the containers. Your core and zenpacks will be shared from your Zendev development directories. If you edit core code in one container it is changed in other containers that share this. This includes:
8.3. Zenpack Development 245 ZenPackers Documentation
Share Source Target Mount Point Mount Type $DEV:$ZENDEV_ROOT/src/core /mnt/src/core NFS (From Dev) $DEV:$ZENDEV_ROOT/zenhome /opt/zenoss NFS (From Dev) /mnt/src/core/Products /opt/zenoss/Products Local /opt/zenoss/otherwise /opt/zenoss/otherwise Local
Questions and Possible Answers
• I don’t have a DNS server. How do I deal with DNS issues? The easiest way is to set your host values in /etc/hosts and run dnsmasq. This will ensure all containers will see your hosts as though a real DNS was set up and running. If you also point your /etc/resolv.conf to an upstream DNS, you get the benefits of both hosts and DNS resolution.
Note: You can skip /etc/hosts modification. Assume, that hostname of your development server is fqdn.domain.tld, IP address is static value 192.168.0.1 and you want to resolve any subdomains like zenoss5.fqdn.domain.tld into development server’s IP address. So just add the following into /etc/ NetworkManager/dnsmasq.d/zenoss.conf and restart NetworkManager service: address=/fqdn.domain.tld/192.168.0.1
• What is the best way to debug the container processes? Candidates include: – dbgp: http://docs.activestate.com/komodo/4.4/debugpython.html – winpdb: http://winpdb.org/docs/embedded-debugging/ – pdb: put your pdb in the right place and run a service in the foreground You may need to try several different methods. • Can I Run Zenhub in the foreground? According to the experts, Maybe. In fact, you can run zenhub in the foreground using a different shell. However if you actually want other daemons to connect to your new zenhub, that won’t work because of TCP port mismatch. One solution is to attach to the Zenhub container, kill and start Zenhub in the foreground in one step:
zendev attach zenhub pid=$(ps ax | grep -E "[[:digit:]]{2} su - zenoss -c" | awk '{print $1;}') kill $pid; zenhub run -v10 --workers 0
Zenhub must be in full contact with all the other containers via TCP port connections. The fallback plan is to use a remote debugger like winpdb or dbgp. • How do you run Zope in the foreground? – Ensure that there is only one Zope instance running – serviced service attach an existing Zope container, – Kill zopectl in the background and immediately restart in the foreground similarly to Zenhub:
kill 30440 ; su- zenoss-c/opt/zenoss/bin/runzope
246 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
• You upgraded Go, but you can’t build anymore. You get errors like this:
../domain/metric.go:10: import $ZENDEV_ROOT/src/golang/pkg/linux_amd64/github.com/zenoss/glog.a: object is [linux amd64 go1.2.1 X:none] expected [linux amd64 go1.3 X:precisestack]
The problem is that you have older libraries from prior version of go. You need to clean out the older libraries and rebuild:
rm $GOPATH/pkg/* -Rf cdz serviced make clean make
• Your entire Zendev environment seems broken, and builds fail. What to do? You may have broken your zendev environment by upgrading or getting some environment vars wrong. Check those env vars and try this:
zendev restore develop
• Unit Tests:
zendev devshell run tests
8.3.2 Debugging Control Center: Kung-Pow Style
Debugging ControlCenter ain’t easy. Ask anyone. Instead of general theory, lets talk about examples.
Catching a PDB in Zope
First you need to have a single instance of Zope so that your PDB will guarantee to fire in the instance you monitor. You also need to run zope in the foreground and you need to know which one. • Attach to your 5x host • Edit the service definition for zope and set the InstanceLimits:Min parameter to 1:
[[email protected]]: serviced service edit zope (search for "Min", change that value from 2 to1; if already1, continue)
• In the 5x GUI, go into your Zope defintion, click “Edit Services”. Set Instances to 1. You should see the instance count become a singleton. • Now we attach to that unique instance of Zope:
[[email protected]]: serviced service attach zope [root@42b39f16c058 /]#: su - zenoss [zenoss@42b39f16c058 /]#:
• Now insert your pdb in the right place:
8.3. Zenpack Development 247 ZenPackers Documentation
[zenoss@42b39f16c058 /]#: ZP=$ZENHOME/ZenPacks/ZenPacks.zenoss.ZenJMX-3.11.0-py2.
˓→7.egg/ [zenoss@42b39f16c058 /]#: cd $ZP/ZenPacks/zenoss/ZenJMX/ [zenoss@42b39f16c058 /]#: vi __init__.py [zenoss@42b39f16c058 /]#: (... insert your pdb.set_trace() ... )
• Now restart zope in place. First find out the PID. It’s the command that looks like “su - zenoss -c /opt/zenoss/bin/runzope” You can use this sed expression to get it:
pid=$(ps aux | sed -n 's|^root *\(\w\+\)\? .*su - zenoss .*/runzope$|\1|p') echo $pid
• Restarting Zope will just revert your pdb. You need to kill that process and restart it just afterwards. Here is how you do it:
kill $pid && su - zenoss -c /opt/zenoss/bin/runzope
• Easy way to restart Zope in foreground with pkill:
pkill-f zope.conf ; zopectl fg
• Now you need to trigger your bug by using the GUI. Go to the GUI and do this now. • If you are lucky, your terminal will have caught your pdb at the correct place.
Running Various Daemons in Foreground
For each of these next commands, you must be attached to the and su’d into the zenoss account there like this: serviced service attach su- zenoss
• Zope:
pkill-f zope.conf ; zopectl fg # --or for debug mode-- pkill-f zope.conf ; zopectl-X debug-mode=on fg
• Zenhub:
pkill-f zenhub.conf ; zenhub run--workers=0-v10
• Zeneventd:
pkill-f zeneventd.conf ; zeneventd run--workers=0-v10
• Zenpython, Zenmodeler, Zencommand You would normally stop the container and run the daemon manually from a zope container.
8.3.3 Working with Templates
Templates form the core of the Serviced service definitions. They define nearly everything that goes into Zenoss. In order to use them you will have to start with some base set to make life easier. In zendev these templates correspond to entire folders of templates. They live in:
248 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
export ZENDEV_ROOT=$(zendev root) $ZENDEV_ROOT/src/service/services
You may see these folders which hold many other subfolders: drwxrwxr-x 19 zenoss zenoss 4096 Aug7 20:40 Zenoss.core drwxrwxr-x 17 zenoss zenoss 4096 Aug7 20:40 Zenoss.core drwxrwxr-x 21 zenoss zenoss 4096 Aug7 20:40 Zenoss.resmgr drwxrwxr-x 19 zenoss zenoss 4096 Aug7 20:40 Zenoss.resmgr.lite
Basic Commands
• List a Template:
serviced template list
• Compile a Template:
serviced template compile Zenoss.core serviced template compile Zenoss.core > /tmp/Zenoss.core.tpl
• Add a Template:
serviced template add /tmp/Zenoss.core.tpl
– However, it's best to grab the Template ID in this way:
TEMPLATE_ID=$(serviced template add /tmp/Zenoss.core.tpl)
• Deploy a Template: After you deploy a template, it becomes a real Application:
# ------# serviced template deploy TEMPLATE_ID POOLID DEPLOY_ID # ------serviced template deploy $TEMPLATE_ID default zenmaster
• Note: Note on Resmgr In order to compile and deploy Resmgr your Docker image must be created correctly. This means that all the required Zenpacks must be compiled into that Docker image. See the note in Setting up the Development Environment.
Compile the Generic Template
This method is most appropriate when you work with a pre-made image like Beta or a Release image. This is because it's a bit easier to tag an image with the same tag (as the template) than to change the numbers on the templates. Compile the template:
8.3. Zenpack Development 249 ZenPackers Documentation
serviced template compile $ZENDEV_ROOT/src/service/services/Zenoss.core > /tmp/
˓→Zenoss.core.tpl serviced.init start TEMPLATE_ID=$(serviced template add /tmp/Zenoss.xxx.tpl) serviced template deploy $TEMPLATE_ID default zenoss
Map the template to match your Docker Image
This may be more appropriate if you use Zendev, since the zendev images are already created with tags, and you don’t want to mess with those. Build the Template: cdz serviced serviced template compile -map zenoss/zenoss5x,zendev/devimg \ $ZENDEV_ROOT/src/service/services/Zenoss.core > /tmp/xxx.tpl TEMPLATE_ID=$(serviced template add /tmp/xxx.tpl)
Deploy Templates: serviced template deploy $TEMPLATE_ID default zen_monkey
Updating Templates with Json-only Changes
Surgically, you could remove the template from the UI, then compile/add template and start all services. You could also serviced service edit each of those services and restart those services after editting, which is probably easiest if there are only a few minor changes. The incantation to compile/add (with template mapping): serviced template compile -map zenoss/zenoss5x,zendev/devimg \ $ZENDEV_ROOT/src/service/services/Zenoss.core \ | serviced template add
Utility Scripts: But Wait Folks! That's not all!
Yes, that's right folks, we've worked hard to make life easier for you. How easy you may ask? So easy, you can do it with one hand tied behind your back and both eyes closed! Here is a bash function that will fix up your template and insert it all in one command: liten_up_dude() { IMAGE=core
# Compile the Template and *MAP* it to the right zendev image: serviced template compile -map zenoss/zenoss5x,zendev/devimg \ $ZENDEV_ROOT/src/service/services/Zenoss.${IMAGE} > \ /tmp/Zenoss.xxx.tpl
# Add the Template to serviced definitions TEMPLATE_ID=$(serviced template add /tmp/Zenoss.xxx.tpl)
# Deploy the template (continues on next page)
250 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
(continued from previous page) # serviced template deploy TEMPLATE_ID POOL_ID DEPLOYMENT_ID # ------serviced template deploy $TEMPLATE_ID default zenmaster
# Get rid of the old Zenoss.core application CORE_ID=$(serviced service list | grep -E 'Zenoss.core\s' \ | tr -cd '\11\12\40-\176' | awk '{print $2}')
serviced service remove $CORE_ID unset CORE_ID
# Now you should use the GUI to start the Zenoss.core application # Warning! Untested: You can also add that to this function if you like:: # LITE_ID=$(serviced service list | grep -E 'Zenoss.core' \ # | tr -cd '\11\12\40-\176' | awk '{print $2}') # serviced service start $LITE_ID
}
Warning: Make sure you don't start or use the standard Zenoss.core application before starting the Zenoss.core.lite application. Experiments have shown that there are some docker image mismatches that happen as a result of starting Zenoss.core, stopping it, and starting Zenoss.core.lite.
So here is the workflow scenario for this tool: • zendev build devimg • zendev serviced -dx • liten_up_dude • Go into GUI, select Zenoss.core, Start it
Modifying Service Templates in Europa
Modifying templates may be required to add functionality to your zenpacks or to core. Often, the containers have a very restricted access in order for good security and simplicity.
Modifying the Default Templates
If the service you are modifying is in the default template, you will have to ensure that ALL the template definitions have that change, otherwise someone will deploy a service that is missing a requirement. Make sure that the template is modified in each folder. The current Zendev has these template folders:
8.3. Zenpack Development 251 ZenPackers Documentation
Template Location
Zenoss.core $ZENDEV_ROOT/src/service/services/Zenoss.core/ Zenoss.core.full $ZENDEV_ROOT/src/service/services/Zenoss.core.full/ Zenoss.resmgr $ZENDEV_ROOT/src/service/services/Zenoss.resmgr/ Zenoss.resmgr.lite $ZENDEV_ROOT/src/service/services/Zenoss.resmgr.lite/ Zenoss.ucspm $ZENDEV_ROOT/src/service/services/Zenoss.ucspm/ Zenoss.ucspm.lite $ZENDEV_ROOT/src/service/services/Zenoss.ucspm.lite/
Each of these will be modified by adding the following to the Endpoint list:
{ "Name":"rabbitmq", "Application":"rabbitmq", "PortNumber": 5672, "Protocol":"tcp", "Purpose":"import" }
Once the change is made you can compile any of these templates and deploy just as we have outline above.
Adding RabbitMQ Ports to the Zenpython (PythonCollector) Zenpack
If you are adding to a service that is bundled with a Zenpack, you must provide extra templating instructions to Zenoss so that any installation of that Zenpack will guarantee to have the required services you need. In our example, ZenPacks.zenoss.OpenStackInfrastructure requires that zenpython be able to poll the RabbitMQ container. This was not available when we started. We start by reviewing https://github.com/zenoss/ZenPacks.zenoss.ExampleService which outlines the way to add template support to a zenpack. We'll try to only outline the critical components that supplement that document. • First create folders in $ZP_DIR of ZenPacks.zenoss.PythonCollector:
su - zenoss cd ZenPacks.zenoss.PythonCollector/ZenPacks/zenoss/PythonCollector mkdir service_definition mkdir -p service_definition/-CONFIGS-/opt/zenoss/etc
• Copy the service configuration file into the right place:
cp /tmp/zenpython.conf service_definition/-CONFIGS-/opt/zenoss/etc/
• Now copy the template into place:
cp Zenoss.core/localhost/localhost/zenpython/service.json service_definition/
• You now must change a few key items in this template (be sure to add quotes): – servicePath: /hub/collector – serviceDefinition: (Encapsulate the entire contents of original services.json) – serviceDefinition (Make it somewhat different from standard) – ConfigFiles:
252 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
"/opt/zenoss/etc/zenpython.conf":{ "FileName":"/opt/zenoss/etc/zenpython.conf", "Owner":"zenoss:zenoss", "Permissions":"0664" }
– Endpoints (Add):
{ "Name":"rabbitmq", "Application":"rabbitmq", "PortNumber": 5672, "Protocol":"tcp", "Purpose":"import" }
– ImageID: (set to empty string to be overridden later) • Once all this is in place, you need to test it by removing and reinstalling the ZP. Then you can check the service definition in Zendev:
serviced service edit zenpython
If you see your changes, and a nice ImageID, then all is hopefully well. See the actual Zen- Packs.zenoss.PythonCollector repo for more detail.
8.4 Special Topics: ControlCenter and Others
Many of the documents for ControlPlane are in flux, so good luck if you find something useful that stays relevant for more than a few weeks.
8.4.1 Installing Impact in Zendev
Note: This document is outdated. See this link for something more modern: https://github.com/zenoss/impact-server/ wiki/Running-Impact-Server-in-development-mode-(5.x)
• Pull and tag latest impact image (Currently 121):
# Note: Need to use an user that has docker privileges export IMPACT_IMAGE_TAG=4.2.6.70.0_130 # Warning: this tag changes! docker pull zenoss/impact-unstable:$IMPACT_IMAGE_TAG docker tag zenoss/impact-unstable:$IMPACT_IMAGE_TAG zenoss/impact-unstable:latest
• Deploy zenoss.core or resmgr: # Use whatever workflow you wish to add and deploy core/resmgr service • Start services: It is best to start all services. If you prefer, this subset is the minimal set necessary for zenpack install:
for svc in mysql rabbitmq redis zencatalogservice zeneventserver ; \
do serviced service start $svc ; done
• Install Impact ZenPacks:
8.4. Special Topics: ControlCenter and Others 253 ZenPackers Documentation
Note: – You must install them in link mode – You should not use the devshell environment (it's broken): Instead use a zope container: zendev attach zope Optionally (from zendev): zendev attach zope su - zenoss -c "zenpack --list" – *CRITICAL*: Make sure that ImpactServer and Impact zenpacks are on the develop branch. Zendev may put them on master by default. – Enterprise zenpacks are in /mnt/src/enterprise-zenpacks/, aka: $EZ: export EZ=/mnt/src/enterprise_zenpacks – Normal zenpacks are in /mnt/src/zenpacks/, aka: $ZP export ZP=/mnt/src/zenpacks Then install now:
zenpack --link --install $ZP/ZenPacks.zenoss.ZenJMX
zenpack --link --install $EZ/ZenPacks.zenoss.DynamicView zenpack --link --install $EZ/ZenPacks.zenoss.AdvancedSearch zenpack --link --install $EZ/ZenPacks.zenoss.EnterpriseCollector zenpack --link --install $EZ/ZenPacks.zenoss.EnterpriseSkin zenpack --link --install $EZ/ZenPacks.zenoss.DistributedCollector zenpack --link --install $EZ/ZenPacks.zenoss.ImpactServer
• Before you install the Impact zenpack, you MUST have ImpactServer running:
serviced service start Impact
• Now you can finally install the Impact zenpack:
zenpack --link --install $EZ/ZenPacks.zenoss.Impact
• Optionally install any zenpacks you are testing under Impact • Restart zenoss: – serviced service stop zenoss.core – serviced service start zenoss.core
8.4.2 Docker Images
We use a lot of docker images in Europa.
Finding Docker Images
To find Images for Impact on docker: • https://registry.hub.docker.com/u/zenoss/impact-unstable/tags/manage/ • Use curl:
254 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
curl -s -u <user> https://registry.hub.docker.com/v1/repositories/zenoss/impact-unstable/tags \
 | python -m json.tool \
 | sed -n 's/.*"name": "\([^"]*\)"/\1/p' \
 | tail -1
Downgrading Docker in ControlCenter
If you find yourself needing to down grade docker: 1. Stop all services:
serviced service stop zenoss.core
2. Stop/Kill/Maim serviced:
service serviced stop (if running Release Images) killall serviced && killall -9 serviced (Otherwise)
3. Stop Docker:
service docker stop
4. Remove docker:
sudo apt-get purge lxc-docker\* sudo apt-get purge docker
5. Install the new (or older) docker:
sudo apt-get install lxc-docker-1.3.3
6. Restart Docker and Service:
service docker start service serviced start (If running Release Images) zendev serviced >& /tmp/serviced.log & (Otherwise)
7. Start your Zenoss services if required
8.4.3 Getting Data Out of OpenTSDB
• Attach to one of your OpenTSDB instances:
serviced service attach reader
• Identify one of your metrics:
/opt/opentsdb/build/tsdb uid --config /opt/zenoss/etc/opentsdb/opentsdb.conf grep metrics ''
or from API:
lynx 'http://mp7:4242/api/suggest?type=metrics&max=1000000'
8.4. Special Topics: ControlCenter and Others 255 ZenPackers Documentation
• Metrics will be of the form:
device_name/metric_name
ex:
solutions-xenserver/rrd_memoryFree
• Query one of your metrics:
/opt/opentsdb/build/tsdb query --config /opt/zenoss/etc/opentsdb/opentsdb.conf 2h-ago sum "solutions-xenserver/rrd_memoryFree"
8.4.4 Convenience Tools for ControlCenter
Mass Editing Serviced Templates in Place
If you had access to the source templates, you could just edit those in place or in some automated way. Often though, you find a stand-alone system without source templates and you may not have that choice. You can still use the EDITOR env variable to effect an automated edit using sed. Here is an example script:
#!/bin/bash # See: https://jira.zenoss.com/browse/CC-888 for automation # This one will set every template in $collectors to manually start collectors=" zenprocess zenmailtx zenstatus zenucsevents zenvsphere zenpython zenmodeler zenpop3 zentrap zenwebtx zencommand zensyslog zenjmx zenping zenmail zenperfsnmp zenpropertymonitor " cat << EOF > /tmp/rename #!/bin/bash sed -i -e 's/^ "Launch": "auto",/ "Launch": "manual",/' "\$1" EOF chmod 700 /tmp/rename for c in $collectors; do EDITOR=/tmp/rename serviced service edit $c done
256 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
8.4.5 Control Center service integration
Preface
Please note that this is not a step by step tutorial on how to create your Control Center(CC) service from scratch. This topic will fill the missing gaps in CC documentation and shed some light on important things such as graphs definition.
Image
Each custom service in CC should have its own container. In order to avoid problems, such as important env variables missing, you should use zenoss/centos-base as a base image in your Dockerfile. When you're done building your container you have to tag & push it into Docker Hub or any other Docker Registry; the Makefile should take care of that.
Service Definition fields
To instruct Serviced that your ZenPack has a custom service defined you should have service_definition folder with service.json file inside. Read more at => Defining services.
• ImageID
This field is important, here you specify repo/image:tag that you’ve built before, it will be used by Serviced to create a container for your service. There are couple of things to be aware of: – if you’re using your private Docker Hub repository, make sure you’re logged in with your hub account and then manually pull your image before ZenPack installation to avoid errors – beware that you’ll probably face ControlCenterError: registry image collision when updating your image at development stage and sadly there is no fix for this yet
• Command
This is what Serviced will treat as a service itself and run when you start your CC service. You can think of it as an ENTRYPOINT for your container. Good to know: – make sure you use absolute path for your bin/command as WORKDIR instruction from your Dockerfile will be overridden by Serviced
• Endpoints
If you ever need to make your service available to other services or vice versa - you do that in ‘Endpoints’ section. Beware:
8.4. Special Topics: ControlCenter and Others 257 ZenPackers Documentation
– if your application resides in collector level you must have a unique ‘Application’ field for each service copy.
"Endpoints":[ { "Name":"zendotnet", "Application":"{{(parent .).Name}}_zendotnet", "PortNumber": 5001, "Protocol":"tcp", "Purpose":"export" } ]
Metrics & Graphs
This is very important part of your service. Everyone wants to know some specific to service data and nobody wants to look through tons of JSON so you need graphs as well.
• Technical background
Control Center has it’s own instance of OTSDB, the graphs you see at CC UI are retrieved from that instance. Note that this is not the OTSDB under the Infrastructure part. The graphs that are defined in service.json address individual metrics stored in CC’s OTSDB, more specifically datapoints reference the metrics. To get your data into CC’s OTSDB you should send a POST request to CC’s metricconsumer whose URL is stored in CONTROLPLANE_CONSUMER_URL env variable.
• MetricConfigs
In here you will define your metric sources which you have to reference later on in GraphConfigs. Important things are the ID field and the Metrics list. Each metric in the Metrics list will have its own ID as well, and these must start with the ID of the MetricConfig it's sitting in, followed by a dot. Here's an example:
"MetricConfigs":[ { "ID":"dotnet", "Name":"dotnet internal metrics", "Description":"dotnet internal metrics", "Metrics":[ { "ID":"dotnet.total_requests", "Name":"Total Requests", "Description":"Number of total requests made by dotnet service" }, { "ID":"dotnet.succeeded_requests", "Name":"Succeeded Requests", "Description":"Number of succeeded requests by dotnet service" } (continues on next page)
258 Chapter 8. Topics for ControlCenter (ControlPlane) ZenPackers Documentation
(continued from previous page) ] } ]
• GraphConfigs
In simple terms - here you define what kind and how many graphs you want and map data from the metric source to them. Each GraphConfig has one very important field called datapoints; that's where you do the mapping. In the metricSource field you have to put the ID of the MetricConfig. Inner IDs that are sitting in the Metrics list go in the metric field. Make sure to always set the format of data explicitly to avoid unknown values on graphs. CC uses **Go fmt package** for formatting. Here's an example:
"GraphConfigs":[ { "id":"requestsCount", "name":"Requests Count", "footer": false, "returnset":"EXACT", "type":"line", "format":" %%d", "yAxisLabel":"requests", "description":"Number of requests", "range":{ "start":"1h-ago", "end":"0s-ago" }, "datapoints":[ { "aggregator":"avg", "fill": true, "id":"total_requests", "legend":"Total", "metric":"dotnet.total_requests", "metricSource":"dotnet", "name":"Total", "rate": false, "rateOptions":{ "counter": true, "counterMax": null, "resetThreshold": 1048576 }, "type":"area" }, { "aggregator":"avg", "fill": true, "id":"succeeded_requests", "legend":"Succeeded", (continues on next page)
8.4. Special Topics: ControlCenter and Others 259 ZenPackers Documentation
(continued from previous page) "metric":"dotnet.succeeded_requests", "metricSource":"dotnet", "name":"Succeeded", "rate": false, "rateOptions":{ "counter": true, "counterMax": null, "resetThreshold": 1048576 }, "type":"area" } ] } ]
• POST request
As was previously mentioned, for you to get the data into OTSDB you have to submit a POST request. Typically you should write a service that will POST your data periodically. Here’s an example of that requests’ body:
"metrics":[ { "metric":"dotnet.total_requests", "value":"10", "timestamp":"0000000000", "tags":{ "service":"dotnet" } }, { "metric":"dotnet.succeeded_requests", "value":"8", "timestamp":"0000000000", "tags":{ "service":"dotnet" } } ]
Afterword
An easy way to achieve success with this thing is to do it by analogy first. Great repos to take a look at would be: • zenoss-service • ZenPacks.zenoss.ExampleService • ZenPacks.zenoss.Microsoft.Core
260 Chapter 8. Topics for ControlCenter (ControlPlane) CHAPTER 9
Topics for OpenStack Group
Documentation for OpenStack Group of Zenpacks
9.1 Welcome to the OpenStack Group!
We have many ZenPacks that relate or integrate with OpenStack. Some of them are: • ZenPacks.zenoss.OpenStack • ZenPacks.zenoss.OpenStackInfrastructure • ZenPacks.zenoss.OpenvSwitch • ZenPacks.zenoss.Ceph • ZenPacks.zenoss.CiscoAPIC • ZenPacks.zenoss.NSX We also have projects that enable OpenStack: • ceilometer_zenoss
9.2 Setup and Configuration
9.2.1 Ceilometer Troubleshooting for OpenStack
Ceilometer heartbeats take the following path through the systems:
OpenStack-Ceilometer | V Zenoss5.zenpython-> Zenoss5.rabbitmq (exchange E1) (continues on next page)
261 ZenPackers Documentation
(continued from previous page) | V Zenoss5.rabbitmq (queue Q1) | V OSI.service.OpenStackService.OpenStackService.remote_expected_
˓→ceilometer_heartbeats | V Zenoss5.MetricConsumer
1. Make sure the time for both OpenStack hosts and Zenoss host is correct. A running ntpd helps. 2. Follow Link text , in the Ceilometer Enablement section. 3. Restart ceilometer services: • On a centos 7 OpenStack host:
$ ceil=$(systemctl -t service | grep ceil | awk '{print $1}' | awk -F. '
˓→{print $1}' ) $ echo $ceil $ for i in $ceil; do service $i restart; done
• Check ceilometer collector log on the OpenStack host: /var/log/ceilometer/collector.log - make sure ev- erything is OK. - You should see something like:
INFO ceilometer_zenoss.dispatcher.zenoss [-] Sending heartbeat to zenoss.
˓→openstack.heartbeat...ceilometer-
˓→collector
4. Make sure the rabbitmq port 5672 is open on both (?) openstack host and zenoss host:
$ telnet <host> 5672
5. ngrep is a useful tool for debugging. Download ngrep. Use it as follows:
$ export zenoss=<zenoss-host> $ export port=<rabbitmq-port> $ ngrep -d $device -l -W none -qi "heartbeat" "dst host $zenoss and dst port $port"
For example to find heartbeats:
export device=$(ip route get 8.8.8.8 | head -1 | cut -d' ' -f5) export zenoss="v8.veggie.com" export zenoss="10.11.12.123" ngrep -d $device -l -W none -qi "heartbeat" "dst host $zenoss and dst port 5672" ngrep -d $device -l -W none -qi "heartbeat"
To see events and not heartbeats:
ngrep -l -W none -qiv "heartbeat" "dst host $zenoss and dst port 5672"
262 Chapter 9. Topics for OpenStack Group CHAPTER 10
Pythonic Topics:
Some Useful Python Topics
10.1 Regular Expression
10.1.1 Breaking Regular Expressions into Readable Parts
You can write your regular expressions so that you can easily read them:

cs_RX = re.compile("^"                     # BOL
                   "(jdbc:db2://)?"        # cs_prefix (jdbc:db2://)
                   "([\w.-]+)\:(\d+)"      # hostname:port
                   "/([\w_\$\d]+)\:"       # /DB_name:
                   "user=([\w.-_@]+);"     # username=joe;
                   "password=([\w.-_@]+)"  # password=rabbits;
                   ";"                     # Final semicolon
                   "$"                     # EOL
                   )
10.1.2 Matching Regular Expressions by Key
To match a Regular Expression by named groups, you use the following syntax:
Regex pattern... (?P<groupname>pattern)
An example of this is: import re
CS_REGEX = re.compile( '^' # BOL (continues on next page)
263 ZenPackers Documentation
(continued from previous page) '(?P<username>\w+)' # Username '\/' # slash '(?P<password>\w+)' # Password '@' # at-symbol '(?P<host>[\w.-]+)' # Host ':' # colon '(?P<port>\d+)' # Port '[:/]' # colon or slash '(?P<instance>[\.\w_\$\d]+)' # Instance '$' # EOL )
Another example:

def get_engine(eng=None, extra=None):
    if eng is None:
        pattern = "(?P<year>[0-9]{4})(?P<month>[0-9]{2})(?P<day>[0-9]{2})_"
        pattern += "(?P<hour>[0-9]{2})(?P<minute>[0-9]{2})(?P<second>[0-9]{2})_"
        pattern += "(?P<name>[a-zA-Z_0-9.]{1,})[.](?P<ext>lock|partial|tmp)"
        if extra:
            pattern += extra
        eng = re.compile(pattern)
    return eng

def get_match(self, filepath):
    eng = get_engine()
    match = {}
    filename = os.path.basename(filepath)
    if eng.match(filename):
        data = [m.groupdict() for m in eng.finditer(filename)][0]
        for name, value in data.iteritems():
            try:
                match[name] = int(value)
            except:
                match[name] = value

    return match
Basically, it looks for a filepath with the following pattern:
<date>_<time>_<name>.<ext>
Examples might be: 20140122_103244_something.tmp, ‘data’ inside of ‘get_match’ holds the key:value dictionary
10.2 Understanding Twisted
• http://krondo.com/?page_id=1327
264 Chapter 10. Pythonic Topics: ZenPackers Documentation
10.3 Tab Completion in Python
Python supports tab-completion in several areas. This allows you to hit the tab key and get possible attributes and functions listed on the screen.
10.3.1 PDB Completion
Two simple lines in a ~/.pdbrc file are enough to give you tab completion in your pdb session: import rlcompleter pdb.Pdb.complete = rlcompleter.Completer(locals()).complete
10.3.2 Python Interpreter Completion
Add a ~/.pythonstartup.py file which contains:
# ------
# ~/.pythonstartup.py
# ------
try:
    import readline
    import rlcompleter
    import atexit
    import os
except ImportError:
    print "Python shell enhancement modules not available."
else:
    histfile = os.path.join(os.environ["HOME"], ".pythonhistory")
    import rlcompleter
    readline.parse_and_bind("tab: complete")
    if os.path.isfile(histfile):
        readline.read_history_file(histfile)
    atexit.register(readline.write_history_file, histfile)
    del os, histfile, readline, rlcompleter, atexit
    print "Python shell history and tab completion are enabled."
10.4 PDB Tricks
10.4.1 Running loops in a PDB session
• You could do this while in pdb to launch a temporary interactive Python session with all the local variables available:
(pdb) !import code; code.interact(local=vars()) >>> for k in ctxt: print k
10.3. Tab Completion in Python 265 ZenPackers Documentation
266 Chapter 10. Pythonic Topics: CHAPTER 11
Cisco General:
11.1 Cisco Background Information
11.1.1 CiscoStatus Threshold
The CiscoStatus threshold is a special threshold that uses preconfigured maps of numeric values returned by SNMP datasources to Zenoss event severities. The following OIDs and values are supported. CISCO-ENTITY-FRU-CONTROL-MIB::cefcModuleOperStatus 1. unknown - Critical 2. OK - Clear 3. disabled - Clear 4. OK (diag failed) - Warning 5. boot - Warning 6. self-test - Warning 7. failed - Critical 8. missing - Critical 9. mismatch w/parent - Critical 10. mismatch w/config - Critical 11. diag-failed - Critical 12. dormant - Critical 13. out of service (admin) - Info 14. out of service (environ) - Critical 15. powered down - Critical
267 ZenPackers Documentation
16. powered up - Critical 17. power denied - Critical 18. power cycle - Warning 19. OK (power warning) - Warning 20. OK (power critical) - Error 21. sync in progress - Clear 22. upgrading - Critical 23. OK (auth failed) - Critical ENTITY-STATE-MIB::entStateOperDisabled ENTITY-STATE-MIB::entStateOperEnabled ENTITY-STATE- MIB::entStateOper 1. unknown - Error 2. disabled - Clear 3. enabled - Clear 4. testing - Critical CISCO-ENTITY-FRU-CONTROL-MIB::cefcFRUPowerOperStatus 1. off (other) - Critical 2. on - Clear 3. off (admin) - Info 4. off (denied) - Critical 5. off (environmental) - Critical 6. off (temperature) - Critical 7. off (fan) - Critical 8. failed - Critical 9. on (fan failed) - Error 10. off (cooling) - Critical 11. off (connector rating) - Critical 12. on (no inline power) - Error CISCO-ENTITY-FRU-CONTROL-MIB::cefcModuleAdminStatus 1. enabled - Clear 2. disabled - Clear 3. reset - Info 4. out of service (admin) - Clear IF-MIB::ifOperStatus IPV6-MIB::ipv6IfOperStatus 1. up - Clear 2. down - Critical 3. testing - Critical
268 Chapter 11. Cisco General: ZenPackers Documentation
4. unknown - Critical 5. dormant - Critical 6. not present - Critical 7. lower layer down - Critical IF-MIB::ifAdminStatus IPV6-MIB::ipv6IfAdminStatus 1. up - Clear 2. down - Critical 3. testing - Critical CISCO-ENTITY-FRU-CONTROL-MIB::cefcFanTrayOperStatus 1. unknown - Error 2. up - Clear 3. down - Critical 4. warning - Warning MPLS-L3VPN-STD-MIB::mplsL3VpnVrfOperStatus 1. up - Clear 2. down - Critical CISCO-ENTITY-FRU-CONTROL-MIB::cefcPowerRedundancyOperMode 1. not supported - Clear 2. redundant - Clear 3. combined - Clear 4. non-redundant - Clear 5. output redundant - Clear 6. input redundant - Clear 7. single input redundant - Clear CISCO-ENTITY-FRU-CONTROL-MIB::cefcPowerNonRedundantReason 1. not applicable - Clear 2. unknown - Critical 3. single supply - Critical 4. mismatched supplies - Critical 5. supply error - Critical CISCO-ENTITY-SENSOR-MIB::entSensorStatus 1. OK - Clear 2. unavailable - Error 3. non-operational - Critical CISCO-VIRTUAL-NIC-MIB::cvnVethIfAdditionalState 1. none - Critical
11.1. Cisco Background Information 269 ZenPackers Documentation
2. participating - Clear 3. suspended - Clear 4. error-disabled - Critical 5. non-participating - Clear CISCO-VTP-MIB::vtpVlanState 1. operational - Clear 2. suspended - Critical 3. mtuTooBigForDevice - Critical 4. mtuTooBigForTrunk - Critical CISCO-VRF-MIB::cvVrfOperStatus 1. up - Clear 2. down - Critical CISCO-VSAN-MIB::vsanOperState 1. up - Clear 2. down - Critical
11.1.2 Cisco UCS Glossary
Your guide to Cisco UCS terminology. This document is an attempt to be a comprehensive reference to any terms and abbreviations that may come up when discussing Cisco UCS.
Pictures
A picture is worth 1000 words? In case you wanted to know what this Cisco UCS hardware actually looks like. All of these pictures have been taken from the UCS Manager GUI .
6248UP Fabric Interconnect
Front view. The highlighted left-most 32 ports (banks of 16) are built-in. The shaded right-most 16 ports would be added by a fabric expansion module. A production UCS domain has two of these for redundancy. Like a Microsoft Cluster server, there are IP addresses for each Fabric Interconnect (aka FI) and one for the current virtual master. We talk to the master. Fabric interconnects carry both LAN and SAN traffic. Any physical port on a FI can be defined by software as a LAN uplink (out of the UCS Domain to a corporate data network), a SAN uplink (out of the UCS domain to a storage device or network), an appliance port (to a NetApp FAS), a server port (which can connect either to a fabric extender, a chassis fabric extender, or an actual server), or unused. And port channels exist, of course.
270 Chapter 11. Cisco General: ZenPackers Documentation
6248UP Fabric Interconnect 16-Port Expansion Module
Front view. The highlighted right-most 16 ports are added to the fabric interconnect by this expansion module. Not managed separately from the Fabric Interconnect.
6248UP Fabric Interconnect Fan Module
Rear view. The highlighted section is one of two possible fabric interconnect fan modules.
6248UP Fabric Interconnect PSU
Rear view. The highlighted section is one of two possible fabric interconnect PSUs.
2232PP Fabric Extender
Front view. Left-most 32 ports are server ports to be connected to C-Series rack servers. Right-most 8 ports are fabric ports to be connected to fabric interconnects. Fabric Extenders can be used within a UCS Domain or can be connected to a Nexus 5000 switch. If you think of an old-style router with a bunch of blades that slot into a backplane to provide ports, then a Fabric extender is one of those blades, except that its backplane is some Ethernet cables and it can be some distance away. A Fabric Extender is managed from the Fabric Interconnect or Nexus 5000 it is plugged into - it’s not a standalone device. One key thing to know is that both SAN and LAN traffic can be carried on the same set of wires between the Fabric Extender and the Fabric Interconnnect.
11.1. Cisco Background Information 271 ZenPackers Documentation
2232PP Fabric Extender Fan Module
Rear view. Highlighted section is the single possible fabric extender fan module.
2232PP Fabric Extender PSU
Rear view. Highlighted section is the single possible fabric extender PSU.
C200 M2 Rack Server
Front view. 1RU rack server.
C210 M2 Rack Server
Front view. 2RU rack server.
5108 Chassis
Front view. Fully-populated with 4 half-width blades, 2 full-width blades and 4 PSUs.
272 Chapter 11. Cisco General: ZenPackers Documentation
5108 Chassis PSU
Front view. Highlighted section is one of four possible PSUs.
5108 Chassis Fan Module
Rear view. Highlighted section is one of 8 possible fan modules.
11.1. Cisco Background Information 273 ZenPackers Documentation
5108 Chassis Fabric Extender aka IO Module
Rear view. Highlighted section is one of two fabric extenders. Functionally, these are extremely similar to the 2232 Fabric Extender. But they’re a different shape and the ports to the servers are internal to the chassis. Which eliminates a whole bunch of Ethernet cables in the back of the rack, lowering cost, eliminating cabling mistakes, and improving airflow. One key thing to know is that both SAN and LAN traffic is carried on the same set of wires between the Fabric Extender and the Fabric Interconnnect.
274 Chapter 11. Cisco General: ZenPackers Documentation
B200 M1 Blade Server
Front view. Highlighted section is B200 M1 half-width blade server.
B200 M4 Blade Server
Schematic front view
1. Asset pull tag. Each server has a plastic tag that pulls out of the front panel. The tag contains the server serial number as well as the product ID (PID) and version ID (VID). The tag also allows you to add your own asset tracking label without interfering with the intended air flow. 2. Blade ejector handle 3. Ejector captive screw 4. Drive bay 1 5. Drive bay 2 6. Power button and LED 7. Network link status LED 8. Blade health LED
11.1. Cisco Background Information 275 ZenPackers Documentation
9. Console connector 10. Reset button access 11. Beaconing LED and button .
B250 M1 Blade Server
Front view. Highlighted section is B250 M1 full-width blade server.
HyperFlex HX220c M4 All Flash Node
Front and back views.
• Node size: 1 rack unit • Node capacity:
276 Chapter 11. Cisco General: ZenPackers Documentation
– 1 x 400 GB SAS SSD – 6 x 3.8 TB / 960 GB SSDs • CPU: 2 x Intel Xeon processor E5 2600 v3 or v4 CPUs • Memory and cache: – 128 GB to 1.5 TB of memory – 400 GB 2.5-Inch Enterprise Performance 12 Gbps SAS SSD • SSD – 6 x 960 GB 6 Gbps SATA SSD or – 6 x 3.8 TB 6 Gbps SATA SSD • Software – VMware 5.5 or 6.0 u1 – Cisco HyperFlex HX Data Platform Release 2.0.1 • Cluster – Minimum of 3 All Flash Nodes – Scale compute-only nodes with additional Cisco UCS B200, C220, and C240 server nodes • Management : Cisco UCS Manager and VMware vCenter plug-in
Background Information
Useful information to understanding Cisco UCS that doesn’t necessarily fit into a glossary.
Cisco Product Versioning
Cisco has a specific way of versioning their hardware product lines that can make it easy to understand a lot about the product just by knowing its name and version. For example, a product named “Cisco UCS 6248UP 48-Port Fabric Interconnect” is usually just referred to as a “6248” because in-the-know people know that.. • Models beginning with 6 are Nexus switches, which means they run the NX-OS operating system. • Models beginning with 61 or 62 (so far) are specialized Nexus switches that are always used as UCS fabric interconnects. • The third and fourth digits (48) commonly refer to the number of ports in a switch model. • The UP stands for “unified ports”. Meaning that the ports can carry LAN and SAN traffic. Given this, you could probably guess what a 6296 is. Typically Cisco uses the hundreds place in the model number to refer to the product line. This leads to people commonly referring to a product line instead of a specific product in conversation because architecturally the specific product doesn’t really matter. So if someone says, “It’s a 6200,” they mean that it’s a current generation fabric interconnect. More examples can be found in the hardware components section below.
11.1. Cisco Background Information 277 ZenPackers Documentation
UCS Manager XML API
This is the web-services API through which UCS Manager can be managed. It runs from the fabric interconnect. Cisco Documentation Link: http://www.cisco.com/en/US/docs/unified_computing/ucs/sw/api/pref.html It doesn’t matter that UCS also publishes data through syslog and SNMP, we only need XML for all of our needs. It’s actually a bit confusing if you try to process the other data sources.
B-Series vs. C-Series
There are two types of servers supported by UCS. B-Series and C-Series. In B-Series the B stands for blade. In C-Series the C stands for chassis. The fact is that within a single UCS domain you can have B- and C-Series servers intermixed, or exclusively one or the other. The C-Series rack-mount servers are just servers and also have the option of being deployed without a connection to UCS Manager . This makes them as difficult to manage as any other standalone server. Specifically it means they are not manageable through the UCS Manager XML API, and therefore are not supported by Zenoss without the UCS C Series ZenPack currently scheduled for 1H 2014. Standalone C-Series servers won’t be supported by UCS Explorer in 2014.
Architecture
Architectural concepts that are distinct from hardware and logical components.
Domain
A Cisco UCS domain refers to the entirety of hardware and software that is managed by a single UCS Manager soft- ware instance. It’s two (almost always) or one Fabric Interconnect, and some number of chassis with B-series servers and/or C-Series servers and a rack to put them in. While I suppose you could call a standalone Fabric Interconnect a UCS Domain that really wouldn’t be very interesting. For most customers, the maximum number of servers maintained in a domain is between 100 and 150. The maximum is currently 160 with 20 chassis and 8 blades each.
Multi-Tier Network Architecture
Most datacenter networks have a tiered architecture. The tiers are usually referred to as layers. How many layers varies, the following three layers are common. Wikipedia Link: http://en.wikipedia.org/wiki/Hierarchical_internetworking_model
Core
The core layer aggregates the distribution layer. There is typically only one core per datacenter. The core is normally expected to be fully redundant and have enough bandwidth to interconnect all distribution.
278 Chapter 11. Cisco General: ZenPackers Documentation
Distribution Layer
The distribution layer aggregates the access layer. There are typically multiple distribution points per datacenter that feed into a single core. The distribution layer is normally expected to be fully redundant and have enough bandwidth to provide transit between connected access switches and transit between the connected access switches and the core. For many customers, a UCS system is a distribution node.
Access
The access layer connects downstream directly to servers and provides transit to the distribution layer. Historically the access layer wasn’t redundant because that requires servers have multiple NICs connected to different switches and perform multipathing. In today’s network even the access layer is commonly redundant. In a UCS Domain, access is redundant. And software defined.
Software
Software that runs within or in association with Cisco UCS.
UCS Manager
UCS Manager is server software responsible for all management of a single domain. The server software can be run on a single fabric interconnect, but is typically run on a redundant pair of fabric interconnects. Zenoss considers each UCS Manager to be a device. All of its managed hardware and software components are components of that device. Abbreviations: UCSM, UCS-M Cisco Product Link: http://www.cisco.com/en/US/products/ps10281/
UCS Manager GUI
The UCS Manager GUI is a Java application that is often referred to as UCS Manager. This should be avoided as it creates ambiguity about the true meaning of UCS Manager. The UCS Manager GUI provides an easy to use administration interface by which UCS Manager can be configured. You can’t download or install the UCS Manager GUI independently. You must browse to the UCS Manager’s web interface and click to launch the Java application. This forces you to use the right UCS Manager GUI version for the UCS Manager.
UCS Central
Cisco UCS Central integrates with many UCS Manager instances to provide a single point of control for many UCS domains . UCS Central does not provide monitoring facility, instead focusing on administrative tasks. Abbreviations: UCS-C Cisco Product Link: http://www.cisco.com/en/US/products/ps12502/
11.1. Cisco Background Information 279 ZenPackers Documentation
UCS Director
Cisco UCS Director is a software suite previously known as Cloupia Unified Infrastructure Controller which was acquired by Cisco on December 10, 2012. Its purpose is primarily to act as an orchestration system for UCS, but it is not a required component of the system. It has limited monitoring capabilities but is primarily focused on provisioning orchestration. Cisco Product Link: http://www.cisco.com/en/US/products/ps13050/
Hardware Components
Hardware components as differentiated from logical or software components. Components that have physical weight and dimensions.
Fabric Interconnect
A fabric interconnect is a specialized Nexus 6100 Series or 6200 Series switch that acts both as the network distribution layer, and as the UCS Manager server. Abbreviations: FI
6200 Series
The 6200 series are the current generation UCS fabric interconnects. The product line includes the 6248UP and 6296UP which are 48 and 96 ports respectively. Cisco Product Link: http://www.cisco.com/en/US/products/ps11544/
6100 Series
The 6100 series were the first generation UCS fabric interconnects, and are now officially “end-of-sale.” The product line includes only the 6120XP and 6140XP which are 20 and 40 ports respectively. Cisco Product Link: http://www.cisco.com/en/US/products/ps10276/
Fabric Extender
There are two major types of fabric extenders to consider in UCS. The first are used to connect blade (B-Series) servers to fabric interconnects. These are often referred to as IO Modules, and they are installed as a pair in each chassis. The second are used to connect rack-mount servers to the fabric interconnects. These are rack-mount switches, and visually indistinguishable from any other rack-mount switch. Abbreviations: FEX
2200 Series
All current generation UCS fabric extenders are in the 2200 series. It can be hard to figure out from Cisco’s product literature which models are installed into a UCS chassis and which are rack-mount switches. The rack-mount switches are also referred to as top-of-rack (TOR) fabric extenders. In-Chassis: 2204XP, 2208XP. 4 and 8 10GB ports respectively.
280 Chapter 11. Cisco General: ZenPackers Documentation
Rack-Mount: 2232PP, 2248PQ, and many more. In-Chassis Product Link: http://www.cisco.com/en/US/prod/collateral/ps10265/ps10276/data_sheet_c78-675243.html Rack-Mount Product Link: http://www.cisco.com/en/US/products/ps10110/
2100 Series
These are all end-of-life. Cisco Product Link: http://www.cisco.com/en/US/products/ps10278/
Chassis
A server blade chassis. Not to be confused with a C-Series server. For the most part the chassis is a hunk of metal with slots for more useful things to plug into. There has only ever been one chassis model released by Cisco.
5100 Series
For now, the 5108 is Cisco’s only server chassis. It can be populated with up to 4 full-width, or 8 half-width server blades. It can contain up to 2 fabric extenders, up to 4 PSUs and up to 8 fans. Product Link: http://www.cisco.com/en/US/products/ps10279/
Servers
There are two types of servers supported by UCS. B-Series and C-Series. In B-Series the B stands for blade. In C-Series the C stands for chassis. The fact is that within a single UCS domain you can have B- and C-Series servers intermixed, or exclusively one or the other.
B-Series Blade Servers
The B-Series blades are designed only to be installed into a UCS Chassis . They come in half-width and full-width configurations. Some are optimized for CPU, some for storage. Product Link: http://www.cisco.com/en/US/products/ps10280/
C-Series Rack Servers
The C-Series rack-mount servers are just servers and also have the option of being deployed without a connection to UCS Manager . This makes them as difficult to manage as any other standalone server. Specifically it means they are not manageable through the UCS Manager XML API, and therefore are not supported by Zenoss. There is more variance with the configuration of rack servers than blade servers because they can potentially be larger. There are 1RU, 2RU and 4RU servers available. Product Link: http://www.cisco.com/en/US/products/ps10493/
11.1. Cisco Background Information 281 ZenPackers Documentation
Converged Network Adapter
The Cisco Unified Computing System supports converged network adapters (CNAs) from a broad set of technology leaders such as Emulex, Qlogic, Broadcom, Intel, and Cisco. CNAs obviate the need for multiple network interface cards (NICs) and host bus adapters (HBAs) by converging LAN and SAN traffic in a single interface. CNAs seem to be also interchangeably referred to as ‘host adapters’ or simply “adapters.” CNAs are available for B-Series blade servers and C-Series rack servers. Example models: VIC1280, M72KR-E, M71KR-E, M72KR-Q, M71KR-Q, 82598KR, etc. Abbreviations: CNA Product Link: http://www.cisco.com/en/US/products/ps10277/prod_models_comparison.html
Power Supply
There are many power supply units in a UCS system. Nearly all of them are FRUs . The following hardware components have PSUs. • Fabric Interconnect • Chassis • Fabric Extender (rack-mount only) • C-Series Rack Server Abbreviations: PSU
Fans
There are many fans in a UCS system. Nearly all of them are FRUs . The following hardware components have fans. • Fabric Interconnect • Chassis • Fabric Extender (rack-mount only) • C-Series Rack Server
Software Components
UCS Software components. These are mostly logical conceptualizations provided by the underlying hardware components.
Port Channel
A logical bundling of multiple physical network interfaces. Also commonly known as aggregate or aggregation inter- faces, or bundles.
282 Chapter 11. Cisco General: ZenPackers Documentation
Unified Port
A port that handles LAN and SAN traffic. All fabric interconnect and fabric extender ports meet this criteria.
VM-FEX
Short for “Virtual Machine Fabric Extender.” You’ll always hear this called VM-FEX because it’s just too long to say otherwise. These extend the physical switch port through a hypervisor to a specific virtual interface on a VM. This skips the overhead of the hypervisor’s software-based networking stack to provide excellent network performance to VMs. All current-generation CNAs provide this capability. Cisco Solution Link: http://www.cisco.com/en/US/netsol/ns1124/
Abbreviations and Other Terms
Space for other terms that don’t fit elsewhere. There are more in the document under their non-abbreviated concept.
CNA
Converged network adapter. A network adapter that provides LAN (NIC) and SAN (HBA) devices to the host operating system while being connected to the network by only Ethernet.
FEX
Fabric extender. A dumb switch that is connected via Ethernet to a parent switch primarily to add ports to it. If you’re familiar with a line card, a fabric extender is the same thing except that instead of being plugged into a switch-chassis’ backplane, it’s connected via Ethernet.
Fibre Channel
Fibre Channel is a high-speed network technology (commonly running at 2-, 4-, 8- and 16-gigabit per second rates) primarily used to connect computer data storage.
Fibre Channel over Ethernet (FCoE)
FCoE is the technology Cisco uses to transmit Fibre Channel traffic over the same wire as regular Ethernet traffic transmitted to the Fabric Interconnect.
FRU
Field replaceable unit. You can replace these items individually. Many times without powering off the associated equipment.
11.1. Cisco Background Information 283 ZenPackers Documentation
UCS
Unified Computing System.
UCSPE
UCS Platform Emulator. A VMware VM that emulates UCS Manager.
VM
Virtual machine.
284 Chapter 11. Cisco General: CHAPTER 12
Other Topics:
12.1 Ansible Rough Guide
12.1.1 Description
Ansible is a DevOps tool. Its actions are called Plays and groups of those actions are called Playbooks. Ansible is designed to be easy to install, implement and use.
12.1.2 Prerequisites
• Linux • Python 2.7 • Ansible
285 ZenPackers Documentation
12.1.3 Setup
Once you set up your server with the ansible software (it’s CLI driven), you simply make sure you can SSH into your target hosts. You can set up SSH passphrases if you like, but it works regardless. You can also run on non-standard (non-22) TCP port numbers.
12.1.4 Inventory
You define your managed hosts with Inventory Files.
# ------# file: production
[monitors] mp1.zenoss.loc mp2.zenoss.loc ansible_ssh_port=2201 ansible_ssh_host=90.14.17.12
[locals] localhost ansible_connection=local
12.1.5 Group Vars and Host Vars
Group and Host vars are simple YAML files. From your ansible root, they are located in:
$ANSIBLE/group_vars $ANSIBLE/host_vars
An example of group and host vars: [joe@zen:~/ansible]: cat group_vars/all
# ======Ansible Group Vars: ALL ======--- # Variables listed here are applicable to all host groups http_port: 80 https_port: 443 ntpserver: us.pool.ntp.org git_repo: http://github.com/xxx/mywebapp.git sudo_user: joe opt_bin:/opt/bin opt_log:/opt/log maven_url: http://www.eng.lsu.edu/3.0/binaries/apache-maven-3.0.5-bin.tar.gz and some host vars. . . . [joe@zen:~/ansible]: cat host_vars/mp1.zenoss.loc backup_tools: - backup_files - backup_home - disk_backup_dirvish - find_duplicates - report_df - reset_cronlog (continues on next page)
286 Chapter 12. Other Topics: ZenPackers Documentation
(continued from previous page) - restart_apcmon - sudo_dirvish - rsync-new dirvish_master: False hourly_jobs: - check_raid - check_space daily_jobs: []
12.1.6 Playbooks
Playbooks contain the commands that configure the targets. A playbook is YAML file that has some commands in it. It can be simply a bunch of commands in a single file or group of files. Each section is YAML and is indented by 2 at each level. A simple example is:
# A very simple example for a CentOS box --- - name: Ensure needed packages are the latest version yum: pkg={{item}} state=latest with_items: - lxc - make - icewm - openbox
12.1.7 Roles: Getting Organized
Roles allow you to organize your tasks. A Roles folder lives in the Ansible root and has a series of folders that correspond to task groups: ansible | |-- group_vars |\-- all |-- host_vars ||-- 192.168.12.7 |\-- mp2.zenoss.loc |-- laptops.yml |-- production |-- roles ||-- common ||-- network ||-- security |\-- zenoss \-- zenoss.yml
Inside of each role are the following directories which ansible will automatically search for needed files: [joe@zenpad:~/ansible]: tree -L 1 roles/security
12.1. Ansible Rough Guide 287 ZenPackers Documentation
ansible/roles/security/ |-- files |-- handlers |-- tasks |-- templates \-- vars
Here is the lowdown on what goes in these folders: • files: Just plain old files for copy • handlers: plays that get triggered by a notify event • tasks: the big enchilada play • templates: files that get templated • vars: any vars local to the role So full blown security folder looks like this: [joe@zenpad:~/ansible]: tree -L 5 roles/network roles/network/ |-- files |-- handlers ||-- main.yml |-- tasks ||-- centos.yml ||-- debian.yml ||-- main.yml ||-- ubuntu.yml |-- templates ||-- ifcfg-eth0 ||-- ifcfg-eth2 ||-- ifcfg-static ||-- network |\-- sysconfig |\-- network-scripts \- vars
That’s enough theory. Let’s do some demonstrations! Links: • Ansible Intro: http://docs.ansible.com/intro.html • Ansible Modules: http://www.ansibleworks.com/docs/modules.html • Best Practices: http://www.ansibleworks.com/docs/playbooks_best_practices.html • http://jpmens.net/2012/08/30/ansible-variables-variables-and-more-variables/ • Download the associated video ansible.avi
12.2 Git for Gits ;)
Git is a development version control tool.
288 Chapter 12. Other Topics: ZenPackers Documentation
12.2.1 References
• https://git-scm.com/book/en/v2 • https://danielkummer.github.io/git-flow-cheatsheet/
12.2.2 Git Access For Zenossians
If you yourself need a change in user permissions, or are asking on someone else’s behalf, here’s how we handle it going forward: 1. Send an e-mail to [email protected] with a summary of your request in the subject. Examples: (a) Need to grant pull and push access to joe_user to xyz_repo (b) Need to remove all access from former_employee (c) Need to enable write access for joe_user to all Zenoss repos 2. The email body needs to include the following: (a) The user’s full name (b) The user’s email address (c) The user’s github name (d) The user’s department or role (why are they being added to the org in the first place?) (e) The user’s Slack name (f) The user’s Docker Hub username, if available (g) Whether the user is an employee, or a contractor
12.2.3 Special note on the Owners group in Github
Owners have access to the billing information for the organization, and can create and delete teams. Owners can also delete any repo, which is one of the main risks we want to contain by better managing GitHub permissions.
12.2.4 Setup Your GitHub Credentials
Important: Make sure that one of the first things you do when you set up your development system is to set up your GitHub credentials and identifying username and email. This is to assist in tracking and documenting development history: git config --global user.name "Jane Smith" git config --global user.email "[email protected]"
12.2.5 Clone a Repo
To just pull (download) a repository from the web: • Find the repo online and get the clone string. • Copy that string.
12.2. Git for Gits ;) 289 ZenPackers Documentation
• pull it down with “git clone”
[bash]: git clone [email protected]:zenoss/ZenPackers.git
- or for https-
[bash]: git clone https://github.com/zenoss/ZenPackers.git
12.2.6 Recommended Commit Message Format (Important!)
We regard the git log as an important tool to track down and summarize problems and solutions in our code. Therefore it’s critical that the commit messages be as descriptive as possible. Remember that the summary line is the one that carries a lot of weight and is only 50 characters or less.
Note: Here are the rules for commit messages 1. First line is Capitalized description, under 50 chars, no period 2. Second line blank 3. Third line: Fixes ZPS-13123, ZPS-34345, and ZPS-99099 4. Fourth line blank 5. Next lines all less than 72 characters, elaborated description. It can be a bit more open.
Here is an ideal commit message:
Correct the instance-table relations in the modeler
Fixes ZPS-123455.
Here is a more detailed description of the fix (but only if needed). It's wrapped to 72 characters so that it doesn't get ugly.
If your commit is bigger and you need more bullets or description you can still do it. For example:
Correct the hypervisor-ip relations in the zenpack.yaml
Fixes ZPS-123455, ZPS-92548.
We had to fix the relationships so that the original model was not modified. We still limit lines to 72 characters to avoid ugliness. I also added another tire to the system so that it would roll better. These are other things I have fixed:
* Fixed model to keep old model intact * Added migration script to ensure model was correct at upgrade * Added a big fat tire for better rolling * Updated docs
Reasons for this Format
Why do we prefer this format? Partly because it allows a quick history view with a command like:
290 Chapter 12. Other Topics: ZenPackers Documentation
git log--decorate--graph--oneline--date-order
If the first line is just ‘Fixes ZPS-XYZ123’ you get things that are not helpful at all like:
|// * | c568cfe Merge pull request #93 from zenoss/feature/ZPS-22568 |\\ | * | a367040 (origin/feature/ZPS-22568) fix ZPS-22568 |// * | 160f150 Merge pull request #92 from zenoss/feature/ZPS-22599 |\\ | * | f2d37aa (origin/feature/ZPS-22599) fix ZPS-22599
. . . instead of something useful like this:
|/| * |8f38040 Merge pull request #106 from zenoss/feature/ZPS-22673 |\\ | * | 58f9857 (origin/feature/ZPS-22673) add default values to properties; fix ZPS- ˓→22673 |// * | f18a2ac (feature/ZPS-22598) Merge pull request #105 from zenoss/feature/ZPS- ˓→22639 |\\ | * | 01d4868 (origin/feature/ZPS-22639, feature/ZPS-22639) Ensure modeling on ˓→Centos5, Suse 12
. . . there are other reasons too. See references. References: • http://chris.beams.io/posts/git-commit/ • http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
12.2.7 Cool Ways to Show Logs and Diffs
Often you will need to see your logs and compare different versions: git log git log--oneline--graph--decorate--all git diff git diff fe492a1 #(between current and some other node) git diff be158f6 aee7163 #(between two nodes)
12.2.8 Typical Workflow Scenario
Now that you have a repo, go into the repo folder. • Add any files you want • Make any changes you want to files • Commit your changes • Push your changes To add your new files:
12.2. Git for Gits ;) 291 ZenPackers Documentation
[bash]: git add-a abc.py def.py [bash]: git add-A (Danger: adds and removes files from working tree)
To commit all changes execute:
[bash]: git commit-a
To finally push up your changes to your Repo hub (or github.com):
[bash]: git push
12.2.9 New Repo Workflow Scenario
• First go to github.com and create your account • Then create an empty repository in the GUI • Now on your workstation, pull down (clone) the empty repo:
[bash]: git clone https://github.com/zenoss/bogus.git [bash]: cd bogus/
• Now start writing your code, make files...... • Now add files to your repo and push:
[bash]: git add-A [bash]: git commit-a [bash]: git push [bash]: git status .. Already up-to-date..
12.2.10 Setting Repo Parameters
• Configure the Username and Email for the Repo:
[bash]: git config-- global user.name"Joe Frazer" [bash]: git config-- global user.email [email protected]
• Reset the Author for the Repo:
[bash]: git commit--amend--reset-author
12.2.11 Changing Branches
• Change branch from master to develop with checkout:
[bash]: git checkout develop [bash]: git status
292 Chapter 12. Other Topics: ZenPackers Documentation
12.2.12 Merging Branches
You like the work you’ve done in develop and think it should be merged into master. You can do this by using the merge option. • First change branches from develop to master:
[bash]: git checkout master
Now things are as before with master in its original state. • Now you want to merge from develop:
[bash]: git merge develop Updating 1530600..2873dc4 Fast-forward .gitignore|2+ Makefile| 11+- ....
• Now you must push these changes up to your Hub:
[bash]: git push Total0 (delta0), reused0 (delta0) To [email protected]:zenoss/ZenPackers.git 1530600..2873dc4 master-> master
12.2.13 Delete Unwanted Branches
If you want to eject unwanted branches from your repo, make sure to read the git-branch docs and the warnings about being fully merged (–delete option). To remove a local branch: git branch-D
To remove a remote branch: git push origin--delete
12.2.14 Synchronizing Local Branches and References: Pruning
Sometimes you’ll have a lot of old remote branch references that have been long deleted on the hub. You can synchro- nize them with fetch: git fetch-p
12.2.15 Revert a Branch to a Prior Commit
git revert will create a new commit that will undo what the prior commit(s) have done and put that into your history. It gives you a log of your undo.
12.2. Git for Gits ;) 293 ZenPackers Documentation
12.2.16 Resetting a Branch to a Prior Commit
• git checkout feature/area51 • Identify the number of your last “good” commit:
git log (grab the good commit number: e3f1e37)
• Reset your feature/area51 to that commit level:
git reset--hard e3f1e37
• Push it up to github:
git push--force origin feature/area51
• Test the diff between local and remote: Should show nothing:
git diff feature/area51..origin/feature/area51
12.2.17 Comparison of Git Branches
• Show only relevant commits between two git refs:
git log--no-merges master..develop
12.2.18 Avoiding Many Small Commits
You can make as many small changes as you like and still have a clean single commit by using git’s amend flag on your commit: git commit--amend (make your commit message) (write/quit)
Every time you make a new commit in this way, you get the benefit of small incremental changes and a clean commit log. If you have already made a mess of things you can try the next technique to Squash your commits.
12.2.19 Squashing Multiple Commits
This allows you to take a lot of many small commits (and their messages) and convert them to a single coherent commit. It keeps the history clean and clear. In order to do this safely, we recommend only doing this in a feature branch (based on develop) that is not being shared. • From your feature branch, do a rebase with the -i flag:
git rebase-i develop
• When it shows you the multiple commits, change command in commits after the first “pick” to “squash”. Thus something like this:
294 Chapter 12. Other Topics: ZenPackers Documentation
pick 01d1124 Adding license pick 6340aaa Moving license into its own file pick ebfd367 Jekyll has become self-aware. pick 30e0ccb Changed the tagline in the binary, too.
now becomes:
pick 01d1124 Adding license squash 6340aaa Moving license into its own file squash ebfd367 Jekyll has become self-aware. squash 30e0ccb Changed the tagline in the binary, too.
• Now you write that out and it will ask you to fix-up the commit logs. Do this by changing to a unified commit message:
# This is a combination of 4 commits. # The first commit's message is: Dr Jekyll's final revisions to persona.
Add that license thing Moving license into its own file Jekyll has become self-aware. Changed the tagline in the binary, too.
• Once you write that out, you need to push it up with force flag to rewrite history:
git push-f
• If you have already pushed it up prior to this, or even created a Pull, your upstream commits and pulls will get replaced with the unified commit.
12.3 Git and Code Reviews: Keeping History Clean
Keep the code history clean!
Note: Its a common practice to review the git log history by using git-log and git-blame. This history is important and vital to understanding the evolution of the code and how to effectively modify it without repeating historical mistakes.
It’s not as academic a point as it might seem. Almost every day we dig through code while troubleshooting, and one of the most useful things is finding the JIRA related to a line of code. We use git blame for this, and if the last time a line of interest was modified (or lines around it) wasn’t functional, we have to find the parent of that commit and re-run blame. We have to repeat this process for each non-functional change. It’s very annoying. It also has the effect of making a section of code appear to be modified by many authors over a long period of time, which might indicate a troublesome area of code, even though the code is exactly the same functionally as the first and only time it was really written. The following recommendations are made in order to maximize the clarity of the history and blame records:
12.3. Git and Code Reviews: Keeping History Clean 295 ZenPackers Documentation
12.3.1 For new code: Always Check for PEP Errors
Getting those correct initially avoids any cleanup or PEP annoyances later. You don’t want to fix formatting errors later because they muddy the log and blame history.
12.3.2 Reviewing Code: Don’t Make PEP Style Corrections
If style corrections are to be made they should be done during code development. Don’t make style/PEP fixes for defects.
12.3.3 Reviewing Code: Question Unrelated Cosmetic Changes
If you are reviewing code for a defect, and you see style changes that are unrelated to the bug or feature at hand, ask the author to revert those changes so that the git-blame log will not be altered.
12.3.4 Avoid Many Small Commits for Defects
This doesn’t mean that you should have super-huge 10000 line commits. It does mean that small commits that are related should be put into the same commit. See the sections above on Avoiding Many Small Commits and Squashing Multiple Commits.
12.4 Git-Flow
Git flow simplifies development flow cycle. See http://danielkummer.github.io/git-flow-cheatsheet/
12.4.1 Installing Git-Flow
Note: There are two versions of Git-Flow: NVIE and AVH (newer). Although both versions have the same basic com- mands, some of the commands in AHV are easier to use than older NVIE. The NVIE version has not been active since 2012. For this reason we recommend using the active AVH version.
Git flow installation is well documented and simple. Refer to the following links for installation instructions: • AVH: https://github.com/petervanderdoes/gitflow-avh • NVIE: https://github.com/nvie/gitflow (last commit Sept 2012) – It appears that no development will continue on this branch. – Discussion: https://github.com/nvie/gitflow/issues/285
296 Chapter 12. Other Topics: ZenPackers Documentation
12.4.2 Setup Git-Flow in the Existing Repo
First go into the repo base folder. Make sure you get a clean git status. Then you initialize the git repo for git flow as follows:
[bash]: git flow init
Which branch should be used for bringing forth production releases? - develop Branch name for production releases: [] master
Which branch should be used for integration of the "next release"? - develop Branch name for "next release" development: [develop]
How to name your supporting branch prefixes? Feature branches? [feature/] Release branches? [release/] Hotfix branches? [hotfix/] Support branches? [support/] Version tag prefix? [] Hooks and filters directory? [/data/zp/ZenPacks.zenoss.DB2/.git/hooks]
12.4.3 Create New Features and Work Flow
In features, you don’t want to use version numbers because it can cause chaos when multiple authors work on the same project. Instead give the version a name, and only after the resulting develop is reviewed do you give it a version. (Source Unknown: Rob B). To start a new feature:
[bash]: git flow feature start area51 [bash]: git flow feature publish - (This creates the feature branch on Github, and allows"push") [bash]: git status On branch feature/area51 nothing to commit (working directory clean)
.... do some work...... do some more work...... you are finished...... now commit....
[bash]: git commit -am "Comment: This fixes bug ZPS-3234823"
[bash]: git push (nothing happens)
. Counting objects: 4, done.
. Writing objects: 100% (4/4), 647 bytes | 0 bytes/s, done.
. Total 4 (delta 3), reused 0 (delta 0)
. To [email protected]:zenoss/DB2.git 6f1c83e..faf56f5 feature/area51 -> feature/area51
- (Later you ask for a Pull Request or continue modifications) - (Someone may merge your Pull req into develop) - (Now you are finished with this feature...) - (You can either delete this branch or git-flow finish it)
(continues on next page)
12.4. Git-Flow 297 ZenPackers Documentation
(continued from previous page) [bash]: git flow feature finish area51 [bash]: git status On branch develop nothing to commit (working directory clean)
Now you are back on develop. You still need to push your changes up:
[bash]: git push Total0 (delta0), reused0 (delta0) To [email protected]:zenoss/ZenPackers.git 1530600..2873dc4 develop-> develop
12.4.4 Push the Develop onto the old Feature that is Stale
Warning: This flow can be dangerous. Use with caution!
You have created a branch (forgotten) that has been left behind and wish to upgrade it with all the new changes that have been made with other feature enhancements. You don’t have anything to save in it. Use these commands (with caution) to merge develop back onto feature/forgotten:
[bash]: git checkout feature/forgotten
[bash]: git push . develop:feature/forgotten
[bash]: git checkout feature/forgotten
[bash]: git commit -a
[bash]: git push
12.4.5 Push a new Feature up to Origin for storage:
Sometimes you want a feature to be stored on your Hub. Git-Flow does not automatically push your features. You can push it up to the hub like this:
[bash]: git push -u origin feature/new
12.4.6 Heavy Feature Workflow IMPORTANT: PLEASE READ
During heavy workflows on a project, we expect multiple teams to concurrently work on multiple features that get merged into develop. This is common. This heavy workflow can be managed by the following workflow: • Create your feature • Work/commit/push feature branch • Another team works on feature/Atlantis (don’t be jealous!) • Another team merges feature/Atlantis into develop • You need to rebase those changes into feature/area51 as follows:
git checkout develop && git pull   # First update the local develop branch
git checkout feature/area51
git rebase develop
298 Chapter 12. Other Topics: ZenPackers Documentation
• If you have conflicts see merge_conflicts below, else continue • Now continue work on feature/area51 • Repeat the rebases as needed when team Atlantis updates develop • Once finished, create your final Pull Request • Once merged, delete the feature branch.
12.4.7 Rebasing a Feature on Develop
Warning: If your team is still working on a feature and you notice that develop has been updated, you should try to rebase those changes into your feature. This will avoid conflicts later when you merge back into develop.
You may get these messages
Note: Branches ‘develop’ and ‘origin/develop’ have diverged. Fatal: And branch ‘develop’ may be fast-forwarded.
Someone has added to develop during your work on feature/area51. This is common in a multi-user environment. You will have to merge the two together. To solve this, you need to: • Sync local develop with origin: checkout develop, pull from origin to develop:
git checkout develop && git pull origin
• Rebase your feature on develop. You may have conflicts here if you’re unlucky:
git checkout feature/area51; git rebase develop # Same as: git flow feature rebase area51
• Check that nothing is broken:
git status git push # Push feature/area51 up to origin
• If there are conflicts you have to fix here. See merge_conflicts
12.4.8 Merge Conflicts: Fixing a Rebase
If you do have conflicts with your merge you can take a simple approach to fixing them: • Rebase against develop:
[zenoss@austin]: git rebase develop First, rewinding head to replay your work on top of it... Applying: Make Tenant rels concrete Using index info to reconstruct a base tree... M ZenPacks/zenoss/DB2/__init__.py Falling back to patching base and 3-way merge... Auto-merging ZenPacks/zenoss/DB2/__init__.py Falling back to patching base and 3-way merge...
˓→ (continues on next page) ˓→ [588/1329]
12.4. Git-Flow 299 ZenPackers Documentation
(continued from previous page) Auto-merging ZenPacks/zenoss/DB2/__init__.py CONFLICT (content): Merge conflict in ZenPacks/zenoss/DB2/__init__.py Failed to merge in the changes. Patch failed at 0001 Make Tenant rels concrete The copy of the patch that failed is found in: /data/zp/ZenPacks.zenoss.DB2/.git/rebase-apply/patch
When you have resolved this problem, run"git rebase --continue". If you prefer to skip this patch, run"git rebase --skip" instead. To check out the original branch and stop rebasing, run"git rebase --abort".
• Edit the problem file and fix:
[zenoss@austin]: vi __init__.py ( fix fix fix )
[zenoss@austin]: git status
rebase in progress; onto 34ae002 You are currently rebasing branch'feature/ZPS-17143_installWarnings' on
˓→'34ae002'. (fix conflicts and then run"git rebase --continue") (use"git rebase --skip" to skip this patch) (use"git rebase --abort" to check out the original branch)
Unmerged paths: (use"git reset HEAD ..." to unstage) (use"git add ..." to mark resolution)
both modified: __init__.py
• Add this file back into to index:
[zenoss@austin]: git add __init__.py
• Continue:
[zenoss@austin]: git rebase --continue
Applying: Make Tenant rels concrete Applying: fix impact relations Using index info to reconstruct a base tree... M ZenPacks/zenoss/DB2/Tenant.py M ZenPacks/zenoss/DB2/__init__.py Falling back to patching base and 3-way merge... Auto-merging ZenPacks/zenoss/DB2/__init__.py CONFLICT (content): Merge conflict in ZenPacks/zenoss/DB2/__init__.py CONFLICT (modify/delete): ZenPacks/zenoss/DB2/Tenant.py deleted in fix impact
˓→relations and modified in HEAD. Version HEAD of ZenPacks/zenoss/DB2/Tenant. py left in tree. Failed to merge in the changes. Patch failed at 0002 fix impact relations The copy of the patch that failed is found in: /data/zp/ZenPacks.zenoss.DB2/.git/rebase-apply/patch
When you have resolved this problem, run"git rebase --continue". (continues on next page)
300 Chapter 12. Other Topics: ZenPackers Documentation
(continued from previous page) If you prefer to skip this patch, run"git rebase --skip" instead. To check out the original branch and stop rebasing, run"git rebase --abort"
• Repeat: You may have to edit/re-edit a file, re-add, and continue as before:
[zenoss@austin]: vi __init__.py
[zenoss@austin]: git add __init__.py
[zenoss@austin]: git rebase --continue
ZenPacks/zenoss/DB2/Tenant.py: needs merge
You must edit all merge conflicts and then mark them as resolved using git add
• Delete what is required. You deleted a file but it is confused by this:
[zenoss@austin]: git rm Tenant.py ZenPacks/zenoss/DB2/Tenant.py: needs merge rm'ZenPacks/zenoss/DB2/Tenant.py'
[zenoss@austin]: git rebase --continue
Applying: fix impact relations
• If at this point the merge is good, but it asks you to pull, don’t pull! You really want to push your changes:
[zenoss@austin]: git status On branch feature/ZPS-17143_installWarnings Your branch and 'origin/feature/ZPS-17143_installWarnings' have diverged, and have 14 and 2 different commits each, respectively. (use "git pull" to merge the remote branch into yours) nothing to commit, working directory clean
[zenoss@austin]: git push To [email protected]:zenoss/ZenPacks.zenoss.DB2.git ! [rejected] feature/ZPS-17143_installWarnings -> feature/ZPS-17143_installWarnings (non-fast-forward) error: failed to push some refs to '[email protected]:zenoss/ZenPacks.zenoss.DB2.git' hint: Updates were rejected because the tip of your current branch is behind hint: its remote counterpart. Integrate the remote changes (e.g. hint: 'git pull ...') before pushing again. hint: See the 'Note about fast-forwards' in 'git push --help' for details.
[zenoss@austin]: git push --force Counting objects: 12, done. Delta compression using up to 8 threads. Compressing objects: 100% (12/12), done. Writing objects: 100% (12/12), 1.22 KiB | 0 bytes/s, done. Total 12 (delta 6), reused 0 (delta 0) To [email protected]:zenoss/ZenPacks.zenoss.DB2.git + 2bfc0a6...f7ddee9 feature/ZPS-17143_installWarnings -> feature/ZPS-17143_installWarnings (forced update)
• If you see a clean status, it’s probably good. Make sure to test:
[zenoss@austin]: git status On branch feature/ZPS-17143_installWarnings (continues on next page)
12.4. Git-Flow 301 ZenPackers Documentation
(continued from previous page) Your branch is up-to-date with 'origin/feature/ZPS-17143_installWarnings'. nothing to commit, working directory clean
12.4.9 Git Stash: Stashing Modified Files
Git’s stash option allows you to put modified files into a temporary holding area. The usual scenario is to stash your mods away then pull from the origin, and then re-place your stash’ed files into the tree. Then you can push the results back up to origin. Here is a possible workflow:
.... you made changes to develop, but you'd rather it be in a feature....
[bash]: git stash > Saved working directory and index state WIP on develop: e38b798 post release: 1.0.1-> 1.0.2dev.....
[bash]: git flow feature start cleanup_on_aisle_7 > Switched to a new branch'feature/cleanup_on_aisle_7'
[bash]: git stash pop .... now you have your new mods overlaid...... make whatever other modifications...... now you can commit all your mods....
[bash]: git commit -a
[bash]: git flow feature finish cleanup_on_aisle_7
[bash]: git push
12.4.10 Pull Requests: The Easy Way
The easiest way we have to get your code reviewed and merged into a major branch is to use Git-Flow to create a feature, push that feature up to Github, and have someone review it. Here is the workflow in a nutshell: • Create your feature with git flow • Make your mods • Commit your mods • Push (or publish) your feature up to Gitflow • Go into the Github GUI, select your feature • Make your pull request • Ask for a review • That reviewer then merges your changes into develop • Finish your feature locally: – Using git push.default=simple: Everything on Github is cleaned for you. (See the Push Defaults section) – Otherwise: After finishing, remove the feature repo in Github
302 Chapter 12. Other Topics: ZenPackers Documentation
• Finally, from your local repo, do a “git pull” to sync up
12.4.11 Push Defaults
To set your push defaults you can edit your .gitconfig and put this option:
[user] name= Pat Mibak email= patmibak @zenoss.com [push] default= simple
• Note: See git-config man page: Search /push.default for more details
12.4.12 Git 1.X and 2.X Warnings and Errors
• You may you get this warning when trying to push a new branch to origin:
[bash]: git push fatal: The current branch develop has no upstream branch. To push the current branch and set the remote as upstream, use
git push--set-upstream origin develop
It’s usually safe to follow this suggestion.
12.5 Installing a VIM IDE setup with Syntastic
See http://sontek.net/blog/detail/turning-vim-into-a-modern-python-ide#file-browser as a base reference. We’ve changed up some of the packages that follow for a more modern environment. You want to do the following: 1. Copy your old .vimrc (and .vim if you have on) to a backup 2. Make some folders and init git:
[zenoss@~]: mkdir -p ~/.vim/{autoload,bundle}
[zenoss@~]: cd ~/.vim/
[zenoss@~/.vim]: git init
3. Install Pathogen: curl -Sso ~/.vim/autoload/pathogen.vim https://raw.github.com/tpope/vim-pathogen/master/autoload/ pathogen.vim 4. Paste the following into your .vimrc:: execute pathogen#infect() syntax on filetype plugin indent on 5. Now execute the following inside the .vim folder
12.5. Installing a VIM IDE setup with Synstastic 303 ZenPackers Documentation
git submodule add http://github.com/tpope/vim-fugitive.git bundle/fugitive
git submodule add https://github.com/msanders/snipmate.vim.git bundle/snipmate
git submodule add https://github.com/tpope/vim-surround.git bundle/surround
git submodule add https://github.com/tpope/vim-git.git bundle/git
git submodule add https://github.com/ervandew/supertab.git bundle/supertab
git submodule add https://github.com/sontek/minibufexpl.vim.git bundle/minibufexpl
git submodule add https://github.com/wincent/Command-T.git bundle/command-t
git submodule add https://github.com/mitechie/pyflakes-pathogen.git
git submodule add https://github.com/mileszs/ack.vim.git bundle/ack
git submodule add https://github.com/sjl/gundo.vim.git bundle/gundo
git submodule add https://github.com/fs111/pydoc.vim.git bundle/pydoc
git submodule add https://github.com/scrooloose/syntastic.git bundle/syntastic
git submodule add https://github.com/alfredodeza/pytest.vim.git bundle/py.test
git submodule add https://github.com/reinh/vim-makegreen bundle/makegreen
git submodule add https://github.com/vim-scripts/TaskList.vim.git bundle/tasklist
git submodule add https://github.com/vim-scripts/The-NERD-tree.git bundle/nerdtree
git submodule add https://github.com/sontek/rope-vim.git bundle/ropevim
git submodule init
git submodule update
git submodule foreach git submodule init
git submodule foreach git submodule update
12.6 DB2 Information for Scavengers
12.6.1 Some Base links:
• http://www.tldp.org/HOWTO/DB2-HOWTO/planning.html • http://www.centos.org/docs/2/rh-cm-en-1.0/s1-service-db2.html • http://www.dbforums.com/db2/1655091-db2-cpu-time-wait-time.html • http://stackoverflow.com/questions/15382561/adding-db2-jars-to-java-webapp-using-maven
12.6.2 Supported drivers for JDBC and SQLJ
The DB2 product includes support for two types of JDBC driver architecture. According to the JDBC specification, there are four types of JDBC driver architectures: • Type 1 (Wont work for Zenpack. Not supported in DB2) Drivers that implement the JDBC API as a mapping to another data access API, such as Open Database Connectivity (ODBC). Drivers of this type are generally dependent on a native library, which limits their portability. The DB2 database system does not provide a type 1 driver. • Type 2 (Might work, but not portable) Drivers that are written partly in the Java programming language and partly in native code. The drivers use a native client library specific to the data source to which they connect. Because of the native code, their portability is limited. • Type 3 (Standard Choice without the fancy Type 4 options. Use if possible) Drivers that use a pure Java client and communicate with a data server using a data-server- independent protocol. The data server then communicates the client’s requests to the data source. • Type 4 (Fancy Options: Not needed)
304 Chapter 12. Other Topics: ZenPackers Documentation
Drivers that are pure Java and implement the network protocol for a specific data source. The client connects directly to the data source.
12.6.3 DB2 vs Oracle
DB2-Oracle Terminology Mapping
Because Oracle applications can be enabled to work with DB2® data servers when the DB2 environment is set up appropriately, it is important to understand how certain Oracle concepts map to DB2 concepts.
Table 1 provides a concise summary of commonly used Oracle terms and their DB2 equivalents.
Table 1. Mapping of common Oracle concepts to DB2 concepts +------+ | Oracle concept | DB2 concept | Notes | |------+------+------| | active log | active log | This is the same concept. | |------+------+------| | actual parameter | argument | This is the same concept. | |------+------+------| | alert log | db2diag log files | The db2diag log files are | | | and | primarily intended for | | | administration | use by IBM Software | | | notification log | Support for | | | | troubleshooting purposes. | | | | The administration | | | | notification log is | | | | primarily intended for | | | | troubleshooting use by | | | | database and system | | | | administrators. | | | | Administration | | | | notification log messages | | | | are also logged to the | | | | db2diag log files using a | | | | standardized message | | | | format. | |------+------+------| | archive log | offline-archive | This is the same concept. | | | log | | |------+------+------| | archive log mode | log archiving | This is the same concept. | |------+------+------| | background_dump_dest | diagpath | This is the same concept. | |------+------+------| | created global | created global | This is the same concept. | | temporary table | temporary table | | |------+------+------| | cursor sharing | statement | This is the same concept. | | | concentrator | | |------+------+------| | data block | data page | This is the same concept. | |------+------+------| | data buffer cache | buffer pool | This is the same concept. | (continues on next page)
12.6. DB2 Information for Scavengers 305 ZenPackers Documentation
(continued from previous page) | | | However, in DB2 you can | | | | have as many buffer pools | | | | of any page size you | | | | like. | |------+------+------| | data dictionary | system catalog | The DB2 system catalog | | | | contains metadata in the | | | | form of tables and views. | | | | The database manager | | | | creates and maintains two | | | | sets of system catalog | | | | views that are defined on | | | | the base system catalog | | | | tables: | | | | | | | | * SYSCAT views, which | | | | are read-only views | | | | * SYSSTAT views, which | | | | are updatable views | | | | that contain | | | | statistical | | | | information that is | | | | used by the optimizer | |------+------+------| | data dictionary cache | catalog cache | This is the same concept. | |------+------+------| | data file | container | DB2 data is physically | | | | stored in containers, | | | | which contain objects. | |------+------+------| | database link | nickname | A nickname is an | | | | identifier that refers to | | | | an object at a remote | | | | data source (a federated | | | | database object). | |------+------+------| | dual table | dual table | This is the same concept. | |------+------+------| | dynamic performance | snapshot monitor | Snapshot monitor SQL | | views | SQL | administrative views, | | | administrative | which use schema | | | views | SYSIBMADM, return monitor | | | | data about a specific | | | | area of the database | | | | system. For example, the | | | | SYSIBMADM.SNAPBP SQL | | | | administrative view | | | | provides a snapshot of | | | | buffer pool information. | |------+------+------| | extent | extent | A DB2 extent is made up | | | | of a set of contiguous | | | | data pages. | |------+------+------| | formal parameter | parameter | This is the same concept. | |------+------+------| | global index | nonpartitioned | This is the same concept. | (continues on next page)
306 Chapter 12. Other Topics: ZenPackers Documentation
(continued from previous page) | | index | | |------+------+------| | inactive log | online-archive | This is the same concept. | | | log | | |------+------+------| | init.ora and Server | database manager | A DB2 instance can | | Parameter File | configuration | contain multiple | | (SPFILE) | file and database | databases. Therefore, | | | configuration | configuration parameters | | | file | and their values are | | | | stored at both the | | | | instance level, in the | | | | database manager | | | | configuration file, and | | | | at the database level, in | | | | the database | | | | configuration file. These | | | | files are managed through | | | | the GET or UPDATE DBM CFG | | | | command and the GET or | | | | UPDATE DB CFG command, | | | | respectively. | |------+------+------| | instance | instance or | An instance is a | | | database manager | combination of background | | | | processes and shared | | | | memory. A DB2 instance is | | | | also known as a database | | | | manager. Because a DB2 | | | | instance can contain | | | | multiple databases, there | | | | are DB2 configuration | | | | files at both the | | | | instance level (the | | | | database manager | | | | configuration file) and | | | | at the database level | | | | (the database | | | | configuration file). | |------+------+------| | large pool | utility heap | The utility heap is used | | | | by the backup, restore, | | | | and load utilities. | |------+------+------| | library cache | package cache | The package cache, which | | | | is allocated from | | | | database shared memory, | | | | is used to cache sections | | | | for static and dynamic | | | | SQL and XQuery statements | | | | on a database. | |------+------+------| | local index | partitioned index | This is the same concept. | |------+------+------| | materialized view | materialized | An MQT is a table whose | | | query table (MQT) | definition is based on | | | | the results of a query | (continues on next page)
12.6. DB2 Information for Scavengers 307 ZenPackers Documentation
(continued from previous page) | | | and is meant to be used | | | | to improve performance. | | | | The DB2 SQL compiler | | | | determines whether a | | | | query would run more | | | | efficiently against an | | | | MQT than it would against | | | | the base table on which | | | | the MQT is based. | |------+------+------| | noarchive log mode | circular logging | This is the same concept. | |------+------+------| | Oracle Call Interface | DB2CI Interface | DB2CI is a 'C' and 'C++' | | (OCI) | | application programming | | | | interface that uses | | | | function calls to connect | | | | to DB2 Version 9.7 | | | | databases, manage | | | | cursors, and perform SQL | | | | statements. See [4]IBM | | | | Data Server Driver for | | | | DB2CI for a list of OCI | | | | APIs supported by the | | | | DB2CI driver. | |------+------+------| | Oracle Call Interface | Call Level | CLI is a C and C++ | | (OCI) | Interface (CLI) | application programming | | | | interface that uses | | | | function calls to pass | | | | dynamic SQL statements as | | | | function arguments. In | | | | most cases, you can | | | | replace an OCI function | | | | with a CLI function and | | | | relevant changes to the | | | | supporting program code. | |------+------+------| | ORACLE_SID environment | DB2INSTANCE | This is the same concept. | | variable | environment | | | | variable | | |------+------+------| | partitioned tables | partitioned | This is the same concept. | | | tables | | |------+------+------| | Procedural | SQL Procedural | SQL PL is an extension of | | Language/Structured | Language (SQL PL) | SQL that consists of | | Query Language | | statements and language | | (PL/SQL) | | elements. SQL PL provides | | | | statements for declaring | | | | variables and condition | | | | handlers, assigning | | | | values to variables, and | | | | implementing procedural | | | | logic. 
SQL PL is a subset | | | | of the SQL Persistent | | | | Stored Modules (SQL/PSM) | | | | language standard. Oracle | (continues on next page)
308 Chapter 12. Other Topics: ZenPackers Documentation
(continued from previous page) | | | PL/SQL statements can be | | | | compiled and executed | | | | using DB2 interfaces. | |------+------+------| | program global area | application | Application shared memory | | (PGA) | shared memory and | stores information that | | | agent private | is shared between a | | | memory | database and a particular | | | | application: primarily, | | | | rows of data being passed | | | | to or from the database. | | | | Agent private memory | | | | stores information used | | | | to service a particular | | | | application, such as sort | | | | heaps, cursor | | | | information, and session | | | | contexts. | |------+------+------| | redo log | transaction log | The transaction log | | | | records database | | | | transactions and can be | | | | used for recovery. | |------+------+------| | role | role | This is the same concept. | |------+------+------| | segment | storage object | This is the same concept. | |------+------+------| | session | session; database | This is the same concept. | | | connection | | |------+------+------| | startup nomount | db2start | The command that starts | | | | the instance. | |------+------+------| | synonym | alias | An alias is an | | | | alternative name for a | | | | table, view, nickname, or | | | | another alias. The term | | | | "synonym" is tolerated | | | | and can be specified in | | | | place of "alias". Aliases | | | | are not used to control | | | | what version of a DB2 | | | | procedure or user-defined | | | | function is being used by | | | | an application; to do | | | | this, use the SET PATH | | | | statement to add the | | | | required schema to the | | | | value of the CURRENT PATH | | | | special register. 
| |------+------+------| | system global area | instance shared | The instance shared | | (SGA) | memory and | memory stores all of the | | | database shared | information for a | | | memory | particular instance, such | | | | as lists of all active | (continues on next page)
12.6. DB2 Information for Scavengers 309 ZenPackers Documentation
(continued from previous page) | | | connections and security | | | | information. The database | | | | shared memory stores | | | | information for a | | | | particular database, such | | | | as package caches, log | | | | buffers, and buffer | | | | pools. | |------+------+------| | SYSTEM table space | SYSCATSPACE table | The SYSCATSPACE table | | | space | space contains the system | | | | catalog. This table space | | | | is created by default | | | | when you create a | | | | database. | |------+------+------| | table space | table space | This is the same concept. | |------+------+------| | user global area (UGA) | application | Application global memory | | | global memory | comprises application | | | | shared memory and | | | | application-specific | | | | memory. | +------+
12.6.4 Installing DB2 Express on Linux
• First you must install Linux • Next install the prerequisites for DB2 • Make sure the client can do X11 Forwarding to your workstation • Download DB2 Express, extract it in /opt • SSH into your instance as root, X11 forwarding on • Run the installer at /opt/expc/db2setup • Select defaults and make sure to save set in the Response File • If you can’t type into Java, just cut-n-paste passwords • Make sure the installer finishes without error • Save the Response File • Next time use the Response File to install all • Once installed, issue “db2sampl” to create a sample db. - db2 connect to sample - db2 ‘select * from dept’ Links for Installation: • http://www.ibiblio.org/pub/linux/docs/HOWTO/DB2-HOWTO • http://www.tldp.org/HOWTO/DB2-HOWTO/prerequisites.html • http://www.sqlpanda.com/2013/08/install-db2-105-on-centos-64.html
310 Chapter 12. Other Topics: ZenPackers Documentation
12.6.5 Removing DB2 on Linux
• Remove the Database Administration Server:
sudo su - dasusr1
db2admin stop
/opt/ibm/db2/V10.5/instance/dasdrop dasusr1
• Remove the DB2 instance(s):
sudo su - db2inst1 -c db2stop
/opt/ibm/db2/V10.5/instance/db2ilist
/opt/ibm/db2/V10.5/instance/db2idrop db2inst1
/opt/ibm/db2/V10.5/instance/db2ilist
• Remove the software installation:
/opt/ibm/db2/V10.5/install/db2_deinstall -a
• Remove the user accounts too:
userdel -r db2inst1
userdel -r dasusr1
userdel -r db2fenc1
12.7 Convenience Tools
Tools can make work easier and more efficient.
12.7.1 Bash Navigation Tool
This tool allows you to quickly get to your ZP source tree. It has filename completion:

# Default ZenPack to jump to when "zp" is run with no argument.
export zpdefault=Ceph

# Locate the base directory that holds the ZenPack source trees.
# The first existing candidate wins; /tmp is the fallback.
if [ -z "$zpbase" ]; then
    if [ -d /z ]; then
        zpbase="/z"
    elif [ -d /mnt/src/zenpacks ]; then
        zpbase="/mnt/src/zenpacks"
    elif [ -d /home/zenoss/src/europa/src/zenpacks ]; then
        zpbase="/home/zenoss/src/europa/src/zenpacks"
    elif [ -d /zenpacks ]; then
        zpbase="/zenpacks"
    elif [ -d /data/zp ]; then
        zpbase="/data/zp"
    else
        zpbase="/tmp"
    fi
    export zpbase
fi

# zp [NAME|-]: cd into the ZenPack named NAME (short form, e.g. "zp Ceph"),
# or, with "-", toggle back to the previously visited ZenPack.
zp() {
    # ----------------------------------------
    # Set up variables..
    # ----------------------------------------
    if [[ -z "$zpdefault" ]]; then
        export zpdefault=PythonCollector
    fi

    if [[ -z "$zplast" && -z "$zpcurrent" ]]; then
        export zplast
        export zpcurrent
    fi

    # ----------------------------------------
    # Find and go to the right folders
    # ----------------------------------------
    if [ -n "$1" ]; then
        # Full repo folder name given (e.g. ZenPacks.zenoss.Ceph).
        if [ -d "$zpbase"/"$1" ]; then
            echo "zenpack exists"
            cd "$zpbase"/"$1"
            return
        fi
        # "zp -" swaps the current and prior ZenPack, like "cd -".
        if [[ "$1" == "-" ]]; then
            if [[ -n "$zplast" && -n "$zpcurrent" ]]; then
                echo "returning to prior zenpack"
                zptemp=$zplast
                zplast=$zpcurrent
                zpcurrent=$zptemp
                cd "$zpcurrent"
            fi
            return
        fi
        # Short name given: expand to the inner package directory,
        # e.g. Ceph -> ZenPacks.zenoss.Ceph/ZenPacks/zenoss/Ceph/.
        second="ZenPacks.zenoss.$1"
        third=${second//./\/}
        final="$zpbase"/"$second"/"$third"/
        if [ -d "$final" ]; then
            echo "zenpack exists"
            zplast="$PWD"
            zpcurrent="$final"
            cd "$final"
        else
            echo "no such Zenpack $final"
            return
        fi
    else
        echo "Using default zp"
        zp "$zpdefault"
    fi
}

# Bash programmable-completion helper for zp: completes the short ZenPack
# name from the directories found under $zpbase.
function _zp() {
    local word=${COMP_WORDS[COMP_CWORD]}  # the word currently being completed

    words=""
    case "$word" in
    *)
        for i in $zpbase/ZenPacks.zenoss.${word}*; do
            if [[ -d $i ]]; then
                # Keep only the short name (the text after the last dot),
                # followed by a space so compgen sees separate candidates.
                words+="${i##*.}"
                words+=" "
            fi
        done
    esac

    COMPREPLY=()  # array variable storing the possible completions
    # BUG FIX: the original passed the undefined variable $cur here; the
    # word being completed is held in $word (declared above), so an
    # undefined $cur made compgen match every candidate.
    COMPREPLY=( $( compgen -W "$words" -- "$word" ) )
}
complete -o filenames -F _zp zp
To use it, put the contents into a zp.sh file and source it from your .bashrc and then use it as follows: zp Py
It should complete the name to PythonCollector, assuming you have the PythonCollector ZenPack installed, and take you to the folder:
$zpbase/ZenPacks.zenoss.PythonCollector/ZenPacks/zenoss/PythonCollector/
Make sure your ZenPacks are stored in one of the following directories: • /z • /mnt/src/zenpacks • /home/zenoss/src/europa/src/zenpacks • /zenpacks • /data/zp • /tmp
12.8 Simulate devices
12.8.1 Simulate devices for NetAppMonitor ZenPack
In this example we will use data from zenmodeler’s debug log for 7-Mode or C-Mode devices to simulate the same device with all components in our test environment. To do this, we need to perform some additional steps: 1. Create a folder on Zenoss system and name it ‘data’:
/home/zenoss/data
2. Open debug log and copy a list with data which goes after “ results = ”:
NetApp.ZAPI.CMode.SystemNodes results=
12.8. Simulate devices 313 ZenPackers Documentation
3. Save the file with this list into the folder you have created earlier. The file should be named according to this template .dump:
NetApp.ZAPI.CMode.SystemNodes.dump
4. Optionally, copy existing PythonClient.py file, it is located in /opt/zenoss/Products/ DataCollector/
cp /opt/zenoss/Products/DataCollector/PythonClient.py /opt/zenoss/Products/DataCollector/original_PythonClient.py
5. Copy PythonClient.patch file into zenoss home directory:
/home/zenoss/
6. Apply patch to PythonClient.py, if you are in /home/zenoss folder you can perform:
patch /opt/zenoss/Products/DataCollector/PythonClient.py < PythonClient.patch
7. Create NetApp device through Zenoss UI with any IP Address but do not model it 8. Run zenmodeler for already existing NetApp device with appropriate device class through terminal:
zenmodeler run -d <device name> or zenmodeler run -d <device name> --collect=<plugin name>
9. In case you have done copy in step 4 you can move original file back:
cp /opt/zenoss/Products/DataCollector/original_PythonClient.py /opt/zenoss/Products/DataCollector/PythonClient.py
12.8.2 Patch for PythonClient.py
PythonClient.patch:
@@ -50,8 +50,21 @@
         """
         deferreds = []
         for plugin in self.plugins:
-            log.debug("Running collection for plugin %s", plugin.name())
-            r = plugin.collect(self.device, log)
+            import os
+            dump_fname = '/home/zenoss/data/%s.dump' % (plugin.name(),)
+            if os.path.exists(dump_fname):
+                log.info('Use %s dump file for %s plugin',
+                         dump_fname,
+                         plugin.name())
+                with open(dump_fname, 'r') as f:
+                    data = f.read()
+
+                results = eval(data)
+                r = Deferred()
+                r.callback(results)
+            else:
+                log.debug("Running collection for plugin %s", plugin.name())
+                r = plugin.collect(self.device, log)
             if isinstance(r, Deferred):
(continues on next page)
314 Chapter 12. Other Topics: ZenPackers Documentation
(continued from previous page) deferreds.append(r) r.addBoth(self.collectComplete, plugin)
12.8. Simulate devices 315